text
stringlengths 5
261k
| id
stringlengths 16
106
| metadata
dict | __index_level_0__
int64 0
266
|
---|---|---|---|
from keras.backend.common import backend_utils
from keras.backend.common.dtypes import result_type
from keras.backend.common.variables import AutocastScope
from keras.backend.common.variables import KerasVariable
from keras.backend.common.variables import get_autocast_scope
from keras.backend.common.variables import is_float_dtype
from keras.backend.common.variables import is_int_dtype
from keras.backend.common.variables import standardize_dtype
from keras.backend.common.variables import standardize_shape
from keras.random import random
| keras/keras/backend/common/__init__.py/0 | {
"file_path": "keras/keras/backend/common/__init__.py",
"repo_id": "keras",
"token_count": 163
} | 151 |
import json
import os
from keras.api_export import keras_export
# The type of float to use throughout a session.
_FLOATX = "float32"
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = "channels_last"
# Default backend: TensorFlow.
_BACKEND = "tensorflow"
@keras_export(["keras.config.floatx", "keras.backend.floatx"])
def floatx():
"""Return the default float type, as a string.
E.g. `'bfloat16'`, `'float16'`, `'float32'`, `'float64'`.
Returns:
String, the current default float type.
Example:
>>> keras.config.floatx()
'float32'
"""
return _FLOATX
@keras_export(["keras.config.set_floatx", "keras.backend.set_floatx"])
def set_floatx(value):
"""Set the default float dtype.
Note: It is not recommended to set this to `"float16"` for training,
as this will likely cause numeric stability issues.
Instead, mixed precision, which leverages
a mix of `float16` and `float32`. It can be configured by calling
`keras.mixed_precision.set_dtype_policy('mixed_float16')`.
Args:
value: String; `'bfloat16'`, `'float16'`, `'float32'`, or `'float64'`.
Examples:
>>> keras.config.floatx()
'float32'
>>> keras.config.set_floatx('float64')
>>> keras.config.floatx()
'float64'
>>> # Set it back to float32
>>> keras.config.set_floatx('float32')
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
accepted_dtypes = {"bfloat16", "float16", "float32", "float64"}
if value not in accepted_dtypes:
raise ValueError(
f"Unknown `floatx` value: {value}. "
f"Expected one of {accepted_dtypes}"
)
_FLOATX = str(value)
@keras_export(["keras.config.epsilon", "keras.backend.epsilon"])
def epsilon():
"""Return the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
>>> keras.config.epsilon()
1e-07
"""
return _EPSILON
@keras_export(["keras.config.set_epsilon", "keras.backend.set_epsilon"])
def set_epsilon(value):
"""Set the value of the fuzz factor used in numeric expressions.
Args:
value: float. New value of epsilon.
Examples:
>>> keras.config.epsilon()
1e-07
>>> keras.config.set_epsilon(1e-5)
>>> keras.config.epsilon()
1e-05
>>> # Set it back to the default value.
>>> keras.config.set_epsilon(1e-7)
"""
global _EPSILON
_EPSILON = value
@keras_export(
[
"keras.config.image_data_format",
"keras.backend.image_data_format",
]
)
def image_data_format():
"""Return the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`.
Example:
>>> keras.config.image_data_format()
'channels_last'
"""
return _IMAGE_DATA_FORMAT
@keras_export(
[
"keras.config.set_image_data_format",
"keras.backend.set_image_data_format",
]
)
def set_image_data_format(data_format):
"""Set the value of the image data format convention.
Args:
data_format: string. `'channels_first'` or `'channels_last'`.
Examples:
>>> keras.config.image_data_format()
'channels_last'
>>> keras.config.set_image_data_format('channels_first')
>>> keras.config.image_data_format()
'channels_first'
>>> # Set it back to `'channels_last'`
>>> keras.config.set_image_data_format('channels_last')
"""
global _IMAGE_DATA_FORMAT
data_format = str(data_format).lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
"{'channels_first', 'channels_last'}. "
f"Received: data_format={data_format}"
)
_IMAGE_DATA_FORMAT = data_format
def standardize_data_format(data_format):
if data_format is None:
return image_data_format()
data_format = str(data_format).lower()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"The `data_format` argument must be one of "
"{'channels_first', 'channels_last'}. "
f"Received: data_format={data_format}"
)
return data_format
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if "KERAS_HOME" in os.environ:
_KERAS_DIR = os.environ.get("KERAS_HOME")
else:
_keras_base_dir = os.path.expanduser("~")
if not os.access(_keras_base_dir, os.W_OK):
_keras_base_dir = "/tmp"
_KERAS_DIR = os.path.join(_keras_base_dir, ".keras")
def keras_home():
# Private accessor for the keras home location.
return _KERAS_DIR
# Attempt to read Keras config file.
_config_path = os.path.expanduser(os.path.join(_KERAS_DIR, "keras.json"))
if os.path.exists(_config_path):
try:
with open(_config_path) as f:
_config = json.load(f)
except ValueError:
_config = {}
_floatx = _config.get("floatx", floatx())
assert _floatx in {"float16", "float32", "float64"}
_epsilon = _config.get("epsilon", epsilon())
assert isinstance(_epsilon, float)
_backend = _config.get("backend", _BACKEND)
_image_data_format = _config.get("image_data_format", image_data_format())
assert _image_data_format in {"channels_last", "channels_first"}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
_BACKEND = _backend
# Save config file, if possible.
if not os.path.exists(_KERAS_DIR):
try:
os.makedirs(_KERAS_DIR)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
"floatx": floatx(),
"epsilon": epsilon(),
"backend": _BACKEND,
"image_data_format": image_data_format(),
}
try:
with open(_config_path, "w") as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Except permission denied.
pass
# Set backend based on KERAS_BACKEND flag, if applicable.
if "KERAS_BACKEND" in os.environ:
_backend = os.environ["KERAS_BACKEND"]
if _backend:
_BACKEND = _backend
if _BACKEND != "tensorflow":
# If we are not running on the tensorflow backend, we should stop tensorflow
# from using all available GPU memory. See
# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
@keras_export(
[
"keras.config.backend",
"keras.backend.backend",
]
)
def backend():
"""Publicly accessible method for determining the current backend.
Returns:
String, the name of the backend Keras is currently using. One of
`"tensorflow"`, `"torch"`, or `"jax"`.
Example:
>>> keras.config.backend()
'tensorflow'
"""
return _BACKEND
| keras/keras/backend/config.py/0 | {
"file_path": "keras/keras/backend/config.py",
"repo_id": "keras",
"token_count": 2989
} | 152 |
import collections
import itertools
from functools import partial
import jax
import numpy as np
import tree
from keras import backend
from keras import callbacks as callbacks_module
from keras import ops
from keras import optimizers as optimizers_module
from keras.backend import distribution_lib as jax_distribution_lib
from keras.distribution import distribution_lib
from keras.trainers import trainer as base_trainer
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.epoch_iterator import EpochIterator
from keras.utils import traceback_utils
class JAXTrainer(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.train_function = None
self.test_function = None
self.predict_function = None
self._jax_state_synced = True
def compute_loss_and_updates(
self,
trainable_variables,
non_trainable_variables,
x,
y,
sample_weight,
training=False,
optimizer_variables=None,
):
"""This method is stateless and is intended for use with jax.grad."""
kwargs = {}
if self._call_has_training_arg:
kwargs["training"] = training
y_pred, non_trainable_variables, losses = self.stateless_call(
trainable_variables,
non_trainable_variables,
x,
return_losses=True,
**kwargs,
)
var_mapping = list(zip(self.trainable_variables, trainable_variables))
var_mapping.extend(
zip(self.non_trainable_variables, non_trainable_variables)
)
with backend.StatelessScope(state_mapping=var_mapping):
# Note that this is needed for the regularization loss, which need
# the latest value of train/non-trainable variables.
loss = self.compute_loss(
x, y, y_pred, sample_weight, allow_empty=True
)
if losses:
loss += ops.sum(losses)
unscaled_loss = loss
if training and self.optimizer is not None:
# Scale loss with a StatelessScope, to use an update scale variable.
mapping = list(zip(self.optimizer.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping):
loss = self.optimizer.scale_loss(loss)
return loss, (unscaled_loss, y_pred, non_trainable_variables)
def train_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
grad_fn = jax.value_and_grad(
self.compute_loss_and_updates, has_aux=True
)
(loss, aux), grads = grad_fn(
trainable_variables,
non_trainable_variables,
x,
y,
sample_weight,
training=True,
optimizer_variables=optimizer_variables,
)
(unscaled_loss, y_pred, non_trainable_variables) = aux
(
trainable_variables,
optimizer_variables,
) = self.optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
with backend.StatelessScope(
state_mapping=[
(ref_v, v)
for ref_v, v in zip(self.metrics_variables, metrics_variables)
]
) as scope:
self._loss_tracker.update_state(unscaled_loss)
logs = self.compute_metrics(x, y, y_pred, sample_weight)
new_metrics_variables = []
for ref_v in self.metrics_variables:
new_v = scope.get_current_value(ref_v)
if new_v is None:
new_v = ref_v.value
new_metrics_variables.append(new_v)
metrics_variables = new_metrics_variables
state = self._enforce_jax_state_sharding(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
)
return logs, state
def test_step(self, state, data):
(
trainable_variables,
non_trainable_variables,
metrics_variables,
) = state
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
loss, aux = self.compute_loss_and_updates(
trainable_variables,
non_trainable_variables,
x,
y,
sample_weight,
training=False,
)
(unscaled_loss, y_pred, non_trainable_variables) = aux
with backend.StatelessScope(
state_mapping=[
(ref_v, v)
for ref_v, v in zip(self.metrics_variables, metrics_variables)
]
) as scope:
self._loss_tracker.update_state(unscaled_loss)
logs = self.compute_metrics(x, y, y_pred, sample_weight)
new_metrics_variables = []
for ref_v in self.metrics_variables:
new_v = scope.get_current_value(ref_v)
if new_v is None:
new_v = ref_v.value
new_metrics_variables.append(new_v)
metrics_variables = new_metrics_variables
(
trainable_variables,
non_trainable_variables,
_,
metrics_variables,
) = self._enforce_jax_state_sharding(
trainable_variables=trainable_variables,
non_trainable_variables=non_trainable_variables,
optimizer_variables=None,
metrics_variables=metrics_variables,
)
state = (
trainable_variables,
non_trainable_variables,
metrics_variables,
)
return logs, state
def predict_step(self, state, data):
trainable_variables, non_trainable_variables = state
kwargs = {}
if self._call_has_training_arg:
kwargs["training"] = False
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
outputs, non_trainable_variables = self.stateless_call(
trainable_variables, non_trainable_variables, x, **kwargs
)
(
_,
non_trainable_variables,
_,
_,
) = self._enforce_jax_state_sharding(
trainable_variables=None,
non_trainable_variables=non_trainable_variables,
optimizer_variables=None,
metrics_variables=None,
)
return outputs, non_trainable_variables
def make_train_function(self, force=False):
if self.train_function is not None and not force:
return self.train_function
def one_train_step(state, data):
data = data[0]
return self.train_step(state, data)
def multi_train_steps(state, data):
for single_step_data in data:
logs, state = one_train_step(state, [single_step_data])
return logs, state
if self.steps_per_execution > 1:
train_step = multi_train_steps
else:
train_step = one_train_step
if not self.run_eagerly and self.jit_compile:
# Note that we mark the state and data to be donated to jax,
# so that jax will reuse the memory buffer for outputs.
# This will reduce the memory usage of the training function by
# half.
@partial(jax.jit, donate_argnames="state")
def compiled_train_step(state, data):
return train_step(state, data)
self.train_function = compiled_train_step
else:
self.train_function = train_step
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
def one_test_step(state, data):
data = data[0]
return self.test_step(state, data)
def multi_test_steps(state, data):
for single_step_data in data:
logs, state = one_test_step(state, [single_step_data])
return logs, state
if self.steps_per_execution > 1:
test_step = multi_test_steps
else:
test_step = one_test_step
if not self.run_eagerly and self.jit_compile:
# Note that we mark the state and data to be donated to jax,
# so that jax will reuse the memory buffer for outputs.
# This will reduce the memory usage of the training function by
# half.
@partial(jax.jit, donate_argnames="state")
def compiled_test_step(state, data):
return test_step(state, data)
self.test_function = compiled_test_step
else:
self.test_function = test_step
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
def one_predict_step(state, data):
data = data[0]
return self.predict_step(state, data)
def multi_predict_steps(state, data):
outputs, trainable_variables = one_predict_step(state, data[:1])
for single_step_data in data[1:]:
step_outputs, trainable_variables = one_predict_step(
state,
[single_step_data],
)
outputs = tree.map_structure(
lambda t1, t2: jax.numpy.concatenate([t1, t2]),
outputs,
step_outputs,
)
return outputs, trainable_variables
if self.steps_per_execution > 1:
predict_step = multi_predict_steps
else:
predict_step = one_predict_step
if not self.run_eagerly and self.jit_compile:
@jax.jit
def compiled_predict_step(state, data):
return predict_step(state, data)
self.predict_function = compiled_predict_step
else:
self.predict_function = predict_step
@traceback_utils.filter_traceback
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
self._assert_compile_called("fit")
# TODO: respect compiled trainable state
self._eval_epoch_iterator = None
if validation_split and validation_data is None:
# Create the validation data using the training data. Only supported
# for TF/numpy/jax arrays.
(
x,
y,
sample_weight,
), validation_data = data_adapter_utils.train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
if validation_data is not None:
(
val_x,
val_y,
val_sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
# Create an iterator that yields batches for one epoch.
epoch_iterator = JAXEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=epochs,
steps=epoch_iterator.num_batches,
model=self,
)
self._record_training_state_sharding_spec()
self.make_train_function()
self.stop_training = False
callbacks.on_train_begin()
for epoch in range(initial_epoch, epochs):
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
self._jax_state_synced = True
for step, data in epoch_iterator.enumerate_epoch():
# Callbacks
callbacks.on_train_batch_begin(step)
# Train step
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
optimizer_variables=True,
metrics_variables=True,
purge_model_variables=True,
)
self._jax_state_synced = False
logs, state = self.train_function(state, data)
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
# Setting _jax_state enables callbacks to force a state sync
# if they need to.
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"optimizer_variables": optimizer_variables,
"metrics_variables": metrics_variables,
}
# Callbacks
callbacks.on_train_batch_end(step, self._pythonify_logs(logs))
if self.stop_training:
break
# Reattach state to the model (if not already done by a callback).
# NOTE: doing this after each step would be a big performance
# bottleneck.
self.jax_state_sync()
# Override with model metrics instead of last step logs
# The jax spmd_mode is need for multi-process context, since the
# metrics values are replicated, and we don't want to do a all
# gather, and only need the local copy of the value.
with jax.spmd_mode("allow_all"):
epoch_logs = self.get_metrics_result()
# Run validation.
if validation_data is not None and self._should_eval(
epoch, validation_freq
):
# Create JAXEpochIterator for evaluation and cache it.
if getattr(self, "_eval_epoch_iterator", None) is None:
self._eval_epoch_iterator = JAXEpochIterator(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps_per_execution=self.steps_per_execution,
steps_per_epoch=validation_steps,
shuffle=False,
)
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
return_dict=True,
_use_cached_eval_dataset=True,
)
val_logs = {
"val_" + name: val for name, val in val_logs.items()
}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
training_logs = epoch_logs
if self.stop_training:
break
if (
isinstance(self.optimizer, optimizers_module.Optimizer)
and epochs > 0
):
self.optimizer.finalize_variable_values(self.trainable_weights)
# If _eval_epoch_iterator exists, delete it after all epochs are done.
if getattr(self, "_eval_epoch_iterator", None) is not None:
del self._eval_epoch_iterator
callbacks.on_train_end(logs=training_logs)
self._jax_state = None
return self.history
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
self._assert_compile_called("evaluate")
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = JAXEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
self._symbolic_build(iterator=epoch_iterator)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self._record_training_state_sharding_spec()
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = None
self.reset_metrics()
self._jax_state_synced = True
for step, data in epoch_iterator.enumerate_epoch():
callbacks.on_test_batch_begin(step)
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=True,
purge_model_variables=True,
)
self._jax_state_synced = False
logs, state = self.test_function(state, data)
(
trainable_variables,
non_trainable_variables,
metrics_variables,
) = state
# Setting _jax_state enables callbacks to force a state sync
# if they need to.
self._jax_state = {
# I wouldn't recommend modifying non-trainable model state
# during evaluate(), but it's allowed.
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"metrics_variables": metrics_variables,
}
callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
if self.stop_evaluating:
break
# Reattach state back to model (if not already done by a callback).
self.jax_state_sync()
# The jax spmd_mode is need for multi-process context, since the
# metrics values are replicated, and we don't want to do a all
# gather, and only need the local copy of the value.
with jax.spmd_mode("allow_all"):
logs = self.get_metrics_result()
callbacks.on_test_end(logs)
self._jax_state = None
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = JAXEpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
if not all(layer.built for layer in self._flatten_layers()):
# Build the model on one batch of data.
for _, data in epoch_iterator.enumerate_epoch():
# Build model
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data[0])
with backend.StatelessScope():
self(x)
break
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self._record_training_state_sharding_spec()
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
self._jax_state_synced = True
outputs = None
non_trainable_variables = None
for step, x in epoch_iterator.enumerate_epoch():
callbacks.on_predict_batch_begin(step)
if self._jax_state_synced:
# The state may have been synced by a callback.
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
)
self._purge_model_variables(non_trainable_variables=True)
self._jax_state_synced = False
else:
state = (state[0], non_trainable_variables)
batch_outputs, non_trainable_variables = self.predict_function(
state, x
)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(step, {"outputs": batch_outputs})
if self.stop_predicting:
break
self._jax_state = {
# I wouldn't recommend modifying non-trainable model state
# during predict(), but it's allowed.
"non_trainable_variables": non_trainable_variables,
}
self.jax_state_sync()
callbacks.on_predict_end()
self._jax_state = None
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
self._assert_compile_called("train_on_batch")
if class_weight is not None:
if sample_weight is not None:
raise ValueError(
"Arguments `sample_weight` and `class_weight` "
"cannot be specified at the same time. "
f"Received: sample_weight={sample_weight}, "
f"class_weight={class_weight}"
)
sample_weight = data_adapter_utils.class_weight_to_sample_weights(
y, class_weight
)
data = (x, y, sample_weight)
data = _distribute_data(data)
# Maybe build model
self._symbolic_build(data_batch=data)
self._record_training_state_sharding_spec()
self.make_train_function()
# Train step
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
optimizer_variables=True,
metrics_variables=True,
purge_model_variables=False,
)
self._jax_state_synced = False
logs, state = self.train_function(state, [data])
# State sync
(
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
) = state
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"optimizer_variables": optimizer_variables,
"metrics_variables": metrics_variables,
}
self.jax_state_sync()
# Format return values
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
data = (x, y, sample_weight)
data = _distribute_data(data)
# Maybe build model
self._symbolic_build(data_batch=data)
self._record_training_state_sharding_spec()
self.make_test_function()
# Test step
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=True,
purge_model_variables=False,
)
self._jax_state_synced = False
logs, state = self.test_function(state, [data])
# State sync
trainable_variables, non_trainable_variables, metrics_variables = state
self._jax_state = {
"trainable_variables": trainable_variables,
"non_trainable_variables": non_trainable_variables,
"metrics_variables": metrics_variables,
}
self.jax_state_sync()
# Format return values.
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
if not all(layer.built for layer in self._flatten_layers()):
# Build model
with backend.StatelessScope():
self(x)
self._record_training_state_sharding_spec()
self.make_predict_function()
state = self._get_jax_state(
trainable_variables=True,
non_trainable_variables=True,
metrics_variables=False,
purge_model_variables=False,
)
self._jax_state_synced = False
batch_outputs, non_trainable_variables = self.predict_function(
state, [(x,)]
)
self._jax_state = {
"non_trainable_variables": non_trainable_variables,
}
self.jax_state_sync()
batch_outputs = tree.map_structure(lambda x: np.array(x), batch_outputs)
return batch_outputs
def jax_state_sync(self):
if not getattr(self, "_jax_state", None) or self._jax_state_synced:
return
trainable_variables = self._jax_state.get("trainable_variables", None)
non_trainable_variables = self._jax_state.get(
"non_trainable_variables", None
)
optimizer_variables = self._jax_state.get("optimizer_variables", None)
metrics_variables = self._jax_state.get("metrics_variables", None)
if trainable_variables:
for ref_v, v in zip(self.trainable_variables, trainable_variables):
ref_v.assign(v)
if non_trainable_variables:
for ref_v, v in zip(
self.non_trainable_variables, non_trainable_variables
):
ref_v.assign(v)
if optimizer_variables:
for ref_v, v in zip(self.optimizer.variables, optimizer_variables):
ref_v.assign(v)
if metrics_variables:
for ref_v, v in zip(self.metrics_variables, metrics_variables):
ref_v.assign(v)
self._jax_state_synced = True
def _record_training_state_sharding_spec(self):
self._trainable_variable_shardings = [
v.value.sharding for v in self.trainable_variables
]
self._non_trainable_variable_shardings = [
v.value.sharding for v in self.non_trainable_variables
]
if hasattr(self, "optimizer") and self.optimizer is not None:
self._optimizer_variable_shardings = [
v.value.sharding for v in self.optimizer.variables
]
else:
self._optimizer_variable_shardings = []
self._metrics_variable_shardings = [
v.value.sharding for v in self.metrics_variables
]
def _enforce_jax_state_sharding(
self,
trainable_variables=None,
non_trainable_variables=None,
optimizer_variables=None,
metrics_variables=None,
):
"""Enforce the sharding spec constraint for all the training state.
Since the output of the train/eval step will be used as inputs to next
step, we need to ensure that they have the same sharding spec, so that
jax.jit won't have to recompile the train/eval function.
Note that this function will also rely on the recorded sharding spec
for each of states.
This function is expected to be called within the jitted train/eval
function, especially around the end of the function.
"""
trainable_variables = trainable_variables or []
non_trainable_variables = non_trainable_variables or []
optimizer_variables = optimizer_variables or []
metrics_variables = metrics_variables or []
for i in range(len(trainable_variables)):
trainable_variables[i] = jax.lax.with_sharding_constraint(
trainable_variables[i], self._trainable_variable_shardings[i]
)
for i in range(len(non_trainable_variables)):
non_trainable_variables[i] = jax.lax.with_sharding_constraint(
non_trainable_variables[i],
self._non_trainable_variable_shardings[i],
)
for i in range(len(optimizer_variables)):
optimizer_variables[i] = jax.lax.with_sharding_constraint(
optimizer_variables[i], self._optimizer_variable_shardings[i]
)
for i in range(len(metrics_variables)):
metrics_variables[i] = jax.lax.with_sharding_constraint(
metrics_variables[i], self._metrics_variable_shardings[i]
)
return (
trainable_variables,
non_trainable_variables,
optimizer_variables,
metrics_variables,
)
def _purge_model_variables(
self,
trainable_variables=False,
non_trainable_variables=False,
optimizer_variables=False,
metrics_variables=False,
):
"""Remove all the model variable for memory saving.
During JAX training, since the training function are stateless, we have
to pass in and get the model weights over and over, during which the
copy of the weights that attached to the KerasVariable are still and
occupying extra memory. We remove those variable to save memory (for
better memory utilization) at the beginning of the epoch, and reattach
the value back to variables at the end of the epoch, via
`jax_state_sync()`.
"""
if trainable_variables:
for v in self.trainable_variables:
v._value = None
if non_trainable_variables:
for v in self.non_trainable_variables:
v._value = None
if optimizer_variables:
for v in self.optimizer.variables:
v._value = None
if metrics_variables:
for v in self.metrics_variables:
v._value = None
def _get_jax_state(
self,
trainable_variables=False,
non_trainable_variables=False,
optimizer_variables=False,
metrics_variables=False,
purge_model_variables=False,
):
state = []
if trainable_variables:
state.append([v.value for v in self.trainable_variables])
if non_trainable_variables:
state.append([v.value for v in self.non_trainable_variables])
if optimizer_variables:
state.append([v.value for v in self.optimizer.variables])
if metrics_variables:
state.append([v.value for v in self.metrics_variables])
if purge_model_variables:
self._purge_model_variables(
trainable_variables=trainable_variables,
non_trainable_variables=non_trainable_variables,
optimizer_variables=optimizer_variables,
metrics_variables=metrics_variables,
)
return tuple(state)
def _distribute_data(data):
distribution = distribution_lib.distribution()
if distribution is not None:
def distribute_single_value(d):
layout = distribution.get_data_layout(d.shape)
return jax_distribution_lib.distribute_data_input(d, layout)
return tree.map_structure(distribute_single_value, data)
else:
return tree.map_structure(jax.device_put, data)
class JAXEpochIterator(EpochIterator):
def _get_iterator(self):
return self._prefetch_numpy_iterator(
self.data_adapter.get_jax_iterator()
)
def _prefetch_numpy_iterator(self, numpy_iterator):
"""Shard and prefetch batches on device.
Most of the implementation has been borrowed from
`flax.jax_utils.prefetch_to_device`
This utility takes an iterator and returns a new iterator which fills an
on device prefetch buffer. Eager prefetching can improve the performance
of training loops significantly by overlapping compute and data
transfer.
"""
queue = collections.deque()
# If you're training on GPUs, 2 is generally the best choice because
# this guarantees that you can overlap a training step on GPU with a
# data prefetch step on CPU.
def enqueue(n=2):
for data in itertools.islice(numpy_iterator, n):
queue.append(_distribute_data(data))
enqueue(n=2) # TODO: should we make `n` configurable?
while queue:
yield queue.popleft()
enqueue(1)
| keras/keras/backend/jax/trainer.py/0 | {
"file_path": "keras/keras/backend/jax/trainer.py",
"repo_id": "keras",
"token_count": 17649
} | 153 |
import functools
import itertools
import operator
import tensorflow as tf
from keras.backend.tensorflow.core import convert_to_tensor
RESIZE_INTERPOLATIONS = (
"bilinear",
"nearest",
"lanczos3",
"lanczos5",
"bicubic",
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
data_format="channels_last",
):
if interpolation not in RESIZE_INTERPOLATIONS:
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
)
if not len(size) == 2:
raise ValueError(
"Argument `size` must be a tuple of two elements "
f"(height, width). Received: size={size}"
)
size = tuple(size)
if data_format == "channels_first":
if len(image.shape) == 4:
image = tf.transpose(image, (0, 2, 3, 1))
elif len(image.shape) == 3:
image = tf.transpose(image, (1, 2, 0))
else:
raise ValueError(
"Invalid input rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
resized = tf.image.resize(
image, size, method=interpolation, antialias=antialias
)
if data_format == "channels_first":
if len(image.shape) == 4:
resized = tf.transpose(resized, (0, 3, 1, 2))
elif len(image.shape) == 3:
resized = tf.transpose(resized, (2, 0, 1))
return tf.cast(resized, image.dtype)
AFFINE_TRANSFORM_INTERPOLATIONS = (
"nearest",
"bilinear",
)
AFFINE_TRANSFORM_FILL_MODES = (
"constant",
"nearest",
"wrap",
# "mirror", not supported by TF
"reflect",
)
def affine_transform(
image,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format="channels_last",
):
if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
f"interpolation={interpolation}"
)
if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected of one "
f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
)
if len(image.shape) not in (3, 4):
raise ValueError(
"Invalid image rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"image.shape={image.shape}"
)
if len(transform.shape) not in (1, 2):
raise ValueError(
"Invalid transform rank: expected rank 1 (single transform) "
"or rank 2 (batch of transforms). Received input with shape: "
f"transform.shape={transform.shape}"
)
# unbatched case
need_squeeze = False
if len(image.shape) == 3:
image = tf.expand_dims(image, axis=0)
need_squeeze = True
if len(transform.shape) == 1:
transform = tf.expand_dims(transform, axis=0)
if data_format == "channels_first":
image = tf.transpose(image, (0, 2, 3, 1))
affined = tf.raw_ops.ImageProjectiveTransformV3(
images=image,
transforms=tf.cast(transform, dtype=tf.float32),
output_shape=tf.shape(image)[1:-1],
fill_value=fill_value,
interpolation=interpolation.upper(),
fill_mode=fill_mode.upper(),
)
affined = tf.ensure_shape(affined, image.shape)
if data_format == "channels_first":
affined = tf.transpose(affined, (0, 3, 1, 2))
if need_squeeze:
affined = tf.squeeze(affined, axis=0)
return affined
def _mirror_index_fixer(index, size):
s = size - 1 # Half-wavelength of triangular wave
# Scaled, integer-valued version of the triangular wave |x - round(x)|
return tf.abs((index + s) % (2 * s) - s)
def _reflect_index_fixer(index, size):
return tf.math.floordiv(
_mirror_index_fixer(2 * index + 1, 2 * size + 1) - 1, 2
)
_INDEX_FIXERS = {
"constant": lambda index, size: index,
"nearest": lambda index, size: tf.clip_by_value(index, 0, size - 1),
"wrap": lambda index, size: index % size,
"mirror": _mirror_index_fixer,
"reflect": _reflect_index_fixer,
}
def _nearest_indices_and_weights(coordinate):
coordinate = (
coordinate if coordinate.dtype.is_integer else tf.round(coordinate)
)
index = tf.cast(coordinate, tf.int32)
weight = tf.constant(1, coordinate.dtype)
return [(index, weight)]
def _linear_indices_and_weights(coordinate):
lower = tf.floor(coordinate)
upper_weight = coordinate - lower
lower_weight = 1 - upper_weight
index = tf.cast(lower, tf.int32)
return [(index, lower_weight), (index + 1, upper_weight)]
def map_coordinates(
input, coordinates, order, fill_mode="constant", fill_value=0.0
):
input_arr = convert_to_tensor(input)
coordinate_arrs = convert_to_tensor(coordinates)
# unstack into a list of tensors for following operations
coordinate_arrs = tf.unstack(coordinate_arrs, axis=0)
fill_value = convert_to_tensor(tf.cast(fill_value, input_arr.dtype))
if len(coordinates) != len(input_arr.shape):
raise ValueError(
"coordinates must be a sequence of length input.shape, but "
f"{len(coordinates)} != {len(input_arr.shape)}"
)
index_fixer = _INDEX_FIXERS.get(fill_mode)
if index_fixer is None:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected one of "
f"{set(_INDEX_FIXERS.keys())}. Received: "
f"fill_mode={fill_mode}"
)
def is_valid(index, size):
if fill_mode == "constant":
return (0 <= index) & (index < size)
else:
return True
if order == 0:
interp_fun = _nearest_indices_and_weights
elif order == 1:
interp_fun = _linear_indices_and_weights
else:
raise NotImplementedError("map_coordinates currently requires order<=1")
valid_1d_interpolations = []
for coordinate, size in zip(coordinate_arrs, input_arr.shape):
interp_nodes = interp_fun(coordinate)
valid_interp = []
for index, weight in interp_nodes:
fixed_index = index_fixer(index, size)
valid = is_valid(index, size)
valid_interp.append((fixed_index, valid, weight))
valid_1d_interpolations.append(valid_interp)
outputs = []
for items in itertools.product(*valid_1d_interpolations):
indices, validities, weights = zip(*items)
indices = tf.transpose(tf.stack(indices))
def fast_path():
return tf.transpose(tf.gather_nd(input_arr, indices))
def slow_path():
all_valid = functools.reduce(operator.and_, validities)
return tf.where(
all_valid,
tf.transpose(tf.gather_nd(input_arr, indices)),
fill_value,
)
contribution = tf.cond(tf.reduce_all(validities), fast_path, slow_path)
outputs.append(
functools.reduce(operator.mul, weights)
* tf.cast(contribution, weights[0].dtype)
)
result = functools.reduce(operator.add, outputs)
if input_arr.dtype.is_integer:
result = result if result.dtype.is_integer else tf.round(result)
return tf.cast(result, input_arr.dtype)
| keras/keras/backend/tensorflow/image.py/0 | {
"file_path": "keras/keras/backend/tensorflow/image.py",
"repo_id": "keras",
"token_count": 3389
} | 154 |
import unittest
import pytest
from keras import backend
from keras import ops
from keras.backend.common.keras_tensor import KerasTensor
def single_arg_test_fn(x):
return ops.concatenate([(x + 1) ** 2, x], axis=-1)
def three_args_2_kwarg_test_fn(x1, x2, x3=None):
x1 = ops.max(x1, axis=1)
x2 = ops.max(x2, axis=1)
if x3 is not None:
x1 += ops.max(x3, axis=1)
return x1 + x2
class ComputeOutputSpecTest(unittest.TestCase):
def test_dynamic_batch_size(self):
x = KerasTensor(shape=(None, 3, 5))
y = backend.compute_output_spec(single_arg_test_fn, x)
self.assertEqual(y.shape, (None, 3, 10))
x1 = KerasTensor(shape=(None, 3, 5))
x2 = KerasTensor(shape=(None, 3, 5))
x3 = KerasTensor(shape=(None, 3, 5))
y = backend.compute_output_spec(
three_args_2_kwarg_test_fn, x1, x2, x3=x3
)
self.assertEqual(y.shape, (None, 5))
def test_dynamic_everything(self):
x = KerasTensor(shape=(2, None, 3))
y = backend.compute_output_spec(single_arg_test_fn, x)
self.assertEqual(y.shape, (2, None, 6))
x1 = KerasTensor(shape=(None, None, 5))
x2 = KerasTensor(shape=(None, None, 5))
x3 = KerasTensor(shape=(None, None, 5))
y = backend.compute_output_spec(
three_args_2_kwarg_test_fn, x1, x2, x3=x3
)
self.assertEqual(y.shape, (None, 5))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse_to_sparse(self):
def single_arg_sparse_fn(x):
y0 = ops.transpose(x, axes=(0, 2, 1))
y1 = ops.squeeze(ops.expand_dims(x, axis=3), axis=3)
y2 = ops.reshape(ops.reshape(x, (-1, 9)), (-1, 3, 3))
return (y0, y1, y2)
x = KerasTensor(shape=(None, 3, 3), sparse=True)
ys = backend.compute_output_spec(single_arg_sparse_fn, x)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertTrue(y.sparse)
def three_args_sparse_fn(x1, x2, x3=None):
y0 = ops.add(x1, x2) # sparse, sparse
y1 = ops.concatenate([x1, x2], axis=0) # sparse, sparse
y2 = ops.divide(x1, x3) # sparse, dense
y3 = ops.matmul(x1, x2) # sparse, sparse
y4 = ops.multiply(x1, x2) # sparse, sparse
y5 = ops.multiply(x1, x3) # sparse, dense
return (y0, y1, y2, y3, y4, y5)
x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
x2 = KerasTensor(shape=(None, 3, 3), sparse=True)
x3 = KerasTensor(shape=(None, 3, 3), sparse=False)
ys = backend.compute_output_spec(three_args_sparse_fn, x1, x2, x3=x3)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertTrue(y.sparse)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse_to_dense(self):
def single_arg_dense_fn(x):
y0 = ops.exp(x)
return (y0,)
x = KerasTensor(shape=(None, 3, 3), sparse=True)
ys = backend.compute_output_spec(single_arg_dense_fn, x)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertFalse(y.sparse)
def three_args_dense_fn(x1, x2, x3=None):
y0 = ops.add(x1, x2) # sparse, dense
y1 = ops.add(x2, x1) # dense, sparse
y2 = ops.concatenate([x1, x2], axis=0) # sparse, dense
y3 = ops.matmul(x1, x2) # sparse, dense
y4 = ops.matmul(x2, x1) # dense, sparse
y5 = ops.take(x2, indices=x3, axis=1) # dense, sparse
y6 = ops.divide(x1, x1) # sparse, sparse
return (y0, y1, y2, y3, y4, y5, y6)
x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
x2 = KerasTensor(shape=(None, 3, 3), sparse=False)
x3 = KerasTensor(shape=(3,), dtype="int64", sparse=True)
ys = backend.compute_output_spec(three_args_dense_fn, x1, x2, x3=x3)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertFalse(y.sparse)
| keras/keras/backend/tests/compute_output_spec_test.py/0 | {
"file_path": "keras/keras/backend/tests/compute_output_spec_test.py",
"repo_id": "keras",
"token_count": 2245
} | 155 |
import torch
from keras import ops
from keras import optimizers
from keras.backend.torch.optimizers import torch_parallel_optimizer
class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
m_list = [
self._momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
c_t = torch._foreach_mul(m_list, self.beta_1)
torch._foreach_add_(c_t, grads, alpha=1 - self.beta_1)
c_t = [c.sign() for c in c_t]
torch._foreach_add_(
variables,
torch._foreach_mul(c_t, lr),
alpha=-1,
)
torch._foreach_mul_(m_list, self.beta_2)
torch._foreach_add_(m_list, grads, alpha=1 - self.beta_2)
| keras/keras/backend/torch/optimizers/torch_lion.py/0 | {
"file_path": "keras/keras/backend/torch/optimizers/torch_lion.py",
"repo_id": "keras",
"token_count": 496
} | 156 |
import csv
import os
import re
import tempfile
import numpy as np
import pytest
from keras import callbacks
from keras import initializers
from keras import layers
from keras import testing
from keras.models import Sequential
from keras.utils import numerical_utils
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
BATCH_SIZE = 4
class CSVLoggerTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_CSVLogger(self):
OUTPUT_DIM = 1
np.random.seed(1337)
temp_dir = tempfile.TemporaryDirectory()
filepath = os.path.join(temp_dir.name, "log.tsv")
sep = "\t"
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.random((TRAIN_SAMPLES, OUTPUT_DIM))
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.random((TEST_SAMPLES, OUTPUT_DIM))
def make_model():
np.random.seed(1337)
model = Sequential(
[
layers.Dense(2, activation="relu"),
layers.Dense(OUTPUT_DIM),
]
)
model.compile(
loss="mse",
optimizer="sgd",
metrics=["mse"],
)
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0,
)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0,
)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = " ".join(list_lines)
assert len(re.findall("epoch", output)) == 1
os.remove(filepath)
# case 3, Verify Val. loss also registered when Validation Freq > 1
model = make_model()
cbks = [callbacks.CSVLogger(filepath, separator=sep)]
hist = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
validation_freq=3,
callbacks=cbks,
epochs=5,
verbose=0,
)
assert os.path.exists(filepath)
# Verify that validation loss is registered at val. freq
with open(filepath) as csvfile:
rows = csv.DictReader(csvfile, delimiter=sep)
for idx, row in enumerate(rows, 1):
self.assertIn("val_loss", row)
if idx == 3:
self.assertEqual(
row["val_loss"], str(hist.history["val_loss"][0])
)
else:
self.assertEqual(row["val_loss"], "NA")
@pytest.mark.requires_trainable_backend
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN
# callback does not result in invalid CSVs.
tmpdir = tempfile.TemporaryDirectory()
csv_logfile = os.path.join(tmpdir.name, "csv_logger.csv")
NUM_CLASSES = 2
np.random.seed(1337)
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
y_test = numerical_utils.to_categorical(y_test)
y_train = numerical_utils.to_categorical(y_train)
model = Sequential()
initializer = initializers.Constant(value=1e5)
for _ in range(5):
model.add(
layers.Dense(
2,
activation="relu",
kernel_initializer=initializer,
)
)
model.add(layers.Dense(NUM_CLASSES))
model.compile(loss="mean_squared_error", optimizer="sgd")
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=[
callbacks.TerminateOnNaN(),
callbacks.CSVLogger(csv_logfile),
],
epochs=20,
)
loss = history.history["loss"]
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]) or np.isinf(loss[0]))
values = []
with open(csv_logfile) as f:
# On Windows, due to \r\n line ends, we may end up reading empty
# lines after each line. Skip empty lines.
values = [x for x in csv.reader(f) if x]
self.assertIn("nan", values[-1], "NaN not logged in CSV Logger.")
| keras/keras/callbacks/csv_logger_test.py/0 | {
"file_path": "keras/keras/callbacks/csv_logger_test.py",
"repo_id": "keras",
"token_count": 3061
} | 157 |
import tempfile
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras import backend
from keras import callbacks
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
from keras import saving
from keras import testing
from keras.models import Sequential
from keras.testing import test_utils
from keras.utils import numerical_utils
class SwapEMAWeightsTest(testing.TestCase):
def setUp(self):
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(3,),
num_classes=2,
random_seed=2023,
)
y_train = numerical_utils.to_categorical(y_train)
self.x_train = x_train
self.y_train = y_train
def _get_compiled_model(
self, use_ema=True, jit_compile=True, loss_scale=False
):
optimizer = optimizers.SGD(use_ema=use_ema, ema_momentum=0.9)
if loss_scale:
optimizer = optimizers.LossScaleOptimizer(optimizer)
model = Sequential(
[layers.Dense(2, kernel_initializer="ones", use_bias=False)]
)
model.compile(
optimizer=optimizer,
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
jit_compile=jit_compile,
)
return model
@pytest.mark.requires_trainable_backend
def test_swap_ema_weights_with_invalid_optimizer(self):
model = self._get_compiled_model(use_ema=False)
with self.assertRaisesRegex(
ValueError,
("SwapEMAWeights must be used when " "`use_ema=True` is set"),
):
model.fit(
self.x_train,
self.y_train,
epochs=2,
callbacks=[callbacks.SwapEMAWeights()],
validation_data=(self.x_train, self.y_train),
)
@pytest.mark.requires_trainable_backend
def test_swap_ema_weights(self):
# not using SwapEMAWeights
model = self._get_compiled_model()
history = model.fit(
self.x_train,
self.y_train,
epochs=2,
validation_data=(self.x_train, self.y_train),
)
logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
# final metric during fitting is different from the evaluation
self.assertNotEqual(
history.history["val_mean_squared_error"][-1],
logs["mean_squared_error"],
)
# using SwapEMAWeights
model = self._get_compiled_model()
history = model.fit(
self.x_train,
self.y_train,
epochs=2,
callbacks=[callbacks.SwapEMAWeights()],
validation_data=(self.x_train, self.y_train),
)
logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
# final metric during fitting is same as the evaluation
self.assertEqual(
history.history["val_mean_squared_error"][-1],
logs["mean_squared_error"],
)
@pytest.mark.requires_trainable_backend
def test_swap_ema_weights_on_epoch(self):
# using SwapEMAWeights together with ModelCheckpoint
model = self._get_compiled_model()
with tempfile.TemporaryDirectory() as temp_dir:
model.fit(
self.x_train,
self.y_train,
epochs=2,
callbacks=[
callbacks.SwapEMAWeights(swap_on_epoch=True),
callbacks.ModelCheckpoint(temp_dir + "/{epoch:1d}.keras"),
],
validation_data=(self.x_train, self.y_train),
)
model2 = saving.load_model(temp_dir + "/2.keras")
logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True)
# saved checkpoint will be applied by EMA weights
self.assertEqual(
logs["mean_squared_error"],
logs2["mean_squared_error"],
)
@pytest.mark.requires_trainable_backend
def test_swap_ema_weights_with_loss_scale_optimizer(self):
model = self._get_compiled_model(loss_scale=True)
history = model.fit(
self.x_train,
self.y_train,
epochs=2,
callbacks=[callbacks.SwapEMAWeights()],
validation_data=(self.x_train, self.y_train),
)
logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
# final metric during fitting is same as the evaluation
self.assertEqual(
history.history["val_mean_squared_error"][-1],
logs["mean_squared_error"],
)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
def test_swap_ema_weights_with_tf_distribute(self):
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
# TODO: set jit_compile=True once the issue is resolved in
# integration_tests/tf_distribute_training_test.py#L52
model = self._get_compiled_model(jit_compile=False)
with tempfile.TemporaryDirectory() as temp_dir:
model.fit(
self.x_train,
self.y_train,
epochs=2,
callbacks=[
callbacks.SwapEMAWeights(swap_on_epoch=True),
callbacks.ModelCheckpoint(
temp_dir + "/distributed_{epoch:1d}.keras"
),
],
validation_data=(self.x_train, self.y_train),
)
model2 = saving.load_model(temp_dir + "/distributed_2.keras")
logs = model.evaluate(self.x_train, self.y_train, return_dict=True)
logs2 = model2.evaluate(self.x_train, self.y_train, return_dict=True)
# saved checkpoint will be applied by EMA weights
self.assertEqual(
logs["mean_squared_error"],
logs2["mean_squared_error"],
)
| keras/keras/callbacks/swap_ema_weights_test.py/0 | {
"file_path": "keras/keras/callbacks/swap_ema_weights_test.py",
"repo_id": "keras",
"token_count": 3286
} | 158 |
import numpy as np
from keras import constraints
from keras import initializers
from keras import ops
from keras import regularizers
from keras.api_export import keras_export
from keras.layers.layer import Layer
@keras_export("keras.layers.Embedding")
class Embedding(Layer):
"""Turns positive integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used on positive integer inputs of a fixed range.
Example:
>>> model = keras.Sequential()
>>> model.add(keras.layers.Embedding(1000, 64, input_length=10))
>>> # The model will take as input an integer matrix of size (batch,
>>> # input_length), and the largest integer (i.e. word index) in the input
>>> # should be no larger than 999 (vocabulary size).
>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
>>> # dimension.
>>> input_array = np.random.randint(1000, size=(32, 10))
>>> model.compile('rmsprop', 'mse')
>>> output_array = model.predict(input_array)
>>> print(output_array.shape)
(32, 10, 64)
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
This is useful when using recurrent layers which
may take variable length input. If this is `True`,
then all subsequent layers in the model need
to support masking or an exception will be raised.
If mask_zero is set to True, as a consequence,
index 0 cannot be used in the vocabulary (input_dim should
equal size of vocabulary + 1).
lora_rank: Optional integer. If set, the layer's forward pass
will implement LoRA (Low-Rank Adaptation)
with the provided rank. LoRA sets the layer's embeddings
matrix to non-trainable and replaces it with a delta over the
original matrix, obtained via multiplying two lower-rank
trainable matrices. This can be useful to reduce the
computation cost of fine-tuning large embedding layers.
You can also enable LoRA on an existing
`Embedding` layer by calling `layer.enable_lora(rank)`.
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
def __init__(
self,
input_dim,
output_dim,
embeddings_initializer="uniform",
embeddings_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
lora_rank=None,
**kwargs,
):
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.autocast = False
self.lora_rank = lora_rank
self.lora_enabled = False
def build(self, input_shape=None):
self._embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer=self.embeddings_initializer,
name="embeddings",
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
trainable=True,
)
self.built = True
if self.lora_rank:
self.enable_lora(self.lora_rank)
@property
def embeddings(self):
if self.lora_enabled:
return self._embeddings + ops.matmul(
self.lora_embeddings_a, self.lora_embeddings_b
)
return self._embeddings
def call(self, inputs):
if inputs.dtype != "int32" and inputs.dtype != "int64":
inputs = ops.cast(inputs, "int32")
outputs = ops.take(self.embeddings, inputs, axis=0)
return ops.cast(outputs, dtype=self.compute_dtype)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return ops.not_equal(inputs, 0)
def compute_output_shape(self, input_shape):
return input_shape + (self.output_dim,)
def enable_lora(
self, rank, a_initializer="he_uniform", b_initializer="zeros"
):
if self.embeddings_constraint:
raise ValueError(
"Lora is incompatible with embedding constraints. "
"In order to enable lora on this layer, remove the "
"`embeddings_constraint` argument."
)
if not self.built:
raise ValueError(
"Cannot enable lora on a layer that isn't yet built."
)
if self.lora_enabled:
raise ValueError(
"lora is already enabled. "
"This can only be done once per layer."
)
self._tracker.unlock()
self.lora_embeddings_a = self.add_weight(
name="lora_embeddings_a",
shape=(self.embeddings.shape[0], rank),
initializer=initializers.get(a_initializer),
regularizer=self.embeddings_regularizer,
)
self.lora_embeddings_b = self.add_weight(
name="lora_embeddings_b",
shape=(rank, self.embeddings.shape[1]),
initializer=initializers.get(b_initializer),
regularizer=self.embeddings_regularizer,
)
self.embeddings.trainable = False
self._tracker.lock()
self.lora_enabled = True
def save_own_variables(self, store):
if not self.lora_enabled:
return super().save_own_variables(store)
embeddings_value = self.embeddings
store["0"] = embeddings_value
def load_own_variables(self, store):
if not self.lora_enabled:
return super().load_own_variables(store)
self._embeddings.assign(store["0"])
self.lora_embeddings_a.assign(np.zeros(self.lora_embeddings_a.shape))
self.lora_embeddings_b.assign(np.zeros(self.lora_embeddings_b.shape))
def get_config(self):
base_config = super().get_config()
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": regularizers.serialize(
self.embeddings_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"embeddings_constraint": constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
}
if self.lora_rank:
config["lora_rank"] = self.lora_rank
return {**base_config, **config}
| keras/keras/layers/core/embedding.py/0 | {
"file_path": "keras/keras/layers/core/embedding.py",
"repo_id": "keras",
"token_count": 3401
} | 159 |
from keras import ops
from keras.api_export import keras_export
from keras.layers.merging.base_merge import Merge
@keras_export("keras.layers.Add")
class Add(Merge):
"""Performs elementwise addition operation.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Add()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `added = keras.layers.add([x1, x2])`
>>> added = keras.layers.Add()([x1, x2])
>>> out = keras.layers.Dense(4)(added)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output
@keras_export("keras.layers.add")
def add(inputs, **kwargs):
"""Functional interface to the `keras.layers.Add` layer.
Args:
inputs: A list of input tensors with the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the sum of the inputs. It has the same shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.add([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> added = keras.layers.add([x1, x2])
>>> out = keras.layers.Dense(4)(added)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Add(**kwargs)(inputs)
| keras/keras/layers/merging/add.py/0 | {
"file_path": "keras/keras/layers/merging/add.py",
"repo_id": "keras",
"token_count": 882
} | 160 |
import numpy as np
from tensorflow import data as tf_data
from keras import backend
from keras import layers
from keras import testing
class RandomCropTest(testing.TestCase):
def test_random_crop(self):
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 1,
"width": 1,
},
input_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_crop_full(self):
np.random.seed(1337)
height, width = 8, 16
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
else:
input_shape = (12, 3, 8, 16)
inp = np.random.random(input_shape)
layer = layers.RandomCrop(height, width)
actual_output = layer(inp, training=False)
self.assertAllClose(inp, actual_output)
def test_random_crop_partial(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 8, 8, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 8, 8)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 8,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_height(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 10, 8, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 10, 8)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 10,
"width": 8,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_predicting_with_longer_width(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (12, 8, 16, 3)
output_shape = (12, 8, 18, 3)
else:
input_shape = (12, 3, 8, 16)
output_shape = (12, 3, 8, 18)
self.run_layer_test(
layers.RandomCrop,
init_kwargs={
"height": 8,
"width": 18,
},
input_shape=input_shape,
expected_output_shape=output_shape,
supports_masking=False,
run_training_check=False,
)
def test_tf_data_compatibility(self):
layer = layers.RandomCrop(8, 9)
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
| keras/keras/layers/preprocessing/random_crop_test.py/0 | {
"file_path": "keras/keras/layers/preprocessing/random_crop_test.py",
"repo_id": "keras",
"token_count": 1801
} | 161 |
import os
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import data as tf_data
from keras import Sequential
from keras import backend
from keras import layers
from keras import models
from keras import saving
from keras import testing
class TextVectorizationTest(testing.TestCase):
# TODO: increase coverage. Most features aren't being tested.
def test_config(self):
layer = layers.TextVectorization(
output_mode="int",
vocabulary=["one", "two"],
output_sequence_length=5,
)
self.run_class_serialization_test(layer)
def test_adapt_flow(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.adapt(["foo bar", "bar baz", "baz bada boom"])
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
def test_fixed_vocabulary(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
vocabulary=["baz", "bar", "foo"],
)
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
def test_set_vocabulary(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.set_vocabulary(["baz", "bar", "foo"])
input_data = [["foo qux bar"], ["qux baz"]]
output = layer(input_data)
self.assertTrue(backend.is_tensor(output))
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string input dtype"
)
def test_save_load_with_ngrams_flow(self):
input_data = np.array(["foo bar", "bar baz", "baz bada boom"])
model = Sequential(
[
layers.Input(dtype="string", shape=(1,)),
layers.TextVectorization(ngrams=(1, 2)),
]
)
model.layers[0].adapt(input_data)
output = model(input_data)
temp_filepath = os.path.join(self.get_temp_dir(), "model.keras")
model.save(temp_filepath)
model = saving.load_model(temp_filepath)
self.assertAllClose(output, model(input_data))
def test_tf_data_compatibility(self):
max_tokens = 5000
max_len = 4
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
vocabulary=["baz", "bar", "foo"],
)
input_data = [["foo qux bar"], ["qux baz"]]
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))
# Test adapt flow
layer = layers.TextVectorization(
max_tokens=max_tokens,
output_mode="int",
output_sequence_length=max_len,
)
layer.adapt(input_data)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output.numpy()
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires string tensors."
)
def test_tf_as_first_sequential_layer(self):
layer = layers.TextVectorization(
max_tokens=10,
output_mode="int",
output_sequence_length=3,
)
layer.set_vocabulary(["baz", "bar", "foo"])
model = models.Sequential(
[
layer,
layers.Embedding(5, 4),
]
)
model(backend.convert_to_tensor([["foo qux bar"], ["qux baz"]]))
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires ragged tensors."
)
def test_ragged_tensor(self):
layer = layers.TextVectorization(
output_mode="int",
vocabulary=["baz", "bar", "foo"],
ragged=True,
)
input_data = [["foo qux bar"], ["qux baz"], ["foo"]]
output = layer(input_data)
self.assertIsInstance(output, tf.RaggedTensor)
self.assertEqual(output.shape, (3, None))
self.assertEqual(output.to_list(), [[4, 1, 3], [1, 2], [4]])
@pytest.mark.skipif(
backend.backend() != "tensorflow", reason="Requires ragged tensors."
)
def test_ragged_tensor_output_length(self):
layer = layers.TextVectorization(
output_mode="int",
vocabulary=["baz", "bar", "foo"],
ragged=True,
output_sequence_length=2,
)
input_data = [["foo qux bar"], ["qux baz"], ["foo"]]
output = layer(input_data)
self.assertIsInstance(output, tf.RaggedTensor)
self.assertEqual(output.shape, (3, None))
self.assertEqual(output.to_list(), [[4, 1], [1, 2], [4]])
@pytest.mark.skipif(
backend.backend() == "tensorflow",
reason="Verify raises exception for non-TF backends",
)
def test_raises_exception_ragged_tensor(self):
with self.assertRaises(ValueError):
_ = layers.TextVectorization(
output_mode="int",
vocabulary=["baz", "bar", "foo"],
ragged=True,
)
| keras/keras/layers/preprocessing/text_vectorization_test.py/0 | {
"file_path": "keras/keras/layers/preprocessing/text_vectorization_test.py",
"repo_id": "keras",
"token_count": 2892
} | 162 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
from keras.utils import argument_validation
@keras_export("keras.layers.UpSampling2D")
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
The implementation uses interpolative resizing, given the resize method
(specified by the `interpolation` argument). Use `interpolation=nearest`
to repeat the rows and columns of the data.
Examples:
>>> input_shape = (2, 2, 1, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[ 0 1 2]]
[[ 3 4 5]]]
[[[ 6 7 8]]
[[ 9 10 11]]]]
>>> y = keras.layers.UpSampling2D(size=(1, 2))(x)
>>> print(y)
[[[[ 0 1 2]
[ 0 1 2]]
[[ 3 4 5]
[ 3 4 5]]]
[[[ 6 7 8]
[ 6 7 8]]
[[ 9 10 11]
[ 9 10 11]]]]
Args:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `"channels_last"` (default) or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json` (if exists) else `"channels_last"`.
Defaults to `"channels_last"`.
interpolation: A string, one of `"bicubic"`, `"bilinear"`, `"lanczos3"`,
`"lanczos5"`, `"nearest"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(
self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs
):
super().__init__(**kwargs)
self.data_format = backend.standardize_data_format(data_format)
self.size = argument_validation.standardize_tuple(size, 2, "size")
self.interpolation = interpolation.lower()
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
height = (
self.size[0] * input_shape[2]
if input_shape[2] is not None
else None
)
width = (
self.size[1] * input_shape[3]
if input_shape[3] is not None
else None
)
return (input_shape[0], input_shape[1], height, width)
else:
height = (
self.size[0] * input_shape[1]
if input_shape[1] is not None
else None
)
width = (
self.size[1] * input_shape[2]
if input_shape[2] is not None
else None
)
return (input_shape[0], height, width, input_shape[3])
def call(self, inputs):
return self._resize_images(
inputs,
self.size[0],
self.size[1],
self.data_format,
interpolation=self.interpolation,
)
def get_config(self):
config = {
"size": self.size,
"data_format": self.data_format,
"interpolation": self.interpolation,
}
base_config = super().get_config()
return {**base_config, **config}
def _resize_images(
self,
x,
height_factor,
width_factor,
data_format,
interpolation="nearest",
):
"""Resizes the images contained in a 4D tensor.
Args:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `"bicubic"`, `"bilinear"`,
`"lanczos3"`, `"lanczos5"`, or `"nearest"`.
Returns:
A tensor.
"""
if data_format == "channels_first":
rows, cols = 2, 3
elif data_format == "channels_last":
rows, cols = 1, 2
else:
raise ValueError(f"Invalid `data_format` argument: {data_format}")
if data_format == "channels_first":
x = ops.transpose(x, [0, 2, 3, 1])
# https://github.com/keras-team/keras/issues/294
# Use `ops.repeat` for `nearest` interpolation
if interpolation == "nearest":
x = ops.repeat(x, height_factor, axis=1)
x = ops.repeat(x, width_factor, axis=2)
else:
# multiply the height and width factor on each dim
# by hand (versus using element-wise multiplication
# by np.array([height_factor, width_factor]) then
# list-ifying the tensor by calling `.tolist()`)
# since when running under torchdynamo, `new_shape`
# will be traced as a symbolic variable (specifically
# a `FakeTensor`) which does not have a `tolist()` method.
new_shape = (
x.shape[rows] * height_factor,
x.shape[cols] * width_factor,
)
x = ops.image.resize(x, new_shape, interpolation=interpolation)
if data_format == "channels_first":
x = ops.transpose(x, [0, 3, 1, 2])
return x
| keras/keras/layers/reshaping/up_sampling2d.py/0 | {
"file_path": "keras/keras/layers/reshaping/up_sampling2d.py",
"repo_id": "keras",
"token_count": 2977
} | 163 |
from keras import backend
from keras import initializers
from keras import ops
from keras.api_export import keras_export
from keras.optimizers import optimizer
from keras.saving import serialization_lib
from keras.utils import tracking
@keras_export(
[
"keras.optimizers.LossScaleOptimizer",
"keras.mixed_precision.LossScaleOptimizer",
]
)
class LossScaleOptimizer(optimizer.Optimizer):
"""An optimizer that dynamically scales the loss to prevent underflow.
Loss scaling is a technique to prevent numeric underflow in intermediate
gradients when float16 is used. To prevent underflow, the loss is multiplied
(or "scaled") by a certain factor called the "loss scale", which causes
intermediate gradients to be scaled by the loss scale as well. The final
gradients are divided (or "unscaled") by the loss scale to bring them back
to their original value.
`LossScaleOptimizer` wraps another optimizer and applies dynamic loss
scaling to it. This loss scale is dynamically updated over time as follows:
- On any train step, if a nonfinite gradient is encountered, the loss scale
is halved, and the train step is skipped.
- If `dynamic_growth_steps` have ocurred since the last time the loss scale
was updated, and no nonfinite gradients have occurred, the loss scale
is doubled.
Args:
inner_optimizer: The `keras.optimizers.Optimizer` instance to wrap.
initial_scale: Float. The initial loss scale. This scale will be updated
during training. It is recommended for this to be a very high
number, because a loss scale that is too high gets lowered far more
quickly than a loss scale that is too low gets raised.
dynamic_growth_steps: Int. How often to update the scale upwards. After
every `dynamic_growth_steps` steps with finite gradients, the
loss scale is doubled.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
inner_optimizer,
initial_scale=2.0**15,
dynamic_growth_steps=2000,
**kwargs,
):
if not kwargs.pop("dynamic", True):
raise ValueError(
"LossScaleOptimizer no longer suports `dynamic=False`. "
"Instead, simply set `loss_scale_factor` directly on the "
"`inner_optimizer`."
)
super().__init__(learning_rate=0.0, **kwargs)
self.inner_optimizer = inner_optimizer
self.initial_scale = initial_scale
self.dynamic_growth_steps = dynamic_growth_steps
@tracking.no_automatic_dependency_tracking
def build(self, var_list):
self.step_counter = self.add_variable(
shape=(),
dtype="int",
initializer=initializers.Zeros(),
name="step_counter",
)
self.dynamic_scale = self.add_variable(
shape=(),
dtype="float32",
initializer=initializers.Constant(self.initial_scale),
name="dynamic_scale",
)
self.inner_optimizer.build(var_list)
self.built = True
@property
def variables(self):
return self._variables + self.inner_optimizer.variables
def stateless_apply(self, optimizer_variables, grads, trainable_variables):
if not self.built:
raise ValueError(
f"To call `stateless_apply`, {self.__class__.__name__} "
"must be built (i.e. its variables must have been created). "
"You can build it via `optimizer.build(trainable_variables)`."
)
finite = self.check_finite(grads)
return ops.cond(
finite,
lambda: self._stateless_handle_finite_grads(
optimizer_variables, grads, trainable_variables
),
lambda: self._stateless_handle_non_finite_grads(
optimizer_variables, trainable_variables
),
)
def _stateless_handle_finite_grads(
self, optimizer_variables, grads, trainable_variables
):
def upscale():
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign(0)
self.dynamic_scale.assign(self.dynamic_scale * 2.0)
return [scope.get_current_value(v) for v in self._variables]
def increment():
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign_add(1)
return [scope.get_current_value(v) for v in self._variables]
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping):
# Potentially upscale loss and reset counter.
own_variables = ops.cond(
ops.equal(self.step_counter, self.dynamic_growth_steps - 1),
upscale,
increment,
)
# Unscale gradients.
scale = self.dynamic_scale
unscaled_grads = [
g if g is None else ops.divide(g, scale) for g in grads
]
(
new_trainable_variables,
new_inner_variables,
) = self.inner_optimizer.stateless_apply(
self.inner_optimizer.variables,
unscaled_grads,
trainable_variables,
)
new_optimizer_variables = own_variables + new_inner_variables
return new_trainable_variables, new_optimizer_variables
def _stateless_handle_non_finite_grads(
self, optimizer_variables, trainable_variables
):
mapping = list(zip(self.variables, optimizer_variables))
with backend.StatelessScope(state_mapping=mapping) as scope:
self.step_counter.assign(0)
self.dynamic_scale.assign(self.dynamic_scale / 2.0)
new_optimizer_variables = []
for v in self.variables:
new_optimizer_variables.append(scope.get_current_value(v))
return trainable_variables, new_optimizer_variables
def apply(self, grads, trainable_variables=None):
# Optionally build optimizer.
if not self.built:
with backend.name_scope(self.name, caller=self):
self.build(trainable_variables)
self.built = True
if backend.backend() == "tensorflow":
self._tf_apply(grads, trainable_variables)
else:
self._common_apply(grads, trainable_variables)
def _stateful_handle_finite_grads(self, grads, trainable_variables):
scale = self.dynamic_scale
# Unscale gradients.
unscaled_grads = [
g if g is None else ops.divide(g, scale) for g in grads
]
self.inner_optimizer.apply(
unscaled_grads, trainable_variables=trainable_variables
)
def upscale():
self.step_counter.assign(0)
self.dynamic_scale.assign(self.dynamic_scale * 2.0)
def increment():
self.step_counter.assign_add(1)
# Potentially upscale loss and reset counter.
ops.cond(
ops.equal(self.step_counter, self.dynamic_growth_steps - 1),
upscale,
increment,
)
def _stateful_handle_non_finite_grads(self):
# If any inf or nan in grads, downscale loss and reset counter.
self.step_counter.assign(0)
self.dynamic_scale.assign(self.dynamic_scale / 2.0)
def _common_apply(self, grads, trainable_variables=None):
finite = self.check_finite(grads)
ops.cond(
finite,
lambda: self._stateful_handle_finite_grads(
grads, trainable_variables
),
self._stateful_handle_non_finite_grads,
)
def _tf_apply(self, grads, trainable_variables=None):
"""Tensorflow specific logic for apply, which handles distribution."""
from keras.utils.module_utils import tensorflow as tf
if tf.distribute.in_cross_replica_context():
raise ValueError("apply() must be called in a replica context.")
if tf.__internal__.distribute.strategy_supports_no_merge_call():
self._common_apply(grads, trainable_variables=trainable_variables)
else:
def _handle_cross_replica(distribution, grads, trainable_variables):
finite_per_replica = (
distribution.extended.call_for_each_replica(
self.check_finite, args=(grads,)
)
)
# Each replica computed the same `finite` value, since
# `grads` is all-reduced across replicas. Arbitrarily take
# `finite` from the first replica.
finite = distribution.experimental_local_results(
finite_per_replica
)[0]
def apply_fn():
distribution.extended.call_for_each_replica(
self._stateful_handle_finite_grads,
args=(grads, trainable_variables),
)
# Note: We must call this cond() in a cross-replica context.
# DistributionStrategy does not support having a cond in a
# replica context with a branch that calls `merge_call`, and
# self._optimizer.apply_gradients calls `merge_call`.
ops.cond(
finite, apply_fn, self._stateful_handle_non_finite_grads
)
tf.distribute.get_replica_context().merge_call(
_handle_cross_replica, args=(grads, trainable_variables)
)
def check_finite(self, grads):
tensor_grads = [g for g in grads if g is not None]
finite_grads = [ops.all(ops.isfinite(g)) for g in tensor_grads]
return ops.all(ops.convert_to_tensor(finite_grads))
@property
def learning_rate(self):
return self.inner_optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, learning_rate):
self.inner_optimizer.learning_rate = learning_rate
def scale_loss(self, loss):
scale = self.dynamic_scale if self.built else self.initial_scale
return loss * scale
def finalize_variable_values(self, var_list):
self.inner_optimizer.finalize_variable_values(var_list)
def get_config(self):
config = super().get_config()
inner_optimizer_config = serialization_lib.serialize_keras_object(
self.inner_optimizer
)
config.update(
{
"inner_optimizer": inner_optimizer_config,
"initial_scale": self.initial_scale,
"dynamic_growth_steps": self.dynamic_growth_steps,
}
)
del config["learning_rate"]
return config
@classmethod
def from_config(cls, config, custom_objects=None):
inner_optimizer = serialization_lib.deserialize_keras_object(
config.pop("inner_optimizer"),
custom_objects=custom_objects,
)
return cls(inner_optimizer, **config)
LossScaleOptimizer.__doc__ = LossScaleOptimizer.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| keras/keras/optimizers/loss_scale_optimizer.py/0 | {
"file_path": "keras/keras/optimizers/loss_scale_optimizer.py",
"repo_id": "keras",
"token_count": 5194
} | 164 |
from keras import backend
from keras.api_export import keras_export
@keras_export("keras.random.normal")
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Draw random samples from a normal (Gaussian) distribution.
Args:
shape: The shape of the random values to generate.
mean: Float, defaults to 0. Mean of the random values to generate.
stddev: Float, defaults to 1. Standard deviation of the random values
to generate.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`).
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
@keras_export("keras.random.categorical")
def categorical(logits, num_samples, dtype="int32", seed=None):
"""Draws samples from a categorical distribution.
This function takes as input `logits`, a 2-D input tensor with shape
(batch_size, num_classes). Each row of the input represents a categorical
distribution, with each column index containing the log-probability for a
given class.
The function will output a 2-D tensor with shape (batch_size, num_samples),
where each row contains samples from the corresponding row in `logits`.
Each column index contains an independent samples drawn from the input
distribution.
Args:
logits: 2-D Tensor with shape (batch_size, num_classes). Each row
should define a categorical distibution with the unnormalized
log-probabilities for all classes.
num_samples: Int, the number of independent samples to draw for each
row of the input. This will be the second dimension of the output
tensor's shape.
dtype: Optional dtype of the output tensor.
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
Returns:
A 2-D tensor with (batch_size, num_samples).
"""
logits_shape = list(backend.convert_to_tensor(logits).shape)
if len(logits_shape) != 2:
raise ValueError(
"`logits` should be a 2-D tensor with shape "
f"[batch_size, num_classes]. Received: logits={logits}"
)
return backend.random.categorical(
logits, num_samples, dtype=dtype, seed=seed
)
@keras_export("keras.random.uniform")
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Draw samples from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
`dtype` must be a floating point type, the default range is `[0, 1)`.
Args:
shape: The shape of the random values to generate.
minval: Float, defaults to 0. Lower bound of the range of
random values to generate (inclusive).
maxval: Float, defaults to 1. Upper bound of the range of
random values to generate (exclusive).
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`)
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
if dtype and not backend.is_float_dtype(dtype):
raise ValueError(
"`keras.random.uniform` requires a floating point `dtype`. "
f"Received: dtype={dtype} "
)
return backend.random.uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
)
@keras_export("keras.random.randint")
def randint(shape, minval, maxval, dtype="int32", seed=None):
"""Draw random integers from a uniform distribution.
The generated values follow a uniform distribution in the range
`[minval, maxval)`. The lower bound `minval` is included in the range,
while the upper bound `maxval` is excluded.
`dtype` must be an integer type.
Args:
shape: The shape of the random values to generate.
minval: Float, defaults to 0. Lower bound of the range of
random values to generate (inclusive).
maxval: Float, defaults to 1. Upper bound of the range of
random values to generate (exclusive).
dtype: Optional dtype of the tensor. Only integer types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`)
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
if dtype and not backend.is_int_dtype(dtype):
raise ValueError(
"`keras.random.randint` requires an integer `dtype`. "
f"Received: dtype={dtype} "
)
return backend.random.randint(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
)
@keras_export("keras.random.truncated_normal")
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Draw samples from a truncated normal distribution.
The values are drawn from a normal distribution with specified mean and
standard deviation, discarding and re-drawing any samples that are more
than two standard deviations from the mean.
Args:
shape: The shape of the random values to generate.
mean: Float, defaults to 0. Mean of the random values to generate.
stddev: Float, defaults to 1. Standard deviation of the random values
to generate.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`)
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.truncated_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
)
@keras_export("keras.random.dropout")
def dropout(inputs, rate, noise_shape=None, seed=None):
return backend.random.dropout(
inputs, rate, noise_shape=noise_shape, seed=seed
)
@keras_export("keras.random.shuffle")
def shuffle(x, axis=0, seed=None):
"""Shuffle the elements of a tensor uniformly at random along an axis.
Args:
x: The tensor to be shuffled.
axis: An integer specifying the axis along which to shuffle. Defaults to
`0`.
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.shuffle(x, axis=axis, seed=seed)
@keras_export("keras.random.gamma")
def gamma(shape, alpha, dtype=None, seed=None):
"""Draw random samples from the Gamma distribution.
Args:
shape: The shape of the random values to generate.
alpha: Float, the parameter of the distribution.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`).
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.gamma(shape, alpha=alpha, dtype=dtype, seed=seed)
def binomial(shape, counts, probabilities, dtype=None, seed=None):
"""Draw samples from a Binomial distribution.
The values are drawn from a Binomial distribution with
specified trial count and probability of success.
Args:
shape: The shape of the random values to generate.
counts: A number or array of numbers representing the
number of trials. It must be broadcastable with `probabilities`.
probabilities: A float or array of floats representing the
probability of success of an individual event.
It must be broadcastable with `counts`.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`).
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.binomial(
shape,
counts=counts,
probabilities=probabilities,
dtype=dtype,
seed=seed,
)
def beta(shape, alpha, beta, dtype=None, seed=None):
"""Draw samples from a Beta distribution.
The values are drawm from a Beta distribution parametrized
by alpha and beta.
Args:
shape: The shape of the random values to generate.
alpha: Float or an array of floats representing the first
parameter alpha. Must be broadcastable with `beta` and `shape`.
beta: Float or an array of floats representing the second
parameter beta. Must be broadcastable with `alpha` and `shape`.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `keras.config.floatx()` is used,
which defaults to `float32` unless you configured it otherwise (via
`keras.config.set_floatx(float_dtype)`).
seed: A Python integer or instance of
`keras.random.SeedGenerator`.
Used to make the behavior of the initializer
deterministic. Note that an initializer seeded with an integer
or None (unseeded) will produce the same random values
across multiple calls. To get different random values
across multiple calls, use as seed an instance
of `keras.random.SeedGenerator`.
"""
return backend.random.beta(
shape=shape, alpha=alpha, beta=beta, dtype=dtype, seed=seed
)
| keras/keras/random/random.py/0 | {
"file_path": "keras/keras/random/random.py",
"repo_id": "keras",
"token_count": 5009
} | 165 |
from keras.testing.test_case import TestCase
| keras/keras/testing/__init__.py/0 | {
"file_path": "keras/keras/testing/__init__.py",
"repo_id": "keras",
"token_count": 13
} | 166 |
import tree
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.data_adapters.data_adapter import DataAdapter
class TFDatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
def __init__(self, dataset, class_weight=None, distribution=None):
"""Iniitialize the TFDatasetAdapter.
Args:
dataset: The input `tf.data.Dataset` instance.
class_weight: A map where the keys are integer class ids and values
are the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`.
distribution: A `keras.distribution.Distribution` instance. Used to
shard the input dataset into per worker/process dataset
instance.
"""
from keras.utils.module_utils import tensorflow as tf
if not isinstance(
dataset, (tf.data.Dataset, tf.distribute.DistributedDataset)
):
raise ValueError(
"Expected argument `dataset` to be a tf.data.Dataset. "
f"Received: {dataset}"
)
if class_weight is not None:
dataset = dataset.map(
make_class_weight_map_fn(class_weight)
).prefetch(tf.data.AUTOTUNE)
if distribution is not None:
dataset = distribution.distribute_dataset(dataset)
self._dataset = dataset
def get_numpy_iterator(self):
from keras.backend.tensorflow.core import convert_to_numpy
for batch in self._dataset:
yield tree.map_structure(convert_to_numpy, batch)
def get_jax_iterator(self):
import jax.experimental.sparse as jax_sparse
from keras.backend.jax.core import convert_to_tensor
from keras.backend.tensorflow.core import convert_to_numpy
from keras.utils.module_utils import tensorflow as tf
def convert_to_jax(x):
# We use numpy as an intermediary because the conversion
# tf -> numpy -> jax is more than 2x faster than tf -> jax.
if isinstance(x, tf.SparseTensor):
values = convert_to_numpy(x.values)
indices = convert_to_numpy(x.indices)
return jax_sparse.BCOO((values, indices), shape=x.shape)
return convert_to_tensor(convert_to_numpy(x))
for batch in self._dataset:
yield tree.map_structure(convert_to_jax, batch)
def get_tf_dataset(self):
return self._dataset
def get_torch_dataloader(self):
return data_adapter_utils.get_torch_dataloader(self._dataset)
@property
def num_batches(self):
cardinality = self._dataset.cardinality
if callable(cardinality):
# `dataset.cardinality` is normally expected to be a callable.
cardinality = int(self._dataset.cardinality())
else:
# However, in the case of `DistributedDataset`, it's a np.int64.
cardinality = int(cardinality)
# Return None for Unknown and Infinite cardinality datasets
if cardinality < 0:
return None
return cardinality
@property
def batch_size(self):
first_element_spec = tree.flatten(self._dataset.element_spec)[0]
return first_element_spec.shape[0]
@property
def has_partial_batch(self):
return None
@property
def partial_batch_size(self):
return None
def make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
from keras.utils.module_utils import tensorflow as tf
class_weight_tensor = tf.convert_to_tensor(
[
class_weight.get(int(c), 1.0)
for c in range(max(class_weight.keys()) + 1)
]
)
def class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = data_adapter_utils.unpack_x_y_sample_weight(data)
if sw is not None:
raise ValueError(
"You cannot `class_weight` and `sample_weight` "
"at the same time."
)
if tree.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single "
"output."
)
if y.shape.rank >= 2:
y_classes = tf.__internal__.smart_cond.smart_cond(
tf.shape(y)[-1] > 1,
lambda: tf.argmax(y, axis=-1),
lambda: tf.cast(tf.round(tf.squeeze(y, axis=-1)), tf.int32),
)
else:
# Special casing for rank 1, where we can guarantee sparse encoding.
y_classes = tf.cast(tf.round(y), tf.int32)
cw = tf.gather(class_weight_tensor, y_classes)
return x, y, cw
return class_weights_map_fn
| keras/keras/trainers/data_adapters/tf_dataset_adapter.py/0 | {
"file_path": "keras/keras/trainers/data_adapters/tf_dataset_adapter.py",
"repo_id": "keras",
"token_count": 2407
} | 167 |
import numpy as np
from keras.testing import test_case
from keras.utils.dataset_utils import split_dataset
from keras.utils.module_utils import tensorflow as tf
class DatasetUtilsTest(test_case.TestCase):
def test_split_dataset_list(self):
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = [
np.random.sample((n_sample, n_cols)),
np.random.sample((n_sample, n_pred)),
]
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = [
np.random.sample((n_sample, 100, n_cols)),
np.random.sample((n_sample, n_pred)),
]
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (100, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = [
np.random.sample((n_sample, 10, 10, n_cols)),
np.random.sample((n_sample, n_pred)),
]
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (10, 10, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = [
np.random.sample((n_sample, 100, 10, 30, n_cols)),
np.random.sample((n_sample, n_pred)),
]
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape,
(100, 10, 30, n_cols),
)
def test_split_dataset_tuple(self):
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = (
np.random.sample((n_sample, n_cols)),
np.random.sample((n_sample, n_pred)),
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = (
np.random.sample((n_sample, 100, n_cols)),
np.random.sample((n_sample, n_pred)),
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (100, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = (
np.random.sample((n_sample, 10, 10, n_cols)),
np.random.sample((n_sample, n_pred)),
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (10, 10, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
dataset = (
np.random.sample((n_sample, 100, 10, 30, n_cols)),
np.random.sample((n_sample, n_pred)),
)
dataset_left, dataset_right = split_dataset(
dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape,
(100, 10, 30, n_cols),
)
def test_split_dataset_tensorflow(self):
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, n_cols)),
np.random.sample((n_sample, n_pred)),
)
tf_dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset_left, dataset_right = split_dataset(
tf_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 100, n_cols)),
np.random.sample((n_sample, n_pred)),
)
tf_dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset_left, dataset_right = split_dataset(
tf_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (100, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 10, 10, n_cols)),
np.random.sample((n_sample, n_pred)),
)
tf_dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset_left, dataset_right = split_dataset(
tf_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (10, 10, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 100, 10, 30, n_cols)),
np.random.sample((n_sample, n_pred)),
)
tf_dataset = tf.data.Dataset.from_tensor_slices((features, labels))
dataset_left, dataset_right = split_dataset(
tf_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
int(dataset_left.cardinality()), int(n_sample * left_size)
)
self.assertEqual(
int(dataset_right.cardinality()), int(n_sample * right_size)
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape,
(100, 10, 30, n_cols),
)
def test_split_dataset_torch(self):
# sample torch dataset class
from torch.utils.data import Dataset as torchDataset
class Dataset(torchDataset):
"Characterizes a dataset for PyTorch"
def __init__(self, x, y):
"Initialization"
self.x = x
self.y = y
def __len__(self):
"Denotes the total number of samples"
return len(self.x)
def __getitem__(self, index):
"Generates one sample of data"
return self.x[index], self.y[index]
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, n_cols)),
np.random.sample((n_sample, n_pred)),
)
torch_dataset = Dataset(features, labels)
dataset_left, dataset_right = split_dataset(
torch_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
len([sample for sample in dataset_left]), int(n_sample * left_size)
)
self.assertEqual(
len([sample for sample in dataset_right]),
int(n_sample * right_size),
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (n_cols,)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 100, n_cols)),
np.random.sample((n_sample, n_pred)),
)
torch_dataset = Dataset(features, labels)
dataset_left, dataset_right = split_dataset(
torch_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
len([sample for sample in dataset_left]), int(n_sample * left_size)
)
self.assertEqual(
len([sample for sample in dataset_right]),
int(n_sample * right_size),
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (100, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 10, 10, n_cols)),
np.random.sample((n_sample, n_pred)),
)
torch_dataset = Dataset(features, labels)
dataset_left, dataset_right = split_dataset(
torch_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
len([sample for sample in dataset_left]), int(n_sample * left_size)
)
self.assertEqual(
len([sample for sample in dataset_right]),
int(n_sample * right_size),
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape, (10, 10, n_cols)
)
n_sample, n_cols, n_pred, left_size, right_size = 100, 2, 1, 0.2, 0.8
features, labels = (
np.random.sample((n_sample, 100, 10, 30, n_cols)),
np.random.sample((n_sample, n_pred)),
)
torch_dataset = Dataset(features, labels)
dataset_left, dataset_right = split_dataset(
torch_dataset, left_size=left_size, right_size=right_size
)
self.assertEqual(
len([sample for sample in dataset_left]), int(n_sample * left_size)
)
self.assertEqual(
len([sample for sample in dataset_right]),
int(n_sample * right_size),
)
self.assertEqual(
[sample for sample in dataset_right][0][0].shape,
(100, 10, 30, n_cols),
)
| keras/keras/utils/dataset_utils_test.py/0 | {
"file_path": "keras/keras/utils/dataset_utils_test.py",
"repo_id": "keras",
"token_count": 6440
} | 168 |
import numpy as np
from keras import backend
from keras.api_export import keras_export
@keras_export("keras.utils.normalize")
def normalize(x, axis=-1, order=2):
"""Normalizes an array.
If the input is a NumPy array, a NumPy array will be returned.
If it's a backend tensor, a backend tensor will be returned.
Args:
x: Array to normalize.
axis: axis along which to normalize.
order: Normalization order (e.g. `order=2` for L2 norm).
Returns:
A normalized copy of the array.
"""
from keras import ops
if isinstance(x, np.ndarray):
# NumPy input
norm = np.atleast_1d(np.linalg.norm(x, order, axis))
norm[norm == 0] = 1
# axis cannot be `None`
axis = axis or -1
return x / np.expand_dims(norm, axis)
# Backend tensor input
return ops.nn.normalize(x, axis=axis, order=order)
@keras_export("keras.utils.to_categorical")
def to_categorical(x, num_classes=None):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with `categorical_crossentropy`.
Args:
x: Array-like with class values to be converted into a matrix
(integers from 0 to `num_classes - 1`).
num_classes: Total number of classes. If `None`, this would be inferred
as `max(x) + 1`. Defaults to `None`.
Returns:
A binary matrix representation of the input as a NumPy array. The class
axis is placed last.
Example:
>>> a = keras.utils.to_categorical([0, 1, 2, 3], num_classes=4)
>>> print(a)
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
>>> b = np.array([.9, .04, .03, .03,
... .3, .45, .15, .13,
... .04, .01, .94, .05,
... .12, .21, .5, .17],
... shape=[4, 4])
>>> loss = keras.ops.categorical_crossentropy(a, b)
>>> print(np.around(loss, 5))
[0.10536 0.82807 0.1011 1.77196]
>>> loss = keras.ops.categorical_crossentropy(a, a)
>>> print(np.around(loss, 5))
[0. 0. 0. 0.]
"""
if backend.is_tensor(x):
return backend.nn.one_hot(x, num_classes)
x = np.array(x, dtype="int64")
input_shape = x.shape
# Shrink the last dimension if the shape is (..., 1).
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
x = x.reshape(-1)
if not num_classes:
num_classes = np.max(x) + 1
batch_size = x.shape[0]
categorical = np.zeros((batch_size, num_classes))
categorical[np.arange(batch_size), x] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
def encode_categorical_inputs(
inputs,
output_mode,
depth,
dtype="float32",
backend_module=None,
):
"""Encodes categorical inputs according to output_mode."""
backend_module = backend_module or backend
if output_mode == "int":
return backend_module.cast(inputs, dtype=dtype)
binary_output = output_mode in ("multi_hot", "one_hot")
original_shape = backend_module.shape(inputs)
rank_of_inputs = len(original_shape)
# In all cases, we should uprank scalar input to a single sample.
if rank_of_inputs == 0:
# We need to update `rank_of_inputs`
# If necessary.
inputs = backend_module.numpy.expand_dims(inputs, -1)
elif rank_of_inputs > 2:
# The `count` mode does not support inputs with a rank greater than 2.
if not binary_output:
raise ValueError(
"When output_mode is anything other than "
"`'multi_hot', 'one_hot', or 'int'`, "
"the rank must be 2 or less. "
f"Received output_mode: {output_mode} "
f"and input shape: {original_shape}, "
f"which would result in output rank {rank_of_inputs}."
)
if binary_output:
if output_mode == "one_hot":
bincounts = backend_module.nn.one_hot(inputs, depth)
elif output_mode == "multi_hot":
one_hot_input = backend_module.nn.one_hot(inputs, depth)
bincounts = backend_module.numpy.where(
backend_module.numpy.any(one_hot_input, axis=-2), 1, 0
)
else:
bincounts = backend_module.numpy.bincount(
inputs,
minlength=depth,
)
bincounts = backend_module.cast(bincounts, dtype)
return bincounts
| keras/keras/utils/numerical_utils.py/0 | {
"file_path": "keras/keras/utils/numerical_utils.py",
"repo_id": "keras",
"token_count": 2055
} | 169 |
import numpy as np
from keras import testing
from keras.utils import timeseries_dataset_utils
class TimeseriesDatasetTest(testing.TestCase):
def test_basics(self):
# Test ordering, targets, sequence length, batch size
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5
)
# Expect 19 batches
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 18:
self.assertEqual(inputs.shape, (5, 9))
if i == 18:
# Last batch: size 2
self.assertEqual(inputs.shape, (2, 9))
# Check target values
self.assertAllClose(targets, inputs[:, 0] * 2)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
self.assertAllClose(
inputs[j], np.arange(i * 5 + j, i * 5 + j + 9)
)
def test_timeseries_regression(self):
# Test simple timeseries regression use case
data = np.arange(10)
offset = 3
targets = data[offset:]
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=offset, batch_size=1
)
i = 0
for batch in dataset:
self.assertLen(batch, 2)
inputs, targets = batch
self.assertEqual(inputs.shape, (1, 3))
# Check values
self.assertAllClose(targets[0], data[offset + i])
self.assertAllClose(inputs[0], data[i : i + offset])
i += 1
self.assertEqual(i, 7) # Expect 7 batches
def test_no_targets(self):
data = np.arange(50)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=10, batch_size=5
)
# Expect 9 batches
i = None
for i, batch in enumerate(dataset):
if i < 8:
self.assertEqual(batch.shape, (5, 10))
elif i == 8:
self.assertEqual(batch.shape, (1, 10))
for j in range(min(5, len(batch))):
# Check each sample in the batch
self.assertAllClose(
batch[j], np.arange(i * 5 + j, i * 5 + j + 10)
)
self.assertEqual(i, 8)
def test_shuffle(self):
# Test cross-epoch random order and seed determinism
data = np.arange(10)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
first_seq = None
for x, y in dataset.take(1):
self.assertNotAllClose(x, np.arange(0, 5))
self.assertAllClose(x[:, 0] * 2, y)
first_seq = x
# Check that a new iteration with the same dataset yields different
# results
for x, _ in dataset.take(1):
self.assertNotAllClose(x, first_seq)
# Check determism with same seed
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
for x, _ in dataset.take(1):
self.assertAllClose(x, first_seq)
def test_sampling_rate(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sampling_rate=2
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 16:
self.assertEqual(inputs.shape, (5, 9))
if i == 16:
# Last batch: size 4
self.assertEqual(inputs.shape, (4, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 + j
end_index = start_index + 9 * 2
self.assertAllClose(
inputs[j], np.arange(start_index, end_index, 2)
)
def test_sequence_stride(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sequence_stride=3
)
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 6:
self.assertEqual(inputs.shape, (5, 9))
if i == 6:
# Last batch: size 1
self.assertEqual(inputs.shape, (1, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 * 3 + j * 3
end_index = start_index + 9
self.assertAllClose(
inputs[j], np.arange(start_index, end_index)
)
def test_start_and_end_index(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
None,
sequence_length=9,
batch_size=5,
sequence_stride=3,
sampling_rate=2,
start_index=10,
end_index=90,
)
for batch in dataset:
self.assertLess(np.max(batch[0]), 90)
self.assertGreater(np.min(batch[0]), 9)
def test_errors(self):
# bad start index
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=-1
)
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=11
)
# bad end index
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=-1
)
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=11
)
# bad sampling_rate
with self.assertRaisesRegex(ValueError, "`sampling_rate` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sampling_rate=0
)
# bad sequence stride
with self.assertRaisesRegex(ValueError, "`sequence_stride` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sequence_stride=0
)
def test_not_batched(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=9, batch_size=None, shuffle=True
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 1)
| keras/keras/utils/timeseries_dataset_utils_test.py/0 | {
"file_path": "keras/keras/utils/timeseries_dataset_utils_test.py",
"repo_id": "keras",
"token_count": 4015
} | 170 |
#!/bin/bash
set -Eeuo pipefail
base_dir=$(dirname $(dirname $0))
isort --sp "${base_dir}/pyproject.toml" .
black --config "${base_dir}/pyproject.toml" .
flake8 --config "${base_dir}/setup.cfg" .
| keras/shell/format.sh/0 | {
"file_path": "keras/shell/format.sh",
"repo_id": "keras",
"token_count": 86
} | 171 |
## How to contribute code
Follow these steps to submit your code contribution.
### Step 1. Open an issue
Before making any changes, we recommend opening an issue (if one doesn't already
exist) and discussing your proposed changes. This way, we can give you feedback
and validate the proposed changes.
If the changes are minor (simple bug fix or documentation fix), then feel free
to open a PR without discussion.
### Step 2. Make code changes
To make code changes, you need to fork the repository. You will need to setup a
development environment and run the unit tests. This is covered in the section
"Setup environment".
### Step 3. Create a pull request
Once the change is ready, open a pull request from your branch in your fork to
the master branch in [keras-team/keras](https://github.com/keras-team/keras).
### Step 4. Sign the Contributor License Agreement
After creating the pull request, the `google-cla` bot will comment on your pull
request with instructions on signing the Contributor License Agreement (CLA) if
you haven't done so. Please follow the instructions to sign the CLA. A `cla:yes`
tag is then added to the pull request.

### Step 5. Code review
A reviewer will review the pull request and provide comments. The reviewer may
add a `kokoro:force-run` label to trigger the continuous integration tests.

If the tests fail, look into the error messages and try to fix them.

There may be
several rounds of comments and code changes before the pull request gets
approved by the reviewer.

### Step 6. Merging
Once the pull request is approved, a `ready to pull` tag will be added to the
pull request. A team member will take care of the merging.

Here is an [example pull request](https://github.com/keras-team/tf-keras/pull/15015)
for your reference.
## Setup environment
To setup the development environment, We provide two options. One is to use our
Dockerfile, which builds into a container the required dev tools. Another one is
to setup a local environment by installing the dev tools needed.
### Option 1: Use a Docker container
We provide a
[Dockerfile](https://github.com/keras-team/tf-keras/blob/master/.devcontainer/Dockerfile)
to build the dev environment. You can build the Dockerfile into a Docker image
named `keras-dev` with the following command at the root directory of your
cloned repo.
```shell
docker build -t keras-dev .devcontainer
```
You can launch a Docker container from the image with the following command. The
`-it` option gives you an interactive shell of the container. The `-v
path/to/repo/:/home/tf_keras/` mounts your cloned repo to the container. Replace
`path/to/repo` with the path to your cloned repo directory.
```shell
docker run -it -v path/to/repo/:/home/tf_keras/ keras-dev
```
In the container shell, you need to install the latest dependencies with the
following command.
```shell
pip install -r /home/tf_keras/requirements.txt && pip uninstall keras-nightly -y
```
Now, the environment setup is complete. You are ready to run the tests.
You may modify the Dockerfile to your specific needs, like installing your own
dev tools. You may also mount more volumes with the `-v` option, like your SSH
credentials.
Many popular editors today support developing in a container. Here is the list of
[supported editors](https://discuss.tensorflow.org/t/setup-your-favorite-editor-to-develop-keras)
with setup instructions.
### Option 2: Setup a local environment
To setup your local dev environment, you will need the following tools.
1. [Bazel](https://bazel.build/) is the tool to build and test TF-Keras. See the
[installation guide](https://docs.bazel.build/versions/4.0.0/install.html)
for how to install and config bazel for your local environment.
2. [git](https://github.com/) for code repository management.
3. [python](https://www.python.org/) to build and code in TF-Keras.
The following commands check the tools above are successfully installed. Note
that TF-Keras requires at least Python 3.7 to run.
```shell
bazel --version
git --version
python --version
```
A [Python virtual environment](https://docs.python.org/3/tutorial/venv.html)
(venv) is a powerful tool to create a self-contained environment that isolates
any change from the system level config. It is highly recommended to avoid any
unexpected dependency or version issues.
With the following commands, you create a new venv, named `venv_dir`.
```shell
mkdir venv_dir
python3 -m venv venv_dir
```
You can activate the venv with the following command. You should always run the
tests with the venv activated. You need to activate the venv every time you open
a new shell.
```shell
source venv_dir/bin/activate # for Linux or MacOS
venv_dir\Scripts\activate.bat # for Windows
```
Clone your forked repo to your local machine. Go to the cloned directory to
install the dependencies into the venv. Since `tf-nightly` uses `keras-nightly`
as a dependency, we need to uninstall `keras-nightly` so that tests will run
against TF-Keras code in the local workspace.
```shell
git clone https://github.com/YOUR_GITHUB_USERNAME/keras.git
cd keras
pip install -r requirements.txt
pip uninstall keras-nightly
```
The environment setup is completed. You may need to update the `tf-nightly`
version regularly to keep your environment up-to-date with the following
command.
```shell
pip install --upgrade tf-nightly
```
## Code style
The TF-Keras uses [Black](https://black.readthedocs.io/en/stable/) and
[isort](https://pycqa.github.io/isort/) to format the code. Please refer to
[requirements.txt](https://github.com/keras-team/tf-keras/blob/master/requirements.txt)
for the required versions. Run the following command **at the root directory of
the repo** to format your code.
```
sh shell/format.sh
```
It will also display the errors that cannot be resolved by autoformatting. You
need to follow the output of the command to resolve them manually.
If you do not want to auto format the code but only show the lint errors, you
can run `sh shell/lint.sh` **at the root directory of the repo**.
### Docstrings
We do not have an automated way to check docstring style, so if you write
or edit any docstring, please make sure to check them manually.
Keras docstrings follow the conventions below:
A **class docstring** may contain the following items:
* A one-line description of the class.
* Paragraph(s) of more detailed information.
* Optional `Examples` section.
* `Args` section for arguments in `__init__()`.
* If it's a layer:
* `Call arguments` section for arguments in `Layer.call()`.
* `Returns` section for the return values of `Layer.call()`.
* Optional `Raises` section for possible errors.
You can check out `MultiHeadAttention` as an example
[(link)](https://github.com/keras-team/tf-keras/blob/v2.12.0-rc1/tf_keras/layers/attention/multi_head_attention.py#L131).
A **function docstring** may contain the following items:
* One-line description of the function.
* Paragraph(s) of more detailed information.
* Optional `Examples` section.
* `Args` section for the function arguments.
* `Returns` section for the return values.
* Optional `Raises` section for possible errors.
You can check out `text_dataset_from_directory` as an example
[(link)](https://github.com/keras-team/tf-keras/blob/v2.12.0-rc1/tf_keras/utils/text_dataset.py#L31).
## Run tests
We use [Bazel](https://bazel.build/) to build and run the tests.
### Run a test file
For example, to run the tests in `keras/engine/base_layer_test.py`,
we can run the following command at the root directory of the repo.
```shell
bazel test keras/engine:base_layer_test
```
`keras/engine` is the relative path to the directory containing the `BUILD` file
defining the test. `base_layer_test` is the test target name defined with
`tf_py_test` in the `BUILD` file.
### Run a single test case
To run a single test, you can use `--test_filter=<your_regex>`
to use the regular expression to match the test you want to run. For example, you
can use the following command to run all the tests in `activations_test.py`,
whose names contain `test_serialization`.
```
bazel test keras:activations_test --test_filter=*test_serialization*
```
### Run all tests
You can run all the tests locally by running the following command in the repo
root directory.
```
bazel test --test_timeout 300,450,1200,3600 --test_output=errors --keep_going --define=use_fast_cpp_protos=false --build_tests_only --build_tag_filters=-no_oss,-oss_excluded --test_tag_filters=-no_oss,-oss_excluded keras/...
```
### Useful configs
Here we provide a list of useful configs you can use with Bazel.
```shell
bazel test [CONFIGS] [YOUR_TEST]
```
To use these configs, just replace `[CONFIGS]` with the actual config in the
command above.
* `-c opt` enables the optimizations during the build.
* `--test_sharding_strategy=disabled` disables the sharding so that all the
test outputs are in one file.
However, it may slow down the tests for not running in parallel
and may cause the test to timeout.
## Contributing to TF-Keras applications
Contributions to the
[pre-trained application library](https://keras.io/api/applications/) are
welcome. Code for TF-Keras applications is located in TF-Keras repository in
[keras/applications](https://github.com/keras-team/tf-keras/blob/master/tf_keras/applications).
When contributing to TF-Keras applications, please keep following checklist in
mind.
- TF-Keras applications must implement an established and widely used model.
Applications should include a link to a paper describing the architecture of
the model with at least 20 citations.
- Applications should be provided with pre-trained weights.
- When submitting a pull request for a TF-Keras application, these weights
can be provided at any publically available URL (e.g. a personal Cloud
Storage bucket). The weights will be uploaded to a TF-Keras storage bucket
while merging the pull request.
- Weights should be downloaded with the
[get_file()](https://keras.io/api/utils/python_utils/#getfile-function)
utility function. Be sure to include the `file_hash` argument, which
allows cache invalidation on the downloaded weights. The command line
programs `shasum` and `sha256sum` can compute a file hash.
- You should help us verify that the accuracy of the model with pre-trained
weighted matches the reported results of the cited paper.
- You should add any new applications to the unit tests defined in
`applications_test.py` and `applications_load_weight_test.py`.
- For backwards compatibility, all applications should provide a
`preprocess_input()` function. For new applications, you should leave the
function empty (pass through inputs unaltered), and write the model so it
can handle raw inputs directly. Adding
[preprocessing layers](https://keras.io/guides/preprocessing_layers/) to the
application model may help with this. For image applications, a
[Rescaling](https://keras.io/api/layers/preprocessing_layers/image_preprocessing/rescaling/)
layer at the beginning of the model is often all that is needed.
- Once the PR is approved, you should create a companion PR to the keras.io
[application page](https://keras.io/api/applications/) updating the
"Available Models" section. The contribution guide for keras.io can be found
[here](https://github.com/keras-team/keras-io/blob/master/contributor_guide.md).
- As every PR requires several CPU/GPU hours of CI testing, we discourage
submitting PRs to fix one typo, one warning,etc. We recommend fixing the
same issue at the file level at least (e.g.: fix all typos in a file, fix
all compiler warnings in a file, etc.)
| tf-keras/CONTRIBUTING.md/0 | {
"file_path": "tf-keras/CONTRIBUTING.md",
"repo_id": "tf-keras",
"token_count": 3541
} | 172 |
# Description:
# Package for TF-Keras.
# Placeholder: load unaliased py_binary
# Placeholder: load unaliased py_library
load("//tf_keras/api:api_gen.bzl", "generate_apis")
load("//tf_keras/api:api_init_files.bzl", "KERAS_API_INIT_FILES", "KERAS_API_INIT_FILES_V1")
package(
# copybara:uncomment default_applicable_licenses = ["//tf_keras:license"],
default_visibility = [
"//tf_keras:friends",
"//third_party/py/tensorflow:__subpackages__",
],
licenses = ["notice"], # Apache 2.0 License
)
# The target used by PIP package which need to generate API init files during OSS build.
py_library(
name = "tf_keras_api",
srcs = [
":tf_keras_python_api_gen",
":tf_keras_python_api_gen_compat_v1",
":tf_keras_python_api_gen_compat_v2",
],
srcs_version = "PY3",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
],
)
py_binary(
name = "extractor_wrapper",
srcs = ["extractor_wrapper.py"],
visibility = ["//visibility:public"],
deps = [
"//:expect_absl_installed", # absl:app
# "//third_party/tensorflow/python/tools/api/generator2/extractor",
],
)
py_binary(
name = "generator_wrapper",
srcs = ["generator_wrapper.py"],
visibility = ["//visibility:public"],
deps = [
"//:expect_absl_installed", # absl:app
# "//third_party/tensorflow/python/tools/api/generator2/generator",
],
)
generate_apis(
name = "tf_keras_python_api_gen",
api_version = 1,
output_files = KERAS_API_INIT_FILES_V1,
output_package = "tf_keras.api",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
# "//third_party/tensorflow/lite/python:analyzer",
# "//third_party/tensorflow/lite/python:lite",
# "//third_party/tensorflow/lite/python/authoring",
],
)
generate_apis(
name = "tf_keras_python_api_gen_compat_v1",
api_version = 1,
output_dir = "_v1/",
output_files = KERAS_API_INIT_FILES_V1,
output_package = "tf_keras.api._v1",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
# "//third_party/tensorflow/lite/python:analyzer",
# "//third_party/tensorflow/lite/python:lite",
# "//third_party/tensorflow/lite/python/authoring",
],
)
generate_apis(
name = "tf_keras_python_api_gen_compat_v2",
api_version = 2,
output_dir = "_v2/",
output_files = KERAS_API_INIT_FILES,
output_package = "tf_keras.api._v2",
deps = [
"//:expect_tensorflow_installed",
"//tf_keras",
# "//third_party/tensorflow/lite/python:analyzer",
# "//third_party/tensorflow/lite/python:lite",
# "//third_party/tensorflow/lite/python/authoring",
],
)
| tf-keras/tf_keras/api/BUILD/0 | {
"file_path": "tf-keras/tf_keras/api/BUILD",
"repo_id": "tf-keras",
"token_count": 1294
} | 173 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for ImageNet data preprocessing & prediction decoding."""
import json
import warnings
import numpy as np
from tf_keras import activations
from tf_keras import backend
from tf_keras.utils import data_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
CLASS_INDEX = None
CLASS_INDEX_PATH = (
"https://storage.googleapis.com/download.tensorflow.org/"
"data/imagenet_class_index.json"
)
PREPROCESS_INPUT_DOC = """
Preprocesses a tensor or Numpy array encoding a batch of images.
Usage example with `applications.MobileNet`:
```python
i = tf.keras.layers.Input([None, None, 3], dtype = tf.uint8)
x = tf.cast(i, tf.float32)
x = tf.keras.applications.mobilenet.preprocess_input(x)
core = tf.keras.applications.MobileNet()
x = core(x)
model = tf.keras.Model(inputs=[i], outputs=[x])
image = tf.image.decode_png(tf.io.read_file('file.png'))
result = model(image)
```
Args:
x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
channels, with values in the range [0, 255].
The preprocessed data are written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Optional data format of the image tensor/array. None, means
the global setting `tf.keras.backend.image_data_format()` is used
(unless you changed it, it uses "channels_last").{mode}
Defaults to `None`.
Returns:
Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
{ret}
Raises:
{error}
"""
PREPROCESS_INPUT_MODE_DOC = """
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Defaults to "caffe".
"""
PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
ValueError: In case of unknown `mode` or `data_format` argument."""
PREPROCESS_INPUT_ERROR_DOC = """
ValueError: In case of unknown `data_format` argument."""
PREPROCESS_INPUT_RET_DOC_TF = """
The inputs pixel values are scaled between -1 and 1, sample-wise."""
PREPROCESS_INPUT_RET_DOC_TORCH = """
The input pixels values are scaled between 0 and 1 and each channel is
normalized with respect to the ImageNet dataset."""
PREPROCESS_INPUT_RET_DOC_CAFFE = """
The images are converted from RGB to BGR, then each color channel is
zero-centered with respect to the ImageNet dataset, without scaling."""
@keras_export("keras.applications.imagenet_utils.preprocess_input")
def preprocess_input(x, data_format=None, mode="caffe"):
"""Preprocesses a tensor or Numpy array encoding a batch of images."""
if mode not in {"caffe", "tf", "torch"}:
raise ValueError(
"Expected mode to be one of `caffe`, `tf` or `torch`. "
f"Received: mode={mode}"
)
if data_format is None:
data_format = backend.image_data_format()
elif data_format not in {"channels_first", "channels_last"}:
raise ValueError(
"Expected data_format to be one of `channels_first` or "
f"`channels_last`. Received: data_format={data_format}"
)
if isinstance(x, np.ndarray):
return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
else:
return _preprocess_symbolic_input(x, data_format=data_format, mode=mode)
preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
mode=PREPROCESS_INPUT_MODE_DOC,
ret="",
error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC,
)
@keras_export("keras.applications.imagenet_utils.decode_predictions")
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
Args:
preds: Numpy array encoding a batch of predictions.
top: Integer, how many top-guesses to return. Defaults to 5.
Returns:
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises:
ValueError: In case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError(
"`decode_predictions` expects "
"a batch of predictions "
"(i.e. a 2D array of shape (samples, 1000)). "
"Found array with shape: " + str(preds.shape)
)
if CLASS_INDEX is None:
fpath = data_utils.get_file(
"imagenet_class_index.json",
CLASS_INDEX_PATH,
cache_subdir="models",
file_hash="c2c37ea517e94d9795004a39431a14cb",
)
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
def _preprocess_numpy_input(x, data_format, mode):
"""Preprocesses a Numpy array encoding a batch of images.
Args:
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed Numpy array.
"""
if not issubclass(x.dtype.type, np.floating):
x = x.astype(backend.floatx(), copy=False)
if mode == "tf":
x /= 127.5
x -= 1.0
return x
elif mode == "torch":
x /= 255.0
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == "channels_first":
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
# Zero-center by mean pixel
if data_format == "channels_first":
if x.ndim == 3:
x[0, :, :] -= mean[0]
x[1, :, :] -= mean[1]
x[2, :, :] -= mean[2]
if std is not None:
x[0, :, :] /= std[0]
x[1, :, :] /= std[1]
x[2, :, :] /= std[2]
else:
x[:, 0, :, :] -= mean[0]
x[:, 1, :, :] -= mean[1]
x[:, 2, :, :] -= mean[2]
if std is not None:
x[:, 0, :, :] /= std[0]
x[:, 1, :, :] /= std[1]
x[:, 2, :, :] /= std[2]
else:
x[..., 0] -= mean[0]
x[..., 1] -= mean[1]
x[..., 2] -= mean[2]
if std is not None:
x[..., 0] /= std[0]
x[..., 1] /= std[1]
x[..., 2] /= std[2]
return x
def _preprocess_symbolic_input(x, data_format, mode):
"""Preprocesses a tensor encoding a batch of images.
Args:
x: Input tensor, 3D or 4D.
data_format: Data format of the image tensor.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
Returns:
Preprocessed tensor.
"""
if mode == "tf":
x /= 127.5
x -= 1.0
return x
elif mode == "torch":
x /= 255.0
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
else:
if data_format == "channels_first":
# 'RGB'->'BGR'
if backend.ndim(x) == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
mean = [103.939, 116.779, 123.68]
std = None
mean_tensor = backend.constant(-np.array(mean))
# Zero-center by mean pixel
if backend.dtype(x) != backend.dtype(mean_tensor):
x = backend.bias_add(
x,
backend.cast(mean_tensor, backend.dtype(x)),
data_format=data_format,
)
else:
x = backend.bias_add(x, mean_tensor, data_format)
if std is not None:
std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
if data_format == "channels_first":
std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
x /= std_tensor
return x
def obtain_input_shape(
input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None,
):
"""Internal utility to compute/validate a model's input shape.
Args:
input_shape: Either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: One of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: In case of invalid argument values.
"""
if weights != "imagenet" and input_shape and len(input_shape) == 3:
if data_format == "channels_first":
if input_shape[0] not in {1, 3}:
warnings.warn(
"This model usually expects 1 or 3 input channels. "
"However, it was passed an input_shape with "
+ str(input_shape[0])
+ " input channels.",
stacklevel=2,
)
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(
"This model usually expects 1 or 3 input channels. "
"However, it was passed an input_shape with "
+ str(input_shape[-1])
+ " input channels.",
stacklevel=2,
)
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == "channels_first":
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == "imagenet" and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError(
"When setting `include_top=True` "
"and loading `imagenet` weights, "
f"`input_shape` should be {default_shape}. "
f"Received: input_shape={input_shape}"
)
return default_shape
if input_shape:
if data_format == "channels_first":
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
"`input_shape` must be a tuple of three integers."
)
if input_shape[0] != 3 and weights == "imagenet":
raise ValueError(
"The input must have 3 channels; Received "
f"`input_shape={input_shape}`"
)
if (
input_shape[1] is not None and input_shape[1] < min_size
) or (input_shape[2] is not None and input_shape[2] < min_size):
raise ValueError(
f"Input size must be at least {min_size}"
f"x{min_size}; Received: "
f"input_shape={input_shape}"
)
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
"`input_shape` must be a tuple of three integers."
)
if input_shape[-1] != 3 and weights == "imagenet":
raise ValueError(
"The input must have 3 channels; Received "
f"`input_shape={input_shape}`"
)
if (
input_shape[0] is not None and input_shape[0] < min_size
) or (input_shape[1] is not None and input_shape[1] < min_size):
raise ValueError(
"Input size must be at least "
f"{min_size}x{min_size}; Received: "
f"input_shape={input_shape}"
)
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == "channels_first":
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError(
"If `include_top` is True, "
"you should specify a static `input_shape`. "
f"Received: input_shape={input_shape}"
)
return input_shape
def correct_pad(inputs, kernel_size):
"""Returns a tuple for zero-padding for 2D convolution with downsampling.
Args:
inputs: Input tensor.
kernel_size: An integer or tuple/list of 2 integers.
Returns:
A tuple.
"""
img_dim = 2 if backend.image_data_format() == "channels_first" else 1
input_size = backend.int_shape(inputs)[img_dim : (img_dim + 2)]
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if input_size[0] is None:
adjust = (1, 1)
else:
adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
return (
(correct[0] - adjust[0], correct[0]),
(correct[1] - adjust[1], correct[1]),
)
def validate_activation(classifier_activation, weights):
"""validates that the classifer_activation is compatible with the weights.
Args:
classifier_activation: str or callable activation function
weights: The pretrained weights to load.
Raises:
ValueError: if an activation other than `None` or `softmax` are used with
pretrained weights.
"""
if weights is None:
return
classifier_activation = activations.get(classifier_activation)
if classifier_activation not in {
activations.get("softmax"),
activations.get(None),
}:
raise ValueError(
"Only `None` and `softmax` activations are allowed "
"for the `classifier_activation` argument when using "
"pretrained weights, with `include_top=True`; Received: "
f"classifier_activation={classifier_activation}"
)
| tf-keras/tf_keras/applications/imagenet_utils.py/0 | {
"file_path": "tf-keras/tf_keras/applications/imagenet_utils.py",
"repo_id": "tf-keras",
"token_count": 7736
} | 174 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend config API."""
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.util.tf_export import keras_export
# The type of float to use throughout a session.
_FLOATX = "float32"
# Epsilon fuzz factor used throughout the codebase.
_EPSILON = 1e-7
# Default image data format, one of "channels_last", "channels_first".
_IMAGE_DATA_FORMAT = "channels_last"
@keras_export("keras.backend.epsilon")
@tf.__internal__.dispatch.add_dispatch_support
def epsilon():
"""Returns the value of the fuzz factor used in numeric expressions.
Returns:
A float.
Example:
>>> tf.keras.backend.epsilon()
1e-07
"""
return _EPSILON
@keras_export("keras.backend.set_epsilon")
def set_epsilon(value):
"""Sets the value of the fuzz factor used in numeric expressions.
Args:
value: float. New value of epsilon.
Example:
>>> tf.keras.backend.epsilon()
1e-07
>>> tf.keras.backend.set_epsilon(1e-5)
>>> tf.keras.backend.epsilon()
1e-05
>>> tf.keras.backend.set_epsilon(1e-7)
"""
global _EPSILON
_EPSILON = value
@keras_export("keras.backend.floatx")
def floatx():
"""Returns the default float type, as a string.
E.g. `'float16'`, `'float32'`, `'float64'`.
Returns:
String, the current default float type.
Example:
>>> tf.keras.backend.floatx()
'float32'
"""
return _FLOATX
@keras_export("keras.backend.set_floatx")
def set_floatx(value):
"""Sets the default float type.
Note: It is not recommended to set this to float16 for training, as this
will likely cause numeric stability issues. Instead, mixed precision, which
is using a mix of float16 and float32, can be used by calling
`tf.keras.mixed_precision.set_global_policy('mixed_float16')`. See the
[mixed precision guide](
https://www.tensorflow.org/guide/keras/mixed_precision) for details.
Args:
value: String; `'float16'`, `'float32'`, or `'float64'`.
Example:
>>> tf.keras.backend.floatx()
'float32'
>>> tf.keras.backend.set_floatx('float64')
>>> tf.keras.backend.floatx()
'float64'
>>> tf.keras.backend.set_floatx('float32')
Raises:
ValueError: In case of invalid value.
"""
global _FLOATX
accepted_dtypes = {"float16", "float32", "float64"}
if value not in accepted_dtypes:
raise ValueError(
f"Unknown `floatx` value: {value}. "
f"Expected one of {accepted_dtypes}"
)
_FLOATX = str(value)
@keras_export("keras.backend.image_data_format")
@tf.__internal__.dispatch.add_dispatch_support
def image_data_format():
"""Returns the default image data format convention.
Returns:
A string, either `'channels_first'` or `'channels_last'`
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
"""
return _IMAGE_DATA_FORMAT
@keras_export("keras.backend.set_image_data_format")
def set_image_data_format(data_format):
"""Sets the value of the image data format convention.
Args:
data_format: string. `'channels_first'` or `'channels_last'`.
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
>>> tf.keras.backend.set_image_data_format('channels_first')
>>> tf.keras.backend.image_data_format()
'channels_first'
>>> tf.keras.backend.set_image_data_format('channels_last')
Raises:
ValueError: In case of invalid `data_format` value.
"""
global _IMAGE_DATA_FORMAT
accepted_formats = {"channels_last", "channels_first"}
if data_format not in accepted_formats:
raise ValueError(
f"Unknown `data_format`: {data_format}. "
f"Expected one of {accepted_formats}"
)
_IMAGE_DATA_FORMAT = str(data_format)
| tf-keras/tf_keras/backend_config.py/0 | {
"file_path": "tf-keras/tf_keras/backend_config.py",
"repo_id": "tf-keras",
"token_count": 1740
} | 175 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks on Convnet on MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.benchmarks import benchmark_util
class ConvMnistBenchmark(tf.test.Benchmark):
"""Benchmarks for Convnet using `tf.test.Benchmark`."""
def __init__(self):
super().__init__()
self.num_classes = 10
self.input_shape = (28, 28, 1)
(self.x_train, self.y_train), _ = keras.datasets.mnist.load_data()
self.x_train = self.x_train.astype("float32") / 255
self.x_train = np.expand_dims(self.x_train, -1)
self.y_train = keras.utils.to_categorical(
self.y_train, self.num_classes
)
self.epochs = 15
def _build_model(self):
"""Model from https://keras.io/examples/vision/mnist_convnet/."""
model = keras.Sequential(
[
keras.Input(shape=self.input_shape),
keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dropout(0.5),
keras.layers.Dense(self.num_classes, activation="softmax"),
]
)
return model
# In each benchmark test, the required arguments for the
# method `measure_performance` include:
# x: Input data, it could be Numpy or loaded from tfds.
# y: Target data. If `x` is a dataset or generator instance,
# `y` should not be specified.
# loss: Loss function for model.
# optimizer: Optimizer for model.
# Check more details in `measure_performance()` method of
# benchmark_util.
def benchmark_conv_mnist_bs_128(self):
"""Measure performance with batch_size=128."""
batch_size = 128
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"conv", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_conv_mnist_bs_256(self):
"""Measure performance with batch_size=256."""
batch_size = 256
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"conv", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_conv_mnist_bs_512(self):
"""Measure performance with batch_size=512."""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
epochs=self.epochs,
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"conv", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
def benchmark_conv_mnist_bs_512_gpu_2(self):
"""Measure performance with batch_size=512, gpu=2 and
distribution_strategy='mirrored'
"""
batch_size = 512
metrics, wall_time, extras = benchmark_util.measure_performance(
self._build_model,
x=self.x_train,
y=self.y_train,
batch_size=batch_size,
num_gpus=2,
distribution_strategy="mirrored",
epochs=self.epochs,
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
metadata = benchmark_util.get_keras_examples_metadata(
"conv", batch_size
)
extras.update(metadata)
self.report_benchmark(
wall_time=wall_time, metrics=metrics, extras=extras
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py/0 | {
"file_path": "tf-keras/tf_keras/benchmarks/keras_examples_benchmarks/mnist_conv_benchmark_test.py",
"repo_id": "tf-keras",
"token_count": 2571
} | 176 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for custom training loops that involves advanced optimizer usage."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.distribute import (
strategy_combinations as keras_strategy_combinations,
)
from tf_keras.optimizers.legacy import gradient_descent
# isort: off
from tensorflow.python.distribute import values
class OptimizerTest(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.times(
tf.__internal__.test.combinations.combine(
distribution=keras_strategy_combinations.multidevice_strategies,
mode=["eager"],
),
tf.__internal__.test.combinations.combine(
experimental_aggregate_gradients=True,
expected=[[[-0.3, -0.3], [-0.3, -0.3]]],
)
+ tf.__internal__.test.combinations.combine(
experimental_aggregate_gradients=False,
expected=[[[-0.1, -0.1], [-0.2, -0.2]]],
),
)
)
def test_custom_aggregation(
self, distribution, experimental_aggregate_gradients, expected
):
with distribution.scope():
v = tf.Variable([0.0, 0.0])
optimizer = gradient_descent.SGD(0.1)
class PerReplica(values.DistributedValues):
"""Holds a map from replica to unsynchronized values."""
@property
def values(self):
"""Returns the per replica values."""
return self._values
@tf.function
def optimize():
with tf.device(distribution.extended.worker_devices[0]):
v1 = tf.convert_to_tensor([1.0, 1.0])
with tf.device(distribution.extended.worker_devices[1]):
v2 = tf.convert_to_tensor([2.0, 2.0])
grads = PerReplica([v1, v2])
def step_fn(grads):
optimizer.apply_gradients(
[(grads, v)],
experimental_aggregate_gradients=experimental_aggregate_gradients, # noqa: E501
)
return v.read_value()
return distribution.experimental_local_results(
distribution.run(step_fn, args=(grads,))
)
self.assertAllClose(optimize(), expected)
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=tf.__internal__.distribute.combinations.one_device_strategy, # noqa: E501
mode=["eager"],
experimental_aggregate_gradients=[True, False],
)
)
def test_custom_aggregation_one_device(
self, distribution, experimental_aggregate_gradients
):
with distribution.scope():
v = tf.Variable([0.0, 0.0])
optimizer = gradient_descent.SGD(0.1)
@tf.function
def optimize():
grads = tf.convert_to_tensor([1.0, 1.0])
def step_fn(grads):
optimizer.apply_gradients(
[(grads, v)],
experimental_aggregate_gradients=experimental_aggregate_gradients, # noqa: E501
)
return v.read_value()
return distribution.experimental_local_results(
distribution.run(step_fn, args=(grads,))
)
self.assertAllClose(optimize(), [[-0.1, -0.1]])
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.central_storage_strategy_with_gpu_and_cpu # noqa: E501
]
)
)
def test_custom_aggregation_central_storage(self, distribution):
with distribution.scope():
v = tf.Variable([0.0, 0.0])
optimizer = gradient_descent.SGD(0.1)
grads = tf.convert_to_tensor([1.0, 1.0])
def step_fn(grads):
with self.assertRaises(NotImplementedError):
optimizer.apply_gradients(
[(grads, v)], experimental_aggregate_gradients=False
)
return distribution.run(step_fn, args=(grads,))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/distribute/custom_training_loop_optimizer_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/custom_training_loop_optimizer_test.py",
"repo_id": "tf-keras",
"token_count": 2257
} | 177 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras high level APIs, e.g. fit, evaluate and predict."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.distribute.strategy_combinations import all_strategies
class KerasModelsTest(tf.test.TestCase, parameterized.TestCase):
@tf.__internal__.distribute.combinations.generate(
tf.__internal__.test.combinations.combine(
distribution=all_strategies, mode=["eager"]
)
)
def test_lstm_model_with_dynamic_batch(self, distribution):
input_data = np.random.random([1, 32, 64, 64, 3])
input_shape = tuple(input_data.shape[1:])
def build_model():
model = keras.models.Sequential()
model.add(
keras.layers.ConvLSTM2D(
4,
kernel_size=(4, 4),
activation="sigmoid",
padding="same",
input_shape=input_shape,
)
)
model.add(keras.layers.GlobalMaxPooling2D())
model.add(keras.layers.Dense(2, activation="sigmoid"))
return model
with distribution.scope():
model = build_model()
model.compile(loss="binary_crossentropy", optimizer="adam")
result = model.predict(input_data)
self.assertEqual(result.shape, (1, 2))
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/distribute/keras_models_test.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/keras_models_test.py",
"repo_id": "tf-keras",
"token_count": 874
} | 178 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy and optimizer combinations for combinations.combine()."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import adam as adam_experimental
from tf_keras.optimizers.legacy import adadelta as adadelta_keras_v2
from tf_keras.optimizers.legacy import adagrad as adagrad_keras_v2
from tf_keras.optimizers.legacy import adam as adam_keras_v2
from tf_keras.optimizers.legacy import adamax as adamax_keras_v2
from tf_keras.optimizers.legacy import ftrl as ftrl_keras_v2
from tf_keras.optimizers.legacy import (
gradient_descent as gradient_descent_keras_v2,
)
from tf_keras.optimizers.legacy import nadam as nadam_keras_v2
from tf_keras.optimizers.legacy import rmsprop as rmsprop_keras_v2
gradient_descent_optimizer_v1_fn = (
tf.__internal__.test.combinations.NamedObject(
"GradientDescentV1",
lambda: tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
)
adagrad_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
"AdagradV1", lambda: tf.compat.v1.train.AdagradOptimizer(0.001)
)
adam_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
"AdamV1", lambda: tf.compat.v1.train.AdamOptimizer(0.001, epsilon=1)
)
ftrl_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
"FtrlV1", lambda: tf.compat.v1.train.FtrlOptimizer(0.001)
)
rmsprop_optimizer_v1_fn = tf.__internal__.test.combinations.NamedObject(
"RmsPropV1", lambda: tf.compat.v1.train.RMSPropOptimizer(0.001)
)
# TODO(shiningsun): consider adding the other v1 optimizers
optimizers_v1 = [
gradient_descent_optimizer_v1_fn,
adagrad_optimizer_v1_fn,
ftrl_optimizer_v1_fn,
rmsprop_optimizer_v1_fn,
]
adadelta_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"AdadeltaKerasV2", lambda: adadelta_keras_v2.Adadelta(0.001)
)
adagrad_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001)
)
adam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0)
)
adam_experimental_fn = tf.__internal__.test.combinations.NamedObject(
"AdamExperimental", lambda: adam_experimental.Adam(0.001)
)
adamax_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"AdamaxKerasV2", lambda: adamax_keras_v2.Adamax(0.001, epsilon=1.0)
)
nadam_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"NadamKerasV2", lambda: nadam_keras_v2.Nadam(0.001, epsilon=1.0)
)
ftrl_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"FtrlKerasV2", lambda: ftrl_keras_v2.Ftrl(0.001)
)
gradient_descent_optimizer_keras_v2_fn = (
tf.__internal__.test.combinations.NamedObject(
"GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.001)
)
)
rmsprop_optimizer_keras_v2_fn = tf.__internal__.test.combinations.NamedObject(
"RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001)
)
# TODO(shiningsun): consider adding the other v2 optimizers
optimizers_v2 = [
gradient_descent_optimizer_keras_v2_fn,
adagrad_optimizer_keras_v2_fn,
]
optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and
Optimizers."""
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v1,
)
def distributions_and_v2_optimizers():
"""A common set of combination with DistributionStrategies and
Optimizers."""
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v2,
)
def distributions_and_v1_and_v2_optimizers():
"""A common set of combination with DistributionStrategies and
Optimizers."""
return tf.__internal__.test.combinations.combine(
distribution=[
tf.__internal__.distribute.combinations.one_device_strategy,
tf.__internal__.distribute.combinations.mirrored_strategy_with_gpu_and_cpu, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus, # noqa: E501
tf.__internal__.distribute.combinations.mirrored_strategy_with_two_gpus_no_merge_call, # noqa: E501
],
optimizer_fn=optimizers_v1_and_v2,
)
| tf-keras/tf_keras/distribute/optimizer_combinations.py/0 | {
"file_path": "tf-keras/tf_keras/distribute/optimizer_combinations.py",
"repo_id": "tf-keras",
"token_count": 2378
} | 179 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""E2E test for DTensor with Mnist model.
Note that this is used as prototype and verification of current functionality,
and will be changed rapidly. Please don't reply on any of these methods as a
public API/contract.
"""
import numpy as np
import tensorflow.compat.v2 as tf
from absl import logging
from tf_keras import layers
from tf_keras import losses
from tf_keras import models
from tf_keras.datasets import mnist
from tf_keras.dtensor import dtensor_api as dtensor
from tf_keras.dtensor import layout_map as layout_map_lib
from tf_keras.utils import np_utils
NUM_CLASS = 10 # MNIST has 10 digits
def get_model_with_layout_map(layout_map):
"""Builds a Sequential CNN model to recognize MNIST digits.
Args:
layout_map: dict of string name -> Layout, for weights creation.
Returns:
a CNN TF-Keras model used for MNIST
"""
with layout_map_lib.layout_map_scope(layout_map):
# Define a CNN model to recognize MNIST digits.
return get_model()
def get_model():
"""Builds a Sequential CNN model to recognize MNIST digits."""
model = models.Sequential()
model.add(
layers.Conv2D(
32,
name="conv2d_1",
kernel_size=(3, 3),
activation="relu",
input_shape=(28, 28, 1), # channel last gray scale input
)
)
model.add(
layers.Conv2D(
64,
name="conv2d_2",
kernel_size=(3, 3),
activation="relu",
)
)
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(
layers.Dense(
128,
name="dense_1",
activation="relu",
)
)
model.add(layers.Dropout(0.5))
model.add(
layers.Dense(
NUM_CLASS,
name="dense_2",
activation="softmax",
)
)
return model
def get_all_replicated_layout_map(mesh):
layout_map = layout_map_lib.LayoutMap(mesh=mesh)
layout_4d = dtensor.Layout.replicated(mesh, rank=4)
layout_2d = dtensor.Layout.replicated(mesh, rank=2)
layout_1d = dtensor.Layout.replicated(mesh, rank=1)
layout_map["conv2d.*kernel"] = layout_4d
layout_map["conv2d.*bias"] = layout_1d
layout_map["dense.*kernel"] = layout_2d
layout_map["dense.*bias"] = layout_1d
return layout_map
def get_mnist_datasets(num_class, batch_size):
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, axis=-1).astype("float32")
x_test = np.expand_dims(x_test, axis=-1).astype("float32")
x_train /= 255 # normalize to 0~1
x_test /= 255
y_train = np_utils.to_categorical(y_train, num_class)
y_test = np_utils.to_categorical(y_test, num_class)
train_ds = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.repeat()
.batch(batch_size, drop_remainder=True)
)
eval_ds = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.repeat()
.batch(batch_size, drop_remainder=True)
)
return train_ds, eval_ds
def train_mnist_model_batch_sharded(
model, optimizer, mesh, num_epochs, steps_per_epoch, global_batch_size
):
dataset, _ = get_mnist_datasets(NUM_CLASS, global_batch_size)
input_image_layout = dtensor.Layout.batch_sharded(mesh, "batch", rank=4)
input_label_layout = dtensor.Layout.batch_sharded(mesh, "batch", rank=2)
loss_obj = losses.CategoricalCrossentropy()
num_local_devices = mesh.num_local_devices()
iterator = iter(dataset)
train_losses = []
for epoch in range(num_epochs):
total_loss = 0.00
for _ in range(steps_per_epoch):
images, labels = next(iterator)
images = tf.split(images, num_local_devices)
labels = tf.split(labels, num_local_devices)
d_images = dtensor.pack(images, input_image_layout)
d_labels = dtensor.pack(labels, input_label_layout)
total_loss += train_step(
model, d_images, d_labels, loss_obj, optimizer
)
train_loss = tf.reduce_mean(total_loss / steps_per_epoch)
logging.info("Epoch %d, Loss: %f", epoch, train_loss)
train_losses.append(train_loss)
return train_losses
# Change to use model.fit when dataset has the correct layout info populated
# in the iterator, which is the long term solution
@tf.function
def train_step(model, feature, label, loss_obj, optimizer):
with tf.GradientTape() as tape:
predict = model(feature, training=True)
loss = loss_obj(label, predict)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
| tf-keras/tf_keras/dtensor/integration_test_utils.py/0 | {
"file_path": "tf-keras/tf_keras/dtensor/integration_test_utils.py",
"repo_id": "tf-keras",
"token_count": 2275
} | 180 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
import copy
import os
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import layers
from tf_keras import regularizers
from tf_keras.engine import base_layer
from tf_keras.engine import input_layer
from tf_keras.engine import sequential
from tf_keras.engine import training as training_lib
from tf_keras.legacy_tf_layers import core as legacy_core
from tf_keras.optimizers.legacy import rmsprop
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import control_flow_util
class DynamicLayer(base_layer.Layer):
def __init__(self, dynamic=False, **kwargs):
super().__init__(dynamic=dynamic, **kwargs)
def call(self, inputs):
samples = tf.TensorArray(dtype=tf.float32, size=tf.shape(inputs)[0])
for idx, sample in enumerate(inputs):
samples = samples.write(idx, tf.square(sample))
return samples.stack()
def compute_output_shape(self, input_shape):
return input_shape
class InvalidLayer(base_layer.Layer):
def call(self, inputs):
raise ValueError("You did something wrong!")
@test_utils.run_v2_only
class BaseLayerTest(test_combinations.TestCase):
@test_combinations.generate(test_combinations.keras_mode_combinations())
def test_layer_instrumentation(self):
layer = layers.Add()
self.assertTrue(layer._instrumented_keras_api)
self.assertTrue(layer._instrumented_keras_layer_class)
self.assertFalse(layer._instrumented_keras_model_class)
@test_combinations.generate(
test_combinations.keras_model_type_combinations()
)
def test_dynamic_layer(self):
model = test_utils.get_model_from_layers(
[DynamicLayer(dynamic=True)], input_shape=(3,)
)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
@test_combinations.generate(
test_combinations.keras_model_type_combinations()
)
def test_dynamic_layer_error(self):
# Functional Models hit the `dyanamic=True` error during construction.
# Subclass Models should just throw the original autograph error during
# execution.
raised_error = False
try:
model = test_utils.get_model_from_layers(
[DynamicLayer()], input_shape=(3,)
)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
model.train_on_batch(
np.random.random((2, 3)), np.random.random((2, 3))
)
except tf.errors.OperatorNotAllowedInGraphError as e:
if "iterating over `tf.Tensor`" in str(e):
raised_error = True
elif "Iterating over a symbolic `tf.Tensor`" in str(e):
raised_error = True
except TypeError as e:
if "attempting to use Python control flow" in str(e):
raised_error = True
elif "Attempting to use Python control flow" in str(e):
raised_error = True
self.assertTrue(raised_error)
@test_combinations.generate(
test_combinations.keras_model_type_combinations()
)
def test_dynamic_layer_error_running_in_graph_mode(self):
with tf.compat.v1.get_default_graph().as_default():
model = test_utils.get_model_from_layers(
[DynamicLayer(dynamic=True)], input_shape=(3,)
)
self.assertEqual(model.dynamic, True)
# But then you cannot run the model since you're in a graph scope.
with self.assertRaisesRegex(
ValueError, "You must enable eager execution"
):
model.compile(rmsprop.RMSprop(0.001), loss="mse")
def test_manual_compute_output_shape(self):
class BuildCounter(base_layer.Layer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.build_counter = 0
def build(self, input_shape):
self.build_counter += 1
self.build_shape = input_shape
def call(self, inputs):
return inputs
layer = BuildCounter(dtype=tf.float64)
output_shape = layer.compute_output_shape((None, 10))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(layer.build_shape.as_list(), [None, 10])
self.assertEqual(output_shape.as_list(), [None, 10])
output_signature = layer.compute_output_signature(
tf.TensorSpec(dtype=tf.float64, shape=[None, 10])
)
self.assertEqual(layer.build_counter, 1)
self.assertEqual(layer.build_shape.as_list(), [None, 10])
self.assertEqual(output_signature.dtype, tf.float64)
self.assertEqual(output_signature.shape.as_list(), [None, 10])
layer(np.ones((5, 10)))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(layer.build_shape.as_list(), [None, 10])
def test_dynamic_layer_with_deferred_sequential_model(self):
model = sequential.Sequential(
[DynamicLayer(dynamic=True), layers.Dense(3)]
)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_nested_dynamic_layers_in_eager_mode(self):
inputs = input_layer.Input((3,))
outputs = DynamicLayer(dynamic=True)(inputs)
inner_model = training_lib.Model(inputs, outputs)
self.assertEqual(inner_model.dynamic, True)
inputs = input_layer.Input((3,))
x = DynamicLayer(dynamic=True)(inputs)
outputs = inner_model(x)
model = training_lib.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_subclassed_model_no_shape_inference(self):
class MyModel(training_lib.Model):
def __init__(self):
super().__init__(dynamic=True)
self.layer1 = layers.Dense(3)
self.layer2 = layers.Dense(3)
def call(self, inputs):
if tf.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs, None)
def test_dynamic_subclassed_model_with_shape_inference(self):
class MyModel(training_lib.Model):
def __init__(self):
super().__init__(dynamic=True)
self.layer1 = layers.Dense(3)
self.layer2 = layers.Dense(3)
def call(self, inputs):
if tf.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
def compute_output_shape(self, input_shape):
return tuple(input_shape[:-1].as_list()) + (3,)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss="mse")
x, y = np.random.random((2, 3)), np.random.random((2, 3))
model.train_on_batch(x, y)
outputs = model(x)
self.assertEqual(outputs.shape.as_list(), [2, 3])
def test_deepcopy(self):
bias_reg = lambda x: 1e-3 * tf.reduce_sum(x)
layer = layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)
# Call the Layer on data to generate regularize losses.
layer(tf.ones((1, 10, 10, 3)))
self.assertLen(layer.losses, 1)
new_layer = copy.deepcopy(layer)
self.assertEqual(new_layer.bias_regularizer, bias_reg)
self.assertEqual(layer.get_config(), new_layer.get_config())
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_invalid_forward_pass(self):
inputs = input_layer.Input((3,))
with self.assertRaisesRegex(ValueError, "You did something wrong!"):
_ = InvalidLayer()(inputs)
def test_no_legacy_model(self):
inputs = input_layer.Input((1,))
legacy_dense_0 = legacy_core.Dense(1, name="legacy_dense_0")
legacy_dense_1 = legacy_core.Dense(1, name="legacy_dense_1")
layer = legacy_dense_0(inputs)
layer = layers.Dense(1)(layer)
layer = legacy_dense_1(layer)
expected_regex = (
r"The following are legacy tf\.layers\.Layers:\n "
"{}\n {}".format(legacy_dense_0, legacy_dense_1)
)
with self.assertRaisesRegex(TypeError, expected_regex):
_ = training_lib.Model(inputs=[inputs], outputs=[layer])
model = training_lib.Model(inputs=[inputs], outputs=[inputs])
with self.assertRaisesRegex(TypeError, expected_regex):
model._insert_layers([legacy_dense_0, legacy_dense_1])
def test_no_legacy_sequential(self):
layer = [layers.Dense(1), legacy_core.Dense(1, name="legacy_dense_0")]
expected_regex = r"legacy tf\.layers\.Layers:\n {}".format(layer[1])
with self.assertRaisesRegex(TypeError, expected_regex):
_ = sequential.Sequential(layer)
with self.assertRaisesRegex(TypeError, expected_regex):
_ = sequential.Sequential([input_layer.Input(shape=(4,))] + layer)
model = sequential.Sequential()
with self.assertRaisesRegex(TypeError, expected_regex):
for l in layer:
model.add(l)
@test_combinations.generate(
test_combinations.times(
test_combinations.keras_model_type_combinations(),
test_combinations.combine(mode=["graph", "eager"]),
)
)
def test_build_with_numpy_data(self):
model_layers = [
layers.Dense(3, activation="relu", kernel_initializer="ones"),
layers.Dense(1, activation="sigmoid", kernel_initializer="ones"),
]
model = test_utils.get_model_from_layers(model_layers, input_shape=(4,))
model(np.zeros((2, 4), dtype="float32"))
self.assertTrue(model.built)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_default_add_weight(self):
class TestLayer(base_layer.Layer):
def __init__(self):
super().__init__()
self.default_weight = self.add_weight()
self.weight_without_name = self.add_weight(shape=(3, 4))
self.regularized_weight_without_name = self.add_weight(
shape=(3, 4), regularizer="l2"
)
layer = TestLayer()
self.assertEqual(layer.default_weight.shape.as_list(), [])
self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
self.assertEqual(layer.default_weight.dtype.name, "float32")
self.assertEqual(layer.weight_without_name.dtype.name, "float32")
self.assertEqual(len(layer.losses), 1)
if not tf.executing_eagerly():
# Cannot access tensor.name in eager execution.
self.assertIn("Variable_2/Regularizer", layer.losses[0].name)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_add_weight_by_getter(self):
layer = base_layer.Layer()
variable = tf.Variable("abc")
added = layer.add_weight(
dtype=tf.string, getter=lambda *_, **__: variable
)
self.assertIs(variable, added)
def test_variable_resetting(self):
dense = layers.Dense(1)
dense.build([8, 2])
self.assertIs(dense.trainable_variables[0], dense.kernel)
self.assertIs(dense.trainable_variables[1], dense.bias)
# when we reset the variable to another instance, make sure the ordering
# of the variable in the trainable_variables doesn't change.
# This is important for h5 saving/loading.
dense.bias = tf.Variable(initial_value=tf.zeros(shape=(1,)))
dense.kernel = tf.Variable(initial_value=tf.zeros(shape=(2, 1)))
self.assertIs(dense.trainable_variables[0], dense.kernel)
self.assertIs(dense.trainable_variables[1], dense.bias)
@test_combinations.generate(
test_combinations.keras_mode_combinations(mode=["eager"])
)
def test_learning_phase_freezing_for_layers(self):
class LearningPhaseLayer(base_layer.Layer):
def call(self, inputs):
return backend.in_train_phase(
lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs)
)
def get_learning_phase_value():
model = sequential.Sequential(
[LearningPhaseLayer(input_shape=(1,))]
)
model._run_eagerly = test_utils.should_run_eagerly()
return np.sum(model(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
# Test scope.
with backend.learning_phase_scope(1):
self.assertEqual(get_learning_phase_value(), 1)
# The effects of the scope end after exiting it.
self.assertEqual(get_learning_phase_value(), 0)
# Test setting.
backend.set_learning_phase(1)
self.assertEqual(get_learning_phase_value(), 1)
backend.set_learning_phase(0)
self.assertEqual(get_learning_phase_value(), 0)
# Cannot be enabled with `run_eagerly=True`, see b/123904578
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_layer_can_return_variable(self):
class ComputeSum(base_layer.Layer):
def __init__(self):
super().__init__()
self.total = tf.Variable(
initial_value=tf.zeros((1, 1)), trainable=False
)
if not tf.executing_eagerly():
backend.get_session().run(self.total.initializer)
def call(self, inputs):
self.total.assign_add(inputs)
return self.total
inputs = input_layer.Input(shape=(1,))
model = training_lib.Model(inputs, ComputeSum()(inputs))
model.predict(np.ones((1, 1)))
def _get_layer_with_training_arg(self):
class TrainingLayer(base_layer.Layer):
"""A layer with a `training` argument in a defuned `call`."""
@tf.function
def call(self, inputs, training=None):
if training is None:
training = backend.learning_phase()
return control_flow_util.smart_cond(
training,
lambda: tf.ones_like(inputs),
lambda: tf.zeros_like(inputs),
)
return TrainingLayer()
# b/124459427: can't test with `run_eagerly=True` for now.
@test_combinations.generate(
test_combinations.times(
test_combinations.keras_mode_combinations(),
test_combinations.keras_model_type_combinations(),
)
)
def test_training_arg_in_defun(self):
layer = self._get_layer_with_training_arg()
model = test_utils.get_model_from_layers([layer], input_shape=(1,))
model.compile(rmsprop.RMSprop(0.0), loss="mae")
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history["loss"][0], 1.0)
loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(loss, 0.0)
# Test that the argument injection performed in `call` is not active
# when the argument is passed explicitly.
layer = self._get_layer_with_training_arg()
inputs = input_layer.Input(shape=(1,))
# Pass `training` by name
outputs = layer(inputs, training=False)
model = training_lib.Model(inputs, outputs)
model.compile(rmsprop.RMSprop(0.0), loss="mae")
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history["loss"][0], 0.0)
@test_combinations.generate(
test_combinations.times(
test_combinations.keras_mode_combinations(),
test_combinations.keras_model_type_combinations(),
)
)
def test_raw_variable_assignment(self):
class RawVariableLayer(base_layer.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Test variables in nested structure.
self.var_list = [tf.Variable(1.0), {"a": tf.Variable(2.0)}]
def call(self, inputs):
return inputs * self.var_list[0] * self.var_list[1]["a"]
model = test_utils.get_model_from_layers(
[RawVariableLayer()], input_shape=(10,)
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_composite_variable_assignment(self):
class Spec(tf.TypeSpec):
value_type = property(lambda self: CompositeVariable)
def _component_specs(self):
pass
def _serialize(self):
pass
def _to_components(self, value):
return value._variables
def _from_components(self, variable_list):
return CompositeVariable(variable_list)
class CompositeVariable(tf.__internal__.CompositeTensor):
def __init__(self, variable_list):
self._variables = variable_list
@property
def _type_spec(self):
return Spec()
class CompositeVariableLayer(base_layer.Layer):
def __init__(self):
super().__init__()
self.composite_var = CompositeVariable(
[tf.Variable(1.0), tf.Variable(2.0)]
)
layer = CompositeVariableLayer()
self.assertLen(layer.weights, 2)
self.assertIsInstance(layer.weights[0], tf.Variable)
self.assertIsInstance(layer.weights[1], tf.Variable)
self.assertEqual(self.evaluate(layer.weights[0]), 1.0)
self.assertEqual(self.evaluate(layer.weights[1]), 2.0)
def test_exception_if_trainable_not_boolean(self):
base_layer.Layer(trainable=True)
base_layer.Layer(trainable=tf.constant(True))
base_layer.Layer(trainable=tf.Variable(tf.constant(True)))
with self.assertRaisesRegex(
TypeError, "Expected `trainable` argument to be a boolean"
):
base_layer.Layer(trainable=0)
def test_exception_if_dynamic_not_boolean(self):
base_layer.Layer(dynamic=True)
with self.assertRaisesRegex(
TypeError, "Expected `dynamic` argument to be a boolean"
):
base_layer.Layer(dynamic=0)
def test_exception_if_name_not_string_or_none(self):
base_layer.Layer(name=None)
base_layer.Layer(name="layer_name")
with self.assertRaisesRegex(
TypeError, "Expected `name` argument to be a string"
):
base_layer.Layer(name=0)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_layer_names(self):
inputs = input_layer.Input(shape=[2])
add1 = inputs + inputs
add2 = layers.Add()([inputs, inputs])
add3 = inputs + inputs
add4 = layers.Add()([inputs, inputs])
model = training_lib.Model(
inputs=[inputs], outputs=[add1, add2, add3, add4]
)
actual_names = [l.name for l in model.layers]
graph_names = [
"input_1",
"tf_op_layer_add",
"add",
"tf_op_layer_add_2",
"add_1",
]
eager_names = [
"input_1",
"tf.__operators__.add",
"add",
"tf.__operators__.add_1",
"add_1",
]
for actual, eager, graph in zip(actual_names, graph_names, eager_names):
self.assertIn(actual, {eager, graph})
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_layer_names_after_loading(self):
backend.clear_session()
# Mimic loading a model that already contained add layers with
# name = 'add_1' and 'tf.__operators__.add'
layers.Add(name="add_1")
layers.Add(name="tf.__operators__.add")
inputs = input_layer.Input(shape=[2])
add1 = inputs + inputs
add2 = layers.Add()([inputs, inputs])
add3 = inputs + inputs
add4 = layers.Add()([inputs, inputs])
model = training_lib.Model(
inputs=[inputs], outputs=[add1, add2, add3, add4]
)
actual_names = [l.name for l in model.layers]
# The generated op layer names should have avoided layer names seen in
# the loaded model. (This avoiance should not apply to non-op-layers)
expected_names = [
"input_1",
"tf.__operators__.add_1",
"add",
"tf.__operators__.add_2",
"add_1",
]
self.assertAllEqual(actual_names, expected_names)
def test_add_trainable_weight_on_frozen_layer(self):
class TestLayer(base_layer.Layer):
def build(self, input_shape):
self.w = self.add_weight(shape=(), trainable=True)
def call(self, inputs):
return self.w * inputs
layer = TestLayer()
layer.trainable = False
layer.build(None)
layer.trainable = True
self.assertListEqual(layer.trainable_weights, [layer.w])
@test_combinations.generate(
test_combinations.times(
test_combinations.keras_mode_combinations(),
test_combinations.keras_model_type_combinations(),
)
)
def test_passing_initial_weights_values(self):
kernel_value = np.random.random((10, 2))
layer_with_weights = layers.Dense(
2, use_bias=False, weights=[kernel_value]
)
model = test_utils.get_model_from_layers(
[layer_with_weights], input_shape=(10,)
)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
inputs = np.random.random((3, 10))
out = model.predict(inputs)
self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
self.assertAllClose(out, np.dot(inputs, kernel_value))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_set_weights_and_get_weights(self):
layer = layers.Dense(2)
layer.build((None, 10))
kernel = np.random.random((10, 2))
bias = np.random.random((2,))
layer.set_weights([kernel, bias])
weights = layer.get_weights()
self.assertEqual(len(weights), 2)
self.assertAllClose(weights[0], kernel)
self.assertAllClose(weights[1], bias)
with self.assertRaisesRegex(
ValueError, "but the layer was expecting 2 weights"
):
layer.set_weights([1, 2, 3])
with self.assertRaisesRegex(
ValueError, "not compatible with provided weight shape"
):
layer.set_weights([kernel.T, bias])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_set_weights_accepts_output_of_get_weights(self):
layer = layers.Layer()
layer.add_weight(name="scalar_float", shape=(), dtype=tf.float32)
layer.add_weight(
name="scalar_string",
shape=(),
dtype=tf.string,
initializer=lambda *a, **k: "abc",
)
layer.add_weight(name="vector_float", shape=(3,), dtype=tf.float32)
layer.add_weight(
name="vector_string",
shape=(2,),
dtype=tf.string,
initializer=lambda *a, **k: 2 * ["abc"],
)
layer.set_weights(layer.get_weights())
def test_get_config_error(self):
class MyLayer(base_layer.Layer):
def __init__(self, my_kwarg="default", **kwargs):
super().__init__(**kwargs)
self.my_kwarg = my_kwarg
# `__init__` includes kwargs but `get_config` is not overridden, so
# an error should be thrown:
with self.assertRaisesRegex(
NotImplementedError, "Layer MyLayer was created by"
):
# We pass bytes because it's non-serializable and thus
# will not be handled by the auto-get_config
MyLayer(b"custom").get_config()
class MyLayerNew(base_layer.Layer):
def __init__(self, my_kwarg="default", **kwargs):
super().__init__(**kwargs)
self.my_kwarg = my_kwarg
def get_config(self):
config = super().get_config()
config["my_kwarg"] = self.my_kwarg
return config
# Test to make sure that error is not raised if the method call is
# from an overridden `get_config`:
self.assertEqual(
MyLayerNew("custom").get_config()["my_kwarg"], "custom"
)
class MyLayerNew2(base_layer.Layer):
def __init__(self, name="MyLayerName", dtype=None, **kwargs):
super().__init__(name=name, dtype=dtype, **kwargs)
# Check that if the kwargs in `__init__` are base layer constructor
# arguments, no error is thrown:
self.assertEqual(MyLayerNew2(name="New").get_config()["name"], "New")
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_count_params(self):
dense = layers.Dense(16)
dense.build((None, 4))
self.assertEqual(dense.count_params(), 16 * 4 + 16)
dense = layers.Dense(16)
with self.assertRaisesRegex(ValueError, "call `count_params`"):
dense.count_params()
model = sequential.Sequential(layers.Dense(16))
with self.assertRaisesRegex(ValueError, "call `count_params`"):
model.count_params()
dense = layers.Dense(16, input_dim=4)
model = sequential.Sequential(dense)
self.assertEqual(model.count_params(), 16 * 4 + 16)
def test_super_not_called(self):
class CustomLayerNotCallingSuper(base_layer.Layer):
def __init__(self):
pass
layer = CustomLayerNotCallingSuper()
with self.assertRaisesRegex(RuntimeError, "You must call `super()"):
layer(np.random.random((10, 2)))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_first_arg_not_called_inputs(self):
x, y = tf.ones((10, 1)), tf.ones((10, 1))
class ArgLayer(base_layer.Layer):
def call(self, x, y):
return x + y
layer = ArgLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
class KwargLayer(base_layer.Layer):
def call(self, x=None, y=None):
return x + y
layer = KwargLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
with self.assertRaisesRegex(ValueError, "must always be passed"):
layer(y=y)
class TFFunctionLayer(base_layer.Layer):
@tf.function
def call(self, x, y=None):
if y is None:
return x
return x + y
layer = TFFunctionLayer()
out = self.evaluate(layer(x=x, y=y))
self.assertAllClose(out, 2 * np.ones((10, 1)))
def test_build_input_shape(self):
class CustomLayer(base_layer.Layer):
def build(self, input_shape):
self.add_weight("w", shape=input_shape[1:])
super().build(input_shape)
layer = CustomLayer()
self.assertFalse(layer.built)
layer.build([None, 1, 2, 3])
self.assertTrue(layer.built)
self.assertEqual([None, 1, 2, 3], layer._build_input_shape)
layer = CustomLayer()
layer(input_layer.Input((3,)))
self.assertTrue(layer.built)
self.assertEqual([None, 3], layer._build_input_shape.as_list())
def test_build_input_shape_list_with_none(self):
class CustomLayer(base_layer.Layer):
def build(self, input_shape):
super().build(input_shape)
self.build_shape = input_shape
def call(self, inputs):
return inputs[0]
layer = CustomLayer()
layer([tf.constant([1.0]), None, tf.constant([2.0])])
self.assertEqual(layer.build_shape, [[1], None, [1]])
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_layer_input_shape_raises_error(self):
layer = layers.Dense(3)
with self.assertRaisesRegex(AttributeError, "no defined input shape"):
_ = layer.input_shape
layer(tf.ones((10, 1)))
with self.assertRaisesRegex(AttributeError, "no defined input shape"):
_ = layer.input_shape
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_custom_layer_training_arg(self):
class CustomLayerNoTrainingArg(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs):
return self._nested_layer(inputs)
class CustomLayerDefaultTrainingMissing(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, training):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingNone(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, training=None):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingFalse(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, training=False):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingTrue(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, training=True):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
self._test_custom_layer_training_arg(
CustomLayerNoTrainingArg=CustomLayerNoTrainingArg,
CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing,
CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone,
CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse,
CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue,
)
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_custom_layer_training_arg_kwargonly(self):
class CustomLayerNoTrainingArg(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs):
return self._nested_layer(inputs)
class CustomLayerDefaultTrainingMissing(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, *, training):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingNone(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, *, training=None):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingFalse(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, *, training=False):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
class CustomLayerDefaultTrainingTrue(base_layer.Layer):
def __init__(self, nested_layer=None):
super().__init__()
self._nested_layer = nested_layer or tf.identity
def call(self, inputs, *, training=True):
if training:
return self._nested_layer(inputs)
else:
return self._nested_layer(inputs) * 0.5
self._test_custom_layer_training_arg(
CustomLayerNoTrainingArg=CustomLayerNoTrainingArg,
CustomLayerDefaultTrainingMissing=CustomLayerDefaultTrainingMissing,
CustomLayerDefaultTrainingNone=CustomLayerDefaultTrainingNone,
CustomLayerDefaultTrainingFalse=CustomLayerDefaultTrainingFalse,
CustomLayerDefaultTrainingTrue=CustomLayerDefaultTrainingTrue,
)
def _test_custom_layer_training_arg(
self,
CustomLayerNoTrainingArg,
CustomLayerDefaultTrainingMissing,
CustomLayerDefaultTrainingNone,
CustomLayerDefaultTrainingFalse,
CustomLayerDefaultTrainingTrue,
):
x = tf.ones(shape=(1, 1))
# If the layer signature doesn't specify a default training arg,
# run it in inference mode when to training arg is passed
# to __call__
layer = CustomLayerDefaultTrainingMissing()
self.assertAllEqual(layer(x), x * 0.5)
self.assertAllEqual(layer(x, training=False), x * 0.5)
self.assertAllEqual(layer(x, training=True), x)
# If the layer signature specifies `False` as the default training arg,
# run it in inference mode when no training arg is passed
# to __call__
layer = CustomLayerDefaultTrainingFalse()
self.assertAllEqual(layer(x), x * 0.5)
self.assertAllEqual(layer(x, training=False), x * 0.5)
self.assertAllEqual(layer(x, training=True), x)
# If the layer signature specifies `True` as the default training arg,
# explicitly run it in training mode when no training arg is passed
# to __call__
layer = CustomLayerDefaultTrainingTrue()
self.assertAllEqual(layer(x), x)
self.assertAllEqual(layer(x, training=False), x * 0.5)
self.assertAllEqual(layer(x, training=True), x)
# Outer layers/models should set the training context implicitly for all
# nested layers, respecting whatever mode the outer layer was run with.
layer = CustomLayerDefaultTrainingTrue(
CustomLayerDefaultTrainingFalse()
)
# No outer value passed: use local defaults
self.assertAllEqual(layer(x), x) # Use outer default True
# Outer value passed: override local defaults
self.assertAllEqual(layer(x, training=False), x * 0.25)
self.assertAllEqual(layer(x, training=True), x)
layer = CustomLayerDefaultTrainingFalse(
CustomLayerDefaultTrainingTrue()
)
# No outer value passed: use local defaults
self.assertAllEqual(layer(x), x * 0.25) # Use outer default False
# Outer value passed: override local defaults
self.assertAllEqual(layer(x, training=False), x * 0.25)
self.assertAllEqual(layer(x, training=True), x)
# If the outer layer `call` doesn't take a training argument at all,
# it'll set the nested scope as None when no training arg is passed in.
# If a training arg is passed in it won't use it directly in `call`, but
# it will set the nested training mode.
layer = CustomLayerNoTrainingArg(CustomLayerDefaultTrainingTrue())
self.assertAllEqual(layer(x), x) # Use local default True
self.assertAllEqual(layer(x, training=False), x * 0.5)
self.assertAllEqual(layer(x, training=True), x)
layer = CustomLayerDefaultTrainingNone(CustomLayerDefaultTrainingTrue())
self.assertAllEqual(layer(x), x * 0.5) # Nested use local default True
self.assertAllEqual(layer(x, training=False), x * 0.25)
self.assertAllEqual(layer(x, training=True), x)
def test_activity_regularizer_string(self):
class MyLayer(base_layer.Layer):
pass
layer = MyLayer(activity_regularizer="l2")
self.assertIsInstance(layer.activity_regularizer, regularizers.L2)
def test_tf_module_tracking(self):
class MyModule(tf.Module):
def __init__(self):
super().__init__()
self.v1 = tf.Variable(1.0, trainable=True, name="v1")
self.v2 = tf.Variable(2.0, trainable=False, name="v2")
def __call__(self, x):
return x * self.v1 * self.v2
class MyLayer(base_layer.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.my_modules = {}
self.my_modules["a"] = MyModule()
def call(self, x):
return self.my_modules["a"](x)
layer = MyLayer()
self.assertLen(layer.variables, 2)
self.assertLen(layer.trainable_variables, 1)
self.assertLen(layer.non_trainable_variables, 1)
layer.trainable = False
self.assertLen(layer.variables, 2)
self.assertLen(layer.trainable_variables, 0)
self.assertLen(layer.non_trainable_variables, 2)
class MyModel(training_lib.Model):
def __init__(self):
super().__init__()
self.my_modules = []
self.my_modules.append(MyModule())
def call(self, x):
return self.my_modules[0](x)
model = MyModel()
self.assertLen(model.variables, 2)
self.assertLen(model.trainable_variables, 1)
self.assertLen(model.non_trainable_variables, 1)
model.trainable = False
self.assertLen(model.variables, 2)
self.assertLen(model.trainable_variables, 0)
self.assertLen(model.non_trainable_variables, 2)
def test_tf_tracking_lists(self):
class MyLayer(base_layer.Layer):
def __init__(self, num_weights):
super().__init__()
self.num_weights = num_weights
def build(self, input_shape):
super().build(input_shape)
self.my_weights = []
w_init = tf.random_normal_initializer()
for i in range(self.num_weights):
self.my_weights.append(
tf.Variable(
name=f"w_{i}",
initial_value=w_init(
shape=(input_shape[1], input_shape[1]),
dtype="float32",
),
trainable=True,
)
)
def call(self, x):
for w in self.my_weights:
x = tf.matmul(x, w)
return x
layer = MyLayer(3)
layer(tf.constant([[1.0, 1.0, 1.0, 1.0]]))
self.assertLen(layer.variables, 3)
self.assertLen(layer.trainable_variables, 3)
self.assertLen(layer.non_trainable_variables, 0)
layer.trainable = False
self.assertLen(layer.variables, 3)
self.assertLen(layer.trainable_variables, 0)
self.assertLen(layer.non_trainable_variables, 3)
def test_auto_get_config(self):
class MyLayer(base_layer.Layer):
def __init__(self, var1, var2, var3=None, **kwargs):
super().__init__(**kwargs)
layer = MyLayer("a", 2, var3=True, name="mylayer")
config = layer.get_config()
self.assertLen(config, 6)
self.assertEqual(config["var1"], "a")
self.assertEqual(config["var2"], 2)
self.assertEqual(config["var3"], True)
self.assertEqual(config["name"], "mylayer")
self.assertEqual(config["trainable"], True)
self.assertEqual(config["dtype"], "float32")
layer = MyLayer.from_config(config)
self.assertDictEqual(layer.get_config(), config)
layer = MyLayer("a", 2, var3=tf.nn.relu)
with self.assertRaises(NotImplementedError):
config = layer.get_config()
@test_utils.run_v2_only
class SymbolicSupportTest(test_combinations.TestCase):
def test_using_symbolic_tensors_with_tf_ops(self):
# Single-input.
x = input_layer.Input((3,))
tf.square(x)
# Multi-inputs.
x1, x2 = input_layer.Input((3,)), input_layer.Input((3,))
tf.concat([x1, x2], axis=1)
# Mixing TF-Keras symbolic tensors and graph tensors from the same graph
# works.
with backend.get_graph().as_default():
x1 = input_layer.Input((3,))
x2 = input_layer.Input((3,))
tf.matmul(x1, x2)
# Creating same op type (matmul) multiple times in the TF-Keras graph
# works.
x1 = input_layer.Input((3,))
x2 = input_layer.Input((3,))
tf.matmul(x1, x2)
def test_mixing_eager_and_graph_tensors(self):
with tf.Graph().as_default():
x1 = tf.ones((3, 3))
x2 = tf.ones((3, 3))
with self.assertRaises(TypeError):
tf.matmul(x1, x2)
def test_mixing_numpy_arrays_and_graph_tensors(self):
with tf.Graph().as_default():
x1 = tf.ones((3, 3))
x2 = np.ones((3, 3), dtype="float32")
with self.assertRaises(TypeError):
tf.matmul(x1, x2)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
x1 = input_layer.Input((3,))
x2 = tf.ones((3, 3))
y = tf.matmul(x1, x2)
fn = backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0], np.matmul(x_val, y_val), atol=1e-5)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
x1 = input_layer.Input((3,))
x2 = np.ones((3, 3), dtype="float32")
y = tf.matmul(x1, x2)
fn = backend.function(inputs=[x1], outputs=[y])
x_val = np.random.random((3, 3))
y_val = np.ones((3, 3))
self.assertAllClose(fn([x_val])[0], np.matmul(x_val, y_val), atol=1e-5)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_reraising_exception(self):
# When layer is not dynamic, we have some pattern matching during
# exception handling to detect when the user is trying to use python
# control flow. When an exception is thrown but the pattern doesn't
# match, we want to preserve the originating stack trace. An early
# implementation of this logic lost the stack trace. We test the correct
# behavior here.
class TypeErrorLayer(base_layer.Layer):
def call(self, inputs):
def easily_identifiable_name():
raise TypeError("Non-matching TypeError message.")
easily_identifiable_name()
inputs = input_layer.Input((3,))
try:
_ = TypeErrorLayer()(inputs)
except TypeError as e:
self.assertIn("easily_identifiable_name", str(e))
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_summaries_in_tf_function(self):
if not tf.executing_eagerly():
return
class MyLayer(base_layer.Layer):
def call(self, inputs):
tf.summary.scalar("mean", tf.reduce_mean(inputs))
return inputs
tmp_dir = self.get_temp_dir()
writer = tf.summary.create_file_writer(tmp_dir)
with writer.as_default(step=1), tf.summary.record_if(True):
my_layer = MyLayer()
x = tf.ones((10, 10))
def my_fn(x):
return my_layer(x)
_ = my_fn(x)
event_file = tf.compat.v1.gfile.Glob(os.path.join(tmp_dir, "events*"))
self.assertLen(event_file, 1)
event_file = event_file[0]
tags = set()
for e in tf.compat.v1.train.summary_iterator(event_file):
for val in e.summary.value:
tags.add(val.tag)
self.assertEqual(set(["my_layer/mean"]), tags)
@test_combinations.generate(
test_combinations.combine(mode=["graph", "eager"])
)
def test_error_when_passing_non_tensor(self):
# layers that have an `input_spec` will raise an error when called on
# non-tensors. This covers all built-in layers.
layer = layers.Dense(3)
x = object()
with self.assertRaisesRegex(TypeError, r"should be tensors"):
layer(x)
@test_utils.run_v2_only
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class NestedTrackingTest(tf.test.TestCase):
def test_nested_layer_variable_tracking(self):
# Test that variables from nested sublayers are
# being tracked by subclassed layers.
class MyLayer(base_layer.Layer):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(1)
self.dense2 = layers.BatchNormalization()
def build(self, input_shape):
self.v1 = self.add_weight("v1", shape=input_shape[1:].as_list())
self.v2 = tf.Variable(
name="v2",
initial_value=np.zeros(
input_shape[1:].as_list(), dtype="float32"
),
trainable=False,
)
def call(self, inputs):
x = self.dense1(inputs) + self.dense2(inputs)
return x + self.v1 + self.v2
layer = MyLayer()
inputs = input_layer.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 5)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.dense1.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 5)
layer.trainable = False
self.assertEqual(len(layer.weights), 8)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 8)
self.assertEqual(
{id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},
{id(v) for v in layer._trackable_children().values()},
)
def test_nested_layer_updates_losses_tracking(self):
# Test that updates and losses from nested sublayers are
# being tracked by subclassed layers.
class UpdateAndLossLayer(base_layer.Layer):
def build(self, _):
self.v1 = self.add_weight("v1", shape=())
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs))
self.add_update(tf.compat.v1.assign_add(self.v1, 1))
return inputs + 1
class MyLayer(base_layer.Layer):
def build(self, _):
self.v1 = self.add_weight("v1", shape=())
def __init__(self):
super().__init__()
self.ul1 = UpdateAndLossLayer()
self.ul2 = UpdateAndLossLayer()
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs))
self.add_update(tf.compat.v1.assign_add(self.v1, 1))
x = self.ul1(inputs)
return self.ul2(x)
layer = MyLayer()
if tf.executing_eagerly():
inputs = tf.ones((3, 1))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
else:
inputs = input_layer.Input((1,))
_ = layer(inputs)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.updates), 3)
def test_attribute_reassignment(self):
l = base_layer.Layer()
l.a = base_layer.Layer()
l.a = []
l.a = tf.Variable(1.0)
l.a = base_layer.Layer()
last_assignment = base_layer.Layer()
l.a = last_assignment
l.b = tf.Variable(1.0)
del l.b
l.c = base_layer.Layer()
del l.c
l.d = last_assignment
del l.d
sublayers = list(l._flatten_layers(include_self=False, recursive=False))
self.assertEqual([last_assignment], sublayers)
self.assertEqual([], l.trainable_weights)
self.assertEqual([], l.non_trainable_weights)
self.assertEqual([], l.weights)
del l.a
self.assertEqual([], l._self_tracked_trackables)
def test_layer_class_not_tracked_as_sublayer(self):
# See https://github.com/tensorflow/tensorflow/issues/27431 for details.
class LayerWithClassAttribute(base_layer.Layer):
def __init__(self):
super().__init__()
self.layer_fn = layers.Dense
layer = LayerWithClassAttribute()
self.assertEmpty(layer.variables)
self.assertEmpty(layer.submodules)
def test_layer_call_fn_args(self):
class NonDefunLayer(base_layer.Layer):
def call(self, inputs, a, mask, b=None, training=None):
return inputs
class DefunLayer(base_layer.Layer):
@tf.function
def call(self, x, mask, a, training=None, b=None):
return x
nondefun_layer = NonDefunLayer()
self.assertEqual(
nondefun_layer._call_spec.arg_names,
["inputs", "a", "mask", "b", "training"],
)
defun_layer = DefunLayer()
self.assertEqual(
defun_layer._call_spec.arg_names,
["x", "mask", "a", "training", "b"],
)
def test_sequential_model(self):
model = sequential.Sequential(
[layers.Dense(10, input_shape=(10,)), layers.Dense(5)]
)
self.assertLen(model.layers, 2)
self.assertLen(model.weights, 4)
# Make sure a subclass model also works when it is called 'Sequential'.
class Sequential(training_lib.Model):
def __init__(self):
super().__init__()
self.dense_layers = [layers.Dense(10), layers.Dense(5)]
def call(self, inputs):
x = inputs
for d in self.dense_layers:
x = d(x)
return x
s = Sequential()
self.assertLen(s.layers, 2)
self.assertLen(s.weights, 0)
s(input_layer.Input((10,)))
self.assertLen(s.weights, 4)
@test_utils.run_v2_only
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class NameScopingTest(test_combinations.TestCase):
def test_name_scope_layer(self):
x = backend.placeholder(shape=(10, 10))
layer = layers.Dense(10, name="MyName")
layer(x)
self.assertEqual(layer.bias.name, "MyName/bias:0")
self.assertEqual(layer.kernel.name, "MyName/kernel:0")
def test_name_scope_functional_api(self):
inputs = input_layer.Input((3,))
layer = layers.Dense(10, name="MyName")
_ = layer(inputs)
self.assertEqual(layer.bias.name, "MyName/bias:0")
self.assertEqual(layer.kernel.name, "MyName/kernel:0")
def test_name_scope_functional_api_nested(self):
class NestedLayer(base_layer.Layer):
def __init__(self, name="OuterName"):
super().__init__(name=name)
self.dense = layers.Dense(10, name="InnerName")
def call(self, inputs):
return self.dense(inputs)
inputs = input_layer.Input((3,))
layer = NestedLayer()
_ = layer(inputs)
self.assertEqual(layer.dense.bias.name, "OuterName/InnerName/bias:0")
self.assertEqual(
layer.dense.kernel.name, "OuterName/InnerName/kernel:0"
)
def test_name_scope_sublayer(self):
class NameScopeTracker(base_layer.Layer):
def call(self, inputs):
self.active_name_scope = tf.__internal__.get_name_scope()
return inputs
x = backend.placeholder(shape=(10, 10))
sublayer = NameScopeTracker(name="Sublayer")
layer = layers.Dense(10, activation=sublayer, name="MyName2")
layer(x)
self.assertEqual(layer.bias.name, "MyName2/bias:0")
self.assertEqual(layer.kernel.name, "MyName2/kernel:0")
self.assertEqual(sublayer.active_name_scope, "MyName2/Sublayer")
def test_name_scope_tf_tensor(self):
x = tf.convert_to_tensor(np.ones((10, 10)))
layer = layers.Dense(
10, activation=layers.ReLU(name="MyAct"), name="MyName3"
)
layer(x)
self.assertEqual(layer.bias.name, "MyName3/bias:0")
self.assertEqual(layer.kernel.name, "MyName3/kernel:0")
@test_utils.run_v2_only
def test_apply_name_scope_on_model_declaration(self):
if not tf.executing_eagerly():
self.skipTest(
"`apply_name_scope_on_model_declaration` API is supported"
" only for V2 eager"
)
base_layer._apply_name_scope_on_model_declaration(True)
inputs = input_layer.Input((3,))
x = layers.Dense(10, name="Dense1")(inputs)
with tf.name_scope("outer"):
x = layers.Dense(10, name="Dense2")(x)
with tf.name_scope("inner"):
x = layers.Dense(10, name="Dense3")(x)
x = layers.Dense(10, name="Dense4")(x)
outputs = layers.Dense(10, name="Dense5")(x)
model = training_lib.Model(inputs, outputs)
node_names = self._get_model_node_names(
model, np.random.random((1, 3)), "call_scope"
)
self.assertListEqual(
node_names,
[
"call_scope/Const",
"call_scope/model/Cast",
"call_scope/model/Dense1/MatMul/ReadVariableOp/resource",
"call_scope/model/Dense1/MatMul/ReadVariableOp",
"call_scope/model/Dense1/MatMul",
"call_scope/model/Dense1/BiasAdd/ReadVariableOp/resource",
"call_scope/model/Dense1/BiasAdd/ReadVariableOp",
"call_scope/model/Dense1/BiasAdd",
"call_scope/model/outer/Dense2/MatMul/ReadVariableOp/resource",
"call_scope/model/outer/Dense2/MatMul/ReadVariableOp",
"call_scope/model/outer/Dense2/MatMul",
"call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp/resource",
"call_scope/model/outer/Dense2/BiasAdd/ReadVariableOp",
"call_scope/model/outer/Dense2/BiasAdd",
"call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp/"
"resource",
"call_scope/model/outer/inner/Dense3/MatMul/ReadVariableOp",
"call_scope/model/outer/inner/Dense3/MatMul",
"call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp/"
"resource",
"call_scope/model/outer/inner/Dense3/BiasAdd/ReadVariableOp",
"call_scope/model/outer/inner/Dense3/BiasAdd",
"call_scope/model/outer/Dense4/MatMul/ReadVariableOp/resource",
"call_scope/model/outer/Dense4/MatMul/ReadVariableOp",
"call_scope/model/outer/Dense4/MatMul",
"call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp/resource",
"call_scope/model/outer/Dense4/BiasAdd/ReadVariableOp",
"call_scope/model/outer/Dense4/BiasAdd",
"call_scope/model/Dense5/MatMul/ReadVariableOp/resource",
"call_scope/model/Dense5/MatMul/ReadVariableOp",
"call_scope/model/Dense5/MatMul",
"call_scope/model/Dense5/BiasAdd/ReadVariableOp/resource",
"call_scope/model/Dense5/BiasAdd/ReadVariableOp",
"call_scope/model/Dense5/BiasAdd",
"Identity",
"NoOp",
],
)
base_layer._apply_name_scope_on_model_declaration(False)
@test_utils.run_v2_only
def test_apply_name_scope_on_nested_layer_model_declaration(self):
if not tf.executing_eagerly():
self.skipTest(
"`apply_name_scope_on_model_declaration` API is supported"
" only for V2 eager"
)
base_layer._apply_name_scope_on_model_declaration(True)
class ThreeDenses(layers.Layer):
def __init__(self, name="ThreeDenses", **kwargs):
super().__init__(name=name, **kwargs)
self.inner_dense_1 = layers.Dense(10, name="NestedDense1")
with tf.name_scope("inner1/inner2"):
self.inner_dense_2 = layers.Dense(20, name="NestedDense2")
self.inner_dense_3 = layers.Dense(30, name="NestedDense3")
def call(self, x):
x = self.inner_dense_1(x)
x = self.inner_dense_2(x)
x = self.inner_dense_3(x)
return x
inputs = input_layer.Input((3,))
with tf.name_scope("outer"):
x = ThreeDenses()(inputs)
outputs = layers.Dense(10, name="OuterDense")(x)
model = training_lib.Model(inputs, outputs)
node_names = self._get_model_node_names(
model, np.random.random((1, 3)), "call_scope"
)
self.assertListEqual(
node_names,
[
"call_scope/Const",
"call_scope/model/Cast",
"call_scope/model/outer/ThreeDenses/NestedDense1/MatMul/"
"ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/NestedDense1/MatMul/"
"ReadVariableOp",
"call_scope/model/outer/ThreeDenses/NestedDense1/MatMul",
"call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd/"
"ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd/"
"ReadVariableOp",
"call_scope/model/outer/ThreeDenses/NestedDense1/BiasAdd",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/MatMul/ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/MatMul/ReadVariableOp",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/MatMul",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/BiasAdd/ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/BiasAdd/ReadVariableOp",
"call_scope/model/outer/ThreeDenses/inner1/inner2/"
"NestedDense2/BiasAdd",
"call_scope/model/outer/ThreeDenses/NestedDense3/"
"MatMul/ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/NestedDense3/"
"MatMul/ReadVariableOp",
"call_scope/model/outer/ThreeDenses/NestedDense3/MatMul",
"call_scope/model/outer/ThreeDenses/NestedDense3/"
"BiasAdd/ReadVariableOp/resource",
"call_scope/model/outer/ThreeDenses/NestedDense3/"
"BiasAdd/ReadVariableOp",
"call_scope/model/outer/ThreeDenses/NestedDense3/BiasAdd",
"call_scope/model/OuterDense/MatMul/ReadVariableOp/resource",
"call_scope/model/OuterDense/MatMul/ReadVariableOp",
"call_scope/model/OuterDense/MatMul",
"call_scope/model/OuterDense/BiasAdd/ReadVariableOp/resource",
"call_scope/model/OuterDense/BiasAdd/ReadVariableOp",
"call_scope/model/OuterDense/BiasAdd",
"Identity",
"NoOp",
],
)
base_layer._apply_name_scope_on_model_declaration(False)
def _get_model_node_names(self, model, inputs, call_name_scope):
"""Returns a list of model's node names."""
@tf.function()
def wrapper():
with tf.name_scope(call_name_scope):
return model(inputs)
return [
node.name
for node in wrapper.get_concrete_function()
.graph.as_graph_def()
.node
]
@test_utils.run_v2_only
@test_combinations.generate(
test_combinations.keras_mode_combinations(mode=["eager"])
)
class AutographControlFlowTest(test_combinations.TestCase):
def test_disabling_in_context_is_matched(self):
test_obj = self
class MyLayer(base_layer.Layer):
def call(self, inputs, training=None):
with test_obj.assertRaisesRegex(TypeError, "Tensor.*as.*bool"):
if tf.constant(False):
return inputs * 1.0
return inputs * 0.0
@tf.function(autograph=False)
def test_fn():
return MyLayer()(tf.constant([[1.0, 2.0, 3.0]]))
test_fn()
def test_if_training_pattern_output(self):
class MyLayer(base_layer.Layer):
def call(self, inputs, training=None):
if training:
return inputs * 1.0
return inputs * 0.0
inputs = input_layer.Input((3,))
outputs = MyLayer()(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 0.0)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(test_loss, 1.0)
def test_if_training_pattern_loss(self):
class MyLayer(base_layer.Layer):
def call(self, inputs, training=None):
if training:
loss = tf.reduce_sum(inputs)
else:
loss = 0.0
self.add_loss(loss)
return inputs
inputs = input_layer.Input((3,))
outputs = MyLayer()(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 2 * 3)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(test_loss, 0)
def test_if_training_pattern_metric(self):
class MyLayer(base_layer.Layer):
def call(self, inputs, training=None):
if training:
metric = tf.reduce_sum(inputs)
else:
metric = 0.0
self.add_metric(metric, name="my_metric", aggregation="mean")
return inputs
inputs = input_layer.Input((3,))
outputs = MyLayer()(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
for _ in range(3):
_, train_metric = model.train_on_batch(
np.ones((2, 3)), np.ones((2, 3))
)
self.assertEqual(train_metric, 2 * 3)
_, test_metric = model.test_on_batch(
np.ones((2, 3)), np.ones((2, 3))
)
self.assertEqual(test_metric, 0)
def test_if_training_pattern_update(self):
class MyLayer(base_layer.Layer):
def build(self, input_shape):
self.counter = self.add_weight(
shape=(), trainable=False, initializer="zeros"
)
def call(self, inputs, training=None):
if training:
increment = 1.0
else:
increment = 0.0
self.counter.assign_add(increment)
return inputs
inputs = input_layer.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(backend.get_value(layer.counter), 1.0)
def test_conditional_losses_in_call(self):
class MyLayer(base_layer.Layer):
def __init__(self):
super().__init__(dynamic=test_utils.should_run_eagerly())
def call(self, inputs, training=None):
if training:
self.add_loss(tf.reduce_sum(inputs))
return inputs
def compute_output_shape(self, input_shape):
return input_shape
inputs = input_layer.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
def test_conditional_callable_losses(self):
model = sequential.Sequential(
[
layers.Dense(
1,
kernel_regularizer=regularizers.l2(1e-4),
input_shape=(1,),
)
]
)
model._run_eagerly = test_utils.should_run_eagerly()
def assert_graph(t):
if not tf.executing_eagerly():
self.assertEqual(t.graph, tf.compat.v1.get_default_graph())
@tf.function
def get_losses(t):
if t < 0:
return tf.reduce_sum(model.losses) * t
else:
return tf.reduce_sum(model.losses)
assert_graph(get_losses(tf.constant(2.0)))
assert_graph(get_losses(tf.constant(0.5)))
def test_conditional_metrics_in_call(self):
class MyLayer(base_layer.Layer):
def __init__(self):
super().__init__(dynamic=test_utils.should_run_eagerly())
def call(self, inputs, training=None):
if training:
self.add_metric(
tf.reduce_sum(inputs), name="sum", aggregation="mean"
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
inputs = input_layer.Input((3,))
layer = MyLayer()
outputs = layer(inputs)
model = training_lib.Model(inputs, outputs)
model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(history.history["sum"][-1], 2 * 3)
def test_conditional_activity_regularizer_in_call(self):
class TestModel(training_lib.Model):
def __init__(self):
super().__init__(
name="test_model", dynamic=test_utils.should_run_eagerly()
)
self.layer = layers.Dense(2, activity_regularizer="l2")
def call(self, x, training=None):
if tf.greater(tf.reduce_sum(x), 0.0):
return self.layer(x)
else:
return self.layer(x)
model = TestModel()
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
if test_utils.should_run_eagerly():
model.fit(x, y, epochs=2, batch_size=5)
else:
with self.assertRaisesRegex(ValueError, "ActivityRegularizer"):
model.fit(x, y, epochs=2, batch_size=5)
def test_conditional_activity_regularizer_with_wrappers_in_call(self):
class TestModel(training_lib.Model):
def __init__(self):
super().__init__(
name="test_model", dynamic=test_utils.should_run_eagerly()
)
self.layer = layers.TimeDistributed(
layers.Dense(2, activity_regularizer="l2"),
input_shape=(3, 4),
)
def call(self, x, training=None):
if tf.greater(tf.reduce_sum(x), 0.0):
return self.layer(x)
else:
return self.layer(x)
model = TestModel()
model.compile(
loss="mse",
optimizer="sgd",
run_eagerly=test_utils.should_run_eagerly(),
)
x = np.ones(shape=(10, 3, 4))
y = np.ones(shape=(10, 3, 2))
if test_utils.should_run_eagerly():
model.fit(x, y, epochs=2, batch_size=5)
else:
with self.assertRaisesRegex(ValueError, "ActivityRegularizer"):
model.fit(x, y, epochs=2, batch_size=5)
class AddLayer(base_layer.Layer):
"""A layer which adds its input to a variable.
Useful for testing a layer with a variable
"""
def build(self, _):
self.v = self.add_weight("v", (), initializer="ones")
self.built = True
def call(self, inputs):
return inputs + self.v
class IdentityLayer(base_layer.Layer):
"""A layer that returns its input.
Useful for testing a layer without a variable.
"""
def call(self, inputs):
return inputs
@test_utils.run_v2_only
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class DTypeTest(test_combinations.TestCase):
def _const(self, dtype):
return tf.constant(1, dtype=dtype)
@test_utils.enable_v2_dtype_behavior
def test_dtype_defaults_to_floatx(self):
layer = AddLayer()
self.assertEqual(layer.dtype, "float32")
layer(self._const("float64"))
self.assertEqual(layer.dtype, "float32") # dtype should not change
try:
backend.set_floatx("float64")
layer = AddLayer()
self.assertEqual(layer.dtype, "float64")
finally:
backend.set_floatx("float32")
@test_utils.enable_v2_dtype_behavior
def test_passing_dtype_to_constructor(self):
layer = IdentityLayer(dtype="float64")
layer(self._const("float32"))
self.assertEqual(layer.dtype, "float64")
layer = IdentityLayer(dtype="int32")
layer(self._const("float32"))
self.assertEqual(layer.dtype, "int32")
layer = IdentityLayer(dtype=tf.float64)
layer(self._const("float32"))
self.assertEqual(layer.dtype, "float64")
@test_utils.enable_v2_dtype_behavior
def input_cast_to_dtype(self):
layer = AddLayer()
# Input should be cast to layer.dtype, so output should also be
# layer.dtype
self.assertEqual(layer(self._const("float64")).dtype, "float32")
layer = AddLayer(dtype="float64")
self.assertEqual(layer(self._const("float32")).dtype, "float64")
# Test inputs are not casted if layer.dtype is not floating-point
layer = IdentityLayer(dtype="int32")
self.assertEqual(layer(self._const("float64")).dtype, "float64")
# Test inputs are not casted if the inputs are not floating-point
layer = IdentityLayer(dtype="float32")
self.assertEqual(layer(self._const("int32")).dtype, "int32")
# Test Numpy arrays are casted
layer = IdentityLayer(dtype="float64")
self.assertEqual(layer(np.array(1, dtype="float32")).dtype, "float64")
# Test Python floats are casted
layer = IdentityLayer(dtype="float64")
self.assertEqual(layer(1.0).dtype, "float64")
@test_utils.enable_v2_dtype_behavior
def multiple_inputs_cast_to_dtype(self):
class MultiIdentityLayer(base_layer.Layer):
def call(self, inputs):
return [tf.identity(x) for x in inputs]
# Testing layer with default dtype of float32
layer = MultiIdentityLayer()
x, y = layer([self._const("float16"), self._const("float32")])
self.assertEqual(x.dtype, "float32")
self.assertEqual(y.dtype, "float32")
# Test passing dtype to the constructor
layer = MultiIdentityLayer(dtype="float64")
x, y = layer([self._const("float16"), self._const("float32")])
self.assertEqual(x.dtype, "float64")
self.assertEqual(y.dtype, "float64")
# Test several non-floating point types
layer = MultiIdentityLayer(dtype="float64")
x, y, z, w = layer(
[
self._const("float16"),
self._const("bool"),
self._const("float64"),
self._constant("complex64"),
]
)
self.assertEqual(x.dtype, "float64")
self.assertEqual(y.dtype, "bool")
self.assertEqual(z.dtype, "float64")
self.assertEqual(w.dtype, "complex64")
@test_utils.enable_v2_dtype_behavior
def test_extra_args_and_kwargs_not_casted(self):
class IdentityLayerWithArgs(base_layer.Layer):
def call(self, inputs, *args, **kwargs):
kwargs.pop("training", None)
return tf.nest.flatten([inputs, args, kwargs])
layer = IdentityLayerWithArgs(dtype="float64")
x, y, z = layer(
self._const("float16"),
self._const("float16"),
kwarg=self._const("float16"),
)
self.assertEqual(x.dtype, "float64")
self.assertEqual(y.dtype, "float16")
self.assertEqual(z.dtype, "float16")
@test_utils.enable_v2_dtype_behavior
def test_layer_without_autocast(self):
class IdentityLayerWithoutAutocast(IdentityLayer):
def __init__(self, *args, **kwargs):
kwargs["autocast"] = False
super().__init__(*args, **kwargs)
layer = IdentityLayerWithoutAutocast(dtype="float64")
self.assertEqual(layer(self._const("float32")).dtype, "float32")
@test_utils.enable_v2_dtype_behavior
def test_compute_output_signature(self):
class IdentityLayerWithOutputShape(IdentityLayer):
def compute_output_shape(self, input_shape):
return input_shape
layer = IdentityLayerWithOutputShape(dtype="float64")
output_signature = layer.compute_output_signature(
tf.TensorSpec(shape=(), dtype="float32")
)
self.assertEqual(output_signature.shape, ())
self.assertEqual(output_signature.dtype, "float64")
@test_utils.enable_v2_dtype_behavior
def test_composite_tensors_input_casting(self):
sparse = tf.SparseTensor(
indices=tf.constant([[0, 1], [2, 3]], dtype="int64"),
values=tf.constant([0.0, 1.0], dtype="float32"),
dense_shape=tf.constant([4, 4], dtype="int64"),
)
ragged = tf.RaggedTensor.from_row_splits(
values=tf.constant([1.0, 2.0, 3.0], dtype="float32"),
row_splits=tf.constant([0, 2, 2, 3], dtype="int64"),
)
layer = IdentityLayer(dtype="float16")
for x in sparse, ragged:
self.assertEqual(x.dtype, "float32")
y = layer(x)
self.assertEqual(y.dtype, "float16")
self.assertEqual(type(x), type(y))
@test_utils.enable_v2_dtype_behavior
def test_passing_non_tensor(self):
layer = IdentityLayer()
x = object()
y = layer(x) # Layer should not cast 'x', as it's not a tensor
self.assertIs(x, y)
@test_utils.disable_v2_dtype_behavior
def test_v1_behavior(self):
# Test dtype defaults to None and inferred from input
layer = IdentityLayer()
self.assertIsNone(layer.dtype)
layer(self._const("float64"))
self.assertEqual(layer.dtype, "float64")
# Test layer does not cast to dtype
self.assertEqual(layer(self._const("float32")).dtype, "float32")
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/base_layer_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/base_layer_test.py",
"repo_id": "tf-keras",
"token_count": 38473
} | 181 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for keras functional model."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import input_layer as input_layer_module
from tf_keras.engine import keras_tensor
from tf_keras.engine import node as node_module
_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG = (
"Found unexpected instance while processing input tensors for keras "
"functional model. Expecting KerasTensor which is from tf.keras.Input() "
"or output from keras layer call(). Got: {}"
)
def is_input_keras_tensor(tensor):
"""Check if tensor is directly generated from `tf.keras.Input`.
This check is useful when constructing the functional model, since we will
need to clone Nodes and KerasTensors if the model is building from non input
tensor.
Args:
tensor: A `KerasTensor` as inputs to the functional model.
Returns:
bool. Whether the tensor is directly generated from `tf.keras.Input`.
Raises:
ValueError: if the tensor is not a KerasTensor instance.
"""
if not node_module.is_keras_tensor(tensor):
raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(tensor))
return tensor.node.is_input
def find_nodes_by_inputs_and_outputs(inputs, outputs):
"""Fetch all Nodes in the graph defined by "inputs" and "outputs".
This method is used to find and then clone Nodes when creating a new
sub-model from an existing functional model.
Args:
inputs: A nested structure of KerasTensor to use as model inputs.
outputs: A nested structure of KerasTensor to use as model outputs.
Returns:
A list of Nodes that are connected to the inputs and outputs.
Raises:
ValueError: when inputs and outputs are disconnected or in case of
unexpected objects in the inputs/outputs.
"""
# We walk the graph bottom up, starting from output nodes, and keep tracing
# the upstream node, until we find all the inputs nodes. We don't use top
# down search here since we don't know whether a certain node is in the
# graph between inputs and outputs, e.g. a functional graph could have
# multiple outputs, and the user could choose a subset of them to build the
# model. The bottom up approach will ensure all the nodes we visit are
# actually in use. If we reach the top and didn't find the nodes in the
# `inputs`, that's an error, since the user didn't specify the correct
# inputs.
start_keras_tensors = tf.nest.flatten(outputs)
end_keras_tensors = tf.nest.flatten(inputs)
for t in start_keras_tensors + end_keras_tensors:
if not node_module.is_keras_tensor(t):
raise ValueError(_KERAS_TENSOR_TYPE_CHECK_ERROR_MSG.format(t))
end_ids = set([id(kt) for kt in end_keras_tensors])
# Track all the end tensors we found so far, if we didn't reach all the
# user-specified keras inputs after we finish the search, then that's an
# error since the inputs are disconnected from the outputs.
end_ids_found = set()
nodes_to_visit = []
nodes_in_graph = []
node_id_visited = set()
for t in start_keras_tensors:
nodes_to_visit.append(t.node)
while nodes_to_visit:
node = nodes_to_visit.pop(0)
if id(node) in node_id_visited:
continue
node_id_visited.add(id(node))
nodes_in_graph.append(node)
# Any input keras_tensor that produce the current node.
for kt in node.keras_inputs:
if id(kt) in end_ids:
# We found the inputs of the model, stop tracing upstream nodes
end_ids_found.add(id(kt))
continue
inbound_node = kt.node
# In case this is the tf.keras.Input node, we have reached the end
# of the tracing of upstream nodes. Any further tracing will just be
# an infinite loop. we should raise an error here since we didn't
# find the input in the user-specified inputs.
if inbound_node.is_input:
raise ValueError(
"Found input tensor cannot be reached given provided "
"output tensors. Please make sure the tensor {} is "
"included in the model inputs when building "
"functional model.".format(kt)
)
nodes_to_visit.append(inbound_node)
# Do a final check and make sure we have reached all the user-specified
# inputs
if end_ids != end_ids_found:
unvisited_inputs = [
kt for kt in end_keras_tensors if id(kt) not in end_ids_found
]
raise ValueError(
"Found unvisited input tensors that are disconnected from "
"the outputs: {}".format(unvisited_inputs)
)
return nodes_in_graph
def clone_graph_nodes(inputs, outputs):
"""Clone the `Node` between the inputs and output tensors.
This function is used to create a new functional model from any intermediate
keras tensors. The clone of the nodes mimic the behavior of reconstructing
the functional graph network by re-executing all the __call__ methods. The
cloned nodes will be appended to the layers.
Note that a new tf.keras.Inputs will be created for any items in the
`inputs`
Args:
inputs: A nested structure of keras_tensors.
outputs: A nested structure of keras_tensors.
Returns:
A pair of inputs and outputs, with cloned keras_tensors. They can be used
to create a new functional model.
"""
nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)
cloned_inputs = []
cloned_outputs = []
# We not only need to create copies of Nodes (mimic the calls), also need to
# clone keras_tensors to avoid the override of _keras_history attached on
# the keras_tensor. The following dict is used to track any keras tensor we
# cloned The key is the string ID of the original keras tensor, and value is
# the cloned keras_tensor instance.
kt_id_mapping = {}
for kt_input in tf.nest.flatten(inputs):
if kt_input.node.is_input:
# For any existing keras_tensor from tf.keras.Input, we leave them
# as is.
cloned_inputs.append(kt_input)
kt_id_mapping[id(kt_input)] = kt_input
else:
# We need to create a new tf.keras.Input for any intermediate
# keras_tensor
cpy = _clone_keras_tensor(kt_input)
cloned_input = input_layer_module.Input(tensor=cpy)
cloned_inputs.append(cloned_input)
kt_id_mapping[id(kt_input)] = cloned_input
cloned_inputs = tf.nest.pack_sequence_as(inputs, cloned_inputs)
for kt_output in tf.nest.flatten(outputs):
cpy = _clone_keras_tensor(kt_output)
# We reuse the _keras_history here, which contains the old information.
# It is used in the Node constructor to check if the tensor
# "is_keras_tensor()" The history will be override by the Node
# constructor anyway for the corresponding layer output anyway.
cpy._keras_history = kt_output._keras_history
cloned_outputs.append(cpy)
kt_id_mapping[id(kt_output)] = cpy
cloned_outputs = tf.nest.pack_sequence_as(outputs, cloned_outputs)
for node in nodes_to_clone:
# Clone any keras_tensors to avoid override of _keras_history
# Or reuse an existing keras_tensor if it has already been cloned.
output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping)
call_args_copy = clone_keras_tensors(node.call_args, kt_id_mapping)
call_kwargs_copy = clone_keras_tensors(node.call_kwargs, kt_id_mapping)
# Creating new nodes based on the existing node information. Node wires
# itself to inbound and outbound layers. The Node constructor actually
# updates this layer's self._inbound_nodes, sets _keras_history on the
# outputs, and adds itself to the `_outbound_nodes` of the layers that
# produced the inputs to this layer call.
node_module.Node(
node.layer,
call_args=call_args_copy,
call_kwargs=call_kwargs_copy,
outputs=output_copy,
)
return cloned_inputs, cloned_outputs
def clone_keras_tensors(args, keras_tensor_mapping):
"""Clone the keras tensors from the inputs.
For any KerasTensor instance in the `args`, a new copy of KerasTensor will
be created if it has not been cloned yet (by checking the
`keras_tensor_mapping`). For any other types, the instance will be
unchanged. This function is useful for cloning the Nodes since KerasTensor
can't be reused across the models.
Args:
args: A nested structure of objects, which could contain KerasTensor.
keras_tensor_mapping: A dict contains the ID of original KerasTensor, and
the cloned KerasTensor instance. The dict will be updated with newly
copied KerasTensor instances within this method.
Returns:
Same structure as inputs, with KerasTensor cloned.
"""
result = []
for obj in tf.nest.flatten(args):
if node_module.is_keras_tensor(obj):
if id(obj) in keras_tensor_mapping:
cpy = keras_tensor_mapping[id(obj)]
else:
# Create copy of keras_tensor if we haven't done it before
cpy = _clone_keras_tensor(obj)
cpy._keras_history = obj._keras_history
keras_tensor_mapping[id(obj)] = cpy
result.append(cpy)
else:
result.append(obj)
return tf.nest.pack_sequence_as(args, result)
def _clone_keras_tensor(kt):
"""Create an identical keras_tensor based on the input.
We use keras_tensor_to_placeholder and keras_tensor_from_tensor to make sure
inferred shape are not lost during the copy.
Args:
kt: the input KerasTensor.
Returns:
An identical copy of the input KerasTensor.
"""
# Create a scratch graph since we don't intend to use the placeholders.
with backend._scratch_graph() as scratch_graph:
with scratch_graph.as_default():
placeholder = keras_tensor.keras_tensor_to_placeholder(kt)
return keras_tensor.keras_tensor_from_tensor(placeholder)
| tf-keras/tf_keras/engine/functional_utils.py/0 | {
"file_path": "tf-keras/tf_keras/engine/functional_utils.py",
"repo_id": "tf-keras",
"token_count": 4249
} | 182 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model.fit calls with a Dataset object passed as validation_data."""
import io
import sys
from unittest import mock
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.engine import data_adapter
from tf_keras.layers import core
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import io_utils
# isort: off
from tensorflow.python.framework import (
test_util as tf_test_utils,
)
def _create_dataset(num_samples, batch_size):
input_data = np.random.rand(num_samples, 1)
expected_data = input_data * 3
dataset = tf.data.Dataset.from_tensor_slices((input_data, expected_data))
return dataset.shuffle(10 * batch_size).batch(batch_size)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class ValidationDatasetAndValidationSplit(
test_combinations.TestCase, parameterized.TestCase
):
"""Verifies when validation_data is provided validation_split is ignored.
The validation_split arg can't be passed in v1 mode because
training_utils_v1.py:validate_dataset_input will raise a ValueError that
validation_split is not supported when input x is a dataset or a dataset
iterator.
"""
@parameterized.named_parameters(
("with_default_falsey_validation_split", 0.0),
("with_non_falsey_validation_split", 0.1),
)
def test_ignore_validation_split_when_validation_dataset_is_present(
self, validation_split
):
# Create a model that learns y=Mx.
layers = [core.Dense(1)]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(
loss="mse", optimizer="adam", metrics=["mean_absolute_error"]
)
train_dataset = _create_dataset(num_samples=200, batch_size=10)
eval_dataset = _create_dataset(num_samples=50, batch_size=25)
# Make sure model.fit doesn't raise an error because of the mocking
# alone.
mock_train_validation_split_return = (
(train_dataset, None, None),
eval_dataset,
)
with mock.patch.object(
data_adapter,
"train_validation_split",
return_value=mock_train_validation_split_return,
) as mock_train_validation_split:
model.fit(
x=train_dataset,
validation_split=validation_split,
validation_data=eval_dataset,
epochs=2,
)
mock_train_validation_split.assert_not_called()
history = model.fit(
x=train_dataset, validation_data=eval_dataset, epochs=2
)
evaluation = model.evaluate(x=eval_dataset)
# See test_validation_dataset_with_no_step_arg for details.
self.assertAlmostEqual(
history.history["val_mean_absolute_error"][-1],
evaluation[-1],
places=5,
)
@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
class ValidationDatasetNoLimitTest(test_combinations.TestCase):
def test_validation_dataset_with_no_step_arg(self):
# Create a model that learns y=Mx.
layers = [core.Dense(1)]
model = test_utils.get_model_from_layers(layers, input_shape=(1,))
model.compile(
loss="mse", optimizer="adam", metrics=["mean_absolute_error"]
)
train_dataset = _create_dataset(num_samples=200, batch_size=10)
eval_dataset = _create_dataset(num_samples=50, batch_size=25)
history = model.fit(
x=train_dataset, validation_data=eval_dataset, epochs=2
)
evaluation = model.evaluate(x=eval_dataset)
# If the fit call used the entire dataset, then the final val MAE error
# from the fit history should be equal to the final element in the
# output of evaluating the model on the same eval dataset.
self.assertAlmostEqual(
history.history["val_mean_absolute_error"][-1],
evaluation[-1],
places=5,
)
class PrintTrainingInfoTest(test_combinations.TestCase, parameterized.TestCase):
@tf_test_utils.run_v1_only("Only relevant in graph mode.")
def test_print_info_with_datasets(self):
"""Print training info should work with val datasets (b/133391839)."""
model = keras.models.Sequential(
[keras.layers.Dense(1, input_shape=(1,))]
)
model.compile(loss="mse", optimizer="sgd")
dataset = (
tf.data.Dataset.from_tensors(([1.0], [1.0])).repeat(100).batch(10)
)
val_dataset = (
tf.data.Dataset.from_tensors(([1.0], [1.0])).repeat(50).batch(10)
)
mock_stdout = io.StringIO()
io_utils.enable_interactive_logging()
with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout):
model.fit(dataset, epochs=2, validation_data=val_dataset)
self.assertIn(
"Train on 10 steps, validate on 5 steps", mock_stdout.getvalue()
)
@parameterized.named_parameters(
("with_validation", True), ("without_validation", False)
)
@tf_test_utils.run_v1_only("Only relevant in graph mode.")
def test_print_info_with_numpy(self, do_validation):
"""Print training info should work with val datasets (b/133391839)."""
model = keras.models.Sequential(
[keras.layers.Dense(1, input_shape=(2,))]
)
model.compile(loss="mse", optimizer="sgd")
dataset = np.arange(200).reshape(100, 2)
if do_validation:
val_data = (
np.arange(100).reshape(50, 2),
np.arange(50).reshape(50, 1),
)
else:
val_data = None
mock_stdout = io.StringIO()
with tf.compat.v1.test.mock.patch.object(sys, "stdout", mock_stdout):
model.fit(
dataset, batch_size=10, epochs=2, validation_data=val_data
)
self.assertIn("Train on 100 samples", mock_stdout.getvalue())
if do_validation:
self.assertIn(", validate on 50 samples", mock_stdout.getvalue())
@test_combinations.run_all_keras_modes
def test_dict_float64_input(self):
class MyModel(keras.Model):
def __init__(self):
super().__init__(self)
self.dense1 = keras.layers.Dense(10, activation="relu")
self.dense2 = keras.layers.Dense(10, activation="relu")
self.concat = keras.layers.Concatenate()
self.dense3 = keras.layers.Dense(1, activation="sigmoid")
def call(self, inputs):
d1 = self.dense1(inputs["one"])
d2 = self.dense2(inputs["two"])
concat = self.concat([d1, d2])
return self.dense3(concat)
model = MyModel()
model.compile(
loss="mae",
optimizer="adam",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(
x={
"one": np.random.rand(100, 10, 1),
"two": np.random.rand(100, 10, 1),
},
y=np.random.rand(100, 10, 1),
)
def test_dict_validation_input(self):
"""Test case for GitHub issue 30122."""
train_input_0 = np.random.rand(1000, 1)
train_input_1 = np.random.rand(1000, 1)
train_labels = np.random.rand(1000, 1)
val_input_0 = np.random.rand(1000, 1)
val_input_1 = np.random.rand(1000, 1)
val_labels = np.random.rand(1000, 1)
input_0 = keras.Input(shape=(None,), name="input_0")
input_1 = keras.Input(shape=(None,), name="input_1")
class my_model(keras.Model):
def __init__(self):
super().__init__(self)
self.hidden_layer_0 = keras.layers.Dense(100, activation="relu")
self.hidden_layer_1 = keras.layers.Dense(100, activation="relu")
self.concat = keras.layers.Concatenate()
self.out_layer = keras.layers.Dense(1, activation="sigmoid")
def call(self, inputs=[input_0, input_1]):
activation_0 = self.hidden_layer_0(inputs["input_0"])
activation_1 = self.hidden_layer_1(inputs["input_1"])
concat = self.concat([activation_0, activation_1])
return self.out_layer(concat)
model = my_model()
model.compile(loss="mae", optimizer="adam")
model.fit(
x={"input_0": train_input_0, "input_1": train_input_1},
y=train_labels,
validation_data=(
{"input_0": val_input_0, "input_1": val_input_1},
val_labels,
),
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/engine/training_arrays_test.py/0 | {
"file_path": "tf-keras/tf_keras/engine/training_arrays_test.py",
"repo_id": "tf-keras",
"token_count": 4461
} | 183 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras estimator API."""
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.util.tf_export import keras_export
# TF-Keras has undeclared dependency on tensorflow/estimator:estimator_py.
# As long as you depend //third_party/py/tensorflow:tensorflow target
# everything will work as normal.
# LINT.IfChange
@keras_export(v1=["keras.estimator.model_to_estimator"])
def model_to_estimator(
keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None,
checkpoint_format="saver",
metric_names_map=None,
export_outputs=None,
):
"""Constructs an `Estimator` instance from given keras model.
If you use infrastructure or other tooling that relies on Estimators, you
can still build a TF-Keras model and use model_to_estimator to convert the
TF-Keras model to an Estimator for use with downstream systems.
For usage example, please see:
[Creating estimators from TF-Keras Models](
https://www.tensorflow.org/guide/estimator#create_an_estimator_from_a_keras_model).
Sample Weights:
Estimators returned by `model_to_estimator` are configured so that they can
handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`).
To pass sample weights when training or evaluating the Estimator, the first
item returned by the input function should be a dictionary with keys
`features` and `sample_weights`. Example below:
```python
keras_model = tf.keras.Model(...)
keras_model.compile(...)
estimator = tf.keras.estimator.model_to_estimator(keras_model)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Example with customized export signature:
```python
inputs = {'a': tf.keras.Input(..., name='a'),
'b': tf.keras.Input(..., name='b')}
outputs = {'c': tf.keras.layers.Dense(..., name='c')(inputs['a']),
'd': tf.keras.layers.Dense(..., name='d')(inputs['b'])}
keras_model = tf.keras.Model(inputs, outputs)
keras_model.compile(...)
export_outputs = {'c': tf.estimator.export.RegressionOutput,
'd': tf.estimator.export.ClassificationOutput}
estimator = tf.keras.estimator.model_to_estimator(
keras_model, export_outputs=export_outputs)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Args:
keras_model: A compiled TF-Keras model object. This argument is mutually
exclusive with `keras_model_path`. Estimator's `model_fn` uses the
structure of the model to clone the model. Defaults to `None`.
keras_model_path: Path to a compiled TF-Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras
model. This argument is mutually exclusive with `keras_model`.
Defaults to `None`.
custom_objects: Dictionary for cloning customized objects. This is
used with classes that is not part of this pip package. For example, if
user maintains a `relu6` class that inherits from
`tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`.
Defaults to `None`.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc. If unset a directory will be created with
`tempfile.mkdtemp`
config: `RunConfig` to config `Estimator`. Allows setting up things in
`model_fn` based on configuration such as `num_ps_replicas`, or
`model_dir`. If both `config.model_dir` and the
`model_dir` argument (above) are specified the `model_dir` **argument**
takes precedence. Defaults to `None`.
checkpoint_format: Sets the format of the checkpoint saved by the
estimator when training. May be `saver` or `checkpoint`, depending on
whether to save checkpoints from `tf.train.Saver` or
`tf.train.Checkpoint`. Estimators use name-based `tf.train.Saver`
checkpoints, while TF-Keras models use object-based checkpoints from
`tf.train.Checkpoint`. Currently, saving object-based checkpoints
from `model_to_estimator` is only supported by Functional and
Sequential models. Defaults to 'saver'.
metric_names_map: Optional dictionary mapping TF-Keras model output metric
names to custom names. This can be used to override the default Keras
model output metrics names in a multi IO model use case and provide
custom names for the `eval_metric_ops` in Estimator.
TF-Keras model metric names can be obtained using `model.metrics_names`
excluding any loss metrics such as total loss and output losses.
For example, if your TF-Keras model has two outputs `out_1` and `out_2`,
with `mse` loss and `acc` metric, then `model.metrics_names` will be
`['loss', 'out_1_loss', 'out_2_loss', 'out_1_acc', 'out_2_acc']`.
The model metric names excluding the loss metrics will be
`['out_1_acc', 'out_2_acc']`.
export_outputs: Optional dictionary. This can be used to override the
default TF-Keras model output exports in a multi IO model use case and
provide custom names for the `export_outputs` in
`tf.estimator.EstimatorSpec`. Default is None, which is equivalent to
{'serving_default': `tf.estimator.export.PredictOutput`}. If not None,
the keys must match the keys of `model.output_names`.
A dict `{name: output}` where:
* name: An arbitrary name for this output.
* output: an `ExportOutput` class such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`. Single-headed models only
need to specify one entry in this dictionary. Multi-headed models
should specify one entry for each head, one of which must be named
using
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`
If no entry is provided, a default `PredictOutput` mapping to
`predictions` will be created.
Returns:
An Estimator from given keras model.
Raises:
ValueError: If neither keras_model nor keras_model_path was given.
ValueError: If both keras_model and keras_model_path was given.
ValueError: If the keras_model_path is a GCS URI.
ValueError: If keras_model has not been compiled.
ValueError: If an invalid checkpoint_format was given.
"""
try:
# isort: off
from tensorflow_estimator.python.estimator import (
keras_lib,
)
except ImportError:
raise NotImplementedError(
"tf.keras.estimator.model_to_estimator function not available in "
"your installation."
)
return keras_lib.model_to_estimator(
keras_model=keras_model,
keras_model_path=keras_model_path,
custom_objects=custom_objects,
model_dir=model_dir,
config=config,
checkpoint_format=checkpoint_format,
use_v2_estimator=False,
metric_names_map=metric_names_map,
export_outputs=export_outputs,
)
@keras_export("keras.estimator.model_to_estimator", v1=[])
def model_to_estimator_v2(
keras_model=None,
keras_model_path=None,
custom_objects=None,
model_dir=None,
config=None,
checkpoint_format="checkpoint",
metric_names_map=None,
export_outputs=None,
):
"""Constructs an `Estimator` instance from given keras model.
If you use infrastructure or other tooling that relies on Estimators, you
can still build a TF-Keras model and use model_to_estimator to convert the
TF-Keras model to an Estimator for use with downstream systems.
For usage example, please see:
[Creating estimators from TF-Keras Models](
https://www.tensorflow.org/guide/estimators#creating_estimators_from_keras_models).
Sample Weights:
Estimators returned by `model_to_estimator` are configured so that they can
handle sample weights (similar to `keras_model.fit(x, y, sample_weights)`).
To pass sample weights when training or evaluating the Estimator, the first
item returned by the input function should be a dictionary with keys
`features` and `sample_weights`. Example below:
```python
keras_model = tf.keras.Model(...)
keras_model.compile(...)
estimator = tf.keras.estimator.model_to_estimator(keras_model)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Example with customized export signature:
```python
inputs = {'a': tf.keras.Input(..., name='a'),
'b': tf.keras.Input(..., name='b')}
outputs = {'c': tf.keras.layers.Dense(..., name='c')(inputs['a']),
'd': tf.keras.layers.Dense(..., name='d')(inputs['b'])}
keras_model = tf.keras.Model(inputs, outputs)
keras_model.compile(...)
export_outputs = {'c': tf.estimator.export.RegressionOutput,
'd': tf.estimator.export.ClassificationOutput}
estimator = tf.keras.estimator.model_to_estimator(
keras_model, export_outputs=export_outputs)
def input_fn():
return dataset_ops.Dataset.from_tensors(
({'features': features, 'sample_weights': sample_weights},
targets))
estimator.train(input_fn, steps=1)
```
Note: We do not support creating weighted metrics in TF-Keras and converting
them to weighted metrics in the Estimator API using `model_to_estimator`.
You will have to create these metrics directly on the estimator spec using
the `add_metrics` function.
To customize the estimator `eval_metric_ops` names, you can pass in the
`metric_names_map` dictionary mapping the keras model output metric names
to the custom names as follows:
```python
input_a = tf.keras.layers.Input(shape=(16,), name='input_a')
input_b = tf.keras.layers.Input(shape=(16,), name='input_b')
dense = tf.keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')
output_a = tf.keras.layers.Dense(3, activation='softmax', name='dense_2')(
merged)
output_b = tf.keras.layers.Dense(2, activation='softmax', name='dense_3')(
merged)
keras_model = tf.keras.models.Model(
inputs=[input_a, input_b], outputs=[output_a, output_b])
keras_model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics={
'dense_2': 'categorical_accuracy',
'dense_3': 'categorical_accuracy'
})
metric_names_map = {
'dense_2_categorical_accuracy': 'acc_1',
'dense_3_categorical_accuracy': 'acc_2',
}
keras_est = tf.keras.estimator.model_to_estimator(
keras_model=keras_model,
config=config,
metric_names_map=metric_names_map)
```
Args:
keras_model: A compiled TF-Keras model object. This argument is mutually
exclusive with `keras_model_path`. Estimator's `model_fn` uses the
structure of the model to clone the model. Defaults to `None`.
keras_model_path: Path to a compiled TF-Keras model saved on disk, in HDF5
format, which can be generated with the `save()` method of a Keras
model. This argument is mutually exclusive with `keras_model`.
Defaults to `None`.
custom_objects: Dictionary for cloning customized objects. This is
used with classes that is not part of this pip package. For example, if
user maintains a `relu6` class that inherits from
`tf.keras.layers.Layer`, then pass `custom_objects={'relu6': relu6}`.
Defaults to `None`.
model_dir: Directory to save `Estimator` model parameters, graph, summary
files for TensorBoard, etc. If unset a directory will be created with
`tempfile.mkdtemp`
config: `RunConfig` to config `Estimator`. Allows setting up things in
`model_fn` based on configuration such as `num_ps_replicas`, or
`model_dir`. If both `config.model_dir` and the
`model_dir` argument (above) are specified the `model_dir` **argument**
takes precedence. Defaults to `None`.
checkpoint_format: Sets the format of the checkpoint saved by the
estimator when training. May be `saver` or `checkpoint`, depending on
whether to save checkpoints from `tf.compat.v1.train.Saver` or
`tf.train.Checkpoint`. The default is `checkpoint`. Estimators use
name-based `tf.train.Saver` checkpoints, while TF-Keras models use
object-based checkpoints from `tf.train.Checkpoint`. Currently, saving
object-based checkpoints from `model_to_estimator` is only supported by
Functional and Sequential models. Defaults to 'checkpoint'.
metric_names_map: Optional dictionary mapping TF-Keras model output metric
names to custom names. This can be used to override the default Keras
model output metrics names in a multi IO model use case and provide
custom names for the `eval_metric_ops` in Estimator.
TF-Keras model metric names can be obtained using `model.metrics_names`
excluding any loss metrics such as total loss and output losses.
For example, if your TF-Keras model has two outputs `out_1` and `out_2`,
with `mse` loss and `acc` metric, then `model.metrics_names` will be
`['loss', 'out_1_loss', 'out_2_loss', 'out_1_acc', 'out_2_acc']`.
The model metric names excluding the loss metrics will be
`['out_1_acc', 'out_2_acc']`.
export_outputs: Optional dictionary. This can be used to override the
default TF-Keras model output exports in a multi IO model use case and
provide custom names for the `export_outputs` in
`tf.estimator.EstimatorSpec`. Default is None, which is equivalent to
{'serving_default': `tf.estimator.export.PredictOutput`}. If not None,
the keys must match the keys of `model.output_names`.
A dict `{name: output}` where:
* name: An arbitrary name for this output.
* output: an `ExportOutput` class such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`. Single-headed models only
need to specify one entry in this dictionary. Multi-headed models
should specify one entry for each head, one of which must be named
using
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`
If no entry is provided, a default `PredictOutput` mapping to
`predictions` will be created.
Returns:
An Estimator from given keras model.
Raises:
ValueError: If neither keras_model nor keras_model_path was given.
ValueError: If both keras_model and keras_model_path was given.
ValueError: If the keras_model_path is a GCS URI.
ValueError: If keras_model has not been compiled.
ValueError: If an invalid checkpoint_format was given.
"""
try:
# isort: off
from tensorflow_estimator.python.estimator import (
keras_lib,
)
except ImportError:
raise NotImplementedError(
"tf.keras.estimator.model_to_estimator function not available in "
"your installation."
)
return keras_lib.model_to_estimator(
keras_model=keras_model,
keras_model_path=keras_model_path,
custom_objects=custom_objects,
model_dir=model_dir,
config=config,
checkpoint_format=checkpoint_format,
use_v2_estimator=True,
metric_names_map=metric_names_map,
export_outputs=export_outputs,
)
# LINT.ThenChange(//tensorflow_estimator/python/estimator/keras_lib.py)
| tf-keras/tf_keras/estimator/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/estimator/__init__.py",
"repo_id": "tf-keras",
"token_count": 6559
} | 184 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer serialization / deserialization."""
import threading
import warnings
import tensorflow.compat.v2 as tf
from tf_keras.initializers import initializers
from tf_keras.initializers import initializers_v1
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_inspect as inspect
# isort: off
from tensorflow.python import tf2
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import keras_export
# LOCAL.ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
"""Populates dict ALL_OBJECTS with every built-in initializer."""
global LOCAL
if not hasattr(LOCAL, "ALL_OBJECTS"):
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = None
if (
LOCAL.ALL_OBJECTS
and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled()
):
# Objects dict is already generated for the proper TF version:
# do nothing.
return
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
# Compatibility aliases (need to exist in both V1 and V2).
LOCAL.ALL_OBJECTS["ConstantV2"] = initializers.Constant
LOCAL.ALL_OBJECTS["GlorotNormalV2"] = initializers.GlorotNormal
LOCAL.ALL_OBJECTS["GlorotUniformV2"] = initializers.GlorotUniform
LOCAL.ALL_OBJECTS["HeNormalV2"] = initializers.HeNormal
LOCAL.ALL_OBJECTS["HeUniformV2"] = initializers.HeUniform
LOCAL.ALL_OBJECTS["IdentityV2"] = initializers.Identity
LOCAL.ALL_OBJECTS["LecunNormalV2"] = initializers.LecunNormal
LOCAL.ALL_OBJECTS["LecunUniformV2"] = initializers.LecunUniform
LOCAL.ALL_OBJECTS["OnesV2"] = initializers.Ones
LOCAL.ALL_OBJECTS["OrthogonalV2"] = initializers.Orthogonal
LOCAL.ALL_OBJECTS["RandomNormalV2"] = initializers.RandomNormal
LOCAL.ALL_OBJECTS["RandomUniformV2"] = initializers.RandomUniform
LOCAL.ALL_OBJECTS["TruncatedNormalV2"] = initializers.TruncatedNormal
LOCAL.ALL_OBJECTS["VarianceScalingV2"] = initializers.VarianceScaling
LOCAL.ALL_OBJECTS["ZerosV2"] = initializers.Zeros
# Out of an abundance of caution we also include these aliases that have
# a non-zero probability of having been included in saved configs in the
# past.
LOCAL.ALL_OBJECTS["glorot_normalV2"] = initializers.GlorotNormal
LOCAL.ALL_OBJECTS["glorot_uniformV2"] = initializers.GlorotUniform
LOCAL.ALL_OBJECTS["he_normalV2"] = initializers.HeNormal
LOCAL.ALL_OBJECTS["he_uniformV2"] = initializers.HeUniform
LOCAL.ALL_OBJECTS["lecun_normalV2"] = initializers.LecunNormal
LOCAL.ALL_OBJECTS["lecun_uniformV2"] = initializers.LecunUniform
if tf.__internal__.tf2.enabled():
# For V2, entries are generated automatically based on the content of
# initializers.py.
v2_objs = {}
base_cls = initializers.Initializer
generic_utils.populate_dict_with_module_objects(
v2_objs,
[initializers],
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls),
)
for key, value in v2_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
else:
# V1 initializers.
v1_objs = {
"Constant": tf.compat.v1.constant_initializer,
"GlorotNormal": tf.compat.v1.glorot_normal_initializer,
"GlorotUniform": tf.compat.v1.glorot_uniform_initializer,
"Identity": tf.compat.v1.initializers.identity,
"Ones": tf.compat.v1.ones_initializer,
"Orthogonal": tf.compat.v1.orthogonal_initializer,
"VarianceScaling": tf.compat.v1.variance_scaling_initializer,
"Zeros": tf.compat.v1.zeros_initializer,
"HeNormal": initializers_v1.HeNormal,
"HeUniform": initializers_v1.HeUniform,
"LecunNormal": initializers_v1.LecunNormal,
"LecunUniform": initializers_v1.LecunUniform,
"RandomNormal": initializers_v1.RandomNormal,
"RandomUniform": initializers_v1.RandomUniform,
"TruncatedNormal": initializers_v1.TruncatedNormal,
}
for key, value in v1_objs.items():
LOCAL.ALL_OBJECTS[key] = value
# Functional aliases.
LOCAL.ALL_OBJECTS[generic_utils.to_snake_case(key)] = value
# More compatibility aliases.
LOCAL.ALL_OBJECTS["normal"] = LOCAL.ALL_OBJECTS["random_normal"]
LOCAL.ALL_OBJECTS["uniform"] = LOCAL.ALL_OBJECTS["random_uniform"]
LOCAL.ALL_OBJECTS["one"] = LOCAL.ALL_OBJECTS["ones"]
LOCAL.ALL_OBJECTS["zero"] = LOCAL.ALL_OBJECTS["zeros"]
# For backwards compatibility, we populate this file with the objects
# from ALL_OBJECTS. We make no guarantees as to whether these objects will
# using their correct version.
populate_deserializable_objects()
globals().update(LOCAL.ALL_OBJECTS)
# Utility functions
@keras_export("keras.initializers.serialize")
def serialize(initializer, use_legacy_format=False):
populate_deserializable_objects()
if initializer is None:
return None
if not isinstance(initializer, tuple(LOCAL.ALL_OBJECTS.values())):
warnings.warn(
"The `keras.initializers.serialize()` API should only be used for "
"objects of type `keras.initializers.Initializer`. Found an "
f"instance of type {type(initializer)}, which may lead to improper "
"serialization."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(initializer)
return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.initializers.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
"""Return an `Initializer` object from its config."""
populate_deserializable_objects()
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="initializer",
)
return serialization_lib.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="initializer",
)
@keras_export("keras.initializers.get")
def get(identifier):
"""Retrieve a TF-Keras initializer by the identifier.
The `identifier` may be the string name of a initializers function or class
(case-sensitively).
>>> identifier = 'Ones'
>>> tf.keras.initializers.deserialize(identifier)
<...keras.initializers.initializers.Ones...>
You can also specify `config` of the initializer to this function by passing
dict containing `class_name` and `config` as an identifier. Also note that
the `class_name` must map to a `Initializer` class.
>>> cfg = {'class_name': 'Ones', 'config': {}}
>>> tf.keras.initializers.deserialize(cfg)
<...keras.initializers.initializers.Ones...>
In the case that the `identifier` is a class, this method will return a new
instance of the class by its constructor.
Args:
identifier: String or dict that contains the initializer name or
configurations.
Returns:
Initializer instance base on the input identifier.
Raises:
ValueError: If the input identifier is not a supported type or in a bad
format.
"""
if identifier is None:
return None
if isinstance(identifier, dict):
use_legacy_format = "module" not in identifier
return deserialize(identifier, use_legacy_format=use_legacy_format)
elif isinstance(identifier, str):
config = {"class_name": str(identifier), "config": {}}
return get(config)
elif callable(identifier):
if inspect.isclass(identifier):
identifier = identifier()
return identifier
else:
raise ValueError(
"Could not interpret initializer identifier: " + str(identifier)
)
| tf-keras/tf_keras/initializers/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/initializers/__init__.py",
"repo_id": "tf-keras",
"token_count": 3560
} | 185 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow.compat.v2 as tf
import tf_keras as keras
class TestKerasModelClass(keras.Model):
"""A simple tensorflow keras Model class definition."""
def __init__(self, width):
super().__init__()
self.width = width
def build(self, input_shape):
self.weight = self.add_weight(
name="test_keras_var",
shape=(self.width,),
dtype=tf.float32,
trainable=True,
)
def call(self, inputs):
return self.weight * inputs
class GradientsTest(tf.test.TestCase):
def _TestVariablesGradient(self, inputs, test_model, vars_to_grad):
"""Returns gradients of `test_model` with respect to `vars_to_grad`."""
test_model_re = tf.recompute_grad(test_model)
with tf.GradientTape(persistent=True) as tape:
tape.watch(vars_to_grad)
out_re = test_model_re(inputs)
out = test_model(inputs)
grads_re = tape.gradient(out_re, vars_to_grad)
grads = tape.gradient(out, vars_to_grad)
return grads_re, grads
def testKerasRecompute(self):
"""Checks that recompute_grad works for a simple TF-Keras Model."""
test_model = TestKerasModelClass(10)
test_input = tf.constant(tf.zeros((10, 10), dtype=np.float32))
# Ensures keras model is initialized.
test_model(test_input)
grads_re, grads = self._TestVariablesGradient(
test_input, test_model, test_input
)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
grads_re, grads = self._TestVariablesGradient(
test_input, test_model, test_model.variables
)
grads_re = self.evaluate(grads_re)
grads = self.evaluate(grads)
for g, g_re in zip(grads, grads_re):
self.assertAllClose(g, g_re)
def testLSTMBatchJacobian(self):
class HasLSTM(keras.Model):
def __init__(self):
super().__init__()
self.lstm = keras.layers.LSTM(units=5)
self.dense = keras.layers.Dense(1, activation=tf.nn.sigmoid)
def call(self, x):
return self.dense(self.lstm(x))
m = HasLSTM()
def jacobian(x):
with tf.GradientTape() as tape:
tape.watch(x)
y = m(x)
return tape.batch_jacobian(y, x)
inp = tf.nn.l2_normalize(tf.ones([1, 2, 3]), axis=[1, 2])
eager_result = jacobian(inp)
function_result = tf.function(jacobian)(inp)
self.assertAllClose(eager_result, function_result)
backprop_result, numeric_result = tf.test.compute_gradient(
m, [inp], delta=1e-3
)
self.assertAllClose(numeric_result, backprop_result, atol=1e-3)
self.assertAllClose(
tf.reshape(numeric_result, [-1]),
tf.reshape(eager_result, [-1]),
atol=1e-3,
)
def testEmbeddingLookupGradientsHaveKnownShape(self):
class MyLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embedding = None
def build(self, input_shape):
self.embedding = tf.Variable(tf.random.uniform([50, 16]))
def call(self, x):
return tf.nn.embedding_lookup(self.embedding, x)
layer = MyLayer()
@tf.function
def _run(x):
with tf.GradientTape() as tape:
y = layer(x)
loss = tf.math.reduce_sum(y)
gradients = tape.gradient(loss, layer.weights)
self.assertListEqual(gradients[0].shape.as_list(), [50, 16])
_run(tf.random.uniform([4, 16], minval=0, maxval=50, dtype=tf.int64))
if __name__ == "__main__":
if tf.__internal__.tf2.enabled():
tf.test.main()
| tf-keras/tf_keras/integration_test/gradients_test.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/gradients_test.py",
"repo_id": "tf-keras",
"token_count": 2176
} | 186 |
from tensorflow import keras
from tf_keras.integration_test.models.input_spec import InputSpec
TIMESTEPS = 32
def get_data_spec(batch_size):
return (
InputSpec((batch_size, TIMESTEPS, 1)),
InputSpec((batch_size, 1)),
)
def get_input_preprocessor():
return None
def get_model(
build=False, compile=False, jit_compile=False, include_preprocessing=True
):
model = keras.Sequential(
[
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32),
keras.layers.Dense(1),
]
)
if build:
model.build((None, TIMESTEPS, 1))
if compile:
model.compile(
optimizer=keras.optimizers.Adam(),
loss="mse",
jit_compile=jit_compile,
)
return model
def get_custom_objects():
return {}
| tf-keras/tf_keras/integration_test/models/timeseries_forecasting.py/0 | {
"file_path": "tf-keras/tf_keras/integration_test/models/timeseries_forecasting.py",
"repo_id": "tf-keras",
"token_count": 406
} | 187 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LeakyReLU layer."""
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
@test_combinations.run_all_keras_modes
class LeakyReLUTest(test_combinations.TestCase):
def test_leaky_relu(self):
for alpha in [0.0, 0.5]:
test_utils.layer_test(
keras.layers.LeakyReLU,
kwargs={"alpha": alpha},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_leaky_relu_with_invalid_alpha(self):
# Test case for GitHub issue 46993.
with self.assertRaisesRegex(
ValueError,
"The alpha value of a Leaky ReLU layer "
"cannot be None. Expecting a float. Received: None",
):
test_utils.layer_test(
keras.layers.LeakyReLU,
kwargs={"alpha": None},
input_shape=(2, 3, 4),
supports_masking=True,
)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/activation/leaky_relu_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/activation/leaky_relu_test.py",
"repo_id": "tf-keras",
"token_count": 718
} | 188 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests BaseDenseAttention layer."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from tf_keras.layers.attention.base_dense_attention import BaseDenseAttention
from tf_keras.layers.attention.base_dense_attention import (
_lower_triangular_mask,
)
from tf_keras.testing_infra import test_combinations
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class BaseDenseAttentionTest(tf.test.TestCase, parameterized.TestCase):
def test_one_dim_with_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 1]
scores_mask = np.array([[[True]]], dtype=np.bool_)
actual, actual_scores = BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask
)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.0]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_no_mask(self):
# Scores tensor of shape [1, 1, 1]
scores = np.array([[[1.1]]], dtype=np.float32)
# Value tensor of shape [1, 1, 1]
v = np.array([[[1.6]]], dtype=np.float32)
actual, actual_scores = BaseDenseAttention()._apply_scores(
scores=scores, value=v
)
# Expected softmax_scores = [[[1]]]
expected_scores = np.array([[[1.0]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
expected = np.array([[[1.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_with_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1.0, 0.0, 1.0]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
# Scores mask tensor of shape [1, 1, 3]
scores_mask = np.array([[[True, True, False]]], dtype=np.bool_)
actual, actual_scores = BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask
)
# Expected softmax scores = softmax(scores) with zeros in positions
# where v_mask == False.
# => softmax_scores000 = exp(1)/(exp(1) + exp(0)) = 0.73105857863
# softmax_scores001 = exp(0)/(exp(1) + exp(0)) = 0.26894142137
# softmax_scores002 = 0
expected_scores = np.array(
[[[0.73105857863, 0.26894142137, 0.0]]], dtype=np.float32
)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.73105857863 * 1.6 + 0.26894142137 * 0.7 - 0 * 0.8
# = 1.35795272077
expected = np.array([[[1.35795272077]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_multi_dim_no_mask(self):
# Scores tensor of shape [1, 1, 3]
scores = np.array([[[1.0, 0.0, 1.0]]], dtype=np.float32)
# Value tensor of shape [1, 3, 1]
v = np.array([[[1.6], [0.7], [-0.8]]], dtype=np.float32)
actual, actual_scores = BaseDenseAttention()._apply_scores(
scores=scores, value=v
)
# Expected softmax_scores = softmax(scores).
# => softmax_scores000 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
# softmax_scores001 = exp(0)/(exp(1) + exp(0) + exp(1))
# = 0.15536240349
# softmax_scores002 = exp(1)/(exp(1) + exp(0) + exp(1))
# = 0.42231879825
expected_scores = np.array(
[[[0.42231879825, 0.15536240349, 0.42231879825]]], dtype=np.float32
)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [1, 1, 1].
# expected000 = 0.42231879825 * 1.6 + 0.15536240349 * 0.7
# - 0.42231879825 * 0.8
# = 0.44660872104
expected = np.array([[[0.44660872104]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_one_dim_batch_size_two(self):
# Scores tensor of shape [2, 1, 1]
scores = np.array([[[1.1]], [[2.1]]], dtype=np.float32)
# Value tensor of shape [2, 1, 1]
v = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
# Scpres mask tensor of shape [2, 1, 1]
scores_mask = np.array([[[True]], [[True]]], dtype=np.bool_)
actual, actual_scores = BaseDenseAttention()._apply_scores(
scores=scores, value=v, scores_mask=scores_mask
)
# Expected softmax_scores = [[[1]], [[1]]]
expected_scores = np.array([[[1.0]], [[1.0]]], dtype=np.float32)
self.assertAllClose(expected_scores, actual_scores)
# Expected tensor of shape [2, 1, 1].
# expected000 = softmax_scores[0, 0] * 1.6 = 1.6
# expected100 = softmax_scores[1, 0] * 2.6 = 2.6
expected = np.array([[[1.6]], [[2.6]]], dtype=np.float32)
self.assertAllClose(expected, actual)
def test_shape_with_dropout(self):
# scores: Scores float tensor of shape `[batch_size, tq, tv]`.
# value: Value tensor of shape `[batch_size, tv, dim]`.
batch_size = 4
tq = 5
tv = 6
dim = 7
scores = np.ones((batch_size, tq, tv))
value = np.ones((batch_size, tv, dim))
actual, actual_scores = BaseDenseAttention(dropout=0.1)._apply_scores(
scores=scores, value=value, training=False
)
# Expected Tensor of shape `[batch_size, tq, tv]`.
expected_scores_shape = [batch_size, tq, tv]
self.assertAllEqual(expected_scores_shape, tf.shape(actual_scores))
# Expected Tensor of shape `[batch_size, tq, dim]`.
expected_shape = [batch_size, tq, dim]
self.assertAllEqual(expected_shape, tf.shape(actual))
def test_skip_rng_init_when_no_dropout(self):
batch_size = 4
tq = 5
tv = 6
dim = 7
scores = np.ones((batch_size, tq, tv))
value = np.ones((batch_size, tv, dim))
layer = BaseDenseAttention()
layer.build(None) # The input shape is not used by this layer
_, _ = layer._apply_scores(scores=scores, value=value, training=True)
# Make sure the rng is not built and no tf.random.Generator created.
self.assertFalse(layer._random_generator._built)
self.assertIsNone(getattr(layer._random_generator, "_generator", None))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class LowerTriangularMaskTest(tf.test.TestCase, parameterized.TestCase):
def test_square_shape(self):
actual = _lower_triangular_mask([3, 3])
expected = np.array(
[[True, False, False], [True, True, False], [True, True, True]],
dtype=np.bool_,
)
self.assertAllEqual(expected, actual)
def test_orthogonal_shape(self):
actual = _lower_triangular_mask([3, 2])
expected = np.array(
[[True, False], [True, True], [True, True]], dtype=np.bool_
)
self.assertAllEqual(expected, actual)
def test_three_dim(self):
actual = _lower_triangular_mask([1, 3, 3])
expected = np.array(
[[[True, False, False], [True, True, False], [True, True, True]]],
dtype=np.bool_,
)
self.assertAllEqual(expected, actual)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/attention/base_dense_attention_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/attention/base_dense_attention_test.py",
"repo_id": "tf-keras",
"token_count": 3991
} | 189 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras depthwise 1D convolution."""
import tensorflow.compat.v2 as tf
from tf_keras.layers.convolutional.base_depthwise_conv import DepthwiseConv
from tf_keras.utils import conv_utils
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.DepthwiseConv1D")
class DepthwiseConv1D(DepthwiseConv):
"""Depthwise 1D convolution.
Depthwise convolution is a type of convolution in which each input channel
is convolved with a different kernel (called a depthwise kernel). You can
understand depthwise convolution as the first step in a depthwise separable
convolution.
It is implemented via the following steps:
- Split the input into individual channels.
- Convolve each channel with an individual depthwise kernel with
`depth_multiplier` output channels.
- Concatenate the convolved outputs along the channels axis.
Unlike a regular 1D convolution, depthwise convolution does not mix
information across different input channels.
The `depth_multiplier` argument determines how many filter are applied to
one input channel. As such, it controls the amount of output channels that
are generated per input channel in the depthwise step.
Args:
kernel_size: An integer, specifying the height and width of the 1D
convolution window. Can be a single integer to specify the same value
for all spatial dimensions.
strides: An integer, specifying the strides of the convolution along the
height and width. Can be a single integer to specify the same value for
all spatial dimensions. Specifying any stride value != 1 is incompatible
with specifying any `dilation_rate` value != 1.
padding: one of `'valid'` or `'same'` (case-insensitive). `"valid"` means
no padding. `"same"` results in padding with zeros evenly to the
left/right or up/down of the input such that output has the same
height/width dimension as the input.
depth_multiplier: The number of depthwise convolution output channels for
each input channel. The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch_size, height,
width, channels)` while `channels_first` corresponds to inputs with
shape `(batch_size, channels, height, width)`. When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
dilation_rate: A single integer, specifying the dilation rate to use for
dilated convolution. Currently, specifying any `dilation_rate`
value != 1 is incompatible with specifying any stride value != 1.
activation: Activation function to use. If you don't specify anything, no
activation is applied (see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
depthwise_initializer: Initializer for the depthwise kernel matrix (see
`keras.initializers`). If None, the default initializer
('glorot_uniform') will be used.
bias_initializer: Initializer for the bias vector (see
`keras.initializers`). If None, the default initializer ('zeros') will
be used.
depthwise_regularizer: Regularizer function applied to the depthwise
kernel matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (see
`keras.regularizers`).
activity_regularizer: Regularizer function applied to the output of the
layer (its 'activation') (see `keras.regularizers`).
depthwise_constraint: Constraint function applied to the depthwise kernel
matrix (see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (see
`keras.constraints`).
Input shape:
3D tensor with shape: `[batch_size, channels, input_dim]` if
data_format='channels_first'
or 3D tensor with shape: `[batch_size, input_dim, channels]` if
data_format='channels_last'.
Output shape:
3D tensor with shape:
`[batch_size, channels * depth_multiplier, new_dims]`
if `data_format='channels_first'`
or 3D tensor with shape: `[batch_size,
new_dims, channels * depth_multiplier]` if
`data_format='channels_last'`. `new_dims` values might have
changed due to padding.
Returns:
A tensor of rank 3 representing
`activation(depthwiseconv1d(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
"""
def __init__(
self,
kernel_size,
strides=1,
padding="valid",
depth_multiplier=1,
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
depthwise_initializer="glorot_uniform",
bias_initializer="zeros",
depthwise_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
depthwise_constraint=None,
bias_constraint=None,
**kwargs
):
super().__init__(
1,
kernel_size=kernel_size,
strides=strides,
padding=padding,
depth_multiplier=depth_multiplier,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
depthwise_initializer=depthwise_initializer,
bias_initializer=bias_initializer,
depthwise_regularizer=depthwise_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
depthwise_constraint=depthwise_constraint,
bias_constraint=bias_constraint,
**kwargs
)
def call(self, inputs):
if self.data_format == "channels_last":
strides = (1,) + self.strides * 2 + (1,)
spatial_start_dim = 1
else:
strides = (1, 1) + self.strides * 2
spatial_start_dim = 2
inputs = tf.expand_dims(inputs, spatial_start_dim)
depthwise_kernel = tf.expand_dims(self.depthwise_kernel, axis=0)
dilation_rate = (1,) + self.dilation_rate
outputs = tf.nn.depthwise_conv2d(
inputs,
depthwise_kernel,
strides=strides,
padding=self.padding.upper(),
dilations=dilation_rate,
data_format=conv_utils.convert_data_format(
self.data_format, ndim=4
),
)
if self.use_bias:
outputs = tf.nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(
self.data_format, ndim=4
),
)
outputs = tf.squeeze(outputs, [spatial_start_dim])
if self.activation is not None:
return self.activation(outputs)
return outputs
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
input_dim = input_shape[2]
out_filters = input_shape[1] * self.depth_multiplier
elif self.data_format == "channels_last":
input_dim = input_shape[1]
out_filters = input_shape[2] * self.depth_multiplier
input_dim = conv_utils.conv_output_length(
input_dim,
self.kernel_size[0],
self.padding,
self.strides[0],
self.dilation_rate[0],
)
if self.data_format == "channels_first":
return (input_shape[0], out_filters, input_dim)
elif self.data_format == "channels_last":
return (input_shape[0], input_dim, out_filters)
| tf-keras/tf_keras/layers/convolutional/depthwise_conv1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/convolutional/depthwise_conv1d.py",
"repo_id": "tf-keras",
"token_count": 3527
} | 190 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the Lambda layer."""
import sys
import textwrap
import types as python_types
import warnings
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras.engine.base_layer import Layer
from tf_keras.saving import serialization_lib
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_inspect
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Lambda")
class Lambda(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary expressions can be used
as a `Layer` when constructing Sequential
and Functional API models. `Lambda` layers are best suited for simple
operations or quick experimentation. For more advanced use cases, follow
[this guide](
https://www.tensorflow.org/guide/keras/custom_layers_and_models)
for subclassing `tf.keras.layers.Layer`.
WARNING: `tf.keras.layers.Lambda` layers have (de)serialization limitations!
The main reason to subclass `tf.keras.layers.Layer` instead of using a
`Lambda` layer is saving and inspecting a Model. `Lambda` layers
are saved by serializing the Python bytecode, which is fundamentally
non-portable. They should only be loaded in the same environment where
they were saved. Subclassed layers can be saved in a more portable way
by overriding their `get_config()` method. Models that rely on
subclassed Layers are also often easier to visualize and reason about.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
**Note on Variables:**
While it is possible to use Variables with Lambda layers,
this practice is discouraged as it can easily lead to bugs.
For instance, consider the following layer:
```python
scale = tf.Variable(1.)
scale_layer = tf.keras.layers.Lambda(lambda x: x * scale)
```
Because `scale_layer` does not directly track the `scale` variable, it will
not appear in `scale_layer.trainable_weights` and will therefore not be
trained if `scale_layer` is used in a Model.
A better pattern is to write a subclassed Layer:
```python
class ScaleLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.scale = tf.Variable(1.)
def call(self, inputs):
return inputs * self.scale
```
In general, `Lambda` layers can be convenient for simple stateless
computation, but anything more complex should use a subclass Layer instead.
Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument can be
inferred if not explicitly provided. Can be a tuple or function. If a
tuple, it only specifies the first dimension onward;
sample dimension is assumed either the same as the input:
`output_shape = (input_shape[0], ) + output_shape` or, the input is
`None` and the sample dimension is also `None`:
`output_shape = (None, ) + output_shape` If a function, it specifies the
entire shape as a function of the input shape:
`output_shape = f(input_shape)`
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor that will be
returned as output mask regardless of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
Input shape: Arbitrary. Use the keyword argument input_shape (tuple of
integers, does not include the samples axis) when using this layer as the
first layer in a model.
Output shape: Specified by `output_shape` argument
"""
@tf.__internal__.tracking.no_automatic_dependency_tracking
def __init__(
self, function, output_shape=None, mask=None, arguments=None, **kwargs
):
super().__init__(**kwargs)
self.arguments = arguments or {}
self.function = function
if mask is not None:
self.supports_masking = True
self.mask = mask
self._output_shape = output_shape
# Warning on every invocation will be quite irksome in Eager mode.
self._already_warned = False
function_args = tf_inspect.getfullargspec(function).args
self._fn_expects_training_arg = "training" in function_args
self._fn_expects_mask_arg = "mask" in function_args
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Make use of existing autocomputation but provide Lambda-specific
# error message. This is always safe to run even when the outer
# context is Graph mode because Lambda layers don't have side
# effects such as `add_loss`.
with tf.__internal__.eager_context.eager_mode():
try:
return super().compute_output_shape(input_shape)
except NotImplementedError:
raise NotImplementedError(
"We could not automatically infer the shape of "
"the Lambda's output. Please specify `output_shape` "
"for this Lambda."
)
if callable(self._output_shape):
output_shapes = self._output_shape(input_shape)
return tf_utils.convert_shapes(output_shapes, to_tuples=False)
# Output shapes are passed directly and don't include batch dimension.
input_tensor_shape = tf_utils.convert_shapes(
input_shape, to_tuples=False
)
batch_size = (
tf.nest.flatten(input_tensor_shape)[0][0] if input_shape else None
)
def _add_batch(shape):
return tf.TensorShape([batch_size] + shape.as_list())
output_shapes = tf_utils.convert_shapes(
self._output_shape, to_tuples=False
)
return tf.nest.map_structure(_add_batch, output_shapes)
def call(self, inputs, mask=None, training=None):
# We must copy for thread safety, but it only needs to be a shallow
# copy.
kwargs = {k: v for k, v in self.arguments.items()}
if self._fn_expects_mask_arg:
kwargs["mask"] = mask
if self._fn_expects_training_arg:
kwargs["training"] = training
created_variables = []
def _variable_creator(next_creator, **kwargs):
var = next_creator(**kwargs)
created_variables.append(var)
return var
with tf.GradientTape(
watch_accessed_variables=True
) as tape, tf.variable_creator_scope(_variable_creator):
result = self.function(inputs, **kwargs)
self._check_variables(created_variables, tape.watched_variables())
return result
def _check_variables(self, created_variables, accessed_variables):
if not created_variables and not accessed_variables:
# In the common case that a Lambda layer does not touch a Variable,
# we don't want to incur the runtime cost of assembling any state
# used for checking only to immediately discard it.
return
# Filter out the state variable in the tf.random.Generator, which is
# commonly used for initializer or droput. The variable is intentionally
# not tracked and it is not a trainable variable.
created_variables = [
v for v in created_variables if "StateVar" not in v.name
]
tracked_weights = set(v.ref() for v in self.weights)
untracked_new_vars = [
v for v in created_variables if v.ref() not in tracked_weights
]
if untracked_new_vars:
variable_str = "\n".join(f" {i}" for i in untracked_new_vars)
error_str = textwrap.dedent(
"""
The following Variables were created within a Lambda layer ({name})
but are not tracked by said layer:
{variable_str}
The layer cannot safely ensure proper Variable reuse across multiple
calls, and consequently this behavior is disallowed for safety. Lambda
layers are not well suited to stateful computation; instead, writing a
subclassed Layer is the recommend way to define layers with
Variables."""
).format(name=self.name, variable_str=variable_str)
raise ValueError(error_str)
untracked_used_vars = [
v for v in accessed_variables if v.ref() not in tracked_weights
]
if untracked_used_vars and not self._already_warned:
variable_str = "\n".join(f" {i}" for i in untracked_used_vars)
self._warn(
textwrap.dedent(
"""
The following Variables were used a Lambda layer's call ({name}), but
are not present in its tracked objects:
{variable_str}
It is possible that this is intended behavior, but it is more likely
an omission. This is a strong indication that this layer should be
formulated as a subclassed Layer rather than a Lambda layer."""
).format(name=self.name, variable_str=variable_str)
)
self._already_warned = True
def _warn(self, msg):
# This method will be overridden in a unit test to raise an error,
# because self.assertWarns is not universally implemented.
return tf_logging.warning(msg)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
function_config = self._serialize_function_to_config(self.function)
output_shape_config = self._serialize_function_to_config(
self._output_shape, allow_raw=True
)
config = {
"function": function_config[0],
"function_type": function_config[1],
"module": function_config[2],
"output_shape": output_shape_config[0],
"output_shape_type": output_shape_config[1],
"output_shape_module": output_shape_config[2],
}
if self.mask is not None:
mask_config = self._serialize_function_to_config(self.mask)
config.update(
{
"mask": mask_config[0],
"mask_type": mask_config[1],
"mask_module": mask_config[2],
}
)
config["arguments"] = self.arguments
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def _serialize_function_to_config(self, inputs, allow_raw=False):
if isinstance(inputs, python_types.LambdaType):
output = generic_utils.func_dump(inputs)
output_type = "lambda"
module = inputs.__module__
elif callable(inputs):
output = inputs.__name__
output_type = "function"
module = inputs.__module__
elif allow_raw:
output = inputs
output_type = "raw"
module = None
else:
raise ValueError(
f"Invalid input for serialization, type: {type(inputs)} "
)
return output, output_type, module
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
function = cls._parse_function_from_config(
config, custom_objects, "function", "module", "function_type"
)
output_shape = cls._parse_function_from_config(
config,
custom_objects,
"output_shape",
"output_shape_module",
"output_shape_type",
)
if "mask" in config:
mask = cls._parse_function_from_config(
config, custom_objects, "mask", "mask_module", "mask_type"
)
else:
mask = None
config["function"] = function
config["output_shape"] = output_shape
config["mask"] = mask
# If arguments were numpy array, they have been saved as
# list. We need to recover the ndarray
if "arguments" in config:
for key in config["arguments"]:
if isinstance(config["arguments"][key], dict):
arg_dict = config["arguments"][key]
if "type" in arg_dict and arg_dict["type"] == "ndarray":
# Overwrite the argument with its numpy translation
config["arguments"][key] = np.array(arg_dict["value"])
return cls(**config)
@classmethod
def _parse_function_from_config(
cls,
config,
custom_objects,
func_attr_name,
module_attr_name,
func_type_attr_name,
):
globs = globals().copy()
module = config.pop(module_attr_name, None)
if module in sys.modules:
globs.update(sys.modules[module].__dict__)
elif module is not None:
# Note: we don't know the name of the function if it's a lambda.
warnings.warn(
"{} is not loaded, but a Lambda layer uses it. "
"It may cause errors.".format(module),
UserWarning,
stacklevel=2,
)
if custom_objects:
globs.update(custom_objects)
function_type = config.pop(func_type_attr_name)
if function_type == "function":
# Simple lookup in custom objects
function = serialization_lib.deserialize_keras_object(
config[func_attr_name],
custom_objects=custom_objects,
printable_module_name="function in Lambda layer",
)
elif function_type == "lambda":
if serialization_lib.in_safe_mode():
raise ValueError(
"Requested the deserialization of a Lambda layer with a "
"Python `lambda` inside it. "
"This carries a potential risk of arbitrary code execution "
"and thus it is disallowed by default. If you trust the "
"source of the saved model, you can pass `safe_mode=False` "
"to the loading function in order to allow "
"Lambda layer loading."
)
# /!\ Unsafe deserialization from bytecode! Danger! /!\
function = generic_utils.func_load(
config[func_attr_name], globs=globs
)
elif function_type == "raw":
function = config[func_attr_name]
else:
supported_types = ["function", "lambda", "raw"]
raise TypeError(
"Unsupported value for `function_type` argument. Received: "
f"function_type={function_type}. "
f"Expected one of {supported_types}"
)
return function
| tf-keras/tf_keras/layers/core/lambda_layer.py/0 | {
"file_path": "tf-keras/tf_keras/layers/core/lambda_layer.py",
"repo_id": "tf-keras",
"token_count": 6891
} | 191 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layer for 1D input."""
from tf_keras import activations
from tf_keras import backend
from tf_keras import constraints
from tf_keras import initializers
from tf_keras import regularizers
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.layers.locally_connected import locally_connected_utils
from tf_keras.utils import conv_utils
from tf_keras.utils import tf_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.LocallyConnected1D")
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Note: layer attributes cannot be modified after the layer has been called
once (except the `trainable` attribute).
Example:
```python
# apply a unshared weight convolution 1d of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer, specifying
the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer, specifying the
stride length of the convolution.
padding: Currently only supports `"valid"` (case-insensitive). `"same"`
may be supported in the future. `"valid"` means no padding.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape `(batch, length,
channels)` while `channels_first` corresponds to inputs with shape
`(batch, channels, length)`. When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
activation: Activation function to use. If you don't specify anything,
no activation is applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
implementation: implementation mode, either `1`, `2`, or `3`. `1` loops
over input spatial locations to perform the forward pass. It is
memory-efficient but performs a lot of (small) ops. `2` stores layer
weights in a dense but sparsely-populated 2D matrix and implements the
forward pass as a single matrix-multiply. It uses a lot of RAM but
performs few (large) ops. `3` stores layer weights in a sparse tensor
and implements the forward pass as a single sparse matrix-multiply.
How to choose:
`1`: large, dense models,
`2`: small models,
`3`: large, sparse models, where "large" stands for large
input/output activations (i.e. many `filters`, `input_filters`,
large `input_size`, `output_size`), and "sparse" stands for few
connections between inputs and outputs, i.e. small ratio
`filters * input_filters * kernel_size / (input_size * strides)`,
where inputs to and outputs of the layer are assumed to have
shapes `(input_size, input_filters)`, `(output_size, filters)`
respectively. It is recommended to benchmark each in the setting
of interest to pick the most efficient one (in terms of speed and
memory usage). Correct choice of implementation can lead to
dramatic speed improvements (e.g. 50X), potentially at the expense
of RAM. Also, only `padding="valid"` is supported by
`implementation=1`.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)` `steps` value
might have changed due to padding or strides.
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
implementation=1,
**kwargs,
):
super().__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, 1, "kernel_size"
)
self.strides = conv_utils.normalize_tuple(
strides, 1, "strides", allow_zero=True
)
self.padding = conv_utils.normalize_padding(padding)
if self.padding != "valid" and implementation == 1:
raise ValueError(
"Invalid border mode for LocallyConnected1D "
'(only "valid" is supported if implementation is 1): ' + padding
)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.implementation = implementation
self.input_spec = InputSpec(ndim=3)
@property
def _use_input_spec_as_call_signature(self):
return False
@tf_utils.shape_type_conversion
def build(self, input_shape):
if self.data_format == "channels_first":
input_dim, input_length = input_shape[1], input_shape[2]
else:
input_dim, input_length = input_shape[2], input_shape[1]
if input_dim is None:
raise ValueError(
"Axis 2 of input should be fully-defined. Found shape:",
input_shape,
)
self.output_length = conv_utils.conv_output_length(
input_length, self.kernel_size[0], self.padding, self.strides[0]
)
if self.output_length <= 0:
raise ValueError(
"One of the dimensions in the output is <= 0 "
f"due to downsampling in {self.name}. Consider "
"increasing the input size. "
f"Received input shape {input_shape} which would produce "
"output shape with a zero or negative value in a "
"dimension."
)
if self.implementation == 1:
self.kernel_shape = (
self.output_length,
self.kernel_size[0] * input_dim,
self.filters,
)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
elif self.implementation == 2:
if self.data_format == "channels_first":
self.kernel_shape = (
input_dim,
input_length,
self.filters,
self.output_length,
)
else:
self.kernel_shape = (
input_length,
input_dim,
self.output_length,
self.filters,
)
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.kernel_mask = (
locally_connected_utils.get_locallyconnected_mask(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
)
elif self.implementation == 3:
self.kernel_shape = (
self.output_length * self.filters,
input_length * input_dim,
)
self.kernel_idxs = sorted(
conv_utils.conv_kernel_idxs(
input_shape=(input_length,),
kernel_shape=self.kernel_size,
strides=self.strides,
padding=self.padding,
filters_in=input_dim,
filters_out=self.filters,
data_format=self.data_format,
)
)
self.kernel = self.add_weight(
shape=(len(self.kernel_idxs),),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
else:
raise ValueError(
"Unrecognized implementation mode: %d." % self.implementation
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.output_length, self.filters),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
if self.data_format == "channels_first":
self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
else:
self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
self.built = True
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
if self.data_format == "channels_first":
input_length = input_shape[2]
else:
input_length = input_shape[1]
length = conv_utils.conv_output_length(
input_length, self.kernel_size[0], self.padding, self.strides[0]
)
if self.data_format == "channels_first":
return (input_shape[0], self.filters, length)
elif self.data_format == "channels_last":
return (input_shape[0], length, self.filters)
def call(self, inputs):
if self.implementation == 1:
output = backend.local_conv(
inputs,
self.kernel,
self.kernel_size,
self.strides,
(self.output_length,),
self.data_format,
)
elif self.implementation == 2:
output = locally_connected_utils.local_conv_matmul(
inputs,
self.kernel,
self.kernel_mask,
self.compute_output_shape(inputs.shape),
)
elif self.implementation == 3:
output = locally_connected_utils.local_conv_sparse_matmul(
inputs,
self.kernel,
self.kernel_idxs,
self.kernel_shape,
self.compute_output_shape(inputs.shape),
)
else:
raise ValueError(
"Unrecognized implementation mode: %d." % self.implementation
)
if self.use_bias:
output = backend.bias_add(
output, self.bias, data_format=self.data_format
)
output = self.activation(output)
return output
def get_config(self):
config = {
"filters": self.filters,
"kernel_size": self.kernel_size,
"strides": self.strides,
"padding": self.padding,
"data_format": self.data_format,
"activation": activations.serialize(self.activation),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"bias_constraint": constraints.serialize(self.bias_constraint),
"implementation": self.implementation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| tf-keras/tf_keras/layers/locally_connected/locally_connected1d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/locally_connected/locally_connected1d.py",
"repo_id": "tf-keras",
"token_count": 6810
} | 192 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras Pooling layers."""
# Pooling layer aliases.
# Pooling layers.
from tf_keras.layers.pooling.average_pooling1d import AveragePooling1D
from tf_keras.layers.pooling.average_pooling1d import AvgPool1D
from tf_keras.layers.pooling.average_pooling2d import AveragePooling2D
from tf_keras.layers.pooling.average_pooling2d import AvgPool2D
from tf_keras.layers.pooling.average_pooling3d import AveragePooling3D
from tf_keras.layers.pooling.average_pooling3d import AvgPool3D
from tf_keras.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D,
)
from tf_keras.layers.pooling.global_average_pooling1d import GlobalAvgPool1D
from tf_keras.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D,
)
from tf_keras.layers.pooling.global_average_pooling2d import GlobalAvgPool2D
from tf_keras.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D,
)
from tf_keras.layers.pooling.global_average_pooling3d import GlobalAvgPool3D
from tf_keras.layers.pooling.global_max_pooling1d import GlobalMaxPool1D
from tf_keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
from tf_keras.layers.pooling.global_max_pooling2d import GlobalMaxPool2D
from tf_keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
from tf_keras.layers.pooling.global_max_pooling3d import GlobalMaxPool3D
from tf_keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
from tf_keras.layers.pooling.max_pooling1d import MaxPool1D
from tf_keras.layers.pooling.max_pooling1d import MaxPooling1D
from tf_keras.layers.pooling.max_pooling2d import MaxPool2D
from tf_keras.layers.pooling.max_pooling2d import MaxPooling2D
from tf_keras.layers.pooling.max_pooling3d import MaxPool3D
from tf_keras.layers.pooling.max_pooling3d import MaxPooling3D
| tf-keras/tf_keras/layers/pooling/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/layers/pooling/__init__.py",
"repo_id": "tf-keras",
"token_count": 833
} | 193 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras CategoryEncoding preprocessing layer."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.layers.preprocessing import preprocessing_utils as utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
INT = utils.INT
ONE_HOT = utils.ONE_HOT
MULTI_HOT = utils.MULTI_HOT
COUNT = utils.COUNT
@keras_export(
"keras.layers.CategoryEncoding",
"keras.layers.experimental.preprocessing.CategoryEncoding",
)
class CategoryEncoding(base_layer.Layer):
"""A preprocessing layer which encodes integer features.
This layer provides options for condensing data into a categorical encoding
when the total number of tokens are known in advance. It accepts integer
values as inputs, and it outputs a dense or sparse representation of those
inputs. For integer inputs where the total number of tokens is not known,
use `tf.keras.layers.IntegerLookup` instead.
For an overview and full list of preprocessing layers, see the preprocessing
[guide](https://www.tensorflow.org/guide/keras/preprocessing_layers).
Examples:
**One-hot encoding data**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="one_hot")
>>> layer([3, 2, 0, 1])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[0., 0., 0., 1.],
[0., 0., 1., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]], dtype=float32)>
**Multi-hot encoding data**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="multi_hot")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
**Using weighted inputs in `"count"` mode**
>>> layer = tf.keras.layers.CategoryEncoding(
... num_tokens=4, output_mode="count")
>>> count_weights = np.array([[.1, .2], [.1, .1], [.2, .3], [.4, .2]])
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]], count_weights=count_weights)
<tf.Tensor: shape=(4, 4), dtype=float64, numpy=
array([[0.1, 0.2, 0. , 0. ],
[0.2, 0. , 0. , 0. ],
[0. , 0.2, 0.3, 0. ],
[0. , 0.2, 0. , 0.4]], dtype=float32)>
Args:
num_tokens: The total number of tokens the layer should support. All
inputs to the layer must integers in the range `0 <= value <
num_tokens`, or an error will be thrown.
output_mode: Specification for the output of the layer.
Values can be `"one_hot"`, `"multi_hot"` or
`"count"`, configuring the layer as follows:
- `"one_hot"`: Encodes each individual element in the input into an
array of `num_tokens` size, containing a 1 at the element index. If
the last dimension is size 1, will encode on that dimension. If the
last dimension is not size 1, will append a new dimension for the
encoded output.
- `"multi_hot"`: Encodes each sample in the input into a single array
of `num_tokens` size, containing a 1 for each vocabulary term
present in the sample. Treats the last dimension as the sample
dimension, if input shape is `(..., sample_length)`, output shape
will be `(..., num_tokens)`.
- `"count"`: Like `"multi_hot"`, but the int array contains a count of
the number of times the token at that index appeared in the sample.
For all output modes, currently only output up to rank 2 is supported.
Defaults to `"multi_hot"`.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
Call arguments:
inputs: A 1D or 2D tensor of integer inputs.
count_weights: A tensor in the same shape as `inputs` indicating the
weight for each sample value when summing up in `count` mode. Not used
in `"multi_hot"` or `"one_hot"` modes.
"""
def __init__(
self, num_tokens=None, output_mode="multi_hot", sparse=False, **kwargs
):
# max_tokens is an old name for the num_tokens arg we continue to
# support because of usage.
if "max_tokens" in kwargs:
logging.warning(
"max_tokens is deprecated, please use num_tokens instead."
)
num_tokens = kwargs["max_tokens"]
del kwargs["max_tokens"]
# By default, output floats. This is already default for TF2, but in TF1
# dtype is inferred from inputs, and would default to int.
if "dtype" not in kwargs:
kwargs["dtype"] = backend.floatx()
super().__init__(**kwargs)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
# 'output_mode' must be one of (COUNT, ONE_HOT, MULTI_HOT)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(COUNT, ONE_HOT, MULTI_HOT),
layer_name="CategoryEncoding",
arg_name="output_mode",
)
if num_tokens is None:
raise ValueError(
"num_tokens must be set to use this layer. If the "
"number of tokens is not known beforehand, use the "
"IntegerLookup layer instead."
)
if num_tokens < 1:
raise ValueError(
f"`num_tokens` must be >= 1. Received: num_tokens={num_tokens}."
)
self.num_tokens = num_tokens
self.output_mode = output_mode
self.sparse = sparse
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
if not input_shape:
return tf.TensorShape([self.num_tokens])
if self.output_mode == ONE_HOT and input_shape[-1] != 1:
return tf.TensorShape(input_shape + [self.num_tokens])
else:
return tf.TensorShape(input_shape[:-1] + [self.num_tokens])
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
if self.sparse:
return tf.SparseTensorSpec(shape=output_shape, dtype=tf.int64)
else:
return tf.TensorSpec(shape=output_shape, dtype=tf.int64)
def get_config(self):
config = {
"num_tokens": self.num_tokens,
"output_mode": self.output_mode,
"sparse": self.sparse,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs, count_weights=None):
inputs = utils.ensure_tensor(inputs)
if count_weights is not None:
if self.output_mode != COUNT:
raise ValueError(
"`count_weights` is not used when `output_mode` is not "
"`'count'`. Received `count_weights={count_weights}`."
)
count_weights = utils.ensure_tensor(
count_weights, self.compute_dtype
)
depth = self.num_tokens
if isinstance(inputs, tf.SparseTensor):
max_value = tf.reduce_max(inputs.values)
min_value = tf.reduce_min(inputs.values)
else:
max_value = tf.reduce_max(inputs)
min_value = tf.reduce_min(inputs)
condition = tf.logical_and(
tf.greater(tf.cast(depth, max_value.dtype), max_value),
tf.greater_equal(min_value, tf.cast(0, min_value.dtype)),
)
assertion = tf.Assert(
condition,
[
"Input values must be in the range 0 <= values < num_tokens"
" with num_tokens={}".format(depth)
],
)
with tf.control_dependencies([assertion]):
return utils.encode_categorical_inputs(
inputs,
output_mode=self.output_mode,
depth=depth,
dtype=self.compute_dtype,
sparse=self.sparse,
count_weights=count_weights,
)
| tf-keras/tf_keras/layers/preprocessing/category_encoding.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/category_encoding.py",
"repo_id": "tf-keras",
"token_count": 4005
} | 194 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras text vectorization preprocessing layer."""
import itertools
import math
import os
import random
import string
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.preprocessing import index_lookup
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import CustomObjectScope
def zip_and_sort(weight_values):
keys, values = weight_values
return sorted(zip(keys, values), key=lambda x: x[1])
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": None,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"vocabulary_dtype": tf.string,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
"input_dtype": tf.string,
},
{
"testcase_name": "test_inverse_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),
"kwargs": {
"max_tokens": None,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"vocabulary_dtype": tf.string,
"invert": True,
},
"expected_output": np.array(
[
[b"earth"],
[b"wind"],
[b"and"],
[b"[OOV]"],
[b"[OOV]"],
[b"and"],
[b"earth"],
[b"fire"],
]
),
"input_dtype": tf.int64,
},
{
"testcase_name": "test_strings_with_special_tokens",
# Mask and oov values in the vocab data should be dropped, and
# mapped to 0 and 1 respectively when calling the layer.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
[""],
[""],
[""],
["[OOV]"],
["[OOV]"],
["[OOV]"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
[""],
["wind"],
["[OOV]"],
["and"],
[""],
["fire"],
["and"],
["[OOV]"],
["michigan"],
]
),
"kwargs": {
"max_tokens": None,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"vocabulary_dtype": tf.string,
},
"expected_output": [
[2],
[0],
[3],
[1],
[4],
[0],
[5],
[4],
[1],
[1],
],
"input_dtype": tf.string,
},
{
"testcase_name": "test_ints_soft_vocab_cap",
# Create an array where 1138 is the most frequent term, followed by
# 1729, then 725, then 42. This ensures that the vocab accumulator
# is sorting by frequency.
"vocab_data": np.array(
[
[42],
[1138],
[1138],
[1138],
[1138],
[1729],
[1729],
[1729],
[725],
[725],
],
dtype=np.int64,
),
"input_data": np.array(
[[1138], [1729], [725], [42], [42], [725], [1138], [4]],
dtype=np.int64,
),
"kwargs": {
"max_tokens": None,
"num_oov_indices": 1,
"mask_token": 0,
"oov_token": -1,
"vocabulary_dtype": tf.int64,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
"input_dtype": tf.int64,
},
{
"testcase_name": "test_ints_with_special_tokens",
# Mask and oov values in the vocab data should be dropped, and
# mapped to 0 and 1 respectively when calling the layer.
"vocab_data": np.array(
[
[42],
[1138],
[1138],
[1138],
[1138],
[0],
[0],
[0],
[-1],
[-1],
[-1],
[1729],
[1729],
[1729],
[725],
[725],
],
dtype=np.int64,
),
"input_data": np.array(
[[1138], [0], [1729], [-1], [725], [0], [42], [725], [-1], [4]],
dtype=np.int64,
),
"kwargs": {
"max_tokens": None,
"num_oov_indices": 1,
"mask_token": 0,
"oov_token": -1,
"vocabulary_dtype": tf.int64,
},
"expected_output": [
[2],
[0],
[3],
[1],
[4],
[0],
[5],
[4],
[1],
[1],
],
"input_dtype": tf.int64,
},
{
"testcase_name": "test_strings_hard_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 5,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"vocabulary_dtype": tf.string,
},
"expected_output": [[2], [3], [4], [1], [1], [4], [2], [1]],
"input_dtype": tf.string,
},
{
"testcase_name": "test_inverse_strings_hard_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),
"kwargs": {
"max_tokens": 5,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"vocabulary_dtype": tf.string,
"invert": True,
},
"expected_output": np.array(
[
[b"earth"],
[b"wind"],
[b"and"],
[b"[OOV]"],
[b"[OOV]"],
[b"and"],
[b"earth"],
[b"[OOV]"],
]
),
"input_dtype": tf.int64,
},
{
"testcase_name": "test_ints_hard_vocab_cap",
# Create an array where 1138 is the most frequent term, followed by
# 1729, then 725, then 42. This ensures that the vocab accumulator
# is sorting by frequency.
"vocab_data": np.array(
[
[42],
[1138],
[1138],
[1138],
[1138],
[1729],
[1729],
[1729],
[725],
[725],
],
dtype=np.int64,
),
"input_data": np.array(
[[1138], [1729], [725], [42], [42], [725], [1138], [4]],
dtype=np.int64,
),
"kwargs": {
"max_tokens": 5,
"num_oov_indices": 1,
"mask_token": 0,
"oov_token": -1,
"vocabulary_dtype": tf.int64,
},
"expected_output": [[2], [3], [4], [1], [1], [4], [2], [1]],
"input_dtype": tf.int64,
},
{
"testcase_name": "test_ints_tf_idf_output",
"vocab_data": np.array(
[
[42],
[1138],
[1138],
[1138],
[1138],
[1729],
[1729],
[1729],
[725],
[725],
]
),
"input_data": np.array(
[[1138], [1729], [725], [42], [42], [725], [1138], [4]]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"num_oov_indices": 1,
"mask_token": 0,
"oov_token": -1,
"output_mode": index_lookup.TF_IDF,
"vocabulary_dtype": tf.int64,
},
"expected_output": [
[0, 1.098612, 0, 0, 0],
[0, 0, 1.252763, 0, 0],
[0, 0, 0, 1.466337, 0],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 1.4663371, 0],
[0, 1.098612, 0, 0, 0],
[1.402368, 0, 0, 0, 0],
],
"input_dtype": tf.int64,
},
{
"testcase_name": "test_strings_tf_idf_output",
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"num_oov_indices": 1,
"mask_token": "",
"oov_token": "[OOV]",
"output_mode": index_lookup.TF_IDF,
"vocabulary_dtype": tf.string,
},
"expected_output": [
[0, 1.098612, 0, 0, 0],
[0, 0, 1.252763, 0, 0],
[0, 0, 0, 1.466337, 0],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 1.4663371, 0],
[0, 1.098612, 0, 0, 0],
[1.402368, 0, 0, 0, 0],
],
"input_dtype": tf.string,
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupLayerTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(
self,
vocab_data,
input_data,
kwargs,
use_dataset,
expected_output,
input_dtype,
):
cls = index_lookup.IndexLookup
if "invert" in kwargs and kwargs["invert"]:
expected_output_dtype = kwargs["vocabulary_dtype"]
elif (
"output_mode" in kwargs
and kwargs["output_mode"] != index_lookup.INT
):
expected_output_dtype = tf.float32
else:
expected_output_dtype = tf.int64
input_shape = input_data.shape
if use_dataset:
# TF-Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# IndexLookup), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing
# layer into other keras layers instead of predicting it directly. A
# workaround for these unit tests is to have the dataset only
# contain one batch, so no concatenation needs to happen with the
# result. For consistency with numpy input, we should make `predict`
# join differently shaped results together sensibly, with 0 padding.
input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
input_shape[0]
)
vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0]
)
with CustomObjectScope({"IndexLookup": cls}):
output_data = test_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=input_dtype,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data,
)
if "invert" in kwargs and kwargs["invert"]:
self.assertAllEqual(expected_output, output_data)
else:
self.assertAllClose(expected_output, output_data)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingInputTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_sparse_string_input(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=["fire", "michigan"],
dense_shape=[3, 4],
)
expected_indices = [[0, 0], [1, 2]]
expected_values = [5, 1]
expected_dense_shape = [3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array, steps=1)
self.assertAllEqual(expected_indices, output_data.indices)
self.assertAllEqual(expected_values, output_data.values)
self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
def test_sparse_int_input(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=np.array([13, 32], dtype=np.int64),
dense_shape=[3, 4],
)
expected_indices = [[0, 0], [1, 2]]
expected_values = [5, 1]
expected_dense_shape = [3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array, steps=1)
self.assertAllEqual(expected_indices, output_data.indices)
self.assertAllEqual(expected_values, output_data.values)
self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
def test_ragged_string_input(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.ragged.constant(
[["earth", "wind", "fire"], ["fire", "and", "earth", "michigan"]]
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_int_input(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.ragged.constant(
[[10, 11, 13], [13, 12, 10, 42]], dtype=np.int64
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int32_input_with_int64_keys(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.ragged.constant(
[[10, 11, 13], [13, 12, 10, 42]], dtype=np.int32
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingMultiOOVTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_sparse_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=["fire", "ohio"],
dense_shape=[3, 4],
)
expected_indices = [[0, 0], [1, 2]]
expected_values = [6, 2]
expected_dense_shape = [3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array, steps=1)
self.assertAllEqual(expected_indices, output_data.indices)
self.assertAllEqual(expected_values, output_data.values)
self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
def test_sparse_int_input_multi_bucket(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=np.array([13, 133], dtype=np.int64),
dense_shape=[3, 4],
)
expected_indices = [[0, 0], [1, 2]]
expected_values = [6, 2]
expected_dense_shape = [3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=2,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array, steps=1)
self.assertAllEqual(expected_indices, output_data.indices)
self.assertAllEqual(expected_values, output_data.values)
self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
def test_ragged_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.ragged.constant(
[["earth", "wind", "fire"], ["fire", "and", "earth", "ohio"]]
)
expected_output = [[3, 4, 6], [6, 5, 3, 2]]
input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_int_input_multi_bucket(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.ragged.constant(
[[10, 11, 13], [13, 12, 10, 133]], dtype=np.int64
)
expected_output = [[3, 4, 6], [6, 5, 3, 2]]
input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=2,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class CategoricalEncodingAdaptTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_sparse_adapt(self):
vocab_data = tf.SparseTensor(
indices=[[0, 0], [0, 1], [1, 2]],
values=["michigan", "fire", "michigan"],
dense_shape=[3, 4],
)
vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.adapt(vocab_dataset)
expected_vocabulary = ["", "[OOV]", "michigan", "fire"]
self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())
def test_ragged_adapt(self):
vocab_data = tf.ragged.constant([["michigan"], ["fire", "michigan"]])
vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.adapt(vocab_dataset)
expected_vocabulary = ["", "[OOV]", "michigan", "fire"]
self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())
def test_sparse_int_input(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.SparseTensor(
indices=[[0, 0], [1, 2]],
values=np.array([13, 32], dtype=np.int64),
dense_shape=[3, 4],
)
expected_indices = [[0, 0], [1, 2]]
expected_values = [5, 1]
expected_dense_shape = [3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array, steps=1)
self.assertAllEqual(expected_indices, output_data.indices)
self.assertAllEqual(expected_values, output_data.values)
self.assertAllEqual(expected_dense_shape, output_data.dense_shape)
def test_ragged_string_input(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.ragged.constant(
[["earth", "wind", "fire"], ["fire", "and", "earth", "michigan"]]
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_ragged_int_input(self):
vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)
input_array = tf.ragged.constant(
[[10, 11, 13], [13, 12, 10, 42]], dtype=np.int64
)
expected_output = [[2, 3, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)
layer = index_lookup.IndexLookup(
max_tokens=None,
vocabulary_dtype=tf.int64,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_string_generator_dataset(self):
def word_gen():
for _ in itertools.count(1):
yield "".join(
random.choice(string.ascii_letters) for i in range(2)
)
ds = tf.data.Dataset.from_generator(
word_gen, tf.string, tf.TensorShape([])
)
batched_ds = ds.take(2)
input_t = keras.Input(shape=(), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=10,
num_oov_indices=0,
mask_token=None,
oov_token=None,
vocabulary_dtype=tf.string,
)
_ = layer(input_t)
layer.adapt(batched_ds)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupOutputTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
@parameterized.product(
rank=[0, 1, 2],
# Check lists, numpy arrays, tensors, and objects convertable to tensor.
data_fn=[
None,
np.array,
tf.constant,
preprocessing_test_utils.ArrayLike,
],
)
def test_input_types(self, rank, data_fn):
input_data = vocab = ["earth", "wind", "and", "fire"]
expected_output = [2, 3, 4, 5]
if rank == 0:
input_data = input_data[0]
expected_output = expected_output[0]
elif rank == 2:
input_data = [input_data]
expected_output = [expected_output]
if data_fn is not None:
input_data = data_fn(input_data)
input_shape = [] if rank == 0 else [None]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary=vocab,
vocabulary_dtype=tf.string,
)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
# Again in a keras.Model
inputs = keras.Input(shape=input_shape, dtype=tf.string)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(tf.constant(input_data))
self.assertAllEqual(expected_output, output_data)
def test_int_output_shape(self):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
self.assertAllEqual(int_data.shape.as_list(), [16, 4])
@parameterized.named_parameters(
("int32", tf.int32),
("int64", tf.int64),
)
def test_int_output_dtype(self, dtype):
input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
dtype=dtype,
)
int_data = layer(input_data)
self.assertAllEqual(int_data.dtype, dtype)
def test_int_output_float_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "`dtype` should be an integer"):
index_lookup.IndexLookup(
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
dtype=tf.float32,
)
def test_int_output_no_reserved_zero(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_no_oov(self):
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
invalid_input = np.array(
[
["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, output_data)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "found OOV values.*michigan"
):
_ = model.predict(invalid_input)
def test_int_output_no_oov_ragged(self):
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
invalid_input = np.array(
[
["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"],
]
)
valid_input = tf.RaggedTensor.from_tensor(valid_input)
invalid_input = tf.RaggedTensor.from_tensor(invalid_input)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, output_data)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "found OOV values.*michigan"
):
_ = model.predict(invalid_input)
def test_int_output_no_oov_sparse(self):
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
invalid_input = np.array(
[
["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"],
]
)
valid_input = tf.sparse.from_dense(valid_input)
invalid_input = tf.sparse.from_dense(invalid_input)
expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, tf.sparse.to_dense(output_data))
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "found OOV values.*michigan"
):
_ = model.predict(invalid_input)
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_one_hot_output_hard_maximum(self):
"""Check binary output when pad_to_max_tokens=True."""
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire", "michigan", ""])
expected_output = [
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=6,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.ONE_HOT,
pad_to_max_tokens=True,
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
binary_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=binary_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_one_hot_output_soft_maximum(self):
"""Check binary output when pad_to_max_tokens=False."""
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire", "michigan", ""])
expected_output = [
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.ONE_HOT,
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
binary_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=binary_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_one_hot_output_rank_zero_no_oov(self):
"""Check binary output when pad_to_max_tokens=False."""
vocab_data = ["earth", "wind", "and", "fire"]
input_data = tf.constant("earth")
expected_output = [1, 0, 0, 0]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.ONE_HOT,
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
output_data = layer(input_data)
self.assertAllEqual(expected_output, output_data)
def test_one_hot_output_shape(self):
inputs = keras.Input(batch_size=16, shape=(1,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=["earth"],
max_tokens=2,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.ONE_HOT,
vocabulary_dtype=tf.string,
)
outputs = layer(inputs)
self.assertAllEqual(outputs.shape.as_list(), [16, 2])
@parameterized.product(
sparse=[True, False],
adapt=[True, False],
pad_to_max=[True, False],
mode=["multi_hot", "count", "tf_idf"],
dtype=[tf.float32, tf.float64],
)
def test_binned_output(self, sparse, adapt, pad_to_max, mode, dtype):
"""Check "multi_hot", "count", and "tf_idf" output."""
# Adapt breaks ties with sort order.
vocab_data = ["wind", "fire", "earth", "and"]
# IDF weight for a term in 1 out of 1 document is log(1 + 1/2).
idf_data = [math.log(1.5)] * 4
input_data = np.array(
[
["and", "earth", "fire", "and", ""],
["michigan", "wind", "and", "ohio", ""],
]
)
if mode == "count":
expected_output = np.array(
[
[0, 0, 1, 1, 2],
[2, 1, 0, 0, 1],
]
)
elif mode == "tf_idf":
expected_output = np.array(
[
[0, 0, 1, 1, 2],
[2, 1, 0, 0, 1],
]
) * math.log(1.5)
else:
expected_output = np.array(
[
[0, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
]
)
expected_output_shape = [None, 5]
if pad_to_max:
expected_output = np.concatenate(
(expected_output, [[0], [0]]), axis=1
)
expected_output_shape = [None, 6]
inputs = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=6,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=mode,
pad_to_max_tokens=pad_to_max,
vocabulary_dtype=tf.string,
sparse=sparse,
vocabulary=None if adapt else vocab_data,
idf_weights=None if adapt or mode != "tf_idf" else idf_data,
dtype=dtype,
)
if adapt:
layer.adapt(vocab_data)
outputs = layer(inputs)
model = keras.Model(inputs, outputs)
output_data = model.predict(input_data)
if sparse:
output_data = tf.sparse.to_dense(output_data)
# Check output data.
self.assertAllClose(expected_output, output_data)
# Check symbolic output shape.
self.assertAllEqual(expected_output_shape, outputs.shape.as_list())
# Check output dtype.
self.assertAllEqual(dtype, output_data.dtype)
def test_multi_hot_output_no_oov(self):
"""Check multi hot output when num_oov_indices=0."""
vocab_data = ["earth", "wind", "and", "fire"]
valid_input = np.array(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth", ""]]
)
invalid_input = np.array(
[
["earth", "wind", "and", "michigan"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [
[1, 1, 1, 1, 0],
[1, 0, 1, 1, 0],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=0,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
pad_to_max_tokens=True,
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
binary_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=binary_data)
output_data = model.predict(valid_input)
self.assertAllEqual(expected_output, output_data)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "found OOV values.*michigan"
):
_ = model.predict(invalid_input)
def test_multi_hot_output_hard_maximum_multiple_adapts(self):
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
adapt_data = [
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
]
first_expected_output = [
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
]
second_adapt_data = [
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
"and",
"and",
"fire",
]
second_expected_output = [
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
pad_to_max_tokens=True,
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Test the first adapt
layer.adapt(adapt_data)
first_output = model.predict(input_array)
# Test the second adapt
layer.adapt(second_adapt_data)
# We need to recompile the model to retrace our call graph.
model.compile()
second_output = model.predict(input_array)
self.assertAllEqual(first_expected_output, first_output)
self.assertAllEqual(second_expected_output, second_output)
def test_int_output_file_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_non_int_output_file_vocab_in_tf_function(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = tf.constant(
[
["earth", "wind", "and", "fire", ""],
["fire", "and", "earth", "michigan", ""],
],
dtype=tf.string,
)
expected_output = [
[0, 1, 1, 1, 1],
[1, 1, 0, 1, 1],
]
vocab_file = self._write_to_temp_file("temp", vocab_data)
@tf.function
def compute(data):
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
vocabulary_dtype=tf.string,
)
return layer(data)
output_dataset = compute(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_file_vocab_and_list_vocab_identical_attrs(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
expected_vocab = ["", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 6
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(
list_layer.get_vocabulary(), file_layer.get_vocabulary()
)
self.assertAllEqual(
list_layer.vocabulary_size(), file_layer.vocabulary_size()
)
def test_file_vocab_and_list_vocab_identical_attrs_multi_oov(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
expected_vocab = ["", "[OOV]", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 7
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(
list_layer.get_vocabulary(), file_layer.get_vocabulary()
)
self.assertAllEqual(
list_layer.vocabulary_size(), file_layer.vocabulary_size()
)
def test_file_vocab_and_list_vocab_identical_attrs_no_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
file_layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=2,
mask_token=None,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
list_layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=2,
mask_token=None,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
expected_vocab = ["[OOV]", "[OOV]", "earth", "wind", "and", "fire"]
self.assertAllEqual(expected_vocab, list_layer.get_vocabulary())
expected_vocab_size = 6
self.assertAllEqual(expected_vocab_size, list_layer.vocabulary_size())
self.assertAllEqual(
list_layer.get_vocabulary(), file_layer.get_vocabulary()
)
self.assertAllEqual(
list_layer.vocabulary_size(), file_layer.vocabulary_size()
)
def test_int_output_file_vocab_no_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "", "earth", "michigan"],
]
)
expected_output = [[1, 2, 3, 4], [4, 0, 1, 0]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=1,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_file_vocab_no_oov_or_mask(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[["earth", "wind", "and", "fire"], ["fire", "wind", "earth", "and"]]
)
expected_output = [[0, 1, 2, 3], [3, 1, 0, 2]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=0,
oov_token=None,
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_file_vocab_inversion(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[1, 2, 3, 4], [4, 0, 1, 0]])
expected_output = [
["earth", "wind", "and", "fire"],
["fire", "[OOV]", "earth", "[OOV]"],
]
vocab_file = self._write_to_temp_file("temp", vocab_data)
idata = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
mask_token=None,
num_oov_indices=1,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
_ = layer(idata)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
invert_layer = index_lookup.IndexLookup(
vocabulary=layer.get_vocabulary(),
max_tokens=None,
oov_token="[OOV]",
mask_token=None,
num_oov_indices=1,
invert=True,
vocabulary_dtype=tf.string,
)
int_data = invert_layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_int_file_vocab(self):
vocab_data = ["10", "20", "30", "40"]
input_array = np.array([[10, 20, 30, 40], [40, 0, 10, 42]])
expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = index_lookup.IndexLookup(
vocabulary=vocab_file,
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_dataset_map_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token=None,
oov_token="[OOV]",
vocabulary=vocab_data,
vocabulary_dtype=tf.string,
)
ds = tf.data.Dataset.from_tensor_slices([["earth"], ["wind"], ["and"]])
ds = ds.map(layer)
self.assertAllEqual(list(ds.as_numpy_iterator()), [[0], [1], [2]])
def test_dataset_map_output_layer_created_in_function(self):
vocab_data = ["earth", "wind", "and", "fire"]
def apply_lookup(data):
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=0,
mask_token=None,
oov_token="[OOV]",
vocabulary=vocab_data,
vocabulary_dtype=tf.string,
)
return layer(data)
ds = tf.data.Dataset.from_tensor_slices([["earth"], ["wind"], ["and"]])
ds = ds.map(apply_lookup)
self.assertAllEqual(list(ds.as_numpy_iterator()), [[0], [1], [2]])
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupVocabularyTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_explicit_vocab_with_special_tokens(self):
vocab_data = ["", "[OOV]", "earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_get_vocabulary_no_special_tokens(self):
vocab_data = ["", "[OOV]", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary(include_special_tokens=False)
self.assertAllEqual(returned_vocab, ["wind", "and", "fire"])
self.assertAllEqual(layer.vocabulary_size(), 5)
def test_vocab_multi_oov(self):
vocab_data = ["", "[OOV]", "[OOV]", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=2,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(returned_vocab, vocab_data)
def test_vocab_multi_oov_not_present(self):
vocab_data = ["wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=10,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(
returned_vocab, [""] + ["[OOV]"] * 10 + ["wind", "and", "fire"]
)
def test_vocab_with_max_cap(self):
vocab_data = ["", "[OOV]", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
self.assertAllEqual(layer.vocabulary_size(), 5)
def test_int_vocab_with_max_cap(self):
vocab_data = [0, -1, 42, 1276, 1138]
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
self.assertAllEqual(layer.vocabulary_size(), 5)
def test_vocab_with_multiple_oov_indices(self):
vocab_data = ["", "[OOV]", "[OOV]", "[OOV]", "wind"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=3,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
def test_int_vocab_with_multiple_oov_indices(self):
vocab_data = [0, -1, -1, -1, 42]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=3,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, "repeated term.*fire"):
_ = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
def test_vocab_with_repeated_element_fails(self):
vocab_data = ["earth", "earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
with self.assertRaisesRegex(ValueError, "repeated term.*earth"):
layer.set_vocabulary(vocab_data)
def test_vocab_with_reserved_oov_element_and_invert_true_fails(self):
vocab_data = ["earth", "test", "[OOV]", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
invert=True,
vocabulary_dtype=tf.string,
)
with self.assertRaisesRegex(ValueError, "reserved OOV"):
layer.set_vocabulary(vocab_data)
def test_vocab_with_reserved_mask_element_fails(self):
vocab_data = ["earth", "mask_token", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="mask_token",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
with self.assertRaisesRegex(ValueError, "reserved mask"):
layer.set_vocabulary(vocab_data)
def test_vocab_size_changed_pad_to_max_false_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
pad_to_max_tokens=False,
output_mode=index_lookup.MULTI_HOT,
vocabulary_dtype=tf.string,
)
layer.set_vocabulary(vocab_data)
# Calling the layer should lock the vocabulary size.
_ = layer([["earth"]])
with self.assertRaisesRegex(
RuntimeError, "vocabulary size cannot be changed"
):
layer.set_vocabulary(vocab_data[:2])
def test_vocab_with_idf_weights_non_tfidf_output_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
weight_data = [1, 1, 1, 1, 1]
with self.assertRaisesRegex(
ValueError, "`idf_weights` should only be set if"
):
index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.MULTI_HOT,
vocabulary_dtype=tf.string,
vocabulary=vocab_data,
idf_weights=weight_data,
)
def test_vocab_with_idf_weights_length_mismatch_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
weight_data = [1, 1, 1, 1, 1] # too long
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be the same length as vocab"
):
index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.TF_IDF,
vocabulary_dtype=tf.string,
vocabulary=vocab_data,
idf_weights=weight_data,
)
def test_vocab_without_idf_weights_tfidf_output_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be set if output_mode is TF_IDF"
):
index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
output_mode=index_lookup.TF_IDF,
vocabulary_dtype=tf.string,
vocabulary=vocab_data,
)
def test_non_unique_int_vocab_fails(self):
vocab_data = [12, 13, 14, 15, 15]
with self.assertRaisesRegex(ValueError, "repeated term.*15"):
_ = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
def test_int_vocab_with_reserved_oov_element_and_invert_true_fails(self):
vocab_data = [14, 38, -1, 34, 3, 84]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
invert=True,
vocabulary_dtype=tf.int64,
)
with self.assertRaisesRegex(ValueError, "reserved OOV"):
layer.set_vocabulary(vocab_data)
def test_int_vocab_with_reserved_mask_element_fails(self):
vocab_data = [125, 0, 3, 4, 94]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
with self.assertRaisesRegex(ValueError, "reserved mask"):
layer.set_vocabulary(vocab_data)
def test_no_vocab_file_string_fails(self):
with self.assertRaisesRegex(ValueError, "non_existent_file"):
_ = index_lookup.IndexLookup(
vocabulary="non_existent_file",
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupInverseVocabularyTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_int_output_explicit_vocab(self):
vocab_data = ["", "[OOV]", "earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])
expected_output = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[OOV]"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.int64)
layer = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
invert=True,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_with_max_cap(self):
vocab_data = ["", "[OOV]", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
invert=True,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
def test_int_vocab_with_max_cap(self):
vocab_data = [0, -1, 42, 1276, 1138]
layer = index_lookup.IndexLookup(
max_tokens=5,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
invert=True,
)
layer.set_vocabulary(vocab_data)
returned_vocab = layer.get_vocabulary()
self.assertAllEqual(vocab_data, returned_vocab)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, "repeated term.*fire"):
_ = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
invert=True,
)
def test_non_int_output_fails(self):
with self.assertRaisesRegex(
ValueError, "`output_mode` must be `'int'`"
):
_ = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
output_mode=index_lookup.COUNT,
invert=True,
)
def test_vocab_with_repeated_element_fails(self):
vocab_data = ["earth", "earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
invert=True,
)
with self.assertRaisesRegex(ValueError, "repeated term.*earth"):
layer.set_vocabulary(vocab_data)
def test_vocab_with_reserved_mask_element_fails(self):
vocab_data = ["earth", "mask_token", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="mask_token",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
invert=True,
)
with self.assertRaisesRegex(ValueError, "reserved mask"):
layer.set_vocabulary(vocab_data)
def test_non_unique_int_vocab_fails(self):
vocab_data = [12, 13, 14, 15, 15]
with self.assertRaisesRegex(ValueError, "repeated term.*15"):
_ = index_lookup.IndexLookup(
vocabulary=vocab_data,
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
invert=True,
)
def test_int_vocab_with_repeated_element_fails(self):
vocab_data = [11, 11, 34, 23, 124]
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token=0,
oov_token=-1,
vocabulary_dtype=tf.int64,
invert=True,
)
with self.assertRaisesRegex(ValueError, "repeated term.*11"):
layer.set_vocabulary(vocab_data)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupErrorTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_too_long_vocab_fails_in_single_setting(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = index_lookup.IndexLookup(
max_tokens=4,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
with self.assertRaisesRegex(
ValueError, "vocabulary larger than the maximum vocab"
):
layer.set_vocabulary(vocab_data)
def test_zero_max_tokens_fails(self):
with self.assertRaisesRegex(ValueError, "max_tokens"):
_ = index_lookup.IndexLookup(
max_tokens=0,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
)
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class IndexLookupSavingTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_vocabulary_persistence_across_saving(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
with self.subTest("keras_v3"):
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_model.keras"
)
model.save(output_path, save_format="keras_v3")
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
with self.subTest("savedmodel"):
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_model"
)
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
tf.io.gfile.remove(vocab_file)
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_vocabulary_persistence_file_across_cloning(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Clone the model and set weights.
new_model = keras.models.clone_model(model)
new_model.set_weights(model.get_weights())
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, new_model)
# Validate correctness of the new model.
new_output_dataset = new_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocabs_tf_save_tf_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
tf.saved_model.save(obj=model, export_dir=output_path)
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = f(tf.constant(input_array))["index_lookup"]
self.assertAllEqual(new_output_dataset, expected_output)
def test_vocabulary_persistence_file_vocab_keras_save_tf_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = f(tf.constant(input_array))["index_lookup"]
self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocab_keras_save_keras_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
with self.subTest("keras_v3"):
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_model.keras"
)
model.save(output_path, save_format="keras_v3")
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Try re-saving the layer. This simulates saving a layer
# contained at a hub Module.
input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
output_2 = loaded_model(input_data_2)
model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_model_2.keras"
)
model_2.save(output_path, save_format="keras_v3")
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
with self.subTest("saved_model"):
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_model"
)
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
tf.io.gfile.remove(vocab_file)
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Try re-saving the layer. This simulates saving a layer
# contained at a hub Module.
input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
output_2 = loaded_model(input_data_2)
model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_model_2"
)
model_2.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique
# (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocab_keras_save_keras_load_tf_save_tf_load(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
tf.io.gfile.remove(vocab_file)
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Try re-saving the layer. This simulates saving a layer contained at
# a hub Module.
input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
output_2 = loaded_model(input_data_2)
model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_model_2"
)
tf.saved_model.save(model_2, output_path)
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = tf.saved_model.load(output_path)
f = loaded_model.signatures["serving_default"]
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = f(tf.constant(input_array))["model"]
self.assertAllEqual(new_output_dataset, expected_output)
def test_persistence_file_vocab_keras_save_keras_load_keras_save_keras_load(
self,
):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_file = self._write_to_temp_file("temp", vocab_data)
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
tf.io.gfile.remove(vocab_file)
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Try re-saving the layer. This simulates saving a layer contained at
# a hub Module.
input_data_2 = keras.Input(shape=(None,), dtype=tf.string)
output_2 = loaded_model(input_data_2)
model_2 = keras.Model(inputs=input_data_2, outputs=output_2)
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(
self.get_temp_dir(), "tf_keras_saved_model_2"
)
model_2.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(
output_path,
custom_objects={"IndexLookup": index_lookup.IndexLookup},
)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = model_2.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_static_table_config_weight_data_transfer_succeeds(self):
vocab_data = ["earth", "wind", "and", "fire"]
vocab_file = self._write_to_temp_file("temp", vocab_data)
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
layer_cls = index_lookup.IndexLookup
layer = layer_cls(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_file,
)
config = layer.get_config()
weights = layer.get_weights()
layer = layer_cls.from_config(config)
layer.set_weights(weights)
input_data = keras.Input(shape=(None,), dtype=tf.string)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
new_output_dataset = model.predict(input_array)
self.assertAllEqual(new_output_dataset, expected_output)
def test_sparse_output_across_saving(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[0.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0, 1.0]]
layer_cls = index_lookup.IndexLookup
layer = layer_cls(
max_tokens=None,
num_oov_indices=1,
mask_token="",
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_data,
output_mode="multi_hot",
sparse=True,
)
config = layer.get_config()
layer = layer_cls.from_config(config)
output = layer(input_array)
self.assertIsInstance(output, tf.SparseTensor)
self.assertAllEqual(tf.sparse.to_dense(output), expected_output)
class EagerExecutionDisabled(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_lookup(self):
# We need this test for model_to_estimator followed by
# export_saved_model, which will call the layer in a legacy session.
# This could also happen directly if a user calls disable_v2_behavior or
# disable_eager_execution.
with tf.compat.v1.Session():
with test_utils.run_eagerly_scope(False):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth", "wind", "and", "fire"])
expected_output = [1, 2, 3, 4]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = index_lookup.IndexLookup(
max_tokens=None,
num_oov_indices=1,
mask_token=None,
oov_token="[OOV]",
vocabulary_dtype=tf.string,
vocabulary=vocab_data,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# In a TF1 session the user will need to make sure all tables
# are initialized themselves.
tf.compat.v1.tables_initializer().run()
output_dataset = model(input_array)
self.assertAllEqual(output_dataset, expected_output)
if __name__ == "__main__":
# IndexLookup is only exported as a TF2 API.
tf.compat.v1.enable_v2_behavior()
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/index_lookup_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/index_lookup_test.py",
"repo_id": "tf-keras",
"token_count": 53913
} | 195 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras text vectorization preprocessing layer."""
import gc
import os
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras import backend
from tf_keras.layers import convolutional
from tf_keras.layers import core
from tf_keras.layers.preprocessing import preprocessing_test_utils
from tf_keras.layers.preprocessing import text_vectorization
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import register_keras_serializable
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_simple_tokens_int_mode",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab is
# sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name": "test_simple_tokens_int_mode_hard_cap",
# Create an array where 'earth' is the most frequent term, followed
# by 'wind', then 'and', then 'fire'. This ensures that the vocab is
# sorting by frequency.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 6,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
},
{
"testcase_name": "test_special_tokens_int_mode",
# Mask tokens in the vocab data should be ignored, and mapped to 0
# in from the input data.
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
[""],
[""],
[""],
["[UNK]"],
["[UNK]"],
["[UNK]"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
[""],
["wind"],
["[UNK]"],
["and"],
[""],
["fire"],
["and"],
["[UNK]"],
["michigan"],
]
),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": None,
"output_mode": text_vectorization.INT,
},
"expected_output": [
[2],
[0],
[3],
[1],
[4],
[0],
[5],
[4],
[1],
[1],
],
},
{
"testcase_name": "test_documents_int_mode",
"vocab_data": np.array(
[
["fire earth earth"],
["earth earth"],
["wind wind"],
["and wind and"],
]
),
"input_data": np.array(
[["earth wind and"], ["fire fire"], ["and earth"], ["michigan"]]
),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.WHITESPACE,
"output_mode": text_vectorization.INT,
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name": "test_documents_1d_input_int_mode",
"vocab_data": np.array(
["fire earth earth", "earth earth", "wind wind", "and wind and"]
),
"input_data": np.array(
[["earth wind and"], ["fire fire"], ["and earth"], ["michigan"]]
),
"kwargs": {
"max_tokens": None,
"standardize": None,
"split": text_vectorization.WHITESPACE,
"output_mode": text_vectorization.INT,
},
"expected_output": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],
},
{
"testcase_name": "test_simple_tokens_binary_mode",
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": None,
"output_mode": text_vectorization.MULTI_HOT,
},
"expected_output": [
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
],
},
{
"testcase_name": "test_documents_binary_mode",
"vocab_data": np.array(
[
["fire earth earth"],
["earth earth"],
["wind wind"],
["and wind and"],
]
),
"input_data": np.array(
[["earth wind"], ["and"], ["fire fire"], ["earth michigan"]]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": text_vectorization.WHITESPACE,
"output_mode": text_vectorization.MULTI_HOT,
},
"expected_output": [
[0, 1, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[1, 1, 0, 0, 0],
],
},
{
"testcase_name": "test_simple_tokens_count_mode",
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": None,
"output_mode": text_vectorization.COUNT,
},
"expected_output": [
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
],
},
{
"testcase_name": "test_documents_count_mode",
"vocab_data": np.array(
[
["fire earth earth"],
["earth earth"],
["wind wind"],
["and wind and"],
]
),
"input_data": np.array(
[["earth wind"], ["and"], ["fire fire"], ["earth michigan"]]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": text_vectorization.WHITESPACE,
"output_mode": text_vectorization.COUNT,
},
"expected_output": [
[0, 1, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 2],
[1, 1, 0, 0, 0],
],
},
{
"testcase_name": "test_tokens_idf_mode",
"vocab_data": np.array(
[
["fire"],
["earth"],
["earth"],
["earth"],
["earth"],
["wind"],
["wind"],
["wind"],
["and"],
["and"],
]
),
"input_data": np.array(
[
["earth"],
["wind"],
["and"],
["fire"],
["fire"],
["and"],
["earth"],
["michigan"],
]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": None,
"output_mode": text_vectorization.TF_IDF,
},
"expected_output": [
[0, 1.098612, 0, 0, 0],
[0, 0, 1.252763, 0, 0],
[0, 0, 0, 1.466337, 0],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 0, 1.7917595],
[0, 0, 0, 1.4663371, 0],
[0, 1.098612, 0, 0, 0],
[1.402368, 0, 0, 0, 0],
],
},
{
"testcase_name": "test_documents_idf_mode",
"vocab_data": np.array(
[
["fire earth earth"],
["earth earth"],
["wind wind"],
["and wind and"],
]
),
"input_data": np.array(
[["earth wind"], ["and"], ["fire fire"], ["earth michigan"]]
),
"kwargs": {
"max_tokens": 5,
"pad_to_max_tokens": True,
"standardize": None,
"split": text_vectorization.WHITESPACE,
"output_mode": text_vectorization.TF_IDF,
},
"expected_output": [
[0.0, 0.847298, 0.847298, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.098612, 0.0],
[0.0, 0.0, 0.0, 0.0, 2.197225],
[0.972955, 0.847298, 0.0, 0.0, 0.0],
],
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationLayerTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(
self, vocab_data, input_data, kwargs, use_dataset, expected_output
):
cls = text_vectorization.TextVectorization
if kwargs.get("output_mode") == text_vectorization.INT:
expected_output_dtype = tf.int64
else:
expected_output_dtype = tf.float32
input_shape = input_data.shape
if use_dataset:
# TF-Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# TextVectorization), the concatenation fails. In real use cases,
# this may not be an issue because users are likely to pipe the
# preprocessing layer into other keras layers instead of predicting
# it directly. A workaround for these unit tests is to have the
# dataset only contain one batch, so no concatenation needs to
# happen with the result. For consistency with numpy input, we
# should make `predict` join differently shaped results together
# sensibly, with 0 padding.
input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(
input_shape[0]
)
vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0]
)
output_data = test_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=tf.string,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data,
)
self.assertAllClose(expected_output, output_data)
@parameterized.product(
rank=[0, 1, 2],
# Check lists, numpy arrays, tensors, and objects convertable to tensor.
data_fn=[
None,
np.array,
tf.constant,
preprocessing_test_utils.ArrayLike,
],
)
def test_input_types(self, rank, data_fn):
input_data = "earth wind and fire"
expected_output = [2, 3, 4, 5]
if rank == 1:
input_data = [input_data]
expected_output = [expected_output]
elif rank == 2:
input_data = [[input_data]]
expected_output = [expected_output]
if data_fn is not None:
input_data = data_fn(input_data)
input_shape = [] if rank == 0 else [1]
layer = text_vectorization.TextVectorization(
vocabulary=["earth", "wind", "and", "fire"]
)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
# Again in a keras.Model
inputs = keras.Input(shape=input_shape, dtype=tf.string)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model(tf.constant(input_data))
self.assertAllEqual(output_data, expected_output)
@parameterized.named_parameters(
[
{
"testcase_name": "ragged_tensor1",
"input_data": [
[["0 a b"], ["c d"]],
[["e a"], ["b c d"]],
[["f"]],
],
"expected_output": [
[[1, 2, 3], [4, 5]],
[[6, 2], [3, 4, 5]],
[[7]],
],
},
{
"testcase_name": "ragged_tensor2",
"input_data": [
[["0 a b"], [""]],
[],
[["e a"], ["b c d"]],
[["f"]],
],
"expected_output": [
[[1, 2, 3], []],
[],
[[6, 2], [3, 4, 5]],
[[7]],
],
},
]
)
def test_ragged_input_and_ragged_output(self, input_data, expected_output):
input_data = tf.ragged.constant(input_data, inner_shape=(1,))
layer = text_vectorization.TextVectorization(
vocabulary=["a", "b", "c", "d", "e", "f"], ragged=True
)
output_data = layer(input_data)
self.assertAllEqual(output_data, expected_output)
# Again in a keras.Model
inputs = keras.Input(shape=(1,), dtype=tf.string)
outputs = layer(inputs)
model = keras.Model(inputs=inputs, outputs=outputs)
output_data = model.predict(input_data)
self.assertAllEqual(output_data, expected_output)
def test_scalar_input_int_mode_no_len_limit(self):
vocab_data = [
"fire earth earth",
"earth earth",
"wind wind",
"and wind and",
]
input_data = "earth wind and fire fire and earth michigan"
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1])
layer.set_vocabulary(["earth", "wind", "and", "fire"])
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1])
def test_scalar_input_int_mode_trim_to_len_limit(self):
vocab_data = [
"fire earth earth",
"earth earth",
"wind wind",
"and wind and",
]
input_data = "earth wind and fire fire and earth michigan"
layer = text_vectorization.TextVectorization(output_sequence_length=3)
layer.adapt(vocab_data)
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4])
layer.set_vocabulary(["earth", "wind", "and", "fire"])
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4])
def test_scalar_input_int_pad_to_len_limit(self):
vocab_data = [
"fire earth earth",
"earth earth",
"wind wind",
"and wind and",
]
input_data = "earth wind and fire fire and earth michigan"
layer = text_vectorization.TextVectorization(output_sequence_length=10)
layer.adapt(vocab_data)
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1, 0, 0])
layer.set_vocabulary(["earth", "wind", "and", "fire"])
out = layer(input_data)
self.assertAllClose(out.numpy(), [2, 3, 4, 5, 5, 4, 2, 1, 0, 0])
def test_dataset_of_single_strings(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
vocab_ds = tf.data.Dataset.from_tensor_slices(vocab_data) # unbatched
input_ds = tf.data.Dataset.from_tensor_slices(input_data) # unbatched
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_ds)
out = input_ds.map(layer)
self.assertAllClose(list(out.as_numpy_iterator()), [[2, 3], [4, 5]])
def test_dataset_of_single_strings_with_output_sequence(self):
vocab_data = ["two two two", "two three three", "three four four five"]
input_data = ["two three", "four five"]
vocab_ds = tf.data.Dataset.from_tensor_slices(vocab_data) # unbatched
input_ds = tf.data.Dataset.from_tensor_slices(input_data) # unbatched
layer = text_vectorization.TextVectorization(output_sequence_length=3)
layer.adapt(vocab_ds)
out = input_ds.map(layer)
self.assertAllClose(
list(out.as_numpy_iterator()), [[2, 3, 0], [4, 5, 0]]
)
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0", "a", "b", "c", "d", "e", "a", "b", "c", "d", "f"],
"expected": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1],
},
{
"testcase_name": "2d",
"data": [
["0", "a", "b", "c", "d"],
["e", "a", "b", "c", "d"],
["f"],
],
"expected": [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5], [1, 0, 0, 0, 0]],
},
{
"testcase_name": "3d",
"data": [
[["0", "a", "b"], ["c", "d"]],
[["e", "a"], ["b", "c", "d"]],
[["f"]],
],
"expected": [
[[1, 2, 3], [4, 5, 0]],
[[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]],
],
},
)
def test_layer_dimensionality_handling(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
pad_to_max_tokens=False,
)
vectorization.set_vocabulary(vocab)
output = vectorization(tf.ragged.constant(data))
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{
"testcase_name": "1d",
"data": ["0 a b c d e a b c d f"],
"expected": [[1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1]],
},
{
"testcase_name": "3d",
"data": [[["0 a b"], ["c d"]], [["e a"], ["b c d"]], [["f"]]],
"expected": [
[[1, 2, 3], [4, 5, 0]],
[[1, 2, 0], [3, 4, 5]],
[[1, 0, 0], [0, 0, 0]],
],
},
)
def test_layer_dimensionality_handling_with_split(self, data, expected):
vocab = ["a", "b", "c", "d"]
vectorization = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
pad_to_max_tokens=False,
)
vectorization.set_vocabulary(vocab)
output = vectorization(tf.ragged.constant(data, inner_shape=(1,)))
self.assertAllEqual(expected, output)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationPreprocessingTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with tf.io.gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_summary_before_adapt(self):
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
pad_to_max_tokens=True,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=text_vectorization.TF_IDF,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# We are testing that model.summary() can be called without erroring
# out. (b/145726907)
model.summary()
@parameterized.parameters([list, np.array, tf.constant, tf.ragged.constant])
def test_lower_and_strip_punctuation(self, data_fn):
input_array = data_fn(
[
["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"],
]
)
expected_output = data_fn(
[
[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@parameterized.parameters([list, np.array, tf.constant, tf.ragged.constant])
def test_strip_punctuation(self, data_fn):
input_array = data_fn(
[
["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"],
]
)
expected_output = data_fn(
[
[b"Earth", b"wInD", b"aNd", b"firE"],
[b"fire", b"and", b"earth", b"michigan"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.STRIP_PUNCTUATION,
split=None,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@parameterized.parameters([list, np.array, tf.constant, tf.ragged.constant])
def test_lower(self, data_fn):
input_array = data_fn(
[
["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@$"],
]
)
expected_output = data_fn(
[
[b"earth", b"wind", b"and", b"fire"],
[b"fire|", b"an<>d", b"{earth}", b"michigan@$"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER,
split=None,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_normalization(self):
input_array = np.array(
[
["Earth", "wInD", "aNd", "firE"],
["fire|", "an<>d", "{earth}", "michigan@%$"],
]
)
expected_output = np.array(
[
[b"earth", b"wind", b"and", b"fire"],
[b"fire|", b"an<>d", b"{earth}", b"michigan@%$"],
]
)
custom_standardization = tf.strings.lower
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardization,
split=None,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_whitespace_splitting(self):
input_array = np.array(
[["earth wind and fire"], ["\tfire\tand\nearth michigan "]]
)
expected_output = [
[b"earth", b"wind", b"and", b"fire"],
[b"fire", b"and", b"earth", b"michigan"],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_character_splitting(self):
input_array = np.array([["earthwind"], ["and fire"]])
expected_output = [
[b"e", b"a", b"r", b"t", b"h", b"w", b"i", b"n", b"d"],
[b"a", b"n", b"d", b" ", b"f", b"i", b"r", b"e"],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.CHARACTER,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_custom_string_splitting(self):
input_array = np.array(
[["earth>wind>and fire"], ["\tfire>and\nearth>michigan"]]
)
expected_output = [
[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"],
]
custom_split = lambda x: tf.strings.split(x, sep=">")
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=custom_split,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value_ragged_inputs(self):
input_array = tf.ragged.constant(
[["earth", "wind", "and", "fire"], ["fire", "and", "earth"]]
)
# pyformat: disable
expected_output = [
[
b"earth",
b"wind",
b"and",
b"fire",
b"earth wind",
b"wind and",
b"and fire",
b"earth wind and",
b"wind and fire",
],
[
b"fire",
b"and",
b"earth",
b"fire and",
b"and earth",
b"fire and earth",
],
]
# pyformat: enable
input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_single_ngram_value(self):
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [
[
b"earth",
b"wind",
b"and",
b"fire",
b"earth wind",
b"wind and",
b"and fire",
b"earth wind and",
b"wind and fire",
],
[
b"fire",
b"and",
b"earth",
b"michigan",
b"fire and",
b"and earth",
b"earth michigan",
b"fire and earth",
b"and earth michigan",
],
]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=3,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_multiple_ngram_values(self):
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [
[
b"earth wind",
b"wind and",
b"and fire",
b"earth wind and",
b"wind and fire",
],
[
b"fire and",
b"and earth",
b"earth michigan",
b"fire and earth",
b"and earth michigan",
],
]
# pyformat: enable
input_data = keras.Input(shape=(4,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
ngrams=(2, 3),
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_multiple_preprocessing_steps(self):
input_array = np.array(
[["earth wInD and firE"], ["\tfire\tand\nearth!! michig@n "]]
)
expected_output = [
[
b"earth",
b"wind",
b"and",
b"fire",
b"earth wind",
b"wind and",
b"and fire",
],
[
b"fire",
b"and",
b"earth",
b"michign",
b"fire and",
b"and earth",
b"earth michign",
],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,
split=text_vectorization.WHITESPACE,
ngrams=2,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_string_splitting_with_non_1d_array_fails(self):
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
vocabulary=["a"],
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=None,
)
with self.assertRaisesRegex(
ValueError, "last shape dimension must be 1"
):
_ = layer(input_data)
def test_string_splitting_with_non_1d_raggedarray_fails(self):
input_data = keras.Input(shape=(None,), ragged=True, dtype=tf.string)
layer = text_vectorization.TextVectorization(
vocabulary=["a"],
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=None,
)
with self.assertRaisesRegex(
ValueError, "last shape dimension must be 1"
):
_ = layer(input_data)
def test_standardization_with_invalid_standardize_arg(self):
with self.assertRaisesRegex(
ValueError, "Unkown value for `standardize`"
):
text_vectorization.TextVectorization(
vocabulary=["a"], standardize="unsupported"
)
def test_splitting_with_invalid_split_arg(self):
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(vocabulary=["a"])
layer._split = "unsupported"
with self.assertRaisesRegex(
ValueError, ".*is not a supported splitting.*"
):
_ = layer(input_data)
def test_vocab_setting_via_init(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_data,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_init_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocab_path,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_via_setter(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_setting_with_oov_via_setter(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationDistributionTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_distribution_strategy_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
strategy = tf.distribute.OneDeviceStrategy("/cpu:0")
with strategy.scope():
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationOutputTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_int_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4
# in the second. This should output a 2x5 tensor with a padding value in
# the second example.
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
expected_output = [[2, 3, 4, 1, 5], [5, 4, 2, 1, 0]]
# This test doesn't explicitly set an output shape, so the 2nd dimension
# should stay 'None'.
expected_output_shape = [None, None]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_ragged(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4
# in the second.
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
expected_output = tf.ragged.constant([[2, 3, 4, 1, 5], [5, 4, 2, 1]])
expected_output_shape = [None, None]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
ragged=True,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4
# in the second. This should output a 2x6 tensor with a padding value in
# the second example, since output_sequence_length is set to 6.
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
expected_output = [[2, 3, 4, 1, 5, 0], [5, 4, 2, 1, 0, 0]]
output_sequence_length = 6
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_densifies_with_zeros_and_strips(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4
# in the second. This should output a 2x3 tensor with a padding value in
# the second example, since output_sequence_length is set to 3.
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_int_output_dynamically_strips_and_pads(self):
vocab_data = ["earth", "wind", "and", "fire"]
# Create an input array that has 5 elements in the first example and 4
# in the second. This should output a 2x3 tensor with a padding value in
# the second example, since output_sequence_length is set to 3.
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
expected_output = [[2, 3, 4], [5, 4, 2]]
output_sequence_length = 3
expected_output_shape = [None, output_sequence_length]
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
# Create an input array that has 1 element in the first example and 2 in
# the second. This should output a 2x3 tensor with a padding value in
# the second example, since output_sequence_length is set to 3.
input_array_2 = np.array([["wind"], ["fire and"]])
expected_output_2 = [[3, 0, 0], [5, 4, 0]]
output_dataset = model.predict(input_array_2)
self.assertAllEqual(expected_output_2, output_dataset)
@parameterized.parameters(
{"sparse": True},
{"sparse": False},
)
def test_multi_hot_output_hard_maximum(self, sparse):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=True,
sparse=sparse,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
if sparse:
expected_output = tf.sparse.from_dense(tf.constant(expected_output))
self.assertAllEqual(expected_output.indices, output_dataset.indices)
self.assertAllEqual(expected_output.values, output_dataset.values)
else:
self.assertAllEqual(expected_output, output_dataset)
@parameterized.parameters(
{"sparse": True},
{"sparse": False},
)
def test_multi_hot_output_soft_maximum(self, sparse):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0], [1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=False,
sparse=sparse,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
if sparse:
expected_output = tf.sparse.from_dense(tf.constant(expected_output))
self.assertAllEqual(expected_output.indices, output_dataset.indices)
self.assertAllEqual(expected_output.values, output_dataset.values)
else:
self.assertAllEqual(expected_output, output_dataset)
def test_multi_hot_output_hard_maximum_set_vocabulary_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0], [1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=True,
)
int_data = layer(input_data)
layer.set_vocabulary(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_multi_hot_output_hard_maximum_adapt_after_build(self):
vocab_data = np.array(
[
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
"and",
"and",
"fire",
]
)
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0], [1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=True,
)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_multi_hot_output_hard_maximum_multiple_adapts(self):
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
adapt_data = [
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
]
first_expected_output = [
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
]
second_adapt_data = [
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
"and",
"and",
"fire",
]
second_expected_output = [
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=True,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Test the first adapt
layer.adapt(adapt_data)
first_output = model.predict(input_array)
# Test the second adapt
layer.adapt(second_adapt_data)
# We need to recompile the model to retrace our call graph.
model.compile()
second_output = model.predict(input_array)
self.assertAllEqual(first_expected_output, first_output)
self.assertAllEqual(second_expected_output, second_output)
def test_multi_hot_output_soft_maximum_set_state_after_build(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 1, 1, 1, 0], [1, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=False,
)
layer.build(input_data.shape)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_vocab_size_changed_pad_to_max_false_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
pad_to_max_tokens=False,
)
layer.adapt(vocab_data)
_ = layer(input_data)
with self.assertRaisesRegex(
RuntimeError, "vocabulary size cannot be changed"
):
layer.set_vocabulary(vocab_data[:2])
def test_count_output_hard_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0, 0], [2, 1, 0, 1, 0, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
def test_count_output_soft_maximum(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 2, 1, 1, 0], [2, 1, 0, 1, 0]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=False,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_tfidf_output_hard_maximum(self, sparse):
vocab_data = ["earth", "wind", "and", "fire"]
# OOV idf weight (bucket 0) should 0.5, the average of passed weights.
idf_weights = [0.4, 0.25, 0.75, 0.6]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 0.8, 0.25, 0.75, 0, 0], [1, 0.4, 0, 0, 0.6, 0]]
# pyformat: enable
max_tokens = 6
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=6,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
pad_to_max_tokens=True,
sparse=sparse,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
if sparse:
output_dataset = tf.sparse.to_dense(output_dataset)
self.assertAllClose(expected_output, output_dataset)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_tfidf_output_soft_maximum(self, sparse):
vocab_data = ["earth", "wind", "and", "fire"]
# OOV idf weight (bucket 0) should 0.5, the average of passed weights.
idf_weights = [0.4, 0.25, 0.75, 0.6]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 0.8, 0.25, 0.75, 0], [1, 0.4, 0, 0, 0.6]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
pad_to_max_tokens=False,
sparse=sparse,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
if sparse:
output_dataset = tf.sparse.to_dense(output_dataset)
self.assertAllClose(expected_output, output_dataset)
@parameterized.named_parameters(
("sparse", True),
("dense", False),
)
def test_tfidf_output_set_oov_weight(self, sparse):
vocab_data = ["[UNK]", "earth", "wind", "and", "fire"]
idf_weights = [0.1, 0.4, 0.25, 0.75, 0.6]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 0.8, 0.25, 0.75, 0], [0.2, 0.4, 0, 0, 0.6]]
# pyformat: enable
max_tokens = 5
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
pad_to_max_tokens=False,
sparse=sparse,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
int_data = layer(input_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
if sparse:
output_dataset = tf.sparse.to_dense(output_dataset)
self.assertAllClose(expected_output, output_dataset)
def test_accept_1D_input(self):
input_array = np.array(
["earth wind and fire", "fire and earth michigan"]
)
layer = text_vectorization.TextVectorization(
standardize=None, split=None, output_mode="int"
)
layer.adapt(input_array)
_ = layer(input_array)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationModelBuildingTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
@parameterized.named_parameters(
{
"testcase_name": "count_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.COUNT,
},
{
"testcase_name": "count_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.COUNT,
},
{
"testcase_name": "binary_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.MULTI_HOT,
},
{
"testcase_name": "binary_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.MULTI_HOT,
},
{
"testcase_name": "tfidf_hard_max",
"pad_to_max_tokens": True,
"output_mode": text_vectorization.TF_IDF,
},
{
"testcase_name": "tfidf_soft_max",
"pad_to_max_tokens": False,
"output_mode": text_vectorization.TF_IDF,
},
)
def test_end_to_end_bagged_modeling(self, output_mode, pad_to_max_tokens):
vocab_data = ["earth", "wind", "and", "fire"]
if output_mode == text_vectorization.TF_IDF:
idf_weights = [0.5, 0.25, 0.2, 0.125]
else:
idf_weights = None
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=10,
standardize=None,
split=None,
output_mode=output_mode,
pad_to_max_tokens=pad_to_max_tokens,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
int_data = layer(input_data)
float_data = backend.cast(int_data, dtype="float32")
output_data = core.Dense(64)(float_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
def test_end_to_end_vocab_modeling(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[["earth wind and also fire"], ["fire and earth michigan"]]
)
output_sequence_length = 6
max_tokens = 5
# The input shape here is explicitly 1 because we're tokenizing.
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=text_vectorization.WHITESPACE,
output_mode=text_vectorization.INT,
output_sequence_length=output_sequence_length,
)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
embedded_data = core.Embedding(input_dim=max_tokens + 1, output_dim=32)(
int_data
)
output_data = convolutional.Conv1D(
250, 3, padding="valid", activation="relu", strides=1
)(embedded_data)
model = keras.Model(inputs=input_data, outputs=output_data)
_ = model.predict(input_array)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationVocabularyTest(
test_combinations.TestCase,
preprocessing_test_utils.PreprocessingLayerTest,
):
def test_get_vocabulary(self):
vocab = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(vocabulary=vocab)
self.assertAllEqual(
layer.get_vocabulary(),
["", "[UNK]", "earth", "wind", "and", "fire"],
)
def test_get_vocabulary_adapt(self):
vocab = np.array(
[["earth earth earth earth wind wind wind and and fire"]]
)
layer = text_vectorization.TextVectorization()
layer.adapt(vocab)
self.assertAllEqual(
layer.get_vocabulary(),
["", "[UNK]", "earth", "wind", "and", "fire"],
)
def test_get_vocabulary_no_special_tokens(self):
vocab = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(vocabulary=vocab)
self.assertAllEqual(
layer.get_vocabulary(include_special_tokens=False),
["earth", "wind", "and", "fire"],
)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationErrorTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_too_long_vocab_fails_in_single_setting(self):
vocab_data = ["earth", "wind", "and", "fire"]
layer = text_vectorization.TextVectorization(
max_tokens=4,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
with self.assertRaisesRegex(
ValueError, "vocabulary larger than the maximum vocab.*"
):
layer.set_vocabulary(vocab_data)
def test_setting_vocab_without_idf_weights_fails_in_tfidf_mode(self):
vocab_data = ["earth", "wind", "and", "fire"]
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be set if output_mode is TF_IDF"
):
text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
vocabulary=vocab_data,
)
def test_idf_weights_length_mismatch_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3]
with self.assertRaisesRegex(
ValueError, "`idf_weights` must be the same length as vocab"
):
text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
def test_set_tfidf_in_non_tfidf_fails(self):
vocab_data = ["earth", "wind", "and", "fire"]
idf_weights = [1, 2, 3, 4]
with self.assertRaisesRegex(
ValueError, "`idf_weights` should only be set if"
):
text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.MULTI_HOT,
vocabulary=vocab_data,
idf_weights=idf_weights,
)
def test_zero_max_tokens_fails(self):
with self.assertRaisesRegex(ValueError, "max_tokens.*"):
_ = text_vectorization.TextVectorization(max_tokens=0)
def test_non_string_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "dtype of string.*"):
_ = text_vectorization.TextVectorization(dtype=tf.int64)
def test_unknown_standardize_arg_fails(self):
with self.assertRaisesRegex(
ValueError, "`standardize` arg.*unsupported_value"
):
_ = text_vectorization.TextVectorization(
standardize="unsupported_value"
)
def test_unknown_split_arg_fails(self):
with self.assertRaisesRegex(
ValueError, "`split` arg.*unsupported_value"
):
_ = text_vectorization.TextVectorization(split="unsupported_value")
def test_unknown_output_mode_arg_fails(self):
with self.assertRaisesRegex(
ValueError, "`output_mode` arg.*unsupported_value"
):
_ = text_vectorization.TextVectorization(
output_mode="unsupported_value"
)
def test_unknown_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*unsupported_value"):
_ = text_vectorization.TextVectorization(ngrams="unsupported_value")
def test_float_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*2.9"):
_ = text_vectorization.TextVectorization(ngrams=2.9)
def test_float_tuple_ngrams_arg_fails(self):
with self.assertRaisesRegex(ValueError, "ngrams.*(1.3, 2.9)"):
_ = text_vectorization.TextVectorization(ngrams=(1.3, 2.9))
def test_non_int_output_sequence_length_dtype_fails(self):
with self.assertRaisesRegex(ValueError, "output_sequence_length.*2.0"):
_ = text_vectorization.TextVectorization(
output_mode="int", output_sequence_length=2.0
)
def test_non_none_output_sequence_length_fails_if_output_mode_not_int(self):
with self.assertRaisesRegex(
ValueError, "`output_sequence_length` must not be set"
):
_ = text_vectorization.TextVectorization(
output_mode="count", output_sequence_length=2
)
def test_non_none_output_sequence_length_fails_if_ragged_true(self):
with self.assertRaisesRegex(
ValueError, "`output_sequence_length` must not be set"
):
_ = text_vectorization.TextVectorization(
ragged=True, output_sequence_length=2
)
def test_ragged_true_fails_if_output_mode_not_int(self):
with self.assertRaisesRegex(ValueError, "`ragged` must not be true if"):
_ = text_vectorization.TextVectorization(
ragged=True, output_mode=text_vectorization.MULTI_HOT
)
def test_sparse_true_fails_if_output_mode_is_int(self):
with self.assertRaisesRegex(ValueError, "`sparse` may only be true if"):
_ = text_vectorization.TextVectorization(
sparse=True, output_mode=text_vectorization.INT
)
# Custom functions for the custom callable serialization test. Declared here
# to avoid multiple registrations from run_all_keras_modes().
@register_keras_serializable(package="Test")
def custom_standardize_fn(x):
return tf.strings.lower(x)
@register_keras_serializable(package="Test")
def custom_split_fn(x):
return tf.strings.split(x, sep=">")
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationSavingTest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def tearDown(self):
keras.backend.clear_session()
gc.collect()
super(TextVectorizationSavingTest, self).tearDown()
@parameterized.parameters(
{"init_vocab": True},
{"init_vocab": False},
)
def test_saving(self, init_vocab):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
vocabulary = vocab_data if init_vocab else None
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocabulary,
)
if not init_vocab:
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
@parameterized.parameters(
{"init_vocab": True},
{"init_vocab": False},
)
def test_saving_when_nested(self, init_vocab):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
vocabulary = vocab_data if init_vocab else None
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
vocabulary=vocabulary,
)
if not init_vocab:
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
outer_input = keras.Input(shape=(None,), dtype=tf.string)
outer_output = model(outer_input)
outer_model = keras.Model(inputs=outer_input, outputs=outer_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
outer_model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_when_adapted(self):
adapt_data = [
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
"and",
"and",
"fire",
]
input_array = np.array(
[
["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"],
]
)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT,
)
layer.adapt(adapt_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
# Delete the session and graph to ensure that the loaded model is
# generated from scratch.
keras.backend.clear_session()
loaded_model = keras.models.load_model(output_path)
self.assertAllEqual(loaded_model.predict(input_array), expected_output)
def test_saving_with_tfidf(self):
vocab_data = ["earth", "wind", "and", "fire"]
# OOV idf weight (bucket 0) should 0.5, the average of passed weights.
idf_weights = [0.4, 0.25, 0.75, 0.6]
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "fire", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[0, 0.8, 0.25, 0.75, 0], [1, 0.4, 0, 0, 0.6]]
vocab_data = ["earth", "wind", "and", "fire"]
# pyformat: enable
# Build and validate a golden model.
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=5,
standardize=None,
split=None,
output_mode=text_vectorization.TF_IDF,
)
layer.set_vocabulary(vocab_data, idf_weights=idf_weights)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllClose(output_dataset, expected_output)
# Save the model to disk.
output_path = os.path.join(self.get_temp_dir(), "tf_keras_saved_model")
model.save(output_path, save_format="tf")
loaded_model = keras.models.load_model(output_path)
# Ensure that the loaded model is unique (so that the save/load is real)
self.assertIsNot(model, loaded_model)
# Validate correctness of the new model.
new_output_dataset = loaded_model.predict(input_array)
self.assertAllClose(new_output_dataset, expected_output)
def test_serialization_with_custom_callables(self):
input_array = np.array(
[["earth>wind>and Fire"], ["\tfire>And\nearth>michigan"]]
)
expected_output = [
[b"earth", b"wind", b"and fire"],
[b"\tfire", b"and\nearth", b"michigan"],
]
input_data = keras.Input(shape=(1,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=custom_standardize_fn,
split=custom_split_fn,
ngrams=None,
output_mode=None,
)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_array)
self.assertAllEqual(expected_output, output_dataset)
serialized_model_data = model.get_config()
new_model = keras.Model.from_config(serialized_model_data)
new_output_dataset = new_model.predict(input_array)
self.assertAllEqual(expected_output, new_output_dataset)
@test_utils.run_v2_only()
def test_saving_v3(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array(["earth, wind, and fire"])
# First, with a static vocabulary.
input_data = keras.Input(shape=(), dtype=tf.string)
layer = text_vectorization.TextVectorization(vocabulary=vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
temp_dir = self.get_temp_dir()
model_path = os.path.join(temp_dir, "mymodel.keras")
model.save(model_path, save_format="keras_v3")
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
# Second, with adapt().
input_data = keras.Input(shape=(), dtype=tf.string)
layer = text_vectorization.TextVectorization()
layer.adapt(vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
model.save(model_path, save_format="keras_v3", overwrite=True)
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
# Test TF-IDF + adapt().
input_data = keras.Input(shape=(), dtype=tf.string)
layer = text_vectorization.TextVectorization(output_mode="tf_idf")
layer.adapt(vocab_data)
output = layer(input_data)
model = keras.Model(inputs=input_data, outputs=output)
ref_output = model.predict(input_array)
model.save(model_path, save_format="keras_v3", overwrite=True)
model = keras.models.load_model(model_path)
output = model.predict(input_array)
self.assertAllEqual(output, ref_output)
@test_utils.run_v2_only
@test_combinations.run_all_keras_modes(always_skip_v1=True)
class TextVectorizationE2ETest(
test_combinations.TestCase, preprocessing_test_utils.PreprocessingLayerTest
):
def test_keras_vocab_trimming_example(self):
vocab_data = np.array(
[
"earth",
"earth",
"earth",
"earth",
"wind",
"wind",
"wind",
"and",
"and",
"fire",
]
)
input_array = np.array(
[
["earth", "wind", "and", "earth"],
["ohio", "and", "earth", "michigan"],
]
)
# pyformat: disable
expected_output = [[1, 2, 1], [3, 1, 0]]
# pyformat: enable
max_tokens = 3
expected_output_shape = [None, max_tokens]
input_data = keras.Input(shape=(None,), dtype=tf.string)
layer = text_vectorization.TextVectorization(
max_tokens=max_tokens,
standardize=None,
split=None,
output_mode=text_vectorization.COUNT,
pad_to_max_tokens=True,
)
int_data = layer(input_data)
layer.adapt(vocab_data)
self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
model = keras.Model(input_data, int_data)
output = model.predict(input_array)
self.assertAllEqual(expected_output, output)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/preprocessing/text_vectorization_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/preprocessing/text_vectorization_test.py",
"repo_id": "tf-keras",
"token_count": 46838
} | 196 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras upsampling layer for 2D inputs."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine.base_layer import Layer
from tf_keras.engine.input_spec import InputSpec
from tf_keras.utils import conv_utils
from tf_keras.utils import image_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.UpSampling2D")
class UpSampling2D(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Examples:
>>> input_shape = (2, 2, 1, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[ 0 1 2]]
[[ 3 4 5]]]
[[[ 6 7 8]]
[[ 9 10 11]]]]
>>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
>>> print(y)
tf.Tensor(
[[[[ 0 1 2]
[ 0 1 2]]
[[ 3 4 5]
[ 3 4 5]]]
[[[ 6 7 8]
[ 6 7 8]]
[[ 9 10 11]
[ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)
Args:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
When unspecified, uses
`image_data_format` value found in your TF-Keras config file at
`~/.keras/keras.json` (if exists) else 'channels_last'.
Defaults to 'channels_last'.
interpolation: A string, one of `"area"`, `"bicubic"`, `"bilinear"`,
`"gaussian"`, `"lanczos3"`, `"lanczos5"`, `"mitchellcubic"`,
`"nearest"`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(
self, size=(2, 2), data_format=None, interpolation="nearest", **kwargs
):
super().__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, "size")
self.interpolation = image_utils.get_interpolation(interpolation)
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if self.data_format == "channels_first":
height = (
self.size[0] * input_shape[2]
if input_shape[2] is not None
else None
)
width = (
self.size[1] * input_shape[3]
if input_shape[3] is not None
else None
)
return tf.TensorShape(
[input_shape[0], input_shape[1], height, width]
)
else:
height = (
self.size[0] * input_shape[1]
if input_shape[1] is not None
else None
)
width = (
self.size[1] * input_shape[2]
if input_shape[2] is not None
else None
)
return tf.TensorShape(
[input_shape[0], height, width, input_shape[3]]
)
def call(self, inputs):
return backend.resize_images(
inputs,
self.size[0],
self.size[1],
self.data_format,
interpolation=self.interpolation,
)
def get_config(self):
config = {
"size": self.size,
"data_format": self.data_format,
"interpolation": self.interpolation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| tf-keras/tf_keras/layers/reshaping/up_sampling2d.py/0 | {
"file_path": "tf-keras/tf_keras/layers/reshaping/up_sampling2d.py",
"repo_id": "tf-keras",
"token_count": 2299
} | 197 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GRU layer."""
import copy
import os
import shutil
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras as keras
from tf_keras.layers.rnn import gru_lstm_utils
from tf_keras.testing_infra import test_combinations
from tf_keras.testing_infra import test_utils
from tf_keras.utils import np_utils
# isort: off
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import (
test_util as tf_test_util,
)
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
@test_utils.run_all_without_tensor_float_32("RNN GRU can use TF32 on GPU")
@test_combinations.run_all_keras_modes(config=_config)
class GRUGraphRewriteTest(test_combinations.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
@parameterized.named_parameters(
("non_tan_activation", "relu", "sigmoid", 0, False, True, True),
("non_sigmoid_recur_activation", "tanh", "relu", 0, False, True, True),
("use_recurrent_dropout", "tanh", "sigmoid", 0.1, False, True, True),
("unroll", "tanh", "sigmoid", 0, True, True, True),
("not_use_bias", "tanh", "sigmoid", 0, False, False, True),
("not_reset_after", "tanh", "sigmoid", 0, False, True, False),
)
@test_utils.run_v2_only
def test_could_use_defun_backend(
self,
activation,
recurrent_activation,
recurrent_dropout,
unroll,
use_bias,
reset_after,
):
layer = keras.layers.GRU(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias,
reset_after=reset_after,
)
self.assertFalse(layer._could_use_gpu_kernel)
@test_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = keras.layers.GRU(1, activation=tf.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = keras.layers.GRU(1, recurrent_activation=tf.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
def test_keras_model_with_gru(self):
epoch = 10
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
layer = keras.layers.GRU(self.rnn_state_size)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
outputs = layer(inputs)
model = keras.models.Model(inputs, outputs)
model.compile("rmsprop", loss="mse")
model.fit(x_train, y_train, epochs=epoch)
model.evaluate(x_train, y_train)
model.predict(x_train)
def test_dynamic_behavior_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.001), "mse")
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.GRU(10, return_sequences=True, unroll=False))
model.add(keras.layers.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_GRU(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@parameterized.named_parameters(
# test_name, use_bias, bias_initializer, activation
("normal", True, "zeros"),
("no_bias", False, "zeros"),
("random_bias", True, "random_uniform"),
)
def test_gru_v2_model_save_load(self, use_bias, bias_initializer):
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, "test.h5")
batch = 10
timestep = 3
input_dim = 5
units = 2
x = np.random.random((batch, timestep, input_dim))
def build_model():
inputs = keras.layers.Input(
shape=[timestep, input_dim], dtype=tf.float32
)
layer = keras.layers.GRU(
units, use_bias=use_bias, bias_initializer=bias_initializer
)
output = layer(inputs)
return keras.models.Model(inputs, output), layer
model, layer = build_model()
y_ref = model.predict(x)
model.save_weights(h5_path)
cloned_model, new_layer = build_model()
cloned_model.load_weights(h5_path)
y = cloned_model.predict(x)
self.assertAllClose(y, y_ref)
self.assertAllClose(layer.get_weights(), new_layer.get_weights())
def test_gru_v2_output_on_multiple_kernel(self):
x_train = np.random.random(
(self.batch, self.timestep, self.input_shape)
)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
with test_utils.device(should_use_gpu=False):
layer = keras.layers.GRU(self.rnn_state_size)
output = layer(inputs)
cpu_model = keras.models.Model(inputs, output)
weights = cpu_model.get_weights()
y_1 = cpu_model.predict(x_train)
with test_utils.device(should_use_gpu=True):
layer = keras.layers.GRU(self.rnn_state_size)
output = layer(inputs)
gpu_model = keras.models.Model(inputs, output)
gpu_model.set_weights(weights)
y_2 = gpu_model.predict(x_train)
self.assertAllClose(y_1, y_2, rtol=1e-5, atol=1e-5)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
def test_with_masking_layer_GRU(self):
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
def test_masking_with_stacking_GRU(self):
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(keras.layers.GRU(10, return_sequences=True, unroll=False))
model.add(keras.layers.GRU(5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_return_sequences_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "return_sequences": True},
input_shape=(num_samples, timesteps, embedding_dim),
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Double type is not yet supported in ROCm",
)
@test_utils.run_v2_only
def test_float64_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={
"units": units,
"return_sequences": True,
"dtype": "float64",
},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype="float64",
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
def test_return_states_GRU(self):
layer_class = keras.layers.GRU
x = np.random.random((2, 3, 4))
y = np.abs(np.random.random((2, 5)))
s = np.abs(np.random.random((2, 5)))
inputs = keras.layers.Input(shape=[3, 4], dtype=tf.float32)
masked = keras.layers.Masking()(inputs)
outputs, states = layer_class(units=5, return_state=True)(masked)
model = keras.models.Model(inputs, [outputs, states])
model.compile(
loss="categorical_crossentropy",
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.001),
)
model.fit(x, [y, s], epochs=1, batch_size=2, verbose=1)
def test_dropout_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "dropout": 0.1, "recurrent_dropout": 0.1},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_constraints_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint,
)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_GRU(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "implementation": implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_regularizers_GRU(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
activity_regularizer="l1",
)
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertEqual(len(layer.losses), 4)
else:
self.assertEqual(len(layer.get_losses_for(x)), 1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
def test_statefulness_GRU(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
layer.reset_states()
mix_padded_input = np.ones((num_samples, timesteps))
mix_padded_input[0, 1] = 0
mix_padded_input[1, 0] = 0
mix_padded_input[1, 2] = 0
out8 = model.predict(mix_padded_input)
self.assertAllClose(out7, out6, atol=1e-5)
self.assertAllClose(out8, out7, atol=1e-5)
def test_stateful_GRU_training(self):
# See b/123587692 for more context.
vocab_size = 20
embedding_dim = 10
batch_size = 8
timestep = 12
units = 5
x = np.random.randint(0, vocab_size, size=(batch_size, timestep))
y = np.random.randint(0, vocab_size, size=(batch_size, timestep))
model = keras.Sequential(
[
keras.layers.Embedding(
vocab_size,
embedding_dim,
batch_input_shape=[batch_size, timestep],
),
keras.layers.GRU(units, return_sequences=True, stateful=True),
keras.layers.Dense(vocab_size),
]
)
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x, y, epochs=1, shuffle=False)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
@test_utils.run_v2_only
def test_explicit_device_with_go_backward_and_mask(self):
batch_size = 8
timestep = 7
masksteps = 5
units = 4
inputs = np.random.randn(batch_size, timestep, units).astype(np.float32)
mask = np.ones((batch_size, timestep)).astype(bool)
mask[:, masksteps:] = 0
gru_layer = keras.layers.GRU(
units, return_sequences=True, go_backwards=True
)
with test_utils.device(should_use_gpu=True):
outputs_masked = gru_layer(inputs, mask=tf.constant(mask))
outputs_trimmed = gru_layer(inputs[:, :masksteps])
self.assertAllClose(outputs_masked[:, -masksteps:], outputs_trimmed)
@tf_test_util.enable_output_all_intermediates
def test_v1_session_behavior(self):
with tf.compat.v1.get_default_graph().as_default():
# See b/139132348 for more details.
x = np.random.uniform(size=(100, 4, 8))
y = np.random.uniform(size=(100, 1))
dataset = (
tf.data.Dataset.from_tensor_slices((x, y))
.shuffle(100)
.batch(32)
)
inp = keras.layers.Input(shape=(4, 8))
layer = keras.layers.GRU(1)(inp)
layer = keras.layers.Dense(1)(layer)
model = keras.models.Model(inp, layer)
model.compile(loss="mse", optimizer="sgd")
model.fit(dataset)
def test_with_fully_masked_inputs(self):
num_samples = 8
timestep = 5
embedding_dim = 4
vocab_size = 20
units = 2
inputs = np.random.randint(0, vocab_size, size=(num_samples, timestep))
# Set the first inputs to be fully zero.
inputs[0, :] = 0.0
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
vocab_size,
embedding_dim,
mask_zero=True,
input_length=timestep,
batch_input_shape=(num_samples, timestep),
)
)
layer = keras.layers.GRU(units)
model.add(layer)
model.compile(
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01),
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
# Make sure it doesn't crash with cudnn kernel.
model.predict(inputs)
# TODO (b/169895267): test with xla_gpu is disabled.
def test_deepcopy(self):
if not tf.executing_eagerly():
self.skipTest("v2-only test")
original_layer = keras.layers.GRU(5)
copied_layer = copy.deepcopy(original_layer)
self.assertEqual(copied_layer.units, 5)
self.assertEqual(
original_layer.get_config(), original_layer.get_config()
)
# Copy layer before layer call on inputs without weight initialization.
inputs = np.random.normal(size=[32, 10, 8]).astype(np.float32)
original_layer = keras.layers.GRU(4)
copied_layer = copy.deepcopy(original_layer)
outputs = original_layer(inputs)
copied_outputs = copied_layer(inputs)
self.assertNotAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs)
)
# Copy layer after layer call on inputs with weight initialization.
original_layer = keras.layers.GRU(4)
outputs = original_layer(inputs)
copied_layer = copy.deepcopy(original_layer)
copied_outputs = copied_layer(inputs)
self.assertAllClose(
self.evaluate(outputs), self.evaluate(copied_outputs)
)
def _test_runtime_with_model(self, model):
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer="sgd", loss=["categorical_crossentropy", None])
existing_loss = 0
for _ in range(self.epoch):
history = model.fit(x_train, y_train)
loss_value = history.history["loss"][0]
self.assertNotEqual(existing_loss, loss_value)
existing_loss = loss_value
_, runtime_value = model.predict(x_train)
if not tf.sysconfig.get_build_info()["is_rocm_build"]:
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
@test_utils.run_v2_only
def test_GRU_runtime(self):
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
outputs, runtime = layer(inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
# TF model does not work with scalar model output, specially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message=(
"Skipping as ROCm MIOpen does not support padded input yet."
),
)
@test_utils.run_v2_only
def test_GRU_runtime_with_mask(self):
# Masking will affect which backend is selected based on whether the
# mask is strictly right padded.
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
masked_inputs = keras.layers.Masking()(inputs)
outputs, runtime = layer(masked_inputs)
# Expand the runtime so that it is a 1D tensor instead of scalar.
# TF model does not work with scalar model output, specially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=self.batch,
test_samples=0,
input_shape=(self.timestep, self.input_shape),
num_classes=self.output_shape,
)
y_train = np_utils.to_categorical(y_train, self.output_shape)
model.compile(
optimizer="sgd",
loss=["categorical_crossentropy", None],
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(x_train, y_train)
# Verify unpadded data.
_, runtime_value = model.predict(x_train)
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
# Update x/y to be right padded by setting the last timestep to 0
x_train[:, -1, :] = 0
y_train[:, -1] = 0
_, runtime_value = model.predict(x_train)
if tf.test.is_gpu_available():
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_GPU)
else:
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
# Further update x/y to be mix padded (masks in the middle), and verify
# only cpu kernel can be selected.
x_train[:, -3, :] = 0
y_train[:, -3] = 0
_, runtime_value = model.predict(x_train)
self.assertEqual(runtime_value[0], gru_lstm_utils.RUNTIME_CPU)
@test_utils.run_v2_only
def test_GRU_runtime_with_cond(self):
# This test is to demonstrate the graph rewrite of grappler plugin under
# the condition that the function returns different number of internal
# states.
layer = keras.layers.GRU(self.rnn_state_size, return_runtime=True)
inputs = keras.layers.Input(
shape=[self.timestep, self.input_shape], dtype=tf.float32
)
zeros = tf.zeros([self.batch, self.output_shape])
dummy_runtime = gru_lstm_utils.runtime(gru_lstm_utils.RUNTIME_UNKNOWN)
a = tf.constant(0)
b = tf.constant(1)
# Will always run the GRU layer.
outputs, runtime = tf.cond(
tf.less(a, b), lambda: layer(inputs), lambda: (zeros, dummy_runtime)
)
# Expand the runtime so that it is a 1D tensor instead of scalar.
# TF model does not work with scalar model output, specially during
# aggregation.
runtime = keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1))(
runtime
)
model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
self._test_runtime_with_model(model)
@test_utils.run_all_without_tensor_float_32("RNN GRU can use TF32 on GPU")
class GRULayerGradientTapeTest(test_combinations.TestCase):
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
def test_in_tape(self):
with self.test_session(config=_config):
time_steps = 10
embedding_size = 11
gru_unit_size = 12
gru_layer = keras.layers.GRU(
gru_unit_size,
return_sequences=True,
return_state=True,
recurrent_activation="sigmoid",
recurrent_initializer="glorot_uniform",
)
x = tf.random.uniform([1, time_steps, embedding_size])
y = tf.random.uniform([1, gru_unit_size])
with tf.GradientTape() as tape:
hidden_state = tf.zeros([1, gru_unit_size], dtype=tf.float32)
_, state = gru_layer(x, initial_state=hidden_state)
loss = tf.reduce_mean(tf.square(state - y))
tape.gradient(loss, gru_layer.variables)
@test_combinations.run_all_keras_modes
class GRULayerTest(test_combinations.TestCase):
def test_return_sequences_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "return_sequences": True},
input_shape=(num_samples, timesteps, embedding_dim),
)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="Double type is not yet supported in ROCm",
)
@test_utils.run_v2_only
def test_float64_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={
"units": units,
"return_sequences": True,
"dtype": "float64",
},
input_shape=(num_samples, timesteps, embedding_dim),
input_dtype="float64",
)
def test_dynamic_behavior_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer = keras.layers.GRU(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "dropout": 0.1, "recurrent_dropout": 0.1},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_recurrent_dropout_with_implementation_restriction(self):
layer = keras.layers.GRU(2, recurrent_dropout=0.1, implementation=2)
# The implementation is force to 1 due to the limit of
# recurrent_dropout.
self.assertEqual(layer.implementation, 1)
@test_utils.run_v2_only
def test_dropout_variable_name(self):
layer = keras.layers.RNN(
keras.layers.GRUCell(2, dropout=0.1, force_generator=True)
)
layer(np.random.random((2, 3, 4)))
self.assertEqual(
layer.cell._random_generator._generator._state_var.name,
"rnn/gru_cell/StateVar:0",
)
layer = keras.layers.GRU(2, dropout=0.1, force_generator=True)
layer(np.random.random((2, 3, 4)))
self.assertEqual(
layer._random_generator._generator._state_var.name,
"gru/StateVar:0",
)
@parameterized.parameters([0, 1, 2])
def test_implementation_mode_gru(self, implementation_mode):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
test_utils.layer_test(
keras.layers.GRU,
kwargs={"units": units, "implementation": implementation_mode},
input_shape=(num_samples, timesteps, embedding_dim),
)
def test_reset_after_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
(x_train, y_train), _ = test_utils.get_test_data(
train_samples=num_samples,
test_samples=0,
input_shape=(timesteps, embedding_dim),
num_classes=units,
)
y_train = np_utils.to_categorical(y_train, units)
inputs = keras.layers.Input(shape=[timesteps, embedding_dim])
gru_layer = keras.layers.GRU(units, reset_after=True)
output = gru_layer(inputs)
gru_model = keras.models.Model(inputs, output)
gru_model.compile(
"rmsprop", "mse", run_eagerly=test_utils.should_run_eagerly()
)
gru_model.fit(x_train, y_train)
gru_model.predict(x_train)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="MIOpen only supports packed input output",
)
def test_with_masking_layer_gru(self):
layer_class = keras.layers.GRU
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(
loss="categorical_crossentropy",
optimizer="rmsprop",
run_eagerly=test_utils.should_run_eagerly(),
)
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
@tf.test.disable_with_predicate(
pred=tf.test.is_built_with_rocm,
skip_message="MIOpen only supports packed input output",
)
def test_statefulness_gru(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.GRU
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps),
)
)
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None
)
model.add(layer)
model.compile(
optimizer="sgd",
loss="mse",
run_eagerly=test_utils.should_run_eagerly(),
)
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units))
)
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_get_initial_states(self):
batch_size = 4
cell = keras.layers.GRUCell(20)
initial_state = cell.get_initial_state(
batch_size=batch_size, dtype=tf.float32
)
_, state = cell(
np.ones((batch_size, 20), dtype=np.float32), initial_state
)
self.assertEqual(state.shape, initial_state.shape)
@test_utils.run_v2_only
def test_cloned_weight_names(self):
inp = keras.Input([None, 3])
rnn = keras.layers.GRU(units=3)
model = keras.Model(inp, rnn(inp))
clone = keras.models.clone_model(model)
model_names = [x.name for x in model.weights]
clone_names = [x.name for x in clone.weights]
self.assertEqual(model_names, clone_names)
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class GRULayerGenericTest(tf.test.TestCase):
def test_constraints_gru(self):
embedding_dim = 4
layer_class = keras.layers.GRU
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint,
)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.cell.kernel.constraint, k_constraint)
self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.cell.bias.constraint, b_constraint)
def test_from_config_gru(self):
layer_class = keras.layers.GRU
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
def test_deep_copy_gru(self):
cell = keras.layers.GRUCell(5)
copied_cell = copy.deepcopy(cell)
self.assertEqual(copied_cell.units, 5)
self.assertEqual(cell.get_config(), copied_cell.get_config())
def test_regularizers_gru(self):
embedding_dim = 4
layer_class = keras.layers.GRU
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer="l2",
activity_regularizer="l1",
)
layer.build((None, None, 2))
self.assertLen(layer.losses, 3)
x = keras.backend.variable(np.ones((2, 3, 2)))
layer(x)
if tf.executing_eagerly():
self.assertLen(layer.losses, 4)
else:
self.assertLen(layer.get_losses_for(x), 1)
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/layers/rnn/gru_test.py/0 | {
"file_path": "tf-keras/tf_keras/layers/rnn/gru_test.py",
"repo_id": "tf-keras",
"token_count": 18555
} | 198 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions."""
import threading
import tensorflow.compat.v2 as tf
from tf_keras.engine import base_layer
from tf_keras.engine import input_layer
from tf_keras.engine import input_spec
from tf_keras.layers import activation
from tf_keras.layers import attention
from tf_keras.layers import convolutional
from tf_keras.layers import core
from tf_keras.layers import locally_connected
from tf_keras.layers import merging
from tf_keras.layers import pooling
from tf_keras.layers import regularization
from tf_keras.layers import reshaping
from tf_keras.layers import rnn
from tf_keras.layers.normalization import batch_normalization
from tf_keras.layers.normalization import batch_normalization_v1
from tf_keras.layers.normalization import group_normalization
from tf_keras.layers.normalization import layer_normalization
from tf_keras.layers.normalization import unit_normalization
from tf_keras.layers.preprocessing import category_encoding
from tf_keras.layers.preprocessing import discretization
from tf_keras.layers.preprocessing import hashed_crossing
from tf_keras.layers.preprocessing import hashing
from tf_keras.layers.preprocessing import image_preprocessing
from tf_keras.layers.preprocessing import integer_lookup
from tf_keras.layers.preprocessing import (
normalization as preprocessing_normalization,
)
from tf_keras.layers.preprocessing import string_lookup
from tf_keras.layers.preprocessing import text_vectorization
from tf_keras.layers.rnn import cell_wrappers
from tf_keras.layers.rnn import gru
from tf_keras.layers.rnn import lstm
from tf_keras.metrics import base_metric
from tf_keras.saving import serialization_lib
from tf_keras.saving.legacy import serialization as legacy_serialization
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.utils import generic_utils
from tf_keras.utils import tf_inspect as inspect
# isort: off
from tensorflow.python.util.tf_export import keras_export
ALL_MODULES = (
base_layer,
input_layer,
activation,
attention,
convolutional,
core,
locally_connected,
merging,
batch_normalization_v1,
group_normalization,
layer_normalization,
unit_normalization,
pooling,
image_preprocessing,
regularization,
reshaping,
rnn,
hashing,
hashed_crossing,
category_encoding,
discretization,
integer_lookup,
preprocessing_normalization,
string_lookup,
text_vectorization,
)
ALL_V2_MODULES = (
batch_normalization,
layer_normalization,
cell_wrappers,
gru,
lstm,
)
# ALL_OBJECTS is meant to be a global mutable. Hence we need to make it
# thread-local to avoid concurrent mutations.
LOCAL = threading.local()
def populate_deserializable_objects():
"""Populates dict ALL_OBJECTS with every built-in layer."""
global LOCAL
if not hasattr(LOCAL, "ALL_OBJECTS"):
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = None
if (
LOCAL.ALL_OBJECTS
and LOCAL.GENERATED_WITH_V2 == tf.__internal__.tf2.enabled()
):
# Objects dict is already generated for the proper TF version:
# do nothing.
return
LOCAL.ALL_OBJECTS = {}
LOCAL.GENERATED_WITH_V2 = tf.__internal__.tf2.enabled()
base_cls = base_layer.Layer
generic_utils.populate_dict_with_module_objects(
LOCAL.ALL_OBJECTS,
ALL_MODULES,
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls),
)
# Overwrite certain V1 objects with V2 versions
if tf.__internal__.tf2.enabled():
generic_utils.populate_dict_with_module_objects(
LOCAL.ALL_OBJECTS,
ALL_V2_MODULES,
obj_filter=lambda x: inspect.isclass(x) and issubclass(x, base_cls),
)
# These deserialization aliases are added for backward compatibility,
# as in TF 1.13, "BatchNormalizationV1" and "BatchNormalizationV2"
# were used as class name for v1 and v2 version of BatchNormalization,
# respectively. Here we explicitly convert them to their canonical names.
LOCAL.ALL_OBJECTS[
"BatchNormalizationV1"
] = batch_normalization_v1.BatchNormalization
LOCAL.ALL_OBJECTS[
"BatchNormalizationV2"
] = batch_normalization.BatchNormalization
# Prevent circular dependencies.
from tf_keras import models
from tf_keras.feature_column.sequence_feature_column import (
SequenceFeatures,
)
from tf_keras.premade_models.linear import (
LinearModel,
)
from tf_keras.premade_models.wide_deep import (
WideDeepModel,
)
LOCAL.ALL_OBJECTS["Input"] = input_layer.Input
LOCAL.ALL_OBJECTS["InputSpec"] = input_spec.InputSpec
LOCAL.ALL_OBJECTS["Functional"] = models.Functional
LOCAL.ALL_OBJECTS["Model"] = models.Model
LOCAL.ALL_OBJECTS["SequenceFeatures"] = SequenceFeatures
LOCAL.ALL_OBJECTS["Sequential"] = models.Sequential
LOCAL.ALL_OBJECTS["LinearModel"] = LinearModel
LOCAL.ALL_OBJECTS["WideDeepModel"] = WideDeepModel
if tf.__internal__.tf2.enabled():
from tf_keras.feature_column.dense_features_v2 import (
DenseFeatures,
)
LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
else:
from tf_keras.feature_column.dense_features import (
DenseFeatures,
)
LOCAL.ALL_OBJECTS["DenseFeatures"] = DenseFeatures
# Merging layers, function versions.
LOCAL.ALL_OBJECTS["add"] = merging.add
LOCAL.ALL_OBJECTS["subtract"] = merging.subtract
LOCAL.ALL_OBJECTS["multiply"] = merging.multiply
LOCAL.ALL_OBJECTS["average"] = merging.average
LOCAL.ALL_OBJECTS["maximum"] = merging.maximum
LOCAL.ALL_OBJECTS["minimum"] = merging.minimum
LOCAL.ALL_OBJECTS["concatenate"] = merging.concatenate
LOCAL.ALL_OBJECTS["dot"] = merging.dot
@keras_export("keras.layers.serialize")
def serialize(layer, use_legacy_format=False):
"""Serializes a `Layer` object into a JSON-compatible representation.
Args:
layer: The `Layer` object to serialize.
Returns:
A JSON-serializable dict representing the object's config.
Example:
```python
from pprint import pprint
model = tf.keras.models.Sequential()
model.add(tf.keras.Input(shape=(16,)))
model.add(tf.keras.layers.Dense(32, activation='relu'))
pprint(tf.keras.layers.serialize(model))
# prints the configuration of the model, as a dict.
"""
if isinstance(layer, base_metric.Metric):
raise ValueError(
f"Cannot serialize {layer} since it is a metric. "
"Please use the `keras.metrics.serialize()` and "
"`keras.metrics.deserialize()` APIs to serialize "
"and deserialize metrics."
)
if use_legacy_format:
return legacy_serialization.serialize_keras_object(layer)
return serialization_lib.serialize_keras_object(layer)
@keras_export("keras.layers.deserialize")
def deserialize(config, custom_objects=None, use_legacy_format=False):
"""Instantiates a layer from a config dictionary.
Args:
config: dict of the form {'class_name': str, 'config': dict}
custom_objects: dict mapping class names (or function names) of custom
(non-Keras) objects to class/functions
Returns:
Layer instance (may be Model, Sequential, Network, Layer...)
Example:
```python
# Configuration of Dense(32, activation='relu')
config = {
'class_name': 'Dense',
'config': {
'activation': 'relu',
'activity_regularizer': None,
'bias_constraint': None,
'bias_initializer': {'class_name': 'Zeros', 'config': {}},
'bias_regularizer': None,
'dtype': 'float32',
'kernel_constraint': None,
'kernel_initializer': {'class_name': 'GlorotUniform',
'config': {'seed': None}},
'kernel_regularizer': None,
'name': 'dense',
'trainable': True,
'units': 32,
'use_bias': True
}
}
dense_layer = tf.keras.layers.deserialize(config)
```
"""
populate_deserializable_objects()
if not config:
raise ValueError(
f"Cannot deserialize empty config. Received: config={config}"
)
if use_legacy_format:
return legacy_serialization.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="layer",
)
return serialization_lib.deserialize_keras_object(
config,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
printable_module_name="layer",
)
def get_builtin_layer(class_name):
"""Returns class if `class_name` is registered, else returns None."""
if not hasattr(LOCAL, "ALL_OBJECTS"):
populate_deserializable_objects()
return LOCAL.ALL_OBJECTS.get(class_name)
def deserialize_from_json(json_string, custom_objects=None):
"""Instantiates a layer from a JSON string."""
populate_deserializable_objects()
config = json_utils.decode_and_deserialize(
json_string,
module_objects=LOCAL.ALL_OBJECTS,
custom_objects=custom_objects,
)
return deserialize(config, custom_objects)
| tf-keras/tf_keras/layers/serialization.py/0 | {
"file_path": "tf-keras/tf_keras/layers/serialization.py",
"repo_id": "tf-keras",
"token_count": 3890
} | 199 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the pooling layer classes and their functional aliases."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tf_keras import layers as keras_layers
from tf_keras.legacy_tf_layers import base
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=["keras.__internal__.legacy.layers.AveragePooling1D"])
class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):
"""Average Pooling layer for 1D inputs.
Args:
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.AveragePooling1D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.average_pooling1d"])
def average_pooling1d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Average Pooling layer for 1D inputs.
Args:
inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
Returns:
The output tensor, of rank 3.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.average_pooling1d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.AveragePooling1D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.average_pooling1d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.AveragePooling1D` instead.",
stacklevel=2,
)
layer = AveragePooling1D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.MaxPooling1D"])
class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):
"""Max Pooling layer for 1D inputs.
Args:
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.MaxPooling1D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.max_pooling1d"])
def max_pooling1d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Max Pooling layer for 1D inputs.
Args:
inputs: The tensor over which to pool. Must have rank 3.
pool_size: An integer or tuple/list of a single integer,
representing the size of the pooling window.
strides: An integer or tuple/list of a single integer, specifying the
strides of the pooling operation.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string, one of `channels_last` (default) or
`channels_first`. The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, length, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, length)`.
name: A string, the name of the layer.
Returns:
The output tensor, of rank 3.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling1D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.max_pooling1d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.MaxPooling1D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.max_pooling1d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.MaxPooling1D` instead.",
stacklevel=2,
)
layer = MaxPooling1D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.AveragePooling2D"])
class AveragePooling2D(keras_layers.AveragePooling2D, base.Layer):
"""Average pooling layer for 2D inputs (e.g. images).
Args:
pool_size: An integer or tuple/list of 2 integers: (pool_height,
pool_width) specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.AveragePooling2D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.average_pooling2d"])
def average_pooling2d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Average pooling layer for 2D inputs (e.g. images).
Args:
inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height,
pool_width) specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.average_pooling2d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.average_pooling2d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.AveragePooling2D` instead.",
stacklevel=2,
)
layer = AveragePooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.MaxPooling2D"])
class MaxPooling2D(keras_layers.MaxPooling2D, base.Layer):
"""Max pooling layer for 2D inputs (e.g. images).
Args:
pool_size: An integer or tuple/list of 2 integers: (pool_height,
pool_width) specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.MaxPooling2D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.max_pooling2d"])
def max_pooling2d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Max pooling layer for 2D inputs (e.g. images).
Args:
inputs: The tensor over which to pool. Must have rank 4.
pool_size: An integer or tuple/list of 2 integers: (pool_height,
pool_width) specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling2D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.max_pooling2d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.MaxPooling2D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.max_pooling2d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.MaxPooling2D` instead.",
stacklevel=2,
)
layer = MaxPooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.AveragePooling3D"])
class AveragePooling3D(keras_layers.AveragePooling3D, base.Layer):
"""Average pooling layer for 3D inputs (e.g. volumes).
Args:
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.AveragePooling3D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.AveragePooling3D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.average_pooling3d"])
def average_pooling3d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Average pooling layer for 3D inputs (e.g. volumes).
Args:
inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.AveragePooling3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.average_pooling3d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.AveragePooling3D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.average_pooling3d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.AveragePooling3D` instead.",
stacklevel=2,
)
layer = AveragePooling3D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
@keras_export(v1=["keras.__internal__.legacy.layers.MaxPooling3D"])
class MaxPooling3D(keras_layers.MaxPooling3D, base.Layer):
"""Max pooling layer for 3D inputs (e.g. volumes).
Args:
pool_size: An integer or tuple/list of 3 integers:
(pool_depth, pool_height, pool_width)
specifying the size of the pooling window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 3 integers,
specifying the strides of the pooling operation.
Can be a single integer to specify the same value for
all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape
`(batch, depth, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
pooling = tf.compat.v1.layers.MaxPooling3D(pool_size=2, strides=2)
```
After:
```python
pooling = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2)
```
@end_compatibility
"""
def __init__(
self,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
**kwargs
):
if strides is None:
raise ValueError("Argument `strides` must not be None.")
super().__init__(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
**kwargs
)
@keras_export(v1=["keras.__internal__.legacy.layers.max_pooling3d"])
def max_pooling3d(
inputs,
pool_size,
strides,
padding="valid",
data_format="channels_last",
name=None,
):
"""Max pooling layer for 3D inputs (e.g.
volumes).
Args:
inputs: The tensor over which to pool. Must have rank 5.
pool_size: An integer or tuple/list of 3 integers: (pool_depth,
pool_height, pool_width) specifying the size of the pooling window. Can
be a single integer to specify the same value for all spatial
dimensions.
strides: An integer or tuple/list of 3 integers, specifying the strides of
the pooling operation. Can be a single integer to specify the same value
for all spatial dimensions.
padding: A string. The padding method, either 'valid' or 'same'.
Case-insensitive.
data_format: A string. The ordering of the dimensions in the inputs.
`channels_last` (default) and `channels_first` are supported.
`channels_last` corresponds to inputs with shape `(batch, depth, height,
width, channels)` while `channels_first` corresponds to inputs with
shape `(batch, channels, depth, height, width)`.
name: A string, the name of the layer.
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
@compatibility(TF2)
This API is a legacy api that is only compatible with eager execution and
`tf.function` if you combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`
Please refer to [tf.layers model mapping section of the migration guide]
(https://www.tensorflow.org/guide/migrate/model_mapping)
to learn how to use your TensorFlow v1 model in TF2 with TF-Keras.
The corresponding TensorFlow v2 layer is
`tf.keras.layers.MaxPooling3D`.
#### Structural Mapping to Native TF2
None of the supported arguments have changed name.
Before:
```python
y = tf.compat.v1.layers.max_pooling3d(x, pool_size=2, strides=2)
```
After:
To migrate code using TF1 functional layers use the [Keras Functional API]
(https://www.tensorflow.org/guide/keras/functional):
```python
x = tf.keras.Input((28, 28, 1))
y = tf.keras.layers.MaxPooling3D(pool_size=2, strides=2)(x)
model = tf.keras.Model(x, y)
```
@end_compatibility
"""
warnings.warn(
"`tf.layers.max_pooling3d` is deprecated and "
"will be removed in a future version. "
"Please use `tf.keras.layers.MaxPooling3D` instead.",
stacklevel=2,
)
layer = MaxPooling3D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
name=name,
)
return layer(inputs)
# Aliases
AvgPool2D = AveragePooling2D
MaxPool2D = MaxPooling2D
max_pool2d = max_pooling2d
avg_pool2d = average_pooling2d
| tf-keras/tf_keras/legacy_tf_layers/pooling.py/0 | {
"file_path": "tf-keras/tf_keras/legacy_tf_layers/pooling.py",
"repo_id": "tf-keras",
"token_count": 12544
} | 200 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hinge metrics."""
from tf_keras.dtensor import utils as dtensor_utils
from tf_keras.losses import categorical_hinge
from tf_keras.losses import hinge
from tf_keras.losses import squared_hinge
from tf_keras.metrics import base_metric
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.metrics.Hinge")
class Hinge(base_metric.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.3
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.1
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd', loss='mse', metrics=[tf.keras.metrics.Hinge()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="hinge", dtype=None):
super().__init__(hinge, name, dtype=dtype)
@keras_export("keras.metrics.SquaredHinge")
class SquaredHinge(base_metric.MeanMetricWrapper):
"""Computes the squared hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.86
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.46
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SquaredHinge()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="squared_hinge", dtype=None):
super().__init__(squared_hinge, name, dtype=dtype)
@keras_export("keras.metrics.CategoricalHinge")
class CategoricalHinge(base_metric.MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result().numpy()
1.4000001
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result().numpy()
1.2
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.CategoricalHinge()])
```
"""
@dtensor_utils.inject_mesh
def __init__(self, name="categorical_hinge", dtype=None):
super().__init__(categorical_hinge, name, dtype=dtype)
| tf-keras/tf_keras/metrics/hinge_metrics.py/0 | {
"file_path": "tf-keras/tf_keras/metrics/hinge_metrics.py",
"repo_id": "tf-keras",
"token_count": 1680
} | 201 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains function to log if devices are compatible with mixed precision."""
import itertools
import tensorflow.compat.v2 as tf
# isort: off
from tensorflow.python.platform import tf_logging
_COMPAT_CHECK_PREFIX = "Mixed precision compatibility check (mixed_float16): "
_COMPAT_CHECK_OK_PREFIX = _COMPAT_CHECK_PREFIX + "OK"
_COMPAT_CHECK_WARNING_PREFIX = _COMPAT_CHECK_PREFIX + "WARNING"
_COMPAT_CHECK_WARNING_SUFFIX = (
"If you will use compatible GPU(s) not attached to this host, e.g. by "
"running a multi-worker model, you can ignore this warning. This message "
"will only be logged once"
)
def _dedup_strings(device_strs):
"""Groups together consecutive identical strings.
For example, given:
['GPU 1', 'GPU 2', 'GPU 2', 'GPU 3', 'GPU 3', 'GPU 3']
This function returns:
['GPU 1', 'GPU 2 (x2)', 'GPU 3 (x3)']
Args:
device_strs: A list of strings, each representing a device.
Returns:
A copy of the input, but identical consecutive strings are merged into a
single string.
"""
new_device_strs = []
for device_str, vals in itertools.groupby(device_strs):
num = len(list(vals))
if num == 1:
new_device_strs.append(device_str)
else:
new_device_strs.append("%s (x%d)" % (device_str, num))
return new_device_strs
def _log_device_compatibility_check(policy_name, gpu_details_list):
"""Logs a compatibility check if the devices support the policy.
Currently only logs for the policy mixed_float16.
Args:
policy_name: The name of the dtype policy.
gpu_details_list: A list of dicts, one dict per GPU. Each dict
is the device details for a GPU, as returned by
`tf.config.experimental.get_device_details()`.
"""
if policy_name != "mixed_float16":
# TODO(b/145686977): Log if the policy is 'mixed_bfloat16'. This
# requires checking if a TPU is available.
return
supported_device_strs = []
unsupported_device_strs = []
for details in gpu_details_list:
name = details.get("device_name", "Unknown GPU")
cc = details.get("compute_capability")
if cc:
device_str = f"{name}, compute capability {cc[0]}.{cc[1]}"
if cc >= (7, 0):
supported_device_strs.append(device_str)
else:
unsupported_device_strs.append(device_str)
else:
unsupported_device_strs.append(
name + ", no compute capability (probably not an Nvidia GPU)"
)
if unsupported_device_strs:
warning_str = _COMPAT_CHECK_WARNING_PREFIX + "\n"
if supported_device_strs:
warning_str += (
"Some of your GPUs may run slowly with dtype policy "
"mixed_float16 because they do not all have compute "
"capability of at least 7.0. Your GPUs:\n"
)
elif len(unsupported_device_strs) == 1:
warning_str += (
"Your GPU may run slowly with dtype policy mixed_float16 "
"because it does not have compute capability of at least "
"7.0. Your GPU:\n"
)
else:
warning_str += (
"Your GPUs may run slowly with dtype policy "
"mixed_float16 because they do not have compute "
"capability of at least 7.0. Your GPUs:\n"
)
for device_str in _dedup_strings(
supported_device_strs + unsupported_device_strs
):
warning_str += " " + device_str + "\n"
warning_str += (
"See https://developer.nvidia.com/cuda-gpus for a list of "
"GPUs and their compute capabilities.\n"
)
warning_str += _COMPAT_CHECK_WARNING_SUFFIX
tf_logging.warning(warning_str)
elif not supported_device_strs:
tf_logging.warning(
"%s\n"
"The dtype policy mixed_float16 may run slowly because "
"this machine does not have a GPU. Only Nvidia GPUs with "
"compute capability of at least 7.0 run quickly with "
"mixed_float16.\n%s"
% (_COMPAT_CHECK_WARNING_PREFIX, _COMPAT_CHECK_WARNING_SUFFIX)
)
elif len(supported_device_strs) == 1:
tf_logging.info(
"%s\n"
"Your GPU will likely run quickly with dtype policy "
"mixed_float16 as it has compute capability of at least "
"7.0. Your GPU: %s"
% (_COMPAT_CHECK_OK_PREFIX, supported_device_strs[0])
)
else:
tf_logging.info(
"%s\n"
"Your GPUs will likely run quickly with dtype policy "
"mixed_float16 as they all have compute capability of at "
"least 7.0" % _COMPAT_CHECK_OK_PREFIX
)
_logged_compatibility_check = False
def log_device_compatibility_check(policy_name):
"""Logs a compatibility check if the devices support the policy.
Currently only logs for the policy mixed_float16. A log is shown only the
first time this function is called.
Args:
policy_name: The name of the dtype policy.
"""
global _logged_compatibility_check
if _logged_compatibility_check:
return
_logged_compatibility_check = True
gpus = tf.config.list_physical_devices("GPU")
gpu_details_list = [
tf.config.experimental.get_device_details(g) for g in gpus
]
_log_device_compatibility_check(policy_name, gpu_details_list)
| tf-keras/tf_keras/mixed_precision/device_compatibility_check.py/0 | {
"file_path": "tf-keras/tf_keras/mixed_precision/device_compatibility_check.py",
"repo_id": "tf-keras",
"token_count": 2598
} | 202 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers import optimizer
from tf_keras.saving.object_registration import register_keras_serializable
# isort: off
from tensorflow.python.util.tf_export import keras_export
@register_keras_serializable()
@keras_export(
"keras.optimizers.Adam",
"keras.optimizers.experimental.Adam",
"keras.dtensor.experimental.optimizers.Adam",
v1=[],
)
class Adam(optimizer.Optimizer):
r"""Optimizer that implements the Adam algorithm.
Adam optimization is a stochastic gradient descent method that is based on
adaptive estimation of first-order and second-order moments.
According to
[Kingma et al., 2014](http://arxiv.org/abs/1412.6980),
the method is "*computationally
efficient, has little memory requirement, invariant to diagonal rescaling of
gradients, and is well suited for problems that are large in terms of
data/parameters*".
Args:
learning_rate: A `tf.Tensor`, floating point value, a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 1st moment estimates.
Defaults to `0.9`.
beta_2: A float value or a constant float tensor, or a callable
that takes no arguments and returns the actual value to use. The
exponential decay rate for the 2nd moment estimates.
Defaults to `0.999`.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1) if `adaptive_epsilon` is `False`, not the epsilon in
Algorithm 1 of the paper. Defaults to `1e-7`.
adaptive_epsilon: If `True` the epsilon hat is computed adaptively
from the given epsilon (Algorithm 1 on the paper). If `False`, the
epsilon given will be "epsilon hat". Default to `False`.
amsgrad: Boolean. Whether to apply AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and beyond".
Defaults to `False`.
{{base_optimizer_keyword_args}}
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
- [Reddi et al., 2018](
https://openreview.net/pdf?id=ryQu7f-RZ) for `amsgrad`.
Notes:
The default value of 1e-7 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since Adam uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
adaptive_epsilon=False,
amsgrad=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
jit_compile=True,
name="Adam",
**kwargs
):
super().__init__(
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
jit_compile=jit_compile,
**kwargs
)
self._learning_rate = self._build_learning_rate(learning_rate)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.adaptive_epsilon = adaptive_epsilon
self.amsgrad = amsgrad
def build(self, var_list):
"""Initialize optimizer variables.
Adam optimizer has 3 types of variables: momentums, velocities and
velocity_hat (only set when amsgrad is applied),
Args:
var_list: list of model variables to build Adam variables on.
"""
super().build(var_list)
if hasattr(self, "_built") and self._built:
return
self._built = True
self._momentums = []
self._velocities = []
for var in var_list:
self._momentums.append(
self.add_variable_from_reference(
model_variable=var, variable_name="m"
)
)
self._velocities.append(
self.add_variable_from_reference(
model_variable=var, variable_name="v"
)
)
if self.amsgrad:
self._velocity_hats = []
for var in var_list:
self._velocity_hats.append(
self.add_variable_from_reference(
model_variable=var, variable_name="vhat"
)
)
def update_step(self, gradient, variable):
"""Update step given gradient and the associated model variable."""
lr = tf.cast(self.learning_rate, variable.dtype)
local_step = tf.cast(self.iterations + 1, variable.dtype)
beta_1_power = tf.pow(tf.cast(self.beta_1, variable.dtype), local_step)
beta_2_power = tf.pow(tf.cast(self.beta_2, variable.dtype), local_step)
var_key = self._var_key(variable)
m = self._momentums[self._index_dict[var_key]]
v = self._velocities[self._index_dict[var_key]]
alpha = lr * tf.sqrt(1 - beta_2_power) / (1 - beta_1_power)
if self.adaptive_epsilon:
epsilon_hat = self.epsilon * tf.sqrt(1 - beta_2_power)
else:
epsilon_hat = self.epsilon
if isinstance(gradient, tf.IndexedSlices):
# Sparse gradients.
m.assign_add(-m * (1 - self.beta_1))
m.scatter_add(
tf.IndexedSlices(
gradient.values * (1 - self.beta_1), gradient.indices
)
)
v.assign_add(-v * (1 - self.beta_2))
v.scatter_add(
tf.IndexedSlices(
tf.square(gradient.values) * (1 - self.beta_2),
gradient.indices,
)
)
if self.amsgrad:
v_hat = self._velocity_hats[self._index_dict[var_key]]
v_hat.assign(tf.maximum(v_hat, v))
v = v_hat
variable.assign_sub((m * alpha) / (tf.sqrt(v) + epsilon_hat))
else:
# Dense gradients.
m.assign_add((gradient - m) * (1 - self.beta_1))
v.assign_add((tf.square(gradient) - v) * (1 - self.beta_2))
if self.amsgrad:
v_hat = self._velocity_hats[self._index_dict[var_key]]
v_hat.assign(tf.maximum(v_hat, v))
v = v_hat
variable.assign_sub((m * alpha) / (tf.sqrt(v) + epsilon_hat))
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
self._learning_rate
),
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
)
return config
Adam.__doc__ = Adam.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
| tf-keras/tf_keras/optimizers/adam.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/adam.py",
"repo_id": "tf-keras",
"token_count": 4083
} | 203 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SGD optimizer implementation."""
import tensorflow.compat.v2 as tf
from tf_keras.optimizers.legacy import optimizer_v2
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export(
"keras.optimizers.legacy.SGD",
v1=["keras.optimizers.SGD", "keras.optimizers.legacy.SGD"],
)
class SGD(optimizer_v2.OptimizerV2):
r"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum=0`:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A `Tensor`, floating point value, or a schedule that is a
`tf.keras.optimizers.schedules.LearningRateSchedule`, or a callable
that takes no arguments and returns the actual value to use. The
learning rate. Defaults to `0.01`.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. Vanilla gradient
descent means no momentum. Defaults to `0.`.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
name: Optional name prefix for the operations created when applying
gradients. Defaults to `"SGD"`.
**kwargs: keyword arguments. Allowed arguments are `clipvalue`,
`clipnorm`, `global_clipnorm`.
If `clipvalue` (float) is set, the gradient of each weight
is clipped to be no higher than this value.
If `clipnorm` (float) is set, the gradient of each weight
is individually clipped so that its norm is no higher than this value.
If `global_clipnorm` (float) is set the gradient of all weights is
clipped so that their global norm is no higher than this value.
Usage:
>>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)
>>> var = tf.Variable(1.0)
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> # Step is `- learning_rate * grad`
>>> var.numpy()
0.9
>>> opt = tf.keras.optimizers.legacy.SGD(learning_rate=0.1, momentum=0.9)
>>> var = tf.Variable(1.0)
>>> val0 = var.value()
>>> loss = lambda: (var ** 2)/2.0 # d(loss)/d(var1) = var1
>>> # First step is `- learning_rate * grad`
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> val1 = var.value()
>>> (val0 - val1).numpy()
0.1
>>> # On later steps, step-size increases because of momentum
>>> step_count = opt.minimize(loss, [var]).numpy()
>>> val2 = var.value()
>>> (val1 - val2).numpy()
0.18
Reference:
- For `nesterov=True`, See [Sutskever et al., 2013](
https://github.com/mlresearch/v28/blob/gh-pages/sutskever13.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
name="SGD",
**kwargs,
):
super().__init__(name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("decay", self._initial_decay)
self._momentum = False
if (
isinstance(momentum, tf.Tensor)
or callable(momentum)
or momentum > 0
):
self._momentum = True
if isinstance(momentum, (int, float)) and (
momentum < 0 or momentum > 1
):
raise ValueError(
"`momentum` must be between [0, 1]. Received: "
f"momentum={momentum} (of type {type(momentum)})."
)
self._set_hyper("momentum", momentum)
self.nesterov = nesterov
def _create_slots(self, var_list):
if self._momentum:
for var in var_list:
self.add_slot(var, "momentum")
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)]["momentum"] = tf.identity(
self._get_hyper("momentum", var_dtype)
)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
if self._momentum:
momentum_var = self.get_slot(var, "momentum")
return tf.raw_ops.ResourceApplyKerasMomentum(
var=var.handle,
accum=momentum_var.handle,
lr=coefficients["lr_t"],
grad=grad,
momentum=coefficients["momentum"],
use_locking=self._use_locking,
use_nesterov=self.nesterov,
)
else:
return tf.raw_ops.ResourceApplyGradientDescent(
var=var.handle,
alpha=coefficients["lr_t"],
delta=grad,
use_locking=self._use_locking,
)
def _resource_apply_sparse_duplicate_indices(
self, grad, var, indices, **kwargs
):
if self._momentum:
return super()._resource_apply_sparse_duplicate_indices(
grad, var, indices, **kwargs
)
else:
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = kwargs.get("apply_state", {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
return tf.raw_ops.ResourceScatterAdd(
resource=var.handle,
indices=indices,
updates=-grad * coefficients["lr_t"],
)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
# This method is only needed for momentum optimization.
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
momentum_var = self.get_slot(var, "momentum")
return tf.raw_ops.ResourceSparseApplyKerasMomentum(
var=var.handle,
accum=momentum_var.handle,
lr=coefficients["lr_t"],
grad=grad,
indices=indices,
momentum=coefficients["momentum"],
use_locking=self._use_locking,
use_nesterov=self.nesterov,
)
def get_config(self):
config = super().get_config()
config.update(
{
"learning_rate": self._serialize_hyperparameter(
"learning_rate"
),
"decay": self._initial_decay,
"momentum": self._serialize_hyperparameter("momentum"),
"nesterov": self.nesterov,
}
)
return config
| tf-keras/tf_keras/optimizers/legacy/gradient_descent.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/legacy/gradient_descent.py",
"repo_id": "tf-keras",
"token_count": 3542
} | 204 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Legacy v1 optimizer classes.
For more examples see the base class `tf.compat.v1.keras.optimizers.Optimizer`.
"""
import tensorflow.compat.v2 as tf
from tf_keras import backend
class Optimizer:
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All TF-Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {"clipnorm", "clipvalue"}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError(
"Unexpected keyword argument passed to optimizer: " + str(k)
)
# checks that clipnorm >= 0 and clipvalue >= 0
if kwargs[k] < 0:
raise ValueError(f"Expected {k} >= 0, received: {kwargs[k]}")
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
# Set this to False, indicating `apply_gradients` does not take the
# `experimental_aggregate_gradients` argument.
_HAS_AGGREGATE_GRAD = False
def _create_all_weights(self, params):
"""Creates and sets all optimizer weights.
Args:
params: list or tuple of `Variable` objects that will be minimized
using this optimizer.
Returns:
Specific weight values that are used in `get_updates`
"""
raise NotImplementedError
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
"""Returns gradients of `loss` with respect to `params`.
Args:
loss: Loss tensor.
params: List of variables.
Returns:
List of gradient tensors.
Raises:
ValueError: In case any gradient cannot be computed (e.g. if
gradient function not implemented).
"""
grads = backend.gradients(loss, params)
if any(g is None for g in grads):
raise ValueError(
"An operation has `None` for gradient. "
"Please make sure that all of your ops have a "
"gradient defined (i.e. are differentiable). "
"Common ops without gradient: "
"backend.argmax, backend.round, backend.eval."
)
if hasattr(self, "clipnorm"):
grads = [tf.clip_by_norm(g, self.clipnorm) for g in grads]
if hasattr(self, "clipvalue"):
grads = [
tf.clip_by_value(g, -self.clipvalue, self.clipvalue)
for g in grads
]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
Args:
weights: a list of Numpy arrays. The number of arrays and their
shape must match number of the dimensions of the weights of the
optimizer (i.e. it should match the output of `get_weights`).
Raises:
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
if len(params) != len(weights):
raise ValueError(
"Length of the specified weight list ("
+ str(len(weights))
+ ") does not match the number of weights of the optimizer ("
+ str(len(params))
+ ")"
)
weight_value_tuples = []
param_values = backend.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError(
"Optimizer weight shape "
+ str(pv.shape)
+ " not compatible with provided weight shape "
+ str(w.shape)
)
weight_value_tuples.append((p, w))
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
Returns:
A list of numpy arrays.
"""
return backend.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, "clipnorm"):
config["clipnorm"] = self.clipnorm
if hasattr(self, "clipvalue"):
config["clipvalue"] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
Args:
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD in the relevant
direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(
self, lr=0.01, momentum=0.0, decay=0.0, nesterov=False, **kwargs
):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
self.lr = backend.variable(lr, name="lr")
self.momentum = backend.variable(momentum, name="momentum")
self.decay = backend.variable(decay, name="decay")
self.initial_decay = decay
self.nesterov = nesterov
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
moments = [backend.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
return moments
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
# momentum
moments = self._create_all_weights(params)
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(tf.compat.v1.assign(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"momentum": float(backend.get_value(self.momentum)),
"decay": float(backend.get_value(self.decay)),
"nesterov": self.nesterov,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
Args:
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0.0, **kwargs):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.lr = backend.variable(lr, name="lr")
self.rho = backend.variable(rho, name="rho")
self.decay = backend.variable(decay, name="decay")
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
accumulators = [
backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))
for p in params
]
self.weights = accumulators
return accumulators
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = self._create_all_weights(params)
self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1.0 - self.rho) * tf.square(g)
self.updates.append(tf.compat.v1.assign(a, new_a))
new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"rho": float(backend.get_value(self.rho)),
"decay": float(backend.get_value(self.decay)),
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
"""Adagrad optimizer.
Adagrad is an optimizer with parameter-specific learning rates,
which are adapted relative to how frequently a parameter gets
updated during training. The more updates a parameter receives,
the smaller the updates.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Initial learning rate.
epsilon: float >= 0. If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.01, epsilon=None, decay=0.0, **kwargs):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.lr = backend.variable(lr, name="lr")
self.decay = backend.variable(decay, name="decay")
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
accumulators = [backend.zeros(shape) for shape in shapes]
self.weights = accumulators
return accumulators
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = self._create_all_weights(params)
self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
for p, g, a in zip(params, grads, accumulators):
new_a = a + tf.square(g) # update accumulator
self.updates.append(tf.compat.v1.assign(a, new_a))
new_p = p - lr * g / (backend.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"decay": float(backend.get_value(self.decay)),
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
"""Adadelta optimizer.
Adadelta is a more robust extension of Adagrad
that adapts learning rates based on a moving window of gradient updates,
instead of accumulating all past gradients. This way, Adadelta continues
learning even when many updates have been done. Compared to Adagrad, in the
original version of Adadelta you don't have to set an initial learning
rate. In this version, initial learning rate and decay factor can
be set, as in most other TF-Keras optimizers.
It is recommended to leave the parameters of this optimizer
at their default values.
Arguments:
lr: float >= 0. Initial learning rate, defaults to 1.
It is recommended to leave it at the default value.
rho: float >= 0. Adadelta decay factor, corresponding to fraction of
gradient to keep at each time step.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Initial learning rate decay.
References:
- [Adadelta - an adaptive learning rate
method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0.0, **kwargs):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.lr = backend.variable(lr, name="lr")
self.decay = backend.variable(decay, name="decay")
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
if epsilon is None:
epsilon = backend.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
accumulators = [backend.zeros(shape) for shape in shapes]
delta_accumulators = [backend.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
return accumulators, delta_accumulators
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]
accumulators, delta_accumulators = self._create_all_weights(params)
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
for p, g, a, d_a in zip(
params, grads, accumulators, delta_accumulators
):
# update accumulator
new_a = self.rho * a + (1.0 - self.rho) * tf.square(g)
self.updates.append(tf.compat.v1.assign(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = (
g
* backend.sqrt(d_a + self.epsilon)
/ backend.sqrt(new_a + self.epsilon)
)
new_p = p - lr * update
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * tf.square(update)
self.updates.append(tf.compat.v1.assign(d_a, new_d_a))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"rho": self.rho,
"decay": float(backend.get_value(self.decay)),
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
Args:
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm
from the paper "On the Convergence of Adam and Beyond".
"""
def __init__(
self,
lr=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.0,
amsgrad=False,
**kwargs,
):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
self.lr = backend.variable(lr, name="lr")
self.beta_1 = backend.variable(beta_1, name="beta_1")
self.beta_2 = backend.variable(beta_2, name="beta_2")
self.decay = backend.variable(decay, name="decay")
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
def _create_all_weights(self, params):
ms = [
backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))
for p in params
]
vs = [
backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))
for p in params
]
if self.amsgrad:
vhats = [
backend.zeros(backend.int_shape(p), dtype=backend.dtype(p))
for p in params
]
else:
vhats = [backend.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
return ms, vs, vhats
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
with tf.control_dependencies(
[tf.compat.v1.assign_add(self.iterations, 1)]
):
t = tf.cast(self.iterations, backend.floatx())
lr_t = lr * (
backend.sqrt(1.0 - tf.pow(self.beta_2, t))
/ (1.0 - tf.pow(self.beta_1, t))
)
ms, vs, vhats = self._create_all_weights(params)
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1.0 - self.beta_1) * g
v_t = (self.beta_2 * v) + (1.0 - self.beta_2) * tf.square(g)
if self.amsgrad:
vhat_t = tf.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (backend.sqrt(vhat_t) + self.epsilon)
self.updates.append(tf.compat.v1.assign(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (backend.sqrt(v_t) + self.epsilon)
self.updates.append(tf.compat.v1.assign(m, m_t))
self.updates.append(tf.compat.v1.assign(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"beta_1": float(backend.get_value(self.beta_1)),
"beta_2": float(backend.get_value(self.beta_2)),
"decay": float(backend.get_value(self.decay)),
"epsilon": self.epsilon,
"amsgrad": self.amsgrad,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
Args:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
"""
def __init__(
self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.0,
**kwargs,
):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
self.lr = backend.variable(lr, name="lr")
self.beta_1 = backend.variable(beta_1, name="beta_1")
self.beta_2 = backend.variable(beta_2, name="beta_2")
self.decay = backend.variable(decay, name="decay")
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
# zero init of 1st moment
ms = [backend.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [backend.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
return ms, us
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr = lr * (
1.0
/ (
1.0
+ self.decay
* tf.cast(self.iterations, backend.dtype(self.decay))
)
)
with tf.control_dependencies(
[tf.compat.v1.assign_add(self.iterations, 1)]
):
t = tf.cast(self.iterations, backend.floatx())
lr_t = lr / (1.0 - tf.pow(self.beta_1, t))
ms, us = self._create_all_weights(params)
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1.0 - self.beta_1) * g
u_t = tf.maximum(self.beta_2 * u, tf.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(tf.compat.v1.assign(m, m_t))
self.updates.append(tf.compat.v1.assign(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"beta_1": float(backend.get_value(self.beta_1)),
"beta_2": float(backend.get_value(self.beta_2)),
"decay": float(backend.get_value(self.decay)),
"epsilon": self.epsilon,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
Nadam is Adam RMSprop with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
Args:
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor.
If `None`, defaults to `backend.epsilon()`.
"""
def __init__(
self,
lr=0.002,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
schedule_decay=0.004,
**kwargs,
):
super().__init__(**kwargs)
with backend.name_scope(self.__class__.__name__):
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
self.m_schedule = backend.variable(1.0, name="m_schedule")
self.lr = backend.variable(lr, name="lr")
self.beta_1 = backend.variable(beta_1, name="beta_1")
self.beta_2 = backend.variable(beta_2, name="beta_2")
if epsilon is None:
epsilon = backend.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
def _create_all_weights(self, params):
shapes = [backend.int_shape(p) for p in params]
ms = [backend.zeros(shape) for shape in shapes]
vs = [backend.zeros(shape) for shape in shapes]
self.weights = [self.iterations, self.m_schedule] + ms + vs
return ms, vs
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = []
with tf.control_dependencies(
[tf.compat.v1.assign_add(self.iterations, 1)]
):
t = tf.cast(self.iterations, backend.floatx())
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1.0
- 0.5
* (tf.pow(backend.cast_to_floatx(0.96), t * self.schedule_decay))
)
momentum_cache_t_1 = self.beta_1 * (
1.0
- 0.5
* (
tf.pow(
backend.cast_to_floatx(0.96), (t + 1) * self.schedule_decay
)
)
)
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = (
self.m_schedule * momentum_cache_t * momentum_cache_t_1
)
self.updates.append((self.m_schedule, m_schedule_new))
ms, vs = self._create_all_weights(params)
for p, g, m, v in zip(params, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1.0 - m_schedule_new)
m_t = self.beta_1 * m + (1.0 - self.beta_1) * g
m_t_prime = m_t / (1.0 - m_schedule_next)
v_t = self.beta_2 * v + (1.0 - self.beta_2) * tf.square(g)
v_t_prime = v_t / (1.0 - tf.pow(self.beta_2, t))
m_t_bar = (
1.0 - momentum_cache_t
) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(tf.compat.v1.assign(m, m_t))
self.updates.append(tf.compat.v1.assign(v, v_t))
p_t = p - self.lr * m_t_bar / (
backend.sqrt(v_t_prime) + self.epsilon
)
new_p = p_t
# Apply constraints.
if getattr(p, "constraint", None) is not None:
new_p = p.constraint(new_p)
self.updates.append(tf.compat.v1.assign(p, new_p))
return self.updates
def get_config(self):
config = {
"lr": float(backend.get_value(self.lr)),
"beta_1": float(backend.get_value(self.beta_1)),
"beta_2": float(backend.get_value(self.beta_2)),
"epsilon": self.epsilon,
"schedule_decay": self.schedule_decay,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, tf.__internal__.tracking.Trackable):
"""Wrapper class for native TensorFlow optimizers."""
def __init__(self, optimizer, iterations=None):
self.optimizer = optimizer
self._track_trackable(optimizer, name="optimizer")
if iterations is None:
with backend.name_scope(self.__class__.__name__):
self.iterations = backend.variable(
0, dtype="int64", name="iterations"
)
else:
self.iterations = iterations
self._track_trackable(self.iterations, name="global_step")
def _clip_gradients(self, grads):
"""Clip gradients according to the clipnorm and clipvalue attributes."""
# TFOptimizer wrapper has no gradient clipping options.
return grads
def minimize(self, loss, var_list, grad_loss=None, tape=None):
"""Mimics the `OptimizerV2.minimize` API."""
if not callable(loss) and tape is None:
raise ValueError(
"`tape` is required when a `Tensor` loss is passed."
)
tape = tape if tape is not None else tf.GradientTape()
if callable(loss):
with tape:
if not callable(var_list):
tape.watch(var_list)
loss = loss()
if callable(var_list):
var_list = var_list()
var_list = tf.nest.flatten(var_list)
if var_list:
grads = tape.gradient(loss, var_list, grad_loss)
grads_and_vars = list(zip(grads, var_list))
self.apply_gradients(grads_and_vars)
def apply_gradients(self, grads_and_vars):
self.optimizer.apply_gradients(
grads_and_vars, global_step=self.iterations
)
def get_grads(self, loss, params):
return self.optimizer.compute_gradients(loss, params)
def get_updates(self, loss, params):
if tf.distribute.has_strategy():
self.updates = []
if not params:
# After the model vars have been created, the second call to
# get_updates is called with params as an empty list. This
# ensures that we call compute_gradients with params=None.
grads = self.optimizer.compute_gradients(loss)
else:
grads = self.optimizer.compute_gradients(loss, params)
global_step = tf.compat.v1.train.get_global_step()
opt_update = self.optimizer.apply_gradients(grads, global_step)
else:
if not params:
self.updates = [tf.compat.v1.assign_add(self.iterations, 1)]
return self.updates
# Updates list starts out empty because the iterations variable is
# incremented in optimizer.apply_gradients()
self.updates = []
grads = self.optimizer.compute_gradients(loss, params)
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations
)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
| tf-keras/tf_keras/optimizers/optimizer_v1.py/0 | {
"file_path": "tf-keras/tf_keras/optimizers/optimizer_v1.py",
"repo_id": "tf-keras",
"token_count": 15840
} | 205 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to preprocess data before training.
Deprecated: `tf.keras.preprocessing` APIs do not operate on tensors and are
not recommended for new code. Prefer loading data with either
`tf.keras.utils.text_dataset_from_directory` or
`tf.keras.utils.image_dataset_from_directory`, and then transforming the output
`tf.data.Dataset` with preprocessing layers. These approaches will offer
better performance and intergration with the broader Tensorflow ecosystem. For
more information, see the tutorials for [loading text](
https://www.tensorflow.org/tutorials/load_data/text), [loading images](
https://www.tensorflow.org/tutorials/load_data/images), and [augmenting images](
https://www.tensorflow.org/tutorials/images/data_augmentation), as well as the
[preprocessing layer guide](
https://www.tensorflow.org/guide/keras/preprocessing_layers).
"""
from tf_keras import backend
from tf_keras.preprocessing import image
from tf_keras.preprocessing import sequence
from tf_keras.preprocessing import text
| tf-keras/tf_keras/preprocessing/__init__.py/0 | {
"file_path": "tf-keras/tf_keras/preprocessing/__init__.py",
"repo_id": "tf-keras",
"token_count": 451
} | 206 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for saving and loading a TF-Keras Model from HDF5 format."""
import json
import os
import numpy as np
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.optimizers import optimizer as optimizer_base
from tf_keras.optimizers import optimizer_v1
from tf_keras.saving import object_registration
from tf_keras.saving.legacy import model_config as model_config_lib
from tf_keras.saving.legacy import saving_utils
from tf_keras.saving.legacy.saved_model import json_utils
from tf_keras.utils.generic_utils import LazyLoader
from tf_keras.utils.io_utils import ask_to_proceed_with_overwrite
# isort: off
from tensorflow.python.platform import tf_logging as logging
try:
import h5py
HDF5_OBJECT_HEADER_LIMIT = 64512
except ImportError:
h5py = None
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
sequential_lib = LazyLoader(
"sequential_lib", globals(), "tf_keras.engine.sequential"
)
def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
"""Saves a model to a HDF5 file.
The saved model contains:
- the model's configuration (topology)
- the model's weights
- the model's optimizer's state (if any)
Thus the saved model can be reinstantiated in
the exact same state, without any of the code
used for model definition or training.
Args:
model: TF-Keras model instance to be saved.
filepath: One of the following:
- String, path where to save the model
- `h5py.File` object where to save the model
overwrite: Whether we should overwrite any existing
model at the target location, or instead
ask the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
Raises:
ImportError: if h5py is not available.
"""
if h5py is None:
raise ImportError(
"`save_model()` using h5 format requires h5py. Could not "
"import h5py."
)
# Ensures that all models saved in HDF5 format follow the old serialization
model.use_legacy_config = True
# TODO(psv) Add warning when we save models that contain non-serializable
# entities like metrics added using `add_metric` and losses added using
# `add_loss.`
if len(model.weights) != len(model._undeduplicated_weights):
logging.warning(
"Found duplicated `Variable`s in Model's `weights`. "
"This is usually caused by `Variable`s being shared by "
"Layers in the Model. These `Variable`s will be treated "
"as separate `Variable`s when the Model is restored. To "
'avoid this, please save with `save_format="tf"`.'
)
if not isinstance(filepath, h5py.File):
# If file exists and should not be overwritten.
if not overwrite and os.path.isfile(filepath):
proceed = ask_to_proceed_with_overwrite(filepath)
if not proceed:
return
# Try creating dir if not exist
dirpath = os.path.dirname(filepath)
if not os.path.exists(dirpath):
tf.io.gfile.makedirs(dirpath)
f = h5py.File(filepath, mode="w")
opened_new_file = True
else:
f = filepath
opened_new_file = False
try:
model_metadata = saving_utils.model_metadata(model, include_optimizer)
for k, v in model_metadata.items():
if isinstance(v, (dict, list, tuple)):
f.attrs[k] = json.dumps(
v, default=json_utils.get_json_type
).encode("utf8")
else:
f.attrs[k] = v
model_weights_group = f.create_group("model_weights")
save_weights_to_hdf5_group(model_weights_group, model)
# TODO(b/128683857): Add integration tests between tf.keras and external
# Keras, to avoid breaking TF.js users.
if (
include_optimizer
and model.optimizer
and not isinstance(model.optimizer, optimizer_v1.TFOptimizer)
):
save_optimizer_weights_to_hdf5_group(f, model.optimizer)
f.flush()
finally:
if opened_new_file:
f.close()
# Remove legacy serialization attribute after H5 saving complete
delattr(model, "use_legacy_config")
def load_model_from_hdf5(filepath, custom_objects=None, compile=True):
"""Loads a model saved via `save_model_to_hdf5`.
Args:
filepath: One of the following:
- String, path to the saved model
- `h5py.File` object from which to load the model
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
compile: Boolean, whether to compile the model
after loading.
Returns:
A TF-Keras model instance. If an optimizer was found
as part of the saved model, the model is already
compiled. Otherwise, the model is uncompiled and
a warning will be displayed. When `compile` is set
to False, the compilation is omitted without any
warning.
Raises:
ImportError: if h5py is not available.
ValueError: In case of an invalid savefile.
"""
if h5py is None:
raise ImportError(
"`load_model()` using h5 format requires h5py. Could not "
"import h5py."
)
if not custom_objects:
custom_objects = {}
tlco = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
gco = object_registration._GLOBAL_CUSTOM_OBJECTS
custom_objects = {**custom_objects, **tlco, **gco}
opened_new_file = not isinstance(filepath, h5py.File)
if opened_new_file:
f = h5py.File(filepath, mode="r")
else:
f = filepath
model = None
try:
# instantiate model
model_config = f.attrs.get("model_config")
if model_config is None:
raise ValueError(
f"No model config found in the file at {filepath}."
)
if hasattr(model_config, "decode"):
model_config = model_config.decode("utf-8")
model_config = json_utils.decode(model_config)
model = model_config_lib.model_from_config(
model_config, custom_objects=custom_objects
)
# set weights
load_weights_from_hdf5_group(f["model_weights"], model)
if compile:
# instantiate optimizer
training_config = f.attrs.get("training_config")
if hasattr(training_config, "decode"):
training_config = training_config.decode("utf-8")
if training_config is None:
logging.warning(
"No training configuration found in the save file, so "
"the model was *not* compiled. Compile it manually."
)
return model
training_config = json_utils.decode(training_config)
# Compile model.
model.compile(
**saving_utils.compile_args_from_training_config(
training_config, custom_objects
),
from_serialized=True,
)
saving_utils.try_build_compiled_arguments(model)
# Set optimizer weights.
if "optimizer_weights" in f:
try:
if isinstance(model.optimizer, optimizer_base.Optimizer):
model.optimizer.build(model.trainable_variables)
else:
model.optimizer._create_all_weights(
model.trainable_variables
)
except (NotImplementedError, AttributeError):
logging.warning(
"Error when creating the weights of optimizer {}, "
"making it impossible to restore the saved optimizer "
"state. As a result, your model is starting with "
"a freshly initialized optimizer."
)
optimizer_weight_values = (
load_optimizer_weights_from_hdf5_group(f)
)
try:
model.optimizer.set_weights(optimizer_weight_values)
except ValueError:
logging.warning(
"Error in loading the saved optimizer "
"state. As a result, your model is "
"starting with a freshly initialized "
"optimizer."
)
finally:
if opened_new_file:
f.close()
return model
def preprocess_weights_for_loading(
layer, weights, original_keras_version=None, original_backend=None
):
"""Preprocess layer weights between different TF-Keras formats.
Converts layers weights from TF-Keras 1 format to TF-Keras 2 and also
weights of cuDNN layers in TF-Keras 2.
Args:
layer: Layer instance.
weights: List of weights values (Numpy arrays).
original_keras_version: TF-Keras version for the weights, as a string.
original_backend: TF-Keras backend the weights were trained with,
as a string.
Returns:
A list of weights values (Numpy arrays).
"""
def convert_nested_bidirectional(weights):
"""Converts layers nested in `Bidirectional` wrapper.
This function uses `preprocess_weights_for_loading()` for converting
layers.
Args:
weights: List of weights values (Numpy arrays).
Returns:
A list of weights values (Numpy arrays).
"""
num_weights_per_layer = len(weights) // 2
forward_weights = preprocess_weights_for_loading(
layer.forward_layer,
weights[:num_weights_per_layer],
original_keras_version,
original_backend,
)
backward_weights = preprocess_weights_for_loading(
layer.backward_layer,
weights[num_weights_per_layer:],
original_keras_version,
original_backend,
)
return forward_weights + backward_weights
def convert_nested_time_distributed(weights):
"""Converts layers nested in `TimeDistributed` wrapper.
This function uses `preprocess_weights_for_loading()` for converting
nested layers.
Args:
weights: List of weights values (Numpy arrays).
Returns:
A list of weights values (Numpy arrays).
"""
return preprocess_weights_for_loading(
layer.layer, weights, original_keras_version, original_backend
)
def convert_nested_model(weights):
"""Converts layers nested in `Model` or `Sequential`.
This function uses `preprocess_weights_for_loading()` for converting
nested layers.
Args:
weights: List of weights values (Numpy arrays).
Returns:
A list of weights values (Numpy arrays).
"""
trainable_weights = weights[: len(layer.trainable_weights)]
non_trainable_weights = weights[len(layer.trainable_weights) :]
new_trainable_weights = []
new_non_trainable_weights = []
for sublayer in layer.layers:
num_trainable_weights = len(sublayer.trainable_weights)
num_non_trainable_weights = len(sublayer.non_trainable_weights)
if sublayer.weights:
preprocessed = preprocess_weights_for_loading(
layer=sublayer,
weights=(
trainable_weights[:num_trainable_weights]
+ non_trainable_weights[:num_non_trainable_weights]
),
original_keras_version=original_keras_version,
original_backend=original_backend,
)
new_trainable_weights.extend(
preprocessed[:num_trainable_weights]
)
new_non_trainable_weights.extend(
preprocessed[num_trainable_weights:]
)
trainable_weights = trainable_weights[num_trainable_weights:]
non_trainable_weights = non_trainable_weights[
num_non_trainable_weights:
]
new_trainable_weights += layer._trainable_weights
new_non_trainable_weights += layer._non_trainable_weights
return new_trainable_weights + new_non_trainable_weights
# Convert layers nested in Bidirectional/Model/Sequential.
# Both transformation should be ran for both TF-Keras 1->2 conversion
# and for conversion of cuDNN layers.
if layer.__class__.__name__ == "Bidirectional":
weights = convert_nested_bidirectional(weights)
if layer.__class__.__name__ == "TimeDistributed":
weights = convert_nested_time_distributed(weights)
elif layer.__class__.__name__ in ["Model", "Sequential", "Functional"]:
weights = convert_nested_model(weights)
if original_keras_version == "1":
if layer.__class__.__name__ == "TimeDistributed":
weights = preprocess_weights_for_loading(
layer.layer, weights, original_keras_version, original_backend
)
if layer.__class__.__name__ == "Conv1D":
shape = weights[0].shape
# Handle TF-Keras 1.1 format
if (
shape[:2] != (layer.kernel_size[0], 1)
or shape[3] != layer.filters
):
# Legacy shape:
# (filters, input_dim, filter_length, 1)
assert shape[0] == layer.filters and shape[2:] == (
layer.kernel_size[0],
1,
)
weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
weights[0] = weights[0][:, 0, :, :]
if layer.__class__.__name__ == "Conv2D":
if layer.data_format == "channels_first":
# old: (filters, stack_size, kernel_rows, kernel_cols)
# new: (kernel_rows, kernel_cols, stack_size, filters)
weights[0] = np.transpose(weights[0], (2, 3, 1, 0))
if layer.__class__.__name__ == "Conv2DTranspose":
if layer.data_format == "channels_last":
# old: (kernel_rows, kernel_cols, stack_size, filters)
# new: (kernel_rows, kernel_cols, filters, stack_size)
weights[0] = np.transpose(weights[0], (0, 1, 3, 2))
if layer.data_format == "channels_first":
# old: (filters, stack_size, kernel_rows, kernel_cols)
# new: (kernel_rows, kernel_cols, filters, stack_size)
weights[0] = np.transpose(weights[0], (2, 3, 0, 1))
if layer.__class__.__name__ == "Conv3D":
if layer.data_format == "channels_first":
# old: (filters, stack_size, ...)
# new: (..., stack_size, filters)
weights[0] = np.transpose(weights[0], (2, 3, 4, 1, 0))
if layer.__class__.__name__ == "GRU":
if len(weights) == 9:
kernel = np.concatenate(
[weights[0], weights[3], weights[6]], axis=-1
)
recurrent_kernel = np.concatenate(
[weights[1], weights[4], weights[7]], axis=-1
)
bias = np.concatenate(
[weights[2], weights[5], weights[8]], axis=-1
)
weights = [kernel, recurrent_kernel, bias]
if layer.__class__.__name__ == "LSTM":
if len(weights) == 12:
# old: i, c, f, o
# new: i, f, c, o
kernel = np.concatenate(
[weights[0], weights[6], weights[3], weights[9]], axis=-1
)
recurrent_kernel = np.concatenate(
[weights[1], weights[7], weights[4], weights[10]], axis=-1
)
bias = np.concatenate(
[weights[2], weights[8], weights[5], weights[11]], axis=-1
)
weights = [kernel, recurrent_kernel, bias]
if layer.__class__.__name__ == "ConvLSTM2D":
if len(weights) == 12:
kernel = np.concatenate(
[weights[0], weights[6], weights[3], weights[9]], axis=-1
)
recurrent_kernel = np.concatenate(
[weights[1], weights[7], weights[4], weights[10]], axis=-1
)
bias = np.concatenate(
[weights[2], weights[8], weights[5], weights[11]], axis=-1
)
if layer.data_format == "channels_first":
# old: (filters, stack_size, kernel_rows, kernel_cols)
# new: (kernel_rows, kernel_cols, stack_size, filters)
kernel = np.transpose(kernel, (2, 3, 1, 0))
recurrent_kernel = np.transpose(
recurrent_kernel, (2, 3, 1, 0)
)
weights = [kernel, recurrent_kernel, bias]
conv_layers = [
"Conv1D",
"Conv2D",
"Conv3D",
"Conv2DTranspose",
"ConvLSTM2D",
]
if layer.__class__.__name__ in conv_layers:
if backend.int_shape(layer.weights[0]) != weights[0].shape:
weights[0] = np.transpose(weights[0], (3, 2, 0, 1))
if layer.__class__.__name__ == "ConvLSTM2D":
weights[1] = np.transpose(weights[1], (3, 2, 0, 1))
# convert cuDNN layers
return _convert_rnn_weights(layer, weights)
def _convert_rnn_weights(layer, weights):
"""Converts weights for RNN layers between native and cuDNN format.
Input kernels for each gate are transposed and converted between Fortran
and C layout, recurrent kernels are transposed. For LSTM biases are summed/
split in half, for GRU biases are reshaped.
Weights can be converted in both directions between `LSTM` and`CuDNNSLTM`
and between `CuDNNGRU` and `GRU(reset_after=True)`. Default `GRU` is not
compatible with `CuDNNGRU`.
For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made.
Args:
layer: Target layer instance.
weights: List of source weights values (input kernels, recurrent
kernels, [biases]) (Numpy arrays).
Returns:
A list of converted weights values (Numpy arrays).
Raises:
ValueError: for incompatible GRU layer/weights or incompatible biases
"""
def transform_kernels(kernels, func, n_gates):
"""Transforms kernel for each gate separately using given function.
Args:
kernels: Stacked array of kernels for individual gates.
func: Function applied to kernel of each gate.
n_gates: Number of gates (4 for LSTM, 3 for GRU).
Returns:
Stacked array of transformed kernels.
"""
return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)])
def transpose_input(from_cudnn):
"""Makes a function that transforms input kernels from/to cuDNN format.
It keeps the shape, but changes between the layout (Fortran/C). Eg.:
```
TF-Keras cuDNN
[[0, 1, 2], <---> [[0, 2, 4],
[3, 4, 5]] [1, 3, 5]]
```
It can be passed to `transform_kernels()`.
Args:
from_cudnn: `True` if source weights are in cuDNN format, `False` if
they're in plain TF-Keras format.
Returns:
Function that converts input kernel to the other format.
"""
order = "F" if from_cudnn else "C"
def transform(kernel):
return kernel.T.reshape(kernel.shape, order=order)
return transform
target_class = layer.__class__.__name__
# convert the weights between CuDNNLSTM and LSTM
if target_class in ["LSTM", "CuDNNLSTM"] and len(weights) == 3:
# determine if we're loading a CuDNNLSTM layer
# from the number of bias weights:
# CuDNNLSTM has (units * 8) weights; while LSTM has (units * 4)
# if there's no bias weight in the file, skip this conversion
units = weights[1].shape[0]
bias_shape = weights[2].shape
n_gates = 4
if bias_shape == (2 * units * n_gates,):
source = "CuDNNLSTM"
elif bias_shape == (units * n_gates,):
source = "LSTM"
else:
raise ValueError("Invalid bias shape: " + str(bias_shape))
def convert_lstm_weights(weights, from_cudnn=True):
"""Converts the weights between CuDNNLSTM and LSTM.
Args:
weights: Original weights.
from_cudnn: Indicates whether original weights are from cuDNN
layer.
Returns:
Updated weights compatible with LSTM.
"""
# Transpose (and reshape) input and recurrent kernels
kernels = transform_kernels(
weights[0], transpose_input(from_cudnn), n_gates
)
recurrent_kernels = transform_kernels(
weights[1], lambda k: k.T, n_gates
)
if from_cudnn:
# merge input and recurrent biases into a single set
biases = np.sum(np.split(weights[2], 2, axis=0), axis=0)
else:
# Split single set of biases evenly to two sets. The way of
# splitting doesn't matter as long as the two sets sum is kept.
biases = np.tile(0.5 * weights[2], 2)
return [kernels, recurrent_kernels, biases]
if source != target_class:
weights = convert_lstm_weights(
weights, from_cudnn=source == "CuDNNLSTM"
)
# convert the weights between CuDNNGRU and GRU(reset_after=True)
if target_class in ["GRU", "CuDNNGRU"] and len(weights) == 3:
# We can determine the source of the weights from the shape of the bias.
# If there is no bias we skip the conversion since
# CuDNNGRU always has biases.
units = weights[1].shape[0]
bias_shape = weights[2].shape
n_gates = 3
def convert_gru_weights(weights, from_cudnn=True):
"""Converts the weights between CuDNNGRU and GRU.
Args:
weights: Original weights.
from_cudnn: Indicates whether original weights are from cuDNN
layer.
Returns:
Updated weights compatible with GRU.
"""
kernels = transform_kernels(
weights[0], transpose_input(from_cudnn), n_gates
)
recurrent_kernels = transform_kernels(
weights[1], lambda k: k.T, n_gates
)
biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1)
return [kernels, recurrent_kernels, biases]
if bias_shape == (2 * units * n_gates,):
source = "CuDNNGRU"
elif bias_shape == (2, units * n_gates):
source = "GRU(reset_after=True)"
elif bias_shape == (units * n_gates,):
source = "GRU(reset_after=False)"
else:
raise ValueError("Invalid bias shape: " + str(bias_shape))
if target_class == "CuDNNGRU":
target = "CuDNNGRU"
elif layer.reset_after:
target = "GRU(reset_after=True)"
else:
target = "GRU(reset_after=False)"
# only convert between different types
if source != target:
types = (source, target)
if "GRU(reset_after=False)" in types:
raise ValueError("%s is not compatible with %s" % types)
if source == "CuDNNGRU":
weights = convert_gru_weights(weights, from_cudnn=True)
elif source == "GRU(reset_after=True)":
weights = convert_gru_weights(weights, from_cudnn=False)
return weights
def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
"""Saves optimizer weights of a optimizer to a HDF5 group.
Args:
hdf5_group: HDF5 group.
optimizer: optimizer instance.
"""
if isinstance(optimizer, optimizer_base.Optimizer):
symbolic_weights = optimizer.variables
else:
symbolic_weights = getattr(optimizer, "weights")
if symbolic_weights:
weights_group = hdf5_group.create_group("optimizer_weights")
weight_names = [str(w.name).encode("utf8") for w in symbolic_weights]
save_attributes_to_hdf5_group(
weights_group, "weight_names", weight_names
)
weight_values = backend.batch_get_value(symbolic_weights)
for name, val in zip(weight_names, weight_values):
param_dset = weights_group.create_dataset(
name, val.shape, dtype=val.dtype
)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
def load_optimizer_weights_from_hdf5_group(hdf5_group):
"""Load optimizer weights from a HDF5 group.
Args:
hdf5_group: A pointer to a HDF5 group.
Returns:
data: List of optimizer weight names.
"""
weights_group = hdf5_group["optimizer_weights"]
optimizer_weight_names = load_attributes_from_hdf5_group(
weights_group, "weight_names"
)
return [
weights_group[weight_name] for weight_name in optimizer_weight_names
]
def save_subset_weights_to_hdf5_group(f, weights):
"""Save top-level weights of a model to a HDF5 group.
Args:
f: HDF5 group.
weights: List of weight variables.
"""
weight_values = backend.batch_get_value(weights)
weight_names = [w.name.encode("utf8") for w in weights]
save_attributes_to_hdf5_group(f, "weight_names", weight_names)
for name, val in zip(weight_names, weight_values):
param_dset = f.create_dataset(name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
def save_weights_to_hdf5_group(f, model):
"""Saves the weights of a list of layers to a HDF5 group.
Args:
f: HDF5 group.
model: Model instance.
"""
from tf_keras import __version__ as keras_version
save_attributes_to_hdf5_group(
f, "layer_names", [layer.name.encode("utf8") for layer in model.layers]
)
f.attrs["backend"] = backend.backend().encode("utf8")
f.attrs["keras_version"] = str(keras_version).encode("utf8")
# Sort model layers by layer name to ensure that group names are strictly
# growing to avoid prefix issues.
for layer in sorted(model.layers, key=lambda x: x.name):
g = f.create_group(layer.name)
weights = _legacy_weights(layer)
save_subset_weights_to_hdf5_group(g, weights)
weights = model._trainable_weights + model._non_trainable_weights
g = f.create_group("top_level_model_weights")
save_subset_weights_to_hdf5_group(g, weights)
def load_subset_weights_from_hdf5_group(f):
"""Load layer weights of a model from hdf5.
Args:
f: A pointer to a HDF5 group.
Returns:
List of NumPy arrays of the weight values.
Raises:
ValueError: in case of mismatch between provided model
and weights file.
"""
weight_names = load_attributes_from_hdf5_group(f, "weight_names")
return [np.asarray(f[weight_name]) for weight_name in weight_names]
def load_weights_from_hdf5_group(f, model):
"""Implements topological (order-based) weight loading.
Args:
f: A pointer to a HDF5 group.
model: Model instance.
Raises:
ValueError: in case of mismatch between provided layers
and weights file.
"""
if "keras_version" in f.attrs:
original_keras_version = f.attrs["keras_version"]
if hasattr(original_keras_version, "decode"):
original_keras_version = original_keras_version.decode("utf8")
else:
original_keras_version = "1"
if "backend" in f.attrs:
original_backend = f.attrs["backend"]
if hasattr(original_backend, "decode"):
original_backend = original_backend.decode("utf8")
else:
original_backend = None
filtered_layers = []
for layer in model.layers:
weights = _legacy_weights(layer)
if weights:
filtered_layers.append(layer)
layer_names = load_attributes_from_hdf5_group(f, "layer_names")
filtered_layer_names = []
for name in layer_names:
g = f[name]
weight_names = load_attributes_from_hdf5_group(g, "weight_names")
if weight_names:
filtered_layer_names.append(name)
layer_names = filtered_layer_names
if len(layer_names) != len(filtered_layers):
raise ValueError(
"Layer count mismatch when loading weights from file. "
f"Model expected {len(filtered_layers)} layers, found "
f"{len(layer_names)} saved layers."
)
# We batch weight value assignments in a single backend call
# which provides a speedup in TensorFlow.
weight_value_tuples = []
for k, name in enumerate(layer_names):
g = f[name]
layer = filtered_layers[k]
symbolic_weights = _legacy_weights(layer)
weight_values = load_subset_weights_from_hdf5_group(g)
weight_values = preprocess_weights_for_loading(
layer, weight_values, original_keras_version, original_backend
)
if len(weight_values) != len(symbolic_weights):
raise ValueError(
f"Weight count mismatch for layer #{k} (named {layer.name} in "
f"the current model, {name} in the save file). "
f"Layer expects {len(symbolic_weights)} weight(s). Received "
f"{len(weight_values)} saved weight(s)"
)
weight_value_tuples += zip(symbolic_weights, weight_values)
if "top_level_model_weights" in f:
symbolic_weights = (
model._trainable_weights + model._non_trainable_weights
)
weight_values = load_subset_weights_from_hdf5_group(
f["top_level_model_weights"]
)
if len(weight_values) != len(symbolic_weights):
raise ValueError(
"Weight count mismatch for top-level weights when loading "
"weights from file. "
f"Model expects {len(symbolic_weights)} top-level weight(s). "
f"Received {len(weight_values)} saved top-level weight(s)"
)
weight_value_tuples += zip(symbolic_weights, weight_values)
backend.batch_set_value(weight_value_tuples)
# Perform any layer defined finalization of the layer state.
for layer in model._flatten_layers():
layer.finalize_state()
def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):
"""Implements name-based weight loading (instead of topological loading).
Layers that have no matching name are skipped.
Args:
f: A pointer to a HDF5 group.
model: Model instance.
skip_mismatch: Boolean, whether to skip loading of layers
where there is a mismatch in the number of weights,
or a mismatch in the shape of the weights.
Raises:
ValueError: in case of mismatch between provided layers
and weights file and skip_match=False.
"""
if "keras_version" in f.attrs:
original_keras_version = f.attrs["keras_version"]
if hasattr(original_keras_version, "decode"):
original_keras_version = original_keras_version.decode("utf8")
else:
original_keras_version = "1"
if "backend" in f.attrs:
original_backend = f.attrs["backend"]
if hasattr(original_backend, "decode"):
original_backend = original_backend.decode("utf8")
else:
original_backend = None
# New file format.
layer_names = load_attributes_from_hdf5_group(f, "layer_names")
# Reverse index of layer name to list of layers with name.
index = {}
for layer in model.layers:
if layer.name:
index.setdefault(layer.name, []).append(layer)
# We batch weight value assignments in a single backend call
# which provides a speedup in TensorFlow.
weight_value_tuples = []
for k, name in enumerate(layer_names):
g = f[name]
weight_values = load_subset_weights_from_hdf5_group(g)
for layer in index.get(name, []):
symbolic_weights = _legacy_weights(layer)
weight_values = preprocess_weights_for_loading(
layer, weight_values, original_keras_version, original_backend
)
if len(weight_values) != len(symbolic_weights):
if skip_mismatch:
logging.warning(
f"Skipping loading of weights for layer #{k} (named "
f"{layer.name}) due to mismatch in number of weights. "
f"Layer expects {len(symbolic_weights)} weight(s). "
f"Received {len(weight_values)} saved weight(s)"
)
continue
raise ValueError(
f"Weight count mismatch for layer #{k} "
f"(named {layer.name}). "
f"Layer expects {len(symbolic_weights)} weight(s). "
f"Received {len(weight_values)} saved weight(s)"
)
# Set values.
for i in range(len(weight_values)):
expected_shape = backend.int_shape(symbolic_weights[i])
received_shape = weight_values[i].shape
if expected_shape != received_shape:
if skip_mismatch:
logging.warning(
f"Skipping loading weights for layer #{k} (named "
f"{layer.name}) due to mismatch in shape for "
f"weight {symbolic_weights[i].name}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}"
)
continue
raise ValueError(
f"Shape mismatch in layer #{k} (named {layer.name}) "
f"for weight {symbolic_weights[i].name}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}"
)
else:
weight_value_tuples.append(
(symbolic_weights[i], weight_values[i])
)
if "top_level_model_weights" in f:
symbolic_weights = (
model._trainable_weights + model._non_trainable_weights
)
weight_values = load_subset_weights_from_hdf5_group(
f["top_level_model_weights"]
)
if len(weight_values) != len(symbolic_weights):
if skip_mismatch:
logging.warning(
"Skipping loading top-level weights for model due to "
"mismatch in number of weights. "
f"Model expects {len(symbolic_weights)} "
"top-level weight(s). "
f"Received {len(weight_values)} saved top-level weight(s)"
)
else:
raise ValueError(
"Weight count mismatch for top-level weights of model. "
f"Model expects {len(symbolic_weights)} "
"top-level weight(s). "
f"Received {len(weight_values)} saved top-level weight(s)"
)
else:
for i in range(len(weight_values)):
expected_shape = backend.int_shape(symbolic_weights[i])
received_shape = weight_values[i].shape
if expected_shape != received_shape:
if skip_mismatch:
logging.warning(
"Skipping loading top-level weight for model due "
"to mismatch in shape for "
f"weight {symbolic_weights[i].name}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}"
)
else:
raise ValueError(
"Shape mismatch in model for top-level weight "
f"{symbolic_weights[i].name}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}"
)
else:
weight_value_tuples.append(
(symbolic_weights[i], weight_values[i])
)
backend.batch_set_value(weight_value_tuples)
# Perform any layer defined finalization of the layer state.
for layer in model._flatten_layers():
layer.finalize_state()
def save_attributes_to_hdf5_group(group, name, data):
"""Saves attributes (data) of the specified name into the HDF5 group.
This method deals with an inherent problem of HDF5 file which is not
able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
Args:
group: A pointer to a HDF5 group.
name: A name of the attributes to save.
data: Attributes data to store.
Raises:
RuntimeError: If any single attribute is too large to be saved.
"""
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
f"bytes: {bad_attributes}"
)
data_npy = np.asarray(data)
num_chunks = 1
chunked_data = np.array_split(data_npy, num_chunks)
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
num_chunks += 1
chunked_data = np.array_split(data_npy, num_chunks)
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data):
group.attrs["%s%d" % (name, chunk_id)] = chunk_data
else:
group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
"""Loads attributes of the specified name from the HDF5 group.
This method deals with an inherent problem
of HDF5 file which is not able to store
data larger than HDF5_OBJECT_HEADER_LIMIT bytes.
Args:
group: A pointer to a HDF5 group.
name: A name of the attributes to load.
Returns:
data: Attributes data.
"""
if name in group.attrs:
data = [
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs[name]
]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[
n.decode("utf8") if hasattr(n, "decode") else n
for n in group.attrs["%s%d" % (name, chunk_id)]
]
)
chunk_id += 1
return data
def _legacy_weights(layer):
"""DO NOT USE.
For legacy reason, the layer.weights was in the order of
[self.trainable_weights + self.non_trainable_weights], and this order was
used for preserving the weights in h5 format. The new order of layer.weights
are the same as layer.get_weights() which is more intuitive for user. To
keep supporting the existing saved h5 file, this method should be used to
save/load weights. In future version, we will delete this method and
introduce a breaking change for h5 and stay with the new order for weights.
Args:
layer: a `tf.keras.Model` or `tf.keras.layers.Layer` instance.
Returns:
A list of variables with the order of trainable_weights, followed by
non_trainable_weights.
"""
weights = layer.trainable_weights + layer.non_trainable_weights
if any(not isinstance(w, tf.Variable) for w in weights):
raise NotImplementedError(
"Save or restore weights that is not an instance of `tf.Variable` "
"is not supported in h5, use `save_format='tf'` instead. Received "
f"a model or layer {layer.__class__.__name__} "
f"with weights {weights}"
)
return weights
| tf-keras/tf_keras/saving/legacy/hdf5_format.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/hdf5_format.py",
"repo_id": "tf-keras",
"token_count": 19696
} | 207 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions implementing Layer SavedModel serialization."""
import tensorflow.compat.v2 as tf
from tf_keras.mixed_precision import policy
from tf_keras.saving.legacy import serialization
from tf_keras.saving.legacy.saved_model import base_serialization
from tf_keras.saving.legacy.saved_model import constants
from tf_keras.saving.legacy.saved_model import save_impl
from tf_keras.saving.legacy.saved_model import serialized_attributes
class LayerSavedModelSaver(base_serialization.SavedModelSaver):
"""Implements Layer SavedModel serialization."""
@property
def object_identifier(self):
return constants.LAYER_IDENTIFIER
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
return self._python_properties_internal()
def _python_properties_internal(self):
"""Returns dictionary of all python properties."""
# TODO(kathywu): Add support for metrics serialization.
# TODO(kathywu): Synchronize with the keras spec (go/keras-json-spec)
# once the python config serialization has caught up.
metadata = dict(
name=self.obj.name,
trainable=self.obj.trainable,
expects_training_arg=self.obj._expects_training_arg,
dtype=policy.serialize(self.obj._dtype_policy),
batch_input_shape=getattr(self.obj, "_batch_input_shape", None),
stateful=self.obj.stateful,
must_restore_from_config=self.obj._must_restore_from_config,
preserve_input_structure_in_config=self.obj._preserve_input_structure_in_config, # noqa: E501
autocast=self.obj._autocast,
)
metadata.update(get_serialized(self.obj))
if self.obj.input_spec is not None:
# Layer's input_spec has already been type-checked in the property
# setter.
metadata["input_spec"] = tf.nest.map_structure(
lambda x: serialization.serialize_keras_object(x)
if x
else None,
self.obj.input_spec,
)
if self.obj.activity_regularizer is not None and hasattr(
self.obj.activity_regularizer, "get_config"
):
metadata[
"activity_regularizer"
] = serialization.serialize_keras_object(
self.obj.activity_regularizer
)
if self.obj._build_input_shape is not None:
metadata["build_input_shape"] = self.obj._build_input_shape
return metadata
def objects_to_serialize(self, serialization_cache):
return self._get_serialized_attributes(
serialization_cache
).objects_to_serialize
def functions_to_serialize(self, serialization_cache):
return self._get_serialized_attributes(
serialization_cache
).functions_to_serialize
def _get_serialized_attributes(self, serialization_cache):
"""Generates or retrieves serialized attributes from cache."""
keras_cache = serialization_cache.setdefault(
constants.KERAS_CACHE_KEY, {}
)
if self.obj in keras_cache:
return keras_cache[self.obj]
serialized_attr = keras_cache[
self.obj
] = serialized_attributes.SerializedAttributes.new(self.obj)
if (
save_impl.should_skip_serialization(self.obj)
or self.obj._must_restore_from_config
):
return serialized_attr
object_dict, function_dict = self._get_serialized_attributes_internal(
serialization_cache
)
serialized_attr.set_and_validate_objects(object_dict)
serialized_attr.set_and_validate_functions(function_dict)
return serialized_attr
def _get_serialized_attributes_internal(self, serialization_cache):
"""Returns dictionary of serialized attributes."""
objects = save_impl.wrap_layer_objects(self.obj, serialization_cache)
functions = save_impl.wrap_layer_functions(
self.obj, serialization_cache
)
# Attribute validator requires that the default save signature is added
# to function dict, even if the value is None.
functions["_default_save_signature"] = None
return objects, functions
# TODO(kathywu): Move serialization utils (and related utils from
# generic_utils.py) to a separate file.
def get_serialized(obj):
with serialization.skip_failed_serialization():
# Store the config dictionary, which may be used when reviving the
# object. When loading, the program will attempt to revive the object
# from config, and if that fails, the object will be revived from the
# SavedModel.
return serialization.serialize_keras_object(obj)
class InputLayerSavedModelSaver(base_serialization.SavedModelSaver):
"""InputLayer serialization."""
@property
def object_identifier(self):
return constants.INPUT_LAYER_IDENTIFIER
@property
def python_properties(self):
return dict(
class_name=type(self.obj).__name__,
name=self.obj.name,
dtype=self.obj.dtype,
sparse=self.obj.sparse,
ragged=self.obj.ragged,
batch_input_shape=self.obj._batch_input_shape,
config=self.obj.get_config(),
)
def objects_to_serialize(self, serialization_cache):
return {}
def functions_to_serialize(self, serialization_cache):
return {}
class RNNSavedModelSaver(LayerSavedModelSaver):
"""RNN layer serialization."""
@property
def object_identifier(self):
return constants.RNN_LAYER_IDENTIFIER
def _get_serialized_attributes_internal(self, serialization_cache):
objects, functions = super()._get_serialized_attributes_internal(
serialization_cache
)
states = tf.__internal__.tracking.wrap(self.obj.states)
# SaveModel require all the objects to be Trackable when saving. If the
# states is still a tuple after wrap_or_unwrap, it means it doesn't
# contain any trackable item within it, eg empty tuple or (None, None)
# for stateless ConvLSTM2D. We convert them to list so that
# wrap_or_unwrap can make it a Trackable again for saving. When loaded,
# ConvLSTM2D is able to handle the tuple/list conversion.
if isinstance(states, tuple):
states = tf.__internal__.tracking.wrap(list(states))
objects["states"] = states
return objects, functions
class VocabularySavedModelSaver(LayerSavedModelSaver):
"""Handles vocabulary layer serialization.
This class is needed for StringLookup, IntegerLookup, and TextVectorization,
which all have a vocabulary as part of the config. Currently, we keep this
vocab as part of the config until saving, when we need to clear it to avoid
initializing a StaticHashTable twice (once when restoring the config and
once when restoring restoring module resources). After clearing the vocab,
we persist a property to the layer indicating it was constructed with a
vocab.
"""
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
metadata = self._python_properties_internal()
# Clear the vocabulary from the config during saving.
metadata["config"]["vocabulary"] = None
# Persist a property to track that a vocabulary was passed on
# construction.
metadata["config"][
"has_input_vocabulary"
] = self.obj._has_input_vocabulary
return metadata
| tf-keras/tf_keras/saving/legacy/saved_model/layer_serialization.py/0 | {
"file_path": "tf-keras/tf_keras/saving/legacy/saved_model/layer_serialization.py",
"repo_id": "tf-keras",
"token_count": 3218
} | 208 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras serializable object registration functionality."""
import tensorflow.compat.v2 as tf
import tf_keras as keras
from tf_keras.saving import object_registration
from tf_keras.saving import serialization_lib
class TestObjectRegistration(tf.test.TestCase):
def test_custom_object_scope(self):
def custom_fn():
pass
class CustomClass:
pass
def check_get_in_thread():
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
with object_registration.custom_object_scope(
{"CustomClass": CustomClass, "custom_fn": custom_fn}
):
actual_custom_fn = keras.activations.get("custom_fn")
self.assertEqual(actual_custom_fn, custom_fn)
actual_custom_class = keras.regularizers.get("CustomClass")
self.assertEqual(actual_custom_class.__class__, CustomClass)
checked_thread = self.checkedThread(check_get_in_thread)
checked_thread.start()
checked_thread.join()
def test_serialize_custom_class_with_default_name(self):
@object_registration.register_keras_serializable()
class TestClass:
def __init__(self, value):
self._value = value
def get_config(self):
return {"value": self._value}
serialized_name = "Custom>TestClass"
inst = TestClass(value=10)
class_name = object_registration._GLOBAL_CUSTOM_NAMES[TestClass]
self.assertEqual(serialized_name, class_name)
config = serialization_lib.serialize_keras_object(inst)
self.assertEqual(class_name, config["class_name"])
new_inst = serialization_lib.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, TestClass)
self.assertEqual(10, new_inst._value)
# Make sure registering a new class with same name will fail.
with self.assertRaisesRegex(
ValueError, ".*has already been registered.*"
):
@object_registration.register_keras_serializable()
class TestClass:
def __init__(self, value):
self._value = value
def get_config(self):
return {"value": self._value}
def test_serialize_custom_class_with_custom_name(self):
@object_registration.register_keras_serializable(
"TestPackage", "CustomName"
)
class OtherTestClass:
def __init__(self, val):
self._val = val
def get_config(self):
return {"val": self._val}
serialized_name = "TestPackage>CustomName"
inst = OtherTestClass(val=5)
class_name = object_registration._GLOBAL_CUSTOM_NAMES[OtherTestClass]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(OtherTestClass)
self.assertEqual(fn_class_name, class_name)
cls = object_registration.get_registered_object(fn_class_name)
self.assertEqual(OtherTestClass, cls)
config = keras.utils.serialization.serialize_keras_object(inst)
self.assertEqual(class_name, config["class_name"])
new_inst = keras.utils.serialization.deserialize_keras_object(config)
self.assertIsNot(inst, new_inst)
self.assertIsInstance(new_inst, OtherTestClass)
self.assertEqual(5, new_inst._val)
def test_serialize_custom_function(self):
@object_registration.register_keras_serializable()
def my_fn():
return 42
serialized_name = "Custom>my_fn"
class_name = object_registration._GLOBAL_CUSTOM_NAMES[my_fn]
self.assertEqual(serialized_name, class_name)
fn_class_name = object_registration.get_registered_name(my_fn)
self.assertEqual(fn_class_name, class_name)
config = keras.utils.serialization.serialize_keras_object(my_fn)
self.assertEqual(class_name, config)
fn = keras.utils.serialization.deserialize_keras_object(config)
self.assertEqual(42, fn())
fn_2 = object_registration.get_registered_object(fn_class_name)
self.assertEqual(42, fn_2())
def test_serialize_custom_class_without_get_config_fails(self):
with self.assertRaisesRegex(
ValueError,
"Cannot register a class that does not have a get_config.*",
):
@object_registration.register_keras_serializable(
"TestPackage", "TestClass"
)
class TestClass:
def __init__(self, value):
self._value = value
| tf-keras/tf_keras/saving/object_registration_test.py/0 | {
"file_path": "tf-keras/tf_keras/saving/object_registration_test.py",
"repo_id": "tf-keras",
"token_count": 2469
} | 209 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras models for use in Model subclassing tests."""
import tf_keras as keras
from tf_keras.testing_infra import test_utils
class SimpleConvTestModel(keras.Model):
def __init__(self, num_classes=10):
super().__init__(name="test_model")
self.num_classes = num_classes
self.conv1 = keras.layers.Conv2D(32, (3, 3), activation="relu")
self.flatten = keras.layers.Flatten()
self.dense1 = keras.layers.Dense(num_classes, activation="softmax")
def call(self, x):
x = self.conv1(x)
x = self.flatten(x)
return self.dense1(x)
def get_multi_io_subclass_model(use_bn=False, use_dp=False, num_classes=(2, 3)):
"""Creates MultiIOModel for the tests of subclass model."""
shared_layer = keras.layers.Dense(32, activation="relu")
branch_a = [shared_layer]
if use_dp:
branch_a.append(keras.layers.Dropout(0.5))
branch_a.append(keras.layers.Dense(num_classes[0], activation="softmax"))
branch_b = [shared_layer]
if use_bn:
branch_b.append(keras.layers.BatchNormalization())
branch_b.append(keras.layers.Dense(num_classes[1], activation="softmax"))
model = test_utils._MultiIOSubclassModel(
branch_a, branch_b, name="test_model"
)
return model
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass."""
def __init__(self, num_classes=2):
super().__init__(name="nested_model_1")
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(num_classes, activation="relu")
self.bn = keras.layers.BatchNormalization()
self.test_net = test_utils.SmallSubclassMLP(
num_hidden=32, num_classes=4, use_bn=True, use_dp=True
)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside."""
def __init__(self, num_classes=2):
super().__init__(name="nested_model_2")
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(num_classes, activation="relu")
self.bn = self.bn = keras.layers.BatchNormalization()
self.test_net = self.get_functional_graph_model(32, 4)
@staticmethod
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement
# `compute_output_shape`.
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(32, activation="relu")
self.dense2 = keras.layers.Dense(5, activation="relu")
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
test_model = Inner()
x = test_model(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name="nested_model_3")
class CustomCallModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(1, activation="relu")
self.dense2 = keras.layers.Dense(1, activation="softmax")
def call(self, first, second, fiddle_with_output="no", training=True):
combined = self.dense1(first) + self.dense2(second)
if fiddle_with_output == "yes":
return 10.0 * combined
else:
return combined
class TrainingNoDefaultModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training):
return self.dense1(x)
class TrainingMaskingModel(keras.Model):
def __init__(self):
super().__init__()
self.dense1 = keras.layers.Dense(1)
def call(self, x, training=False, mask=None):
return self.dense1(x)
| tf-keras/tf_keras/tests/model_subclassing_test_util.py/0 | {
"file_path": "tf-keras/tf_keras/tests/model_subclassing_test_util.py",
"repo_id": "tf-keras",
"token_count": 2303
} | 210 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow TF-Keras.
TensorFlow TF-Keras is an implementation of the TF-Keras API that uses
TensorFlow as a backend.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import setuptools
DOCLINES = __doc__.split("\n")
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = "2.16.0"
REQUIRED_PACKAGES = [
# We depend on TensorFlow's declared pip dependencies.
# Add a new dep there if one is needed.
]
project_name = "tf_keras"
if "--project_name" in sys.argv:
project_name_idx = sys.argv.index("--project_name")
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove("--project_name")
sys.argv.pop(project_name_idx)
setuptools.setup(
name=project_name,
version=_VERSION.replace("-", ""),
description="Deep learning for humans.",
long_description="\n".join(DOCLINES[2:]),
url="https://keras.io/",
download_url="https://github.com/keras-team/tf-keras/tags",
author="Keras team",
author_email="[email protected]",
packages=setuptools.find_packages(),
install_requires=REQUIRED_PACKAGES,
# Supported Python versions
python_requires=">=3.9",
# PyPI package information.
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
license="Apache 2.0",
keywords=["keras", "tensorflow", "machine learning", "deep learning"],
)
| tf-keras/tf_keras/tools/pip_package/setup.py/0 | {
"file_path": "tf-keras/tf_keras/tools/pip_package/setup.py",
"repo_id": "tf-keras",
"token_count": 988
} | 211 |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FeatureSpace structured data preprocessing & encoding utility."""
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras.engine import base_layer
from tf_keras.saving import saving_lib
from tf_keras.saving import serialization_lib
from tf_keras.utils.generic_utils import LazyLoader
# isort: off
from tensorflow.python.util.tf_export import keras_export
layers = LazyLoader("layers", globals(), "tf_keras.layers")
class Cross:
def __init__(self, feature_names, crossing_dim, output_mode="one_hot"):
if output_mode not in {"int", "one_hot"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'int', 'one_hot'}. "
f"Received: output_mode={output_mode}"
)
self.feature_names = tuple(feature_names)
self.crossing_dim = crossing_dim
self.output_mode = output_mode
@property
def name(self):
return "_X_".join(self.feature_names)
def get_config(self):
return {
"feature_names": self.feature_names,
"crossing_dim": self.crossing_dim,
"output_mode": self.output_mode,
}
@classmethod
def from_config(cls, config):
return cls(**config)
class Feature:
def __init__(self, dtype, preprocessor, output_mode):
if output_mode not in {"int", "one_hot", "float"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'int', 'one_hot', 'float'}. "
f"Received: output_mode={output_mode}"
)
self.dtype = dtype
if isinstance(preprocessor, dict):
preprocessor = serialization_lib.deserialize_keras_object(
preprocessor
)
self.preprocessor = preprocessor
self.output_mode = output_mode
def get_config(self):
return {
"dtype": self.dtype,
"preprocessor": serialization_lib.serialize_keras_object(
self.preprocessor
),
"output_mode": self.output_mode,
}
@classmethod
def from_config(cls, config):
return cls(**config)
@keras_export("keras.utils.FeatureSpace", v1=[])
class FeatureSpace(base_layer.Layer):
"""One-stop utility for preprocessing and encoding structured data.
Arguments:
feature_names: Dict mapping the names of your features to their
type specification, e.g. `{"my_feature": "integer_categorical"}`
or `{"my_feature": FeatureSpace.integer_categorical()}`.
For a complete list of all supported types, see
"Available feature types" paragraph below.
output_mode: One of `"concat"` or `"dict"`. In concat mode, all
features get concatenated together into a single vector.
In dict mode, the FeatureSpace returns a dict of individually
encoded features (with the same keys as the input dict keys).
crosses: List of features to be crossed together, e.g.
`crosses=[("feature_1", "feature_2")]`. The features will be
"crossed" by hashing their combined value into
a fixed-length vector.
crossing_dim: Default vector size for hashing crossed features.
Defaults to `32`.
hashing_dim: Default vector size for hashing features of type
`"integer_hashed"` and `"string_hashed"`. Defaults to `32`.
num_discretization_bins: Default number of bins to be used for
discretizing features of type `"float_discretized"`.
Defaults to `32`.
**Available feature types:**
Note that all features can be referred to by their string name,
e.g. `"integer_categorical"`. When using the string name, the default
argument values are used.
```python
# Plain float values.
FeatureSpace.float(name=None)
# Float values to be preprocessed via featurewise standardization
# (i.e. via a `keras.layers.Normalization` layer).
FeatureSpace.float_normalized(name=None)
# Float values to be preprocessed via linear rescaling
# (i.e. via a `keras.layers.Rescaling` layer).
FeatureSpace.float_rescaled(scale=1., offset=0., name=None)
# Float values to be discretized. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.float_discretized(
num_bins, bin_boundaries=None, output_mode="one_hot", name=None)
# Integer values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.integer_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# String values to be indexed. By default, the discrete
# representation will then be one-hot encoded.
FeatureSpace.string_categorical(
max_tokens=None, num_oov_indices=1, output_mode="one_hot", name=None)
# Integer values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.integer_hashed(num_bins, output_mode="one_hot", name=None)
# String values to be hashed into a fixed number of bins.
# By default, the discrete representation will then be one-hot encoded.
FeatureSpace.string_hashed(num_bins, output_mode="one_hot", name=None)
```
Examples:
**Basic usage with a dict of input data:**
```python
raw_data = {
"float_values": [0.0, 0.1, 0.2, 0.3],
"string_values": ["zero", "one", "two", "three"],
"int_values": [0, 1, 2, 3],
}
dataset = tf.data.Dataset.from_tensor_slices(raw_data)
feature_space = FeatureSpace(
features={
"float_values": "float_normalized",
"string_values": "string_categorical",
"int_values": "integer_categorical",
},
crosses=[("string_values", "int_values")],
output_mode="concat",
)
# Before you start using the FeatureSpace,
# you must `adapt()` it on some data.
feature_space.adapt(dataset)
# You can call the FeatureSpace on a dict of data (batched or unbatched).
output_vector = feature_space(raw_data)
```
**Basic usage with `tf.data`:**
```python
# Unlabeled data
preprocessed_ds = unlabeled_dataset.map(feature_space)
# Labeled data
preprocessed_ds = labeled_dataset.map(lambda x, y: (feature_space(x), y))
```
**Basic usage with the TF-Keras Functional API:**
```python
# Retrieve a dict TF-Keras Input objects
inputs = feature_space.get_inputs()
# Retrieve the corresponding encoded TF-Keras tensors
encoded_features = feature_space.get_encoded_features()
# Build a Functional model
outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features)
model = keras.Model(inputs, outputs)
```
**Customizing each feature or feature cross:**
```python
feature_space = FeatureSpace(
features={
"float_values": FeatureSpace.float_normalized(),
"string_values": FeatureSpace.string_categorical(max_tokens=10),
"int_values": FeatureSpace.integer_categorical(max_tokens=10),
},
crosses=[
FeatureSpace.cross(("string_values", "int_values"), crossing_dim=32)
],
output_mode="concat",
)
```
**Returning a dict of integer-encoded features:**
```python
feature_space = FeatureSpace(
features={
"string_values": FeatureSpace.string_categorical(output_mode="int"),
"int_values": FeatureSpace.integer_categorical(output_mode="int"),
},
crosses=[
FeatureSpace.cross(
feature_names=("string_values", "int_values"),
crossing_dim=32,
output_mode="int",
)
],
output_mode="dict",
)
```
**Specifying your own TF-Keras preprocessing layer:**
```python
# Let's say that one of the features is a short text paragraph that
# we want to encode as a vector (one vector per paragraph) via TF-IDF.
data = {
"text": ["1st string", "2nd string", "3rd string"],
}
# There's a TF-Keras layer for this: TextVectorization.
custom_layer = layers.TextVectorization(output_mode="tf_idf")
# We can use FeatureSpace.feature to create a custom feature
# that will use our preprocessing layer.
feature_space = FeatureSpace(
features={
"text": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
),
},
output_mode="concat",
)
feature_space.adapt(tf.data.Dataset.from_tensor_slices(data))
output_vector = feature_space(data)
```
**Retrieving the underlying TF-Keras preprocessing layers:**
```python
# The preprocessing layer of each feature is available in `.preprocessors`.
preprocessing_layer = feature_space.preprocessors["feature1"]
# The crossing layer of each feature cross is available in `.crossers`.
# It's an instance of keras.layers.HashedCrossing.
crossing_layer = feature_space.crossers["feature1_X_feature2"]
```
**Saving and reloading a FeatureSpace:**
```python
feature_space.save("myfeaturespace.keras")
reloaded_feature_space = keras.models.load_model("myfeaturespace.keras")
```
"""
@classmethod
def cross(cls, feature_names, crossing_dim, output_mode="one_hot"):
return Cross(feature_names, crossing_dim, output_mode=output_mode)
@classmethod
def feature(cls, dtype, preprocessor, output_mode):
return Feature(dtype, preprocessor, output_mode)
@classmethod
def float(cls, name=None):
from tf_keras.layers.core import identity
name = name or backend.unique_object_name("float")
preprocessor = identity.Identity(
dtype="float32", name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_rescaled(cls, scale=1.0, offset=0.0, name=None):
name = name or backend.unique_object_name("float_rescaled")
preprocessor = layers.Rescaling(
scale=scale, offset=offset, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_normalized(cls, name=None):
name = name or backend.unique_object_name("float_normalized")
preprocessor = layers.Normalization(
axis=-1, name=f"{name}_preprocessor"
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode="float"
)
@classmethod
def float_discretized(
cls, num_bins, bin_boundaries=None, output_mode="one_hot", name=None
):
name = name or backend.unique_object_name("float_discretized")
preprocessor = layers.Discretization(
num_bins=num_bins,
bin_boundaries=bin_boundaries,
name=f"{name}_preprocessor",
)
return Feature(
dtype="float32", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or backend.unique_object_name("integer_categorical")
preprocessor = layers.IntegerLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="int64", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_categorical(
cls,
max_tokens=None,
num_oov_indices=1,
output_mode="one_hot",
name=None,
):
name = name or backend.unique_object_name("string_categorical")
preprocessor = layers.StringLookup(
name=f"{name}_preprocessor",
max_tokens=max_tokens,
num_oov_indices=num_oov_indices,
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def string_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or backend.unique_object_name("string_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="string", preprocessor=preprocessor, output_mode=output_mode
)
@classmethod
def integer_hashed(cls, num_bins, output_mode="one_hot", name=None):
name = name or backend.unique_object_name("integer_hashed")
preprocessor = layers.Hashing(
name=f"{name}_preprocessor", num_bins=num_bins
)
return Feature(
dtype="int64", preprocessor=preprocessor, output_mode=output_mode
)
def __init__(
self,
features,
output_mode="concat",
crosses=None,
crossing_dim=32,
hashing_dim=32,
num_discretization_bins=32,
):
if not features:
raise ValueError("The `features` argument cannot be None or empty.")
self.crossing_dim = crossing_dim
self.hashing_dim = hashing_dim
self.num_discretization_bins = num_discretization_bins
self.features = {
name: self._standardize_feature(name, value)
for name, value in features.items()
}
self.crosses = []
if crosses:
feature_set = set(features.keys())
for cross in crosses:
if isinstance(cross, dict):
cross = serialization_lib.deserialize_keras_object(cross)
if isinstance(cross, Cross):
self.crosses.append(cross)
else:
if not crossing_dim:
raise ValueError(
"When specifying `crosses`, the argument "
"`crossing_dim` "
"(dimensionality of the crossing space) "
"should be specified as well."
)
for key in cross:
if key not in feature_set:
raise ValueError(
"All features referenced "
"in the `crosses` argument "
"should be present in the `features` dict. "
f"Received unknown features: {cross}"
)
self.crosses.append(Cross(cross, crossing_dim=crossing_dim))
self.crosses_by_name = {cross.name: cross for cross in self.crosses}
if output_mode not in {"dict", "concat"}:
raise ValueError(
"Invalid value for argument `output_mode`. "
"Expected one of {'dict', 'concat'}. "
f"Received: output_mode={output_mode}"
)
self.output_mode = output_mode
self.inputs = {
name: self._feature_to_input(name, value)
for name, value in self.features.items()
}
self.preprocessors = {
name: value.preprocessor for name, value in self.features.items()
}
self.encoded_features = None
self.crossers = {
cross.name: self._cross_to_crosser(cross) for cross in self.crosses
}
self.one_hot_encoders = {}
self.built = False
self._is_adapted = False
self.concat = None
self._preprocessed_features_names = None
self._crossed_features_names = None
def _feature_to_input(self, name, feature):
return layers.Input(shape=(1,), dtype=feature.dtype, name=name)
def _standardize_feature(self, name, feature):
if isinstance(feature, Feature):
return feature
if isinstance(feature, dict):
return serialization_lib.deserialize_keras_object(feature)
if feature == "float":
return self.float(name=name)
elif feature == "float_normalized":
return self.float_normalized(name=name)
elif feature == "float_rescaled":
return self.float_rescaled(name=name)
elif feature == "float_discretized":
return self.float_discretized(
name=name, num_bins=self.num_discretization_bins
)
elif feature == "integer_categorical":
return self.integer_categorical(name=name)
elif feature == "string_categorical":
return self.string_categorical(name=name)
elif feature == "integer_hashed":
return self.integer_hashed(self.hashing_dim, name=name)
elif feature == "string_hashed":
return self.string_hashed(self.hashing_dim, name=name)
else:
raise ValueError(f"Invalid feature type: {feature}")
def _cross_to_crosser(self, cross):
return layers.HashedCrossing(cross.crossing_dim, name=cross.name)
def _list_adaptable_preprocessors(self):
adaptable_preprocessors = []
for name in self.features.keys():
preprocessor = self.preprocessors[name]
# Special case: a Normalization layer with preset mean/variance.
# Not adaptable.
if isinstance(preprocessor, layers.Normalization):
if preprocessor.input_mean is not None:
continue
if hasattr(preprocessor, "adapt"):
adaptable_preprocessors.append(name)
return adaptable_preprocessors
def adapt(self, dataset):
if not isinstance(dataset, tf.data.Dataset):
raise ValueError(
"`adapt()` can only be called on a tf.data.Dataset. "
f"Received instead: {dataset} (of type {type(dataset)})"
)
for name in self._list_adaptable_preprocessors():
# Call adapt() on each individual adaptable layer.
# TODO: consider rewriting this to instead iterate on the
# dataset once, split each batch into individual features,
# and call the layer's `_adapt_function` on each batch
# to simulate the behavior of adapt() in a more performant fashion.
feature_dataset = dataset.map(lambda x: x[name])
preprocessor = self.preprocessors[name]
# TODO: consider adding an adapt progress bar.
# Sample 1 element to check the rank
for x in feature_dataset.take(1):
pass
if x.shape.rank == 0:
# The dataset yields unbatched scalars; batch it.
feature_dataset = feature_dataset.batch(32)
if x.shape.rank in {0, 1}:
# If the rank is 1, add a dimension
# so we can reduce on axis=-1.
# Note: if rank was previously 0, it is now 1.
feature_dataset = feature_dataset.map(
lambda x: tf.expand_dims(x, -1)
)
preprocessor.adapt(feature_dataset)
self._is_adapted = True
self.get_encoded_features() # Finish building the layer
self.built = True
def get_inputs(self):
self._check_if_built()
return self.inputs
def get_encoded_features(self):
self._check_if_adapted()
if self.encoded_features is None:
preprocessed_features = self._preprocess_features(self.inputs)
crossed_features = self._cross_features(preprocessed_features)
merged_features = self._merge_features(
preprocessed_features, crossed_features
)
self.encoded_features = merged_features
return self.encoded_features
def _preprocess_features(self, features):
return {
name: self.preprocessors[name](features[name])
for name in features.keys()
}
def _cross_features(self, features):
all_outputs = {}
for cross in self.crosses:
inputs = [features[name] for name in cross.feature_names]
outputs = self.crossers[cross.name](inputs)
all_outputs[cross.name] = outputs
return all_outputs
def _merge_features(self, preprocessed_features, crossed_features):
if not self._preprocessed_features_names:
self._preprocessed_features_names = sorted(
preprocessed_features.keys()
)
self._crossed_features_names = sorted(crossed_features.keys())
all_names = (
self._preprocessed_features_names + self._crossed_features_names
)
all_features = [
preprocessed_features[name]
for name in self._preprocessed_features_names
] + [crossed_features[name] for name in self._crossed_features_names]
if self.output_mode == "dict":
output_dict = {}
else:
features_to_concat = []
if self.built:
# Fast mode.
for name, feature in zip(all_names, all_features):
encoder = self.one_hot_encoders.get(name, None)
if encoder:
feature = encoder(feature)
if self.output_mode == "dict":
output_dict[name] = feature
else:
features_to_concat.append(feature)
if self.output_mode == "dict":
return output_dict
else:
return self.concat(features_to_concat)
# If the object isn't built,
# we create the encoder and concat layers below
all_specs = [
self.features[name] for name in self._preprocessed_features_names
] + [
self.crosses_by_name[name] for name in self._crossed_features_names
]
for name, feature, spec in zip(all_names, all_features, all_specs):
dtype = feature.dtype.name
if spec.output_mode == "one_hot":
preprocessor = self.preprocessors.get(
name
) or self.crossers.get(name)
cardinality = None
if not feature.dtype.name.startswith("int"):
raise ValueError(
f"Feature '{name}' has `output_mode='one_hot'`. "
"Thus its preprocessor should return an int64 dtype. "
f"Instead it returns a {dtype} dtype."
)
if isinstance(
preprocessor, (layers.IntegerLookup, layers.StringLookup)
):
cardinality = preprocessor.vocabulary_size()
elif isinstance(preprocessor, layers.CategoryEncoding):
cardinality = preprocessor.num_tokens
elif isinstance(preprocessor, layers.Discretization):
cardinality = preprocessor.num_bins
elif isinstance(
preprocessor, (layers.HashedCrossing, layers.Hashing)
):
cardinality = preprocessor.num_bins
else:
raise ValueError(
f"Feature '{name}' has `output_mode='one_hot'`. "
"However it isn't a standard feature and the "
"dimensionality of its output space is not known, "
"thus it cannot be one-hot encoded. "
"Try using `output_mode='int'`."
)
if cardinality is not None:
encoder = layers.CategoryEncoding(
num_tokens=cardinality, output_mode="multi_hot"
)
self.one_hot_encoders[name] = encoder
feature = encoder(feature)
if self.output_mode == "concat":
dtype = feature.dtype.name
if dtype.startswith("int") or dtype == "string":
raise ValueError(
f"Cannot concatenate features because feature '{name}' "
f"has not been encoded (it has dtype {dtype}). "
"Consider using `output_mode='dict'`."
)
features_to_concat.append(feature)
else:
output_dict[name] = feature
if self.output_mode == "concat":
self.concat = layers.Concatenate(axis=-1)
return self.concat(features_to_concat)
else:
return output_dict
def _check_if_adapted(self):
if not self._is_adapted:
if not self._list_adaptable_preprocessors():
self._is_adapted = True
else:
raise ValueError(
"You need to call `.adapt(dataset)` on the FeatureSpace "
"before you can start using it."
)
def _check_if_built(self):
if not self.built:
self._check_if_adapted()
# Finishes building
self.get_encoded_features()
self.built = True
def __call__(self, data):
self._check_if_built()
if not isinstance(data, dict):
raise ValueError(
"A FeatureSpace can only be called with a dict. "
f"Received: data={data} (of type {type(data)}"
)
data = {key: tf.convert_to_tensor(value) for key, value in data.items()}
rebatched = False
for name, x in data.items():
if x.shape.rank == 0:
data[name] = tf.reshape(x, [1, 1])
rebatched = True
elif x.shape.rank == 1:
data[name] = tf.expand_dims(x, -1)
preprocessed_data = self._preprocess_features(data)
crossed_data = self._cross_features(preprocessed_data)
merged_data = self._merge_features(preprocessed_data, crossed_data)
if rebatched:
if self.output_mode == "concat":
assert merged_data.shape[0] == 1
return tf.squeeze(merged_data, axis=0)
else:
for name, x in merged_data.items():
if x.shape.rank == 2 and x.shape[0] == 1:
merged_data[name] = tf.squeeze(x, axis=0)
return merged_data
def get_config(self):
return {
"features": serialization_lib.serialize_keras_object(self.features),
"output_mode": self.output_mode,
"crosses": serialization_lib.serialize_keras_object(self.crosses),
"crossing_dim": self.crossing_dim,
"hashing_dim": self.hashing_dim,
"num_discretization_bins": self.num_discretization_bins,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def get_build_config(self):
return {
name: feature.preprocessor.get_build_config()
for name, feature in self.features.items()
}
def build_from_config(self, config):
for name in config.keys():
self.features[name].preprocessor.build_from_config(config[name])
self._is_adapted = True
def save(self, filepath):
"""Save the `FeatureSpace` instance to a `.keras` file.
You can reload it via `keras.models.load_model()`:
```python
feature_space.save("myfeaturespace.keras")
reloaded_feature_space = keras.models.load_model("myfeaturespace.keras")
```
"""
saving_lib.save_model(self, filepath)
def save_own_variables(self, store):
return
def load_own_variables(self, store):
return
| tf-keras/tf_keras/utils/feature_space.py/0 | {
"file_path": "tf-keras/tf_keras/utils/feature_space.py",
"repo_id": "tf-keras",
"token_count": 13168
} | 212 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the inspect module."""
import collections
import functools
import inspect as _inspect
import tensorflow.compat.v2 as tf
if hasattr(_inspect, "ArgSpec"):
ArgSpec = _inspect.ArgSpec
else:
ArgSpec = collections.namedtuple(
"ArgSpec",
[
"args",
"varargs",
"keywords",
"defaults",
],
)
if hasattr(_inspect, "FullArgSpec"):
FullArgSpec = _inspect.FullArgSpec
else:
FullArgSpec = collections.namedtuple(
"FullArgSpec",
[
"args",
"varargs",
"varkw",
"defaults",
"kwonlyargs",
"kwonlydefaults",
"annotations",
],
)
def _convert_maybe_argspec_to_fullargspec(argspec):
if isinstance(argspec, FullArgSpec):
return argspec
return FullArgSpec(
args=argspec.args,
varargs=argspec.varargs,
varkw=argspec.keywords,
defaults=argspec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
)
if hasattr(_inspect, "getfullargspec"):
_getfullargspec = _inspect.getfullargspec
def _getargspec(target):
"""A python3 version of getargspec.
Calls `getfullargspec` and assigns args, varargs,
varkw, and defaults to a python 2/3 compatible `ArgSpec`.
The parameter name 'varkw' is changed to 'keywords' to fit the
`ArgSpec` struct.
Args:
target: the target object to inspect.
Returns:
An ArgSpec with args, varargs, keywords, and defaults parameters
from FullArgSpec.
"""
fullargspecs = getfullargspec(target)
argspecs = ArgSpec(
args=fullargspecs.args,
varargs=fullargspecs.varargs,
keywords=fullargspecs.varkw,
defaults=fullargspecs.defaults,
)
return argspecs
else:
_getargspec = _inspect.getargspec
def _getfullargspec(target):
"""A python2 version of getfullargspec.
Args:
target: the target object to inspect.
Returns:
A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
"""
return _convert_maybe_argspec_to_fullargspec(getargspec(target))
def currentframe():
"""TFDecorator-aware replacement for inspect.currentframe."""
return _inspect.stack()[1][0]
def getargspec(obj):
"""TFDecorator-aware replacement for `inspect.getargspec`.
Note: `getfullargspec` is recommended as the python 2/3 compatible
replacement for this function.
Args:
obj: A function, partial function, or callable object, possibly decorated.
Returns:
The `ArgSpec` that describes the signature of the outermost decorator that
changes the callable's signature, or the `ArgSpec` that describes
the object if not decorated.
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
TypeError: For objects of unsupported types.
"""
if isinstance(obj, functools.partial):
return _get_argspec_for_partial(obj)
decorators, target = tf.__internal__.decorator.unwrap(obj)
spec = next(
(
d.decorator_argspec
for d in decorators
if d.decorator_argspec is not None
),
None,
)
if spec:
return spec
try:
# Python3 will handle most callables here (not partial).
return _getargspec(target)
except TypeError:
pass
if isinstance(target, type):
try:
return _getargspec(target.__init__)
except TypeError:
pass
try:
return _getargspec(target.__new__)
except TypeError:
pass
# The `type(target)` ensures that if a class is received we don't return
# the signature of its __call__ method.
return _getargspec(type(target).__call__)
def _get_argspec_for_partial(obj):
"""Implements `getargspec` for `functools.partial` objects.
Args:
obj: The `functools.partial` object
Returns:
An `inspect.ArgSpec`
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
"""
# When callable is a functools.partial object, we construct its ArgSpec with
# following strategy:
# - If callable partial contains default value for positional arguments (ie.
# object.args), then final ArgSpec doesn't contain those positional
# arguments.
# - If callable partial contains default value for keyword arguments (ie.
# object.keywords), then we merge them with wrapped target. Default values
# from callable partial takes precedence over those from wrapped target.
#
# However, there is a case where it is impossible to construct a valid
# ArgSpec. Python requires arguments that have no default values must be
# defined before those with default values. ArgSpec structure is only valid
# when this presumption holds true because default values are expressed as a
# tuple of values without keywords and they are always assumed to belong to
# last K arguments where K is number of default values present.
#
# Since functools.partial can give default value to any argument, this
# presumption may no longer hold in some cases. For example:
#
# def func(m, n):
# return 2 * m + n
# partialed = functools.partial(func, m=1)
#
# This example will result in m having a default value but n doesn't. This
# is usually not allowed in Python and can not be expressed in ArgSpec
# correctly.
#
# Thus, we must detect cases like this by finding first argument with
# default value and ensures all following arguments also have default
# values. When this is not true, a ValueError is raised.
n_prune_args = len(obj.args)
partial_keywords = obj.keywords or {}
args, varargs, keywords, defaults = getargspec(obj.func)
# Pruning first n_prune_args arguments.
args = args[n_prune_args:]
# Partial function may give default value to any argument, therefore length
# of default value list must be len(args) to allow each argument to
# potentially be given a default value.
no_default = object()
all_defaults = [no_default] * len(args)
if defaults:
all_defaults[-len(defaults) :] = defaults
# Fill in default values provided by partial function in all_defaults.
for kw, default in partial_keywords.items():
if kw in args:
idx = args.index(kw)
all_defaults[idx] = default
elif not keywords:
raise ValueError(
"Function does not have **kwargs parameter, but "
"contains an unknown partial keyword."
)
# Find first argument with default value set.
first_default = next(
(idx for idx, x in enumerate(all_defaults) if x is not no_default), None
)
# If no default values are found, return ArgSpec with defaults=None.
if first_default is None:
return ArgSpec(args, varargs, keywords, None)
# Checks if all arguments have default value set after first one.
invalid_default_values = [
args[i]
for i, j in enumerate(all_defaults)
if j is no_default and i > first_default
]
if invalid_default_values:
raise ValueError(
f"Some arguments {invalid_default_values} do not have "
"default value, but they are positioned after those with "
"default values. This can not be expressed with ArgSpec."
)
return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
def getfullargspec(obj):
"""TFDecorator-aware replacement for `inspect.getfullargspec`.
This wrapper emulates `inspect.getfullargspec` in[^)]* Python2.
Args:
obj: A callable, possibly decorated.
Returns:
The `FullArgSpec` that describes the signature of
the outermost decorator that changes the callable's signature. If the
callable is not decorated, `inspect.getfullargspec()` will be called
directly on the callable.
"""
decorators, target = tf.__internal__.decorator.unwrap(obj)
for d in decorators:
if d.decorator_argspec is not None:
return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
return _getfullargspec(target)
def getcallargs(*func_and_positional, **named):
"""TFDecorator-aware replacement for inspect.getcallargs.
Args:
*func_and_positional: A callable, possibly decorated, followed by any
positional arguments that would be passed to `func`.
**named: The named argument dictionary that would be passed to `func`.
Returns:
A dictionary mapping `func`'s named arguments to the values they would
receive if `func(*positional, **named)` were called.
`getcallargs` will use the argspec from the outermost decorator that
provides it. If no attached decorators modify argspec, the final unwrapped
target's argspec will be used.
"""
func = func_and_positional[0]
positional = func_and_positional[1:]
argspec = getfullargspec(func)
call_args = named.copy()
this = getattr(func, "im_self", None) or getattr(func, "__self__", None)
if ismethod(func) and this:
positional = (this,) + positional
remaining_positionals = [
arg for arg in argspec.args if arg not in call_args
]
call_args.update(dict(zip(remaining_positionals, positional)))
default_count = 0 if not argspec.defaults else len(argspec.defaults)
if default_count:
for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
if arg not in call_args:
call_args[arg] = value
if argspec.kwonlydefaults is not None:
for k, v in argspec.kwonlydefaults.items():
if k not in call_args:
call_args[k] = v
return call_args
def getframeinfo(*args, **kwargs):
return _inspect.getframeinfo(*args, **kwargs)
def getdoc(obj):
"""TFDecorator-aware replacement for inspect.getdoc.
Args:
obj: An object, possibly decorated.
Returns:
The docstring associated with the object.
The outermost-decorated object is intended to have the most complete
documentation, so the decorated parameter is not unwrapped.
"""
return _inspect.getdoc(obj)
def getfile(obj):
"""TFDecorator-aware replacement for inspect.getfile."""
unwrapped_object = tf.__internal__.decorator.unwrap(obj)[1]
# Work around for the case when object is a stack frame
# and only .pyc files are used. In this case, getfile
# might return incorrect path. So, we get the path from f_globals
# instead.
if (
hasattr(unwrapped_object, "f_globals")
and "__file__" in unwrapped_object.f_globals
):
return unwrapped_object.f_globals["__file__"]
return _inspect.getfile(unwrapped_object)
def getmembers(obj, predicate=None):
"""TFDecorator-aware replacement for inspect.getmembers."""
return _inspect.getmembers(obj, predicate)
def getmodule(obj):
"""TFDecorator-aware replacement for inspect.getmodule."""
return _inspect.getmodule(obj)
def getmro(cls):
"""TFDecorator-aware replacement for inspect.getmro."""
return _inspect.getmro(cls)
def getsource(obj):
"""TFDecorator-aware replacement for inspect.getsource."""
return _inspect.getsource(tf.__internal__.decorator.unwrap(obj)[1])
def getsourcefile(obj):
"""TFDecorator-aware replacement for inspect.getsourcefile."""
return _inspect.getsourcefile(tf.__internal__.decorator.unwrap(obj)[1])
def getsourcelines(obj):
"""TFDecorator-aware replacement for inspect.getsourcelines."""
return _inspect.getsourcelines(tf.__internal__.decorator.unwrap(obj)[1])
def isbuiltin(obj):
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf.__internal__.decorator.unwrap(obj)[1])
def isclass(obj):
"""TFDecorator-aware replacement for inspect.isclass."""
return _inspect.isclass(tf.__internal__.decorator.unwrap(obj)[1])
def isfunction(obj):
"""TFDecorator-aware replacement for inspect.isfunction."""
return _inspect.isfunction(tf.__internal__.decorator.unwrap(obj)[1])
def isframe(obj):
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.isframe(tf.__internal__.decorator.unwrap(obj)[1])
def isgenerator(obj):
"""TFDecorator-aware replacement for inspect.isgenerator."""
return _inspect.isgenerator(tf.__internal__.decorator.unwrap(obj)[1])
def isgeneratorfunction(obj):
"""TFDecorator-aware replacement for inspect.isgeneratorfunction."""
return _inspect.isgeneratorfunction(
tf.__internal__.decorator.unwrap(obj)[1]
)
def ismethod(obj):
"""TFDecorator-aware replacement for inspect.ismethod."""
return _inspect.ismethod(tf.__internal__.decorator.unwrap(obj)[1])
def ismodule(obj):
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.ismodule(tf.__internal__.decorator.unwrap(obj)[1])
def isroutine(obj):
"""TFDecorator-aware replacement for inspect.isroutine."""
return _inspect.isroutine(tf.__internal__.decorator.unwrap(obj)[1])
def stack(context=1):
"""TFDecorator-aware replacement for inspect.stack."""
return _inspect.stack(context)[1:]
| tf-keras/tf_keras/utils/tf_inspect.py/0 | {
"file_path": "tf-keras/tf_keras/utils/tf_inspect.py",
"repo_id": "tf-keras",
"token_count": 5332
} | 213 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import tensorflow as tf
from autokeras.engine import adapter as adapter_module
class InputAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to Input to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):
raise TypeError(
"Expect the data to Input to be numerical, but got "
"{type}.".format(type=x.dtype)
)
class ImageAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to ImageInput to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
if isinstance(x, np.ndarray) and not np.issubdtype(x.dtype, np.number):
raise TypeError(
"Expect the data to ImageInput to be numerical, but got "
"{type}.".format(type=x.dtype)
)
class TextAdapter(adapter_module.Adapter):
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data to TextInput to be numpy.ndarray or "
"tf.data.Dataset, but got {type}.".format(type=type(x))
)
class StructuredDataAdapter(adapter_module.Adapter):
def check(self, x):
if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):
raise TypeError(
"Unsupported type {type} for "
"{name}.".format(type=type(x), name=self.__class__.__name__)
)
def convert_to_dataset(self, dataset, batch_size):
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
if isinstance(dataset, np.ndarray) and dataset.dtype == object:
dataset = dataset.astype(str)
return super().convert_to_dataset(dataset, batch_size)
class TimeseriesAdapter(adapter_module.Adapter):
def __init__(self, lookback=None, **kwargs):
super().__init__(**kwargs)
self.lookback = lookback
def check(self, x):
"""Record any information needed by transform."""
if not isinstance(x, (pd.DataFrame, np.ndarray, tf.data.Dataset)):
raise TypeError(
"Expect the data in TimeseriesInput to be numpy.ndarray"
" or tf.data.Dataset or pd.DataFrame, but got {type}.".format(
type=type(x)
)
)
def convert_to_dataset(self, dataset, batch_size):
if isinstance(dataset, pd.DataFrame):
dataset = dataset.values
return super().convert_to_dataset(dataset, batch_size)
| autokeras/autokeras/adapters/input_adapters.py/0 | {
"file_path": "autokeras/autokeras/adapters/input_adapters.py",
"repo_id": "autokeras",
"token_count": 1617
} | 0 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Tuple
from typing import Union
from keras_tuner.engine import hyperparameters
from tensorflow import nest
from tensorflow.keras import layers
from autokeras import analysers
from autokeras import keras_layers
from autokeras.engine import block as block_module
from autokeras.utils import io_utils
from autokeras.utils import utils
class Normalization(block_module.Block):
"""Perform feature-wise normalization on data.
Refer to Normalization layer in keras preprocessing layers for more
information.
# Arguments
axis: Integer or tuple of integers, the axis or axes that should be
normalized (typically the features axis). We will normalize each
element in the specified axis. The default is '-1' (the innermost
axis); 0 (the batch axis) is not allowed.
"""
def __init__(self, axis: int = -1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
return layers.Normalization(axis=self.axis)(input_node)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
class TextToIntSequence(block_module.Block):
"""Convert raw texts to sequences of word indices.
# Arguments
output_sequence_length: Int. The maximum length of a sentence. If
unspecified, it would be tuned automatically.
max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
"""
def __init__(
self,
output_sequence_length: Optional[int] = None,
max_tokens: int = 20000,
**kwargs
):
super().__init__(**kwargs)
self.output_sequence_length = output_sequence_length
self.max_tokens = max_tokens
def get_config(self):
config = super().get_config()
config.update(
{
"output_sequence_length": self.output_sequence_length,
"max_tokens": self.max_tokens,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
if self.output_sequence_length is not None:
output_sequence_length = self.output_sequence_length
else:
output_sequence_length = hp.Choice(
"output_sequence_length", [64, 128, 256, 512], default=64
)
output_node = layers.TextVectorization(
max_tokens=self.max_tokens,
output_mode="int",
output_sequence_length=output_sequence_length,
)(input_node)
return output_node
class TextToNgramVector(block_module.Block):
"""Convert raw texts to n-gram vectors.
# Arguments
max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
ngrams: Int or tuple of ints. Passing an integer will create ngrams up
to that integer, and passing a tuple of integers will create ngrams
for the specified values in the tuple. If left unspecified, it will
be tuned automatically.
"""
def __init__(
self,
max_tokens: int = 20000,
ngrams: Union[int, Tuple[int], None] = None,
**kwargs
):
super().__init__(**kwargs)
self.max_tokens = max_tokens
self.ngrams = ngrams
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
if self.ngrams is not None:
ngrams = self.ngrams
else:
ngrams = hp.Int("ngrams", min_value=1, max_value=2, default=2)
return layers.TextVectorization(
max_tokens=self.max_tokens,
ngrams=ngrams,
output_mode="tf-idf",
pad_to_max_tokens=True,
)(input_node)
def get_config(self):
config = super().get_config()
config.update({"max_tokens": self.max_tokens, "ngrams": self.ngrams})
return config
class ImageAugmentation(block_module.Block):
"""Collection of various image augmentation methods.
# Arguments
translation_factor: A positive float represented as fraction value, or a
tuple of 2 representing fraction for translation vertically and
horizontally, or a kerastuner.engine.hyperparameters.Choice range
of positive floats. For instance, `translation_factor=0.2` result
in a random translation factor within 20% of the width and height.
If left unspecified, it will be tuned automatically.
vertical_flip: Boolean. Whether to flip the image vertically.
If left unspecified, it will be tuned automatically.
horizontal_flip: Boolean. Whether to flip the image horizontally.
If left unspecified, it will be tuned automatically.
rotation_factor: Float or kerastuner.engine.hyperparameters.Choice range
between [0, 1]. A positive float represented as fraction of 2pi
upper bound for rotating clockwise and counter-clockwise. When
represented as a single float, lower = upper.
If left unspecified, it will be tuned automatically.
zoom_factor: A positive float represented as fraction value, or a tuple
of 2 representing fraction for zooming vertically and horizontally,
or a kerastuner.engine.hyperparameters.Choice range of positive
floats. For instance, `zoom_factor=0.2` result in a random zoom
factor from 80% to 120%. If left unspecified, it will be tuned
automatically.
contrast_factor: A positive float represented as fraction of value, or a
tuple of size 2 representing lower and upper bound, or a
kerastuner.engine.hyperparameters.Choice range of floats to find the
optimal value. When represented as a single float, lower = upper.
The contrast factor will be randomly picked
between [1.0 - lower, 1.0 + upper]. If left unspecified, it will be
tuned automatically.
"""
def __init__(
self,
translation_factor: Optional[
Union[float, Tuple[float, float], hyperparameters.Choice]
] = None,
vertical_flip: Optional[bool] = None,
horizontal_flip: Optional[bool] = None,
rotation_factor: Optional[Union[float, hyperparameters.Choice]] = None,
zoom_factor: Optional[
Union[float, Tuple[float, float], hyperparameters.Choice]
] = None,
contrast_factor: Optional[
Union[float, Tuple[float, float], hyperparameters.Choice]
] = None,
**kwargs
):
super().__init__(**kwargs)
self.translation_factor = utils.get_hyperparameter(
translation_factor,
hyperparameters.Choice("translation_factor", [0.0, 0.1]),
Union[float, Tuple[float, float]],
)
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rotation_factor = utils.get_hyperparameter(
rotation_factor,
hyperparameters.Choice("rotation_factor", [0.0, 0.1]),
float,
)
self.zoom_factor = utils.get_hyperparameter(
zoom_factor,
hyperparameters.Choice("zoom_factor", [0.0, 0.1]),
Union[float, Tuple[float, float]],
)
self.contrast_factor = utils.get_hyperparameter(
contrast_factor,
hyperparameters.Choice("contrast_factor", [0.0, 0.1]),
Union[float, Tuple[float, float]],
)
@staticmethod
def _get_fraction_value(value):
if isinstance(value, tuple):
return value
return value, value
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
# Translate
translation_factor = utils.add_to_hp(self.translation_factor, hp)
if translation_factor not in [0, (0, 0)]:
height_factor, width_factor = self._get_fraction_value(
translation_factor
)
output_node = layers.RandomTranslation(height_factor, width_factor)(
output_node
)
# Flip
horizontal_flip = self.horizontal_flip
if horizontal_flip is None:
horizontal_flip = hp.Boolean("horizontal_flip", default=True)
vertical_flip = self.vertical_flip
if self.vertical_flip is None:
vertical_flip = hp.Boolean("vertical_flip", default=True)
if not horizontal_flip and not vertical_flip:
flip_mode = ""
elif horizontal_flip and vertical_flip:
flip_mode = "horizontal_and_vertical"
elif horizontal_flip and not vertical_flip:
flip_mode = "horizontal"
elif not horizontal_flip and vertical_flip:
flip_mode = "vertical"
if flip_mode != "":
output_node = layers.RandomFlip(mode=flip_mode)(output_node)
# Rotate
rotation_factor = utils.add_to_hp(self.rotation_factor, hp)
if rotation_factor != 0:
output_node = layers.RandomRotation(rotation_factor)(output_node)
# Zoom
zoom_factor = utils.add_to_hp(self.zoom_factor, hp)
if zoom_factor not in [0, (0, 0)]:
height_factor, width_factor = self._get_fraction_value(zoom_factor)
# TODO: Add back RandomZoom when it is ready.
# output_node = layers.RandomZoom(
# height_factor, width_factor)(output_node)
# Contrast
contrast_factor = utils.add_to_hp(self.contrast_factor, hp)
if contrast_factor not in [0, (0, 0)]:
output_node = layers.RandomContrast(contrast_factor)(output_node)
return output_node
def get_config(self):
config = super().get_config()
config.update(
{
"translation_factor": io_utils.serialize_block_arg(
self.translation_factor
),
"horizontal_flip": self.horizontal_flip,
"vertical_flip": self.vertical_flip,
"rotation_factor": io_utils.serialize_block_arg(
self.rotation_factor
),
"zoom_factor": io_utils.serialize_block_arg(self.zoom_factor),
"contrast_factor": io_utils.serialize_block_arg(
self.contrast_factor
),
}
)
return config
@classmethod
def from_config(cls, config):
config["translation_factor"] = io_utils.deserialize_block_arg(
config["translation_factor"]
)
config["rotation_factor"] = io_utils.deserialize_block_arg(
config["rotation_factor"]
)
config["zoom_factor"] = io_utils.deserialize_block_arg(
config["zoom_factor"]
)
config["contrast_factor"] = io_utils.deserialize_block_arg(
config["contrast_factor"]
)
return cls(**config)
class CategoricalToNumerical(block_module.Block):
"""Encode the categorical features to numerical features."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.column_types = None
self.column_names = None
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
encoding = []
for column_name in self.column_names:
column_type = self.column_types[column_name]
if column_type == analysers.CATEGORICAL:
# TODO: Search to use one-hot or int.
encoding.append(keras_layers.INT)
else:
encoding.append(keras_layers.NONE)
return keras_layers.MultiCategoryEncoding(encoding)(input_node)
@classmethod
def from_config(cls, config):
column_types = config.pop("column_types")
column_names = config.pop("column_names")
instance = cls(**config)
instance.column_types = column_types
instance.column_names = column_names
return instance
def get_config(self):
config = super().get_config()
config.update(
{
"column_types": self.column_types,
"column_names": self.column_names,
}
)
return config
| autokeras/autokeras/blocks/preprocessing.py/0 | {
"file_path": "autokeras/autokeras/blocks/preprocessing.py",
"repo_id": "autokeras",
"token_count": 5619
} | 1 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autokeras.engine import block as block_module
class IOHyperModel(block_module.Block):
"""A mixin class connecting the input nodes and heads with the adapters.
This class is extended by the input nodes and the heads. The AutoModel calls
the functions to get the corresponding adapters and pass the information
back to the input nodes and heads.
"""
def __init__(self, shape=None, **kwargs):
super().__init__(**kwargs)
self.shape = shape
self.data_shape = None
self.dtype = None
self.batch_size = None
self.num_samples = None
def get_analyser(self):
"""Get the corresponding Analyser.
# Returns
An instance of a subclass of autokeras.engine.Analyser.
"""
raise NotImplementedError
def get_adapter(self):
"""Get the corresponding Adapter.
# Returns
An instance of a subclass of autokeras.engine.Adapter.
"""
raise NotImplementedError
def config_from_analyser(self, analyser):
"""Load the learned information on dataset from the Analyser.
# Arguments
adapter: An instance of a subclass of autokeras.engine.Adapter.
"""
self.data_shape = analyser.shape
self.dtype = analyser.dtype
self.batch_size = analyser.batch_size
self.num_samples = analyser.num_samples
def get_hyper_preprocessors(self):
"""Construct a list of HyperPreprocessors based on learned information.
# Returns
A list of HyperPreprocessors for the corresponding data.
"""
raise NotImplementedError
def get_config(self):
config = super().get_config()
config.update({"shape": self.shape})
return config
| autokeras/autokeras/engine/io_hypermodel.py/0 | {
"file_path": "autokeras/autokeras/engine/io_hypermodel.py",
"repo_id": "autokeras",
"token_count": 853
} | 2 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import tensorflow as tf
from tensorflow import keras
from tensorflow import nest
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
from autokeras.utils import data_utils
INT = "int"
NONE = "none"
ONE_HOT = "one-hot"
@keras.utils.register_keras_serializable()
class CastToFloat32(preprocessing.PreprocessingLayer):
def get_config(self):
return super().get_config()
def call(self, inputs):
return data_utils.cast_to_float32(inputs)
def adapt(self, data):
return
@keras.utils.register_keras_serializable()
class ExpandLastDim(preprocessing.PreprocessingLayer):
def get_config(self):
return super().get_config()
def call(self, inputs):
return tf.expand_dims(inputs, axis=-1)
def adapt(self, data):
return
@keras.utils.register_keras_serializable()
class MultiCategoryEncoding(preprocessing.PreprocessingLayer):
"""Encode the categorical features to numerical features.
# Arguments
encoding: A list of strings, which has the same number of elements as
the columns in the structured data. Each of the strings specifies
the encoding method used for the corresponding column. Use 'int' for
categorical columns and 'none' for numerical columns.
"""
# TODO: Support one-hot encoding.
# TODO: Support frequency encoding.
def __init__(self, encoding: List[str], **kwargs):
super().__init__(**kwargs)
self.encoding = encoding
self.encoding_layers = []
for encoding in self.encoding:
if encoding == NONE:
self.encoding_layers.append(None)
elif encoding == INT:
# Set a temporary vocabulary to prevent the error of no
# vocabulary when calling the layer to build the model. The
# vocabulary would be reset by adapting the layer later.
self.encoding_layers.append(layers.StringLookup())
elif encoding == ONE_HOT:
self.encoding_layers.append(None)
def build(self, input_shape):
for encoding_layer in self.encoding_layers:
if encoding_layer is not None:
encoding_layer.build(tf.TensorShape([1]))
def call(self, inputs):
input_nodes = nest.flatten(inputs)[0]
split_inputs = tf.split(input_nodes, [1] * len(self.encoding), axis=-1)
output_nodes = []
for input_node, encoding_layer in zip(
split_inputs, self.encoding_layers
):
if encoding_layer is None:
number = data_utils.cast_to_float32(input_node)
# Replace NaN with 0.
imputed = tf.where(
tf.math.is_nan(number), tf.zeros_like(number), number
)
output_nodes.append(imputed)
else:
output_nodes.append(
data_utils.cast_to_float32(
encoding_layer(data_utils.cast_to_string(input_node))
)
)
if len(output_nodes) == 1:
return output_nodes[0]
return layers.Concatenate()(output_nodes)
def adapt(self, data):
for index, encoding_layer in enumerate(self.encoding_layers):
if encoding_layer is None:
continue
data_column = data.map(lambda x: tf.slice(x, [0, index], [-1, 1]))
encoding_layer.adapt(data_column.map(data_utils.cast_to_string))
def get_config(self):
config = {
"encoding": self.encoding,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras.utils.register_keras_serializable()
class WarmUp(keras.optimizers.schedules.LearningRateSchedule):
"""official.nlp.optimization.WarmUp"""
def __init__(
self,
initial_learning_rate,
decay_schedule_fn,
warmup_steps,
power=1.0,
name=None,
):
super(WarmUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "WarmUp") as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps,
# the learning rate will be
# `global_step/num_warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = self.initial_learning_rate * tf.math.pow(
warmup_percent_done, self.power
)
return tf.cond(
global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name,
)
def get_config(self):
return { # pragma: no cover
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
| autokeras/autokeras/keras_layers.py/0 | {
"file_path": "autokeras/autokeras/keras_layers.py",
"repo_id": "autokeras",
"token_count": 2653
} | 3 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import autokeras as ak
from autokeras import test_utils
@mock.patch("autokeras.AutoModel.fit")
@mock.patch("autokeras.AutoModel.evaluate")
def test_tsf_evaluate_call_automodel_evaluate(evaluate, fit, tmp_path):
auto_model = ak.TimeseriesForecaster(
lookback=10, directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
auto_model.evaluate(x=test_utils.TRAIN_CSV_PATH, y="survived")
assert evaluate.is_called
@mock.patch("autokeras.AutoModel.fit")
@mock.patch("autokeras.AutoModel.predict")
def test_tsf_predict_call_automodel_predict(predict, fit, tmp_path):
auto_model = ak.TimeseriesForecaster(
lookback=10, directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
auto_model.predict(x=test_utils.TRAIN_CSV_PATH, y="survived")
assert predict.is_called
@mock.patch("autokeras.AutoModel.fit")
@mock.patch("autokeras.AutoModel.predict")
def test_tsf_predict_call_automodel_predict_fails(predict, fit, tmp_path):
auto_model = ak.TimeseriesForecaster(
lookback=10, directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(x=test_utils.TRAIN_CSV_PATH, y="survived")
# Predict data doesn't contain train time steps
try:
auto_model.predict(x=test_utils.TEST_CSV_PATH, y="survived")
except ValueError as e:
assert fit.is_called
assert "The prediction data requires the original training data to make"
" predictions on subsequent data points" in str(e)
@mock.patch("autokeras.AutoModel.fit")
def test_tsf_fit_call_automodel_fit(fit, tmp_path):
auto_model = ak.TimeseriesForecaster(
lookback=10, directory=tmp_path, seed=test_utils.SEED
)
auto_model.fit(
x=test_utils.TRAIN_CSV_PATH,
y="survived",
validation_data=(test_utils.TRAIN_CSV_PATH, "survived"),
)
assert fit.is_called
| autokeras/autokeras/tasks/time_series_forecaster_test.py/0 | {
"file_path": "autokeras/autokeras/tasks/time_series_forecaster_test.py",
"repo_id": "autokeras",
"token_count": 974
} | 4 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import multiprocessing
import os
from typing import Optional
from typing import Tuple
import numpy as np
import tensorflow as tf
from keras_tuner.engine import hyperparameters
WHITELIST_FORMATS = (".bmp", ".gif", ".jpeg", ".jpg", ".png")
def save_json(path, obj):
obj = json.dumps(obj)
with tf.io.gfile.GFile(path, "w") as f:
f.write(obj)
def load_json(path):
with tf.io.gfile.GFile(path, "r") as f:
obj = f.read()
return json.loads(obj)
def index_directory(
directory,
labels,
formats,
class_names=None,
shuffle=True,
seed=None,
follow_links=False,
):
"""Make list of all files in the subdirs of `directory`, with their labels.
# Arguments
directory: The target directory (string).
labels: Either "inferred"
(labels are generated from the directory structure),
None (no labels),
or a list/tuple of integer labels of the same size as the number of
valid files found in the directory. Labels should be sorted according
to the alphanumeric order of the image file paths
(obtained via `os.walk(directory)` in Python).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
class_names: Only valid if "labels" is "inferred". This is the explicit
list of class names (must match names of subdirectories). Used
to control the order of the classes
(otherwise alphanumerical order is used).
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling.
follow_links: Whether to visits subdirectories pointed to by symlinks.
# Returns
tuple (file_paths, labels, class_names).
file_paths: list of file paths (strings).
labels: list of matching integer labels (same length as file_paths)
class_names: names of the classes corresponding to these labels, in
order.
"""
subdirs = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
subdirs.append(subdir)
if not class_names:
class_names = subdirs
else:
if set(class_names) != set(subdirs): # pragma: no cover
raise ValueError( # pragma: no cover
"The `class_names` passed did not match the "
"names of the subdirectories of the target directory. "
"Expected: %s, but received: %s" % (subdirs, class_names)
)
class_indices = dict(zip(class_names, range(len(class_names))))
# Build an index of the files
# in the different class subfolders.
pool = multiprocessing.pool.ThreadPool()
results = []
filenames = []
for dirpath in (os.path.join(directory, subdir) for subdir in subdirs):
results.append(
pool.apply_async(
index_subdirectory,
(dirpath, class_indices, follow_links, formats),
)
)
labels_list = []
for res in results:
partial_filenames, partial_labels = res.get()
labels_list.append(partial_labels)
filenames += partial_filenames
i = 0
labels = np.zeros((len(filenames),), dtype="int32")
for partial_labels in labels_list:
labels[i : i + len(partial_labels)] = partial_labels
i += len(partial_labels)
print(
"Found %d files belonging to %d classes."
% (len(filenames), len(class_names))
)
pool.close()
pool.join()
file_paths = [os.path.join(directory, fname) for fname in filenames]
if shuffle:
# Shuffle globally to erase macro-structure
if seed is None:
seed = np.random.randint(1e6) # pragma: no cover
rng = np.random.RandomState(seed)
rng.shuffle(file_paths)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
return file_paths, labels, class_names
def iter_valid_files(directory, follow_links, formats):
walk = os.walk(directory, followlinks=follow_links)
for root, _, files in sorted(walk, key=lambda x: x[0]):
for fname in sorted(files):
if fname.lower().endswith(formats):
yield root, fname
def index_subdirectory(directory, class_indices, follow_links, formats):
"""Recursively walks directory and list image paths and their class index.
# Arguments
directory: string, target directory.
class_indices: dict mapping class names to their index.
follow_links: boolean, whether to recursively follow subdirectories
(if False, we only list top-level images in `directory`).
formats: Allowlist of file extensions to index (e.g. ".jpg", ".txt").
# Returns
tuple `(filenames, labels)`. `filenames` is a list of relative file
paths, and `labels` is a list of integer labels corresponding to these
files.
"""
dirname = os.path.basename(directory)
valid_files = iter_valid_files(directory, follow_links, formats)
labels = []
filenames = []
for root, fname in valid_files:
labels.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory)
)
filenames.append(relative_path)
return filenames, labels
def get_training_or_validation_split(samples, labels, validation_split, subset):
"""Potentially restict samples & labels to a training or validation split.
# Arguments
samples: List of elements.
labels: List of corresponding labels.
validation_split: Float, fraction of data to reserve for validation.
subset: Subset of the data to return.
Either "training", "validation", or None.
If None, we return all of the data.
# Returns
tuple (samples, labels), potentially restricted to the specified subset.
"""
if not validation_split:
return samples, labels
num_val_samples = int(validation_split * len(samples))
if subset == "training":
print(
"Using %d files for training." % (len(samples) - num_val_samples,)
)
samples = samples[:-num_val_samples]
labels = labels[:-num_val_samples]
elif subset == "validation":
print("Using %d files for validation." % (num_val_samples,))
samples = samples[-num_val_samples:]
labels = labels[-num_val_samples:]
else:
raise ValueError(
'`subset` must be either "training" '
'or "validation", received: %s' % (subset,)
)
return samples, labels
def text_dataset_from_directory(
directory: str,
batch_size: int = 32,
max_length: Optional[int] = None,
shuffle: bool = True,
seed: Optional[int] = None,
validation_split: Optional[float] = None,
subset: Optional[str] = None,
) -> tf.data.Dataset:
"""Generates a `tf.data.Dataset` from text files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
```
Then calling `text_dataset_from_directory(main_directory)`
will return a `tf.data.Dataset` that yields batches of texts from
the subdirectories `class_a` and `class_b`, together with labels
'class_a' and 'class_b'.
Only `.txt` files are supported at this time.
# Arguments
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing text files for a class.
Otherwise, the directory structure is ignored.
batch_size: Size of the batches of data. Defaults to 32.
max_length: Maximum size of a text string. Texts longer than this will
be truncated to `max_length`.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
# Returns
A `tf.data.Dataset` object, which yields a tuple `(texts, labels)`,
where both has shape `(batch_size,)` and type of tf.string.
"""
if seed is None:
seed = np.random.randint(1e6)
file_paths, labels, class_names = index_directory(
directory, "inferred", formats=(".txt",), shuffle=shuffle, seed=seed
)
file_paths, labels = get_training_or_validation_split(
file_paths, labels, validation_split, subset
)
strings = tf.data.Dataset.from_tensor_slices(file_paths)
strings = strings.map(tf.io.read_file)
if max_length is not None:
strings = strings.map(lambda x: tf.strings.substr(x, 0, max_length))
labels = np.array(class_names)[np.array(labels)]
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((strings, labels))
dataset = dataset.batch(batch_size)
return dataset
def image_dataset_from_directory(
directory: str,
batch_size: int = 32,
color_mode: str = "rgb",
image_size: Tuple[int, int] = (256, 256),
interpolation: str = "bilinear",
shuffle: bool = True,
seed: Optional[int] = None,
validation_split: Optional[float] = None,
subset: Optional[str] = None,
) -> tf.data.Dataset:
"""Generates a `tf.data.Dataset` from image files in a directory.
If your directory structure is:
```
main_directory/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
```
Then calling `image_dataset_from_directory(main_directory)`
will return a `tf.data.Dataset` that yields batches of images from
the subdirectories `class_a` and `class_b`, together with labels
'class_a' and 'class_b'.
Supported image formats: jpeg, png, bmp, gif.
Animated gifs are truncated to the first frame.
# Arguments
directory: Directory where the data is located.
If `labels` is "inferred", it should contain
subdirectories, each containing images for a class.
Otherwise, the directory structure is ignored.
batch_size: Size of the batches of data. Default: 32.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
Whether the images will be converted to
have 1, 3, or 4 channels.
image_size: Size to resize images to after they are read from disk.
Defaults to `(256, 256)`.
Since the pipeline processes batches of images that must all have
the same size, this must be provided.
interpolation: String, the interpolation method used when resizing
images. Defaults to `bilinear`. Supports `bilinear`, `nearest`,
`bicubic`, `area`, `lanczos3`, `lanczos5`, `gaussian`,
`mitchellcubic`.
shuffle: Whether to shuffle the data. Default: True.
If set to False, sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
validation_split: Optional float between 0 and 1,
fraction of data to reserve for validation.
subset: One of "training" or "validation".
Only used if `validation_split` is set.
# Returns
A `tf.data.Dataset` object, which yields a tuple `(texts, labels)`,
where `images` has shape `(batch_size, image_size[0], image_size[1],
num_channels)` where `labels` has shape `(batch_size,)` and type of
tf.string.
- if `color_mode` is `grayscale`, there's 1 channel in the image
tensors.
- if `color_mode` is `rgb`, there are 3 channel in the image tensors.
- if `color_mode` is `rgba`, there are 4 channel in the image tensors.
"""
if color_mode == "rgb":
num_channels = 3
elif color_mode == "rgba":
num_channels = 4
elif color_mode == "grayscale":
num_channels = 1
else:
raise ValueError(
'`color_mode` must be one of {"rbg", "rgba", "grayscale"}. '
"Received: %s" % (color_mode,)
)
if seed is None:
seed = np.random.randint(1e6)
image_paths, labels, class_names = index_directory(
directory,
"inferred",
formats=WHITELIST_FORMATS,
shuffle=shuffle,
seed=seed,
)
image_paths, labels = get_training_or_validation_split(
image_paths, labels, validation_split, subset
)
images = tf.data.Dataset.from_tensor_slices(image_paths)
images = images.map(
lambda img: path_to_image(img, num_channels, image_size, interpolation)
)
labels = np.array(class_names)[np.array(labels)]
labels = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((images, labels))
dataset = dataset.batch(batch_size)
return dataset
def path_to_image(image, num_channels, image_size, interpolation):
image = tf.io.read_file(image)
image = tf.io.decode_image(
image, channels=num_channels, expand_animations=False
)
image = tf.image.resize(image, image_size, method=interpolation)
image.set_shape((image_size[0], image_size[1], num_channels))
return image
def deserialize_block_arg(arg):
if isinstance(arg, dict):
return hyperparameters.deserialize(arg)
return arg
def serialize_block_arg(arg):
if isinstance(arg, hyperparameters.HyperParameter):
return hyperparameters.serialize(arg)
return arg
| autokeras/autokeras/utils/io_utils.py/0 | {
"file_path": "autokeras/autokeras/utils/io_utils.py",
"repo_id": "autokeras",
"token_count": 5753
} | 5 |
FROM tensorflow/tensorflow:2.3.0
WORKDIR /opt/autokeras
COPY . .
RUN python -m pip install --no-cache-dir --editable .
WORKDIR /work
| autokeras/docker/Dockerfile/0 | {
"file_path": "autokeras/docker/Dockerfile",
"repo_id": "autokeras",
"token_count": 54
} | 6 |
<jupyter_start><jupyter_code>!pip install autokeras
import os
import numpy as np
import tensorflow as tf
from sklearn.datasets import load_files
import autokeras as ak<jupyter_output><empty_output><jupyter_text>A Simple ExampleThe first step is to prepare your data. Here we use the [IMDBdataset](https://keras.io/datasets/imdb-movie-reviews-sentiment-classification)as an example.<jupyter_code>dataset = tf.keras.utils.get_file(
fname="aclImdb.tar.gz",
origin="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
extract=True,
)
# set path to dataset
IMDB_DATADIR = os.path.join(os.path.dirname(dataset), "aclImdb")
classes = ["pos", "neg"]
train_data = load_files(
os.path.join(IMDB_DATADIR, "train"), shuffle=True, categories=classes
)
test_data = load_files(
os.path.join(IMDB_DATADIR, "test"), shuffle=False, categories=classes
)
x_train = np.array(train_data.data)
y_train = np.array(train_data.target)
x_test = np.array(test_data.data)
y_test = np.array(test_data.target)
print(x_train.shape) # (25000,)
print(y_train.shape) # (25000, 1)
print(x_train[0][:50]) # this film was just brilliant casting<jupyter_output><empty_output><jupyter_text>The second step is to run the [TextClassifier](/text_classifier). As a quickdemo, we set epochs to 2. You can also leave the epochs unspecified for anadaptive number of epochs.<jupyter_code># Initialize the text classifier.
clf = ak.TextClassifier(
overwrite=True, max_trials=1
) # It only tries 1 model as a quick demo.
# Feed the text classifier with training data.
clf.fit(x_train, y_train, epochs=2)
# Predict with the best model.
predicted_y = clf.predict(x_test)
# Evaluate the best model with testing data.
print(clf.evaluate(x_test, y_test))<jupyter_output><empty_output><jupyter_text>Validation DataBy default, AutoKeras use the last 20% of training data as validation data. Asshown in the example below, you can use `validation_split` to specify thepercentage.<jupyter_code>clf.fit(
x_train,
y_train,
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
)<jupyter_output><empty_output><jupyter_text>You can also use your own validation set instead of splitting it from thetraining data with `validation_data`.<jupyter_code>split = 5000
x_val = x_train[split:]
y_val = y_train[split:]
x_train = x_train[:split]
y_train = y_train[:split]
clf.fit(
x_train,
y_train,
epochs=2,
# Use your own validation set.
validation_data=(x_val, y_val),
)<jupyter_output><empty_output><jupyter_text>Customized Search SpaceFor advanced users, you may customize your search space by using[AutoModel](/auto_model/automodel-class) instead of[TextClassifier](/text_classifier). You can configure the[TextBlock](/block/textblock-class) for some high-level configurations, e.g.,`vectorizer` for the type of text vectorization method to use. You can use'sequence', which uses [TextToInteSequence](/block/texttointsequence-class) toconvert the words to integers and use [Embedding](/block/embedding-class) forembedding the integer sequences, or you can use 'ngram', which uses[TextToNgramVector](/block/texttongramvector-class) to vectorize thesentences. You can also do not specify these arguments, which would leave thedifferent choices to be tuned automatically. See the following example fordetail.<jupyter_code>input_node = ak.TextInput()
output_node = ak.TextBlock(block_type="ngram")(input_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=2)<jupyter_output><empty_output><jupyter_text>The usage of [AutoModel](/auto_model/automodel-class) is similar to the[functional API](https://www.tensorflow.org/guide/keras/functional) of Keras.Basically, you are building a graph, whose edges are blocks and the nodes areintermediate outputs of blocks. To add an edge from `input_node` to`output_node` with `output_node = ak.[some_block]([block_args])(input_node)`.You can even also use more fine grained blocks to customize the search spaceeven further. See the following example.<jupyter_code>input_node = ak.TextInput()
output_node = ak.TextToIntSequence()(input_node)
output_node = ak.Embedding()(output_node)
# Use separable Conv layers in Keras.
output_node = ak.ConvBlock(separable=True)(output_node)
output_node = ak.ClassificationHead()(output_node)
clf = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
clf.fit(x_train, y_train, epochs=2)<jupyter_output><empty_output><jupyter_text>Data FormatThe AutoKeras TextClassifier is quite flexible for the data format.For the text, the input data should be one-dimensional For the classificationlabels, AutoKeras accepts both plain labels, i.e. strings or integers, andone-hot encoded encoded labels, i.e. vectors of 0s and 1s.We also support using [tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset?version=stable)format for the training data.<jupyter_code>train_set = tf.data.Dataset.from_tensor_slices(((x_train,), (y_train,))).batch(
32
)
test_set = tf.data.Dataset.from_tensor_slices(((x_test,), (y_test,))).batch(32)
clf = ak.TextClassifier(overwrite=True, max_trials=2)
# Feed the tensorflow Dataset to the classifier.
clf.fit(train_set, epochs=2)
# Predict with the best model.
predicted_y = clf.predict(test_set)
# Evaluate the best model with testing data.
print(clf.evaluate(test_set))<jupyter_output><empty_output> | autokeras/docs/ipynb/text_classification.ipynb/0 | {
"file_path": "autokeras/docs/ipynb/text_classification.ipynb",
"repo_id": "autokeras",
"token_count": 1923
} | 7 |
"""shell
pip install autokeras
"""
import numpy as np
import autokeras as ak
"""
In this tutorial we are making use of the
[AutoModel](/auto_model/#automodel-class)
API to show how to handle multi-modal data and multi-task.
## What is multi-modal?
Multi-modal data means each data instance has multiple forms of information.
For example, a photo can be saved as a image. Besides the image, it may also
have when and where it was taken as its attributes, which can be represented as
structured data.
## What is multi-task?
Multi-task here we refer to we want to predict multiple targets with the same
input features. For example, we not only want to classify an image according to
its content, but we also want to regress its quality as a float number between
0 and 1.
The following diagram shows an example of multi-modal and multi-task neural
network model.
<div class="mermaid">
graph TD
id1(ImageInput) --> id3(Some Neural Network Model)
id2(StructuredDataInput) --> id3
id3 --> id4(ClassificationHead)
id3 --> id5(RegressionHead)
</div>
It has two inputs the images and the structured data. Each image is associated
with a set of attributes in the structured data. From these data, we are trying
to predict the classification label and the regression value at the same time.
## Data Preparation
To illustrate our idea, we generate some random image and structured data as
the multi-modal data.
"""
num_instances = 100
# Generate image data.
image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32)
# Generate structured data.
structured_data = np.random.rand(num_instances, 20).astype(np.float32)
"""
We also generate some multi-task targets for classification and regression.
"""
# Generate regression targets.
regression_target = np.random.rand(num_instances, 1).astype(np.float32)
# Generate classification labels of five classes.
classification_target = np.random.randint(5, size=num_instances)
"""
## Build and Train the Model
Then we initialize the multi-modal and multi-task model with
[AutoModel](/auto_model/#automodel-class).
Since this is just a demo, we use small amount of `max_trials` and `epochs`.
"""
# Initialize the multi with multiple inputs and outputs.
model = ak.AutoModel(
inputs=[ak.ImageInput(), ak.StructuredDataInput()],
outputs=[
ak.RegressionHead(metrics=["mae"]),
ak.ClassificationHead(
loss="categorical_crossentropy", metrics=["accuracy"]
),
],
overwrite=True,
max_trials=2,
)
# Fit the model with prepared data.
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
epochs=3,
)
"""
## Validation Data
By default, AutoKeras use the last 20% of training data as validation data.
As shown in the example below, you can use `validation_split` to specify the
percentage.
"""
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
# Split the training data and use the last 15% as validation data.
validation_split=0.15,
epochs=2,
)
"""
You can also use your own validation set
instead of splitting it from the training data with `validation_data`.
"""
split = 20
image_val = image_data[split:]
structured_val = structured_data[split:]
regression_val = regression_target[split:]
classification_val = classification_target[split:]
image_data = image_data[:split]
structured_data = structured_data[:split]
regression_target = regression_target[:split]
classification_target = classification_target[:split]
model.fit(
[image_data, structured_data],
[regression_target, classification_target],
# Use your own validation set.
validation_data=(
[image_val, structured_val],
[regression_val, classification_val],
),
epochs=2,
)
"""
## Customized Search Space
You can customize your search space.
The following figure shows the search space we want to define.
<div class="mermaid">
graph LR
id1(ImageInput) --> id2(Normalization)
id2 --> id3(Image Augmentation)
id3 --> id4(Convolutional)
id3 --> id5(ResNet V2)
id4 --> id6(Merge)
id5 --> id6
id7(StructuredDataInput) --> id8(CategoricalToNumerical)
id8 --> id9(DenseBlock)
id6 --> id10(Merge)
id9 --> id10
id10 --> id11(Classification Head)
id10 --> id12(Regression Head)
</div>
"""
input_node1 = ak.ImageInput()
output_node = ak.Normalization()(input_node1)
output_node = ak.ImageAugmentation()(output_node)
output_node1 = ak.ConvBlock()(output_node)
output_node2 = ak.ResNetBlock(version="v2")(output_node)
output_node1 = ak.Merge()([output_node1, output_node2])
input_node2 = ak.StructuredDataInput()
output_node = ak.CategoricalToNumerical()(input_node2)
output_node2 = ak.DenseBlock()(output_node)
output_node = ak.Merge()([output_node1, output_node2])
output_node1 = ak.ClassificationHead()(output_node)
output_node2 = ak.RegressionHead()(output_node)
auto_model = ak.AutoModel(
inputs=[input_node1, input_node2],
outputs=[output_node1, output_node2],
overwrite=True,
max_trials=2,
)
image_data = np.random.rand(num_instances, 32, 32, 3).astype(np.float32)
structured_data = np.random.rand(num_instances, 20).astype(np.float32)
regression_target = np.random.rand(num_instances, 1).astype(np.float32)
classification_target = np.random.randint(5, size=num_instances)
auto_model.fit(
[image_data, structured_data],
[classification_target, regression_target],
batch_size=32,
epochs=3,
)
"""
## Data Format
You can refer to the documentation of
[ImageInput](/node/#imageinput-class),
[StructuredDataInput](/node/#structureddatainput-class),
[TextInput](/node/#textinput-class),
[RegressionHead](/block/#regressionhead-class),
[ClassificationHead](/block/#classificationhead-class),
for the format of different types of data.
You can also refer to the Data Format section of the tutorials of
[Image Classification](/tutorial/image_classification/#data-format),
[Text Classification](/tutorial/text_classification/#data-format),
[Structured Data Classification](
/tutorial/structured_data_classification/#data-format).
## Reference
[AutoModel](/auto_model/#automodel-class),
[ImageInput](/node/#imageinput-class),
[StructuredDataInput](/node/#structureddatainput-class),
[DenseBlock](/block/#denseblock-class),
[RegressionHead](/block/#regressionhead-class),
[ClassificationHead](/block/#classificationhead-class),
[CategoricalToNumerical](/block/#categoricaltonumerical-class).
"""
| autokeras/docs/py/multi.py/0 | {
"file_path": "autokeras/docs/py/multi.py",
"repo_id": "autokeras",
"token_count": 2142
} | 8 |
"""
Search for a good model for the [Titanic](https://www.kaggle.com/c/titanic)
dataset.
"""
import timeit
import tensorflow as tf
import autokeras as ak
TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
def main():
# Initialize the classifier.
train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
clf = ak.StructuredDataClassifier(
max_trials=10, directory="tmp_dir", overwrite=True
)
start_time = timeit.default_timer()
# x is the path to the csv file. y is the column name of the column to
# predict.
clf.fit(train_file_path, "survived")
stop_time = timeit.default_timer()
# Evaluate the accuracy of the found model.
accuracy = clf.evaluate(test_file_path, "survived")[1]
print("Accuracy: {accuracy}%".format(accuracy=round(accuracy * 100, 2)))
print(
"Total time: {time} seconds.".format(
time=round(stop_time - start_time, 2)
)
)
if __name__ == "__main__":
main()
| autokeras/examples/titanic.py/0 | {
"file_path": "autokeras/examples/titanic.py",
"repo_id": "autokeras",
"token_count": 481
} | 9 |
#!/usr/bin/env bash
rm dist/*
python setup.py sdist
twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
| autokeras/shell/pypi.sh/0 | {
"file_path": "autokeras/shell/pypi.sh",
"repo_id": "autokeras",
"token_count": 45
} | 10 |
# Keras categorical inputs
| Status | Implemented (https://github.com/tensorflow/community/pull/209) |
:-------------- |:---------------------------------------------------- |
| **Author(s)** | Zhenyu Tan ([email protected]), Francois Chollet ([email protected])|
| **Sponsor** | Karmel Allison ([email protected]), Martin Wicke ([email protected]) |
| **Updated** | 2019-02-22 |
## Objective
This document proposes 5 new Keras preprocessing layers (KPL) (`StringLookup`, `CategoryCrossing`, `CategoryEncoding`, `Hashing`, `IntegerLookup`) and allow users to:
* Perform basic feature engineering for categorical inputs
* Replace feature columns and `tf.keras.layers.DenseFeatures` with proposed layers
* Introduce sparse inputs that work with Keras linear models and other layers that support sparsity
Other proposed layers for replacement of feature columns such as `tf.feature_column.bucketized_column` and `tf.feature_column.numeric_column` has been discussed [here](https://github.com/keras-team/governance/blob/master/rfcs/20190502-preprocessing-layers.md).
The proposed layers should support ragged tensors.
## Motivation
Specifically, by introducing the 5 layers, we aim to address these pain points:
* Users have to define both feature columns and Keras Inputs for the model, resulting in code duplication and deviation from DRY (Do not repeat yourself) principle. See this [Github issue](https://github.com/tensorflow/tensorflow/issues/27416).
* Users with large dimension categorical inputs will incur large memory footprint and computation cost, if wrapped with indicator column through `tf.keras.layers.DenseFeatures`.
* Currently there is no way to correctly feed Keras linear model or dense layer with multivalent categorical inputs or weighted categorical inputs, or shared embedding inputs.
* Feature columns offer black-box implementations, mix feature engineering with trainable objects, and lead to
unintended coding pattern.
## User Benefit
We expect to get rid of the user painpoints once migrating off feature columns.
## Example Workflows
Two example workflows are presented below. These workflows can be found at this [colab](https://colab.sandbox.google.com/drive/1cEJhSYLcc2MKH7itwcDvue4PfvrLN-OR).
### Workflow 1 -- Official guide on how to replace feature columns with KPL
Refer to [tf.feature_column](https://www.tensorflow.org/api_docs/python/tf/feature_column) for a complete list of feature columns.
1. Replacing `tf.feature_column.categorical_column_with_hash_bucket` with `Hashing`
from
```python
tf.feature_column.categorical_column_with_hash_bucket(key, hash_bucket_size)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=dtype)
hashed_input = tf.keras.experimental.preprocessing.Hashing(num_bins=hash_bucket_size)(keras_input)
```
Note the hashed output from KPL will be different than the hashed output from feature column, given how seed is choosen. `Hashing` also supports customized `salt`.
2. `tf.feature_column.categorical_column_with_identity`
This feature column is merely for having identical inputs and outputs except mapping out-of-range value into `default_value`, thus can easily be done at data cleaning stage,
not be part of feature engineering, and hence dropped in this proposal.
3. Replacing `tf.feature_column.categorical_column_with_vocabulary_file` and `tf.feature_column.categorical_column_with_vocabulary_list` with `StringLookup` or `IntegerLookup`.
for string inputs,
from
```python
tf.feature_column.categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size, tf.dtypes.string, default_value, num_oov_buckets)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.string)
id_input = tf.keras.experimental.preprocessing.StringLookup(max_tokens=vocabulary_size + num_oov_buckets,
num_oov_indices=num_oov_buckets, mask_token=None, vocabulary=vocabulary_file)(keras_input)
```
Similarly, from
```python
tf.feature_column.categorical_column_with_vocabulary_list(key, vocabulary_list, tf.dtypes.string, default_value, num_oov_buckets)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.string)
id_input = tf.keras.experimental.preprocessing.StringLookup(max_tokens=len(vocabulary_list) + num_oov_buckets, num_oov_indices=num_oov_buckets,
mask_token=None, vocabulary=vocabulary_list)(keras_input)
```
Note that `default_value` is mutually exclusive with `num_oov_buckets`, in the case of `num_oov_buckets=0` and `default_value=-1`, simply set `num_oov_indices=0`. We do not support
any values other than `default_value=-1`.
Note the out-of-range values for `StringLookup` is prepended, i.e., [0,..., num_oov_tokens) for out-of-range values, whereas for `categorical_colulmn_with_vocabulary_file` is
appended, i.e., [vocabulary_size, vocabulary_size + num_oov_tokens) for out-of-range values. The former can give you more flexibility when reloading and adding vocab.
For integer inputs,
from
```python
tf.feature_column.categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size, tf.dtypes.int64, default_value, num_oov_buckets)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.int64)
id_input = tf.keras.experimental.preprocessing.IntegerLookup(max_values=vocabulary_size + num_oov_buckets, num_oov_indices=num_oov_buckets, mask_value=None, vocabulary=vocabulary_file)(keras_input)
```
Similarly, from
```python
tf.feature_column.categorical_column_with_vocabulary_list(key, vocabulary_list, tf.dtypes.int64, default_value, num_oov_buckets)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.int64)
id_input = tf.keras.experimental.preprocessing.IntegerLookup(max_values=len(vocabulary_list) + num_oov_buckets, num_oov_indices=num_oov_buckets, mask_value=None, vocabulary=vocabulary_list)(keras_input)
```
4. Replacing `tf.feature_column.crossed_column` with `CategoryCrossing` or `Hashing`
from
```python
tf.feature_column.crossed_column(keys, hash_bucket_size, hash_key)
```
to
```python
keras_inputs = []
for key in keys:
keras_inputs.append(tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.string))
hashed_input = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=hash_bucket_size)(keras_inputs)
```
Note when `hash_bucket_size=0`, no hashing is performed, in this case it should be replaced with:
```python
keras_inputs = []
for key in keys:
keras_inputs.append(tf.keras.Input(shape=(1,), name=key, dtype=tf.dtypes.string))
crossed_input = tf.keras.layers.experimental.preprocessing.CategoryCrossing()(keras_inputs)
```
5. Replacing `tf.feature_column.embedding_column` with `tf.keras.layers.Embedding`
Note that `combiner=sum` can be replaced with `tf.reduce_sum` and `combiner=mean` with `tf.reduce_mean` after
the embedding output. `sqrtn` can also be implemented using tf operations. For example:
```python
categorical_column = tf.feature_column.categorical_column_with_vocabulary_list(key, vocabulary_list)
tf.feature_column.embedding_column(categorical_column, dimension=dimension, combiner="sum", initializer=initializer,
max_norm=max_norm)
```
can be replaced with:
```python
categorical_input = tf.keras.Input(name=key, dtype=tf.string)
id_input = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=vocabulary_list)(categorical_input)
embedding_input = tf.keras.layers.Embedding(input_dim=len(vocabulary_list), output_dim=dimension,
embeddings_initializer=initializer, embeddings_constraint=tf.keras.constraints.MaxNorm(max_norm))(id_input)
embedding_input = tf.reduce_sum(embedding_input, axis=-2)
```
6. Replacing `tf.feature_column.indicator_column` with `CategoryEncoding`
from
```python
categorical_column = tf.feature_column.categorical_column_with_vocabulary_list(key, vocabulary_list)
tf.feature_column.indicator_column(categorical_column)
```
to
```python
categorical_input = tf.keras.Input(name=key, dtype=tf.string)
id_input = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=vocabulary_list)(categorical_input)
encoded_input = tf.keras.layers.experimental.preprocessing.CateogoryEncoding(
max_tokens=categorical_column.num_buckets, output_mode="count", sparse=True)(id_input)
```
Note that `CategoryEncoding` supports one-hot through `output_mode="binary"` as well. This is a much more
efficient approach than `tf.one_hot` + `tf.reduce_sum(axis=-2)` to reduce the multivalent categorical inputs.
Note that by specifing `sparse` flag, the output can be either a `tf.Tensor` or `tf.SparseTensor`.
7. Replacing `tf.feature_column.weighted_categorical_column` with `CategoryEncoding`
from
```python
categorical_column = tf.feature_column.categorical_column_with_vocabulary_list(key, vocabulary_list)
tf.feature_column.weighted_categorical_column(categorical_column, weight_feature_key)
```
to
```python
categorical_input = tf.keras.Input(name=key, dtype=tf.string)
lookup_output = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=vocabulary_list)(categorical_input)
weight_input = tf.keras.Input(shape=(1,), dtype=tf.float32, name=weight_feature_key)
weighted_output = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=categorical_column.num_buckets)(lookup_output, weight_input)
```
8. Replacing `tf.feature_column.shared_embeddings` with a single `tf.keras.layers.Embedding`.
Similar to 5, but with multiple categorical inputs:
from
```python
watched_video_id = tf.feature_column.categorical_column_with_vocabulary_list('watched_video_id', video_vocab_list)
impression_video_id = tf.feature_column.categorical_column_with_vocabulary_list('impression_video_id', video_vocab_list)
tf.feature_column.shared_embeddings([watched_video_id, impression_video_id], dimension)
```
to
```python
watched_video_input = tf.keras.Input(shape=(1,), name='watched_video_id', dtype=tf.int64)
impression_video_input = tf.keras.Input(shape=(1,), name='impression_video_id', dtype=tf.int64)
embed_layer = tf.keras.layers.Embedding(input_dim=len(video_vocab_list), output_dim=dimension)
embedded_watched_video_input = embed_layer(watched_video_input)
embedded_impression_video_input = embed_layer(impression_video_input)
```
9. Replacing `tf.estimator.LinearXXX` with `CategoryEncoding` and `tf.keras.experimental.LinearModel`.
LinearClassifier or LinearRegressor treats categorical columns by multi-hot, this can be replaced by encoding layer and Keras linear model, see Workflow 2 for details.
10. Replacing `tf.feature_column.numeric_column` and `tf.feature_column.sequence_numeric_column` with `tf.keras.Input` and `Normalization`.
`tf.keras.layers.experimental.preprocessing.Normalization` with `set_weights` on mean and standard deviation.
11. Replacing `tf.feature_column.sequence_categorical_xxx`.
Replacing `tf.feature_column.sequence_categorical_xxx` is similar to `tf.feature_column.categorical_xxx` except `tf.keras.Input` should take time dimension into
`input_shape` as well.
12. Replacing `tf.feature_column.bucketized_column` with `Discretization`.
from
```python
source_column = tf.feature_column.numeric_column(key)
tf.feature_column.bucketized_column(source_column, boundaries)
```
to
```python
keras_input = tf.keras.Input(shape=(1,), name=key, dtype=tf.float32)
bucketized_input = tf.keras.experimental.preprocessing.Discretization(bins=boundaries)(keras_input)
```
### Workflow 2 -- Complete Example
This example gives an equivalent code snippet to canned `LinearEstimator` [tutorial](https://www.tensorflow.org/tutorials/estimator/linear) on the Titanic dataset:
Refer to this [colab](https://colab.sandbox.google.com/drive/1cEJhSYLcc2MKH7itwcDvue4PfvrLN-OR) to reproduce.
```python
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
y_train = dftrain.pop('survived')
STRING_CATEGORICAL_COLUMNS = ['sex', 'class', 'deck', 'embark_town', 'alone']
INT_CATEGORICAL_COLUMNS = ['n_siblings_spouses', 'parch']
NUMERIC_COLUMNS = ['age', 'fare']
keras_inputs = {}
keras_preproc_inputs = []
for key in STRING_CATEGORICAL_COLUMNS:
keras_input = tf.keras.Input(shape=(1,), dtype=tf.string, name=key)
keras_inputs[key] = keras_input
vocab = dftrain[key].unique()
keras_preproc_input = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=vocab, num_oov_indices=0, mask_token=None, name='lookup' + key)(keras_input)
keras_preproc_input = tf.keras.layers.experimental.preprocessing.CategoryEncoding(max_tokens=len(vocab), output_mode='count', sparse=True, name='encode' + key)(keras_preproc_input)
keras_preproc_inputs.append(keras_preproc_input)
for key in INT_CATEGORICAL_COLUMNS:
keras_input = tf.keras.Input(shape=(1,), dtype=tf.int64, name=key)
keras_inputs[key] = keras_input
vocab = dftrain[key].unique()
keras_preproc_input = tf.keras.layers.experimental.preprocessing.IntegerLookup(vocabulary=vocab, num_oov_indices=0, mask_value=None, name='lookup' + key)(keras_input)
keras_preproc_input = tf.keras.layers.experimental.preprocessing.CategoryEncoding(max_tokens=len(vocab), output_mode='count', sparse=True, name='encode' + key)(keras_preproc_input)
keras_preproc_inputs.append(keras_preproc_input)
for key in NUMERIC_COLUMNS:
keras_input = tf.keras.Input(shape=(1,), dtype=tf.float32, name=key)
keras_inputs[key] = keras_input
keras_preproc_inputs.append(keras_preproc_input)
age_x_sex = tf.keras.layers.experimental.preprocessing.CategoryCrossing(name='age_x_sex_crossing')([keras_inputs['age'], keras_inputs['sex']])
age_x_sex = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=100, name='age_x_sex_hashing')(age_x_sex)
keras_output_age_x_sex = tf.keras.layers.experimental.preprocessing.CategoryEncoding(max_tokens=100, output_mode='count', sparse=True, name='age_x_sex_encoding')(age_x_sex)
keras_preproc_inputs.append(keras_output_age_x_sex)
linear_model = tf.keras.experimental.LinearModel(units=1, kernel_initializer='zeros', activation='sigmoid')
linear_logits = linear_model(keras_preproc_inputs)
sorted_keras_inputs = tuple(keras_inputs[key] for key in sorted(keras_inputs.keys()))
model = tf.keras.Model(sorted_keras_inputs, linear_logits)
model.compile('ftrl', 'binary_crossentropy', metrics=['accuracy'])
df_dataset = tf.data.Dataset.from_tensor_slices((dict(dftrain), y_train))
def encode_map(features, labels):
encoded_features = tuple(tf.expand_dims(features[key], axis=1) for key in sorted(features.keys()))
return (encoded_features, labels)
encoded_dataset = df_dataset.batch(32).map(encode_map)
model.fit(encoded_dataset)
```
## Design Proposal
```python
`tf.keras.layers.StringLookup`
StringLookup(PreprocessingLayer):
"""This layer transforms categorical inputs to index space.
If input is dense/sparse, then output is dense/sparse."""
def __init__(self, max_tokens=None, num_oov_indices=1, mask_token="",
oov_token="[UNK]", vocabulary=None, encoding=None,
invert=False, name=None, **kwargs):
"""Constructs a IndexLookup layer.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
includes the OOV and mask tokens, so the effective number of tokens is
(max_tokens - num_oov_indices - (1 if mask_token else 0))
num_oov_indices: The number of out-of-vocabulary tokens to use; defaults to
1. If this value is more than 1, OOV inputs are hashed to determine their
OOV value; if this value is 0, passing an OOV input will result in a '-1'
being returned for that value in the output tensor. (Note that, because
the value is -1 and not 0, this will allow you to effectively drop OOV
values from categorical encodings.)
mask_token: A token that represents masked values, and which is mapped to
index 0. Defaults to the empty string "". If set to None, no mask term
will be added and the OOV tokens, if any, will be indexed from
(0...num_oov_indices) instead of (1...num_oov_indices+1).
oov_token: The token representing an out-of-vocabulary value. Defaults to
"[UNK]".
vocabulary: An optional list of vocabulary terms, or a path to a text file
containing a vocabulary to load into this layer. The file should contain
one token per line. If the list or file contains the same token multiple
times, an error will be thrown.
encoding: The Python string encoding to use. Defaults to `'utf-8'`.
invert: If true, this layer will map indices to vocabulary items instead
of mapping vocabulary items to indices.
name: Name of the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape:
a string or int tensor of shape `[batch_size, d1, ..., dm]`
Output shape:
an int tensor of shape `[batch_size, d1, ..., dm]`
Example:
>>> vocab = ["a", "b", "c", "d"]
>>> data = tf.constant([["a", "c", "d"], ["d", "z", "b"]])
>>> layer = StringLookup(vocabulary=vocab)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[2, 4, 5],
[5, 1, 3]])>
"""
pass
`tf.keras.layers.IntegerLookup`
IntegerLookup(PreprocessingLayer):
"""This layer transforms categorical inputs to index space.
If input is dense/sparse, then output is dense/sparse."""
def __init__(self, max_values=None, num_oov_indices=1, mask_value=0,
oov_value=-1, vocabulary=None, invert=False, name=None, **kwargs):
"""Constructs a IndexLookup layer.
Args:
max_values: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
includes the OOV and mask values, so the effective number of values is
(max_values - num_oov_values - (1 if mask_token else 0))
num_oov_indices: The number of out-of-vocabulary values to use; defaults to
1. If this value is more than 1, OOV inputs are modulated to determine
their OOV value; if this value is 0, passing an OOV input will result in
a '-1' being returned for that value in the output tensor. (Note that,
because the value is -1 and not 0, this will allow you to effectively drop
OOV values from categorical encodings.)
mask_value: A value that represents masked inputs, and which is mapped to
index 0. Defaults to 0. If set to None, no mask term will be added and the
OOV values, if any, will be indexed from (0...num_oov_values) instead of
(1...num_oov_values+1).
oov_value: The value representing an out-of-vocabulary value. Defaults to -1.
vocabulary: An optional list of values, or a path to a text file containing
a vocabulary to load into this layer. The file should contain one value
per line. If the list or file contains the same token multiple times, an
error will be thrown.
invert: If true, this layer will map indices to vocabulary items instead
of mapping vocabulary items to indices.
name: Name of the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape:
a string or int tensor of shape `[batch_size, d1, ..., dm]`
Output shape:
an int tensor of shape `[batch_size, d1, ..., dm]`
Example:
>>> vocab = [12, 36, 1138, 42]
>>> data = tf.constant([[12, 1138, 42], [42, 1000, 36]])
>>> layer = IntegerLookup(vocabulary=vocab)
>>> layer(data)
<tf.Tensor: shape=(2, 3), dtype=int64, numpy=
array([[2, 4, 5],
[5, 1, 3]])>
"""
pass
`tf.keras.layers.CategoryCrossing`
CategoryCrossing(PreprocessingLayer):
"""This layer transforms multiple categorical inputs to categorical outputs
by Cartesian product, and hash the output if necessary.
If any of the inputs is sparse, then all outputs will be sparse. Otherwise, all outputs will be dense."""
def __init__(self, depth=None, separator=None, name=None, **kwargs):
"""Constructs a CategoryCrossing layer.
Args:
depth: depth of input crossing. By default None, all inputs are crossed into
one output. It can also be an int or tuple/list of ints. Passing an
integer will create combinations of crossed outputs with depth up to that
integer, i.e., [1, 2, ..., `depth`), and passing a tuple of integers will
create crossed outputs with depth for the specified values in the tuple,
i.e., `depth`=(N1, N2) will create all possible crossed outputs with depth
equal to N1 or N2. Passing `None` means a single crossed output with all
inputs. For example, with inputs `a`, `b` and `c`, `depth=2` means the
output will be [a;b;c;cross(a, b);cross(bc);cross(ca)].
separator: A string added between each input being joined. Defaults to '_X_'.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: a list of string or int tensors or sparse tensors of shape
`[batch_size, d1, ..., dm]`
Output shape: a single string or int tensor or sparse tensor of shape
`[batch_size, d1, ..., dm]`
Example: (`depth`=None)
If the layer receives three inputs:
`a=[[1], [4]]`, `b=[[2], [5]]`, `c=[[3], [6]]`
the output will be a string tensor:
`[[b'1_X_2_X_3'], [b'4_X_5_X_6']]`
"""
pass
`tf.keras.layers.CategoryEncoding`
CategoryEncoding(PreprocessingLayer):
"""This layer transforms categorical inputs from index space to category space.
If input is dense/sparse, then output is dense/sparse."""
def __init__(self, max_tokens=None, output_mode="binary", sparse=False, name=None, **kwargs):
"""Constructs a CategoryEncoding layer.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary.
output_mode: Specification for the output of the layer.
Defaults to "binary". Values can be "binary", "count" or "tf-idf",
configuring the layer as follows:
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
sparse: Boolean. If true, returns a `SparseTensor` instead of a dense
`Tensor`. Defaults to `False`.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: A int tensor of shape `[batch_size, d1, ..., dm-1, dm]`
Output shape: a float tensor of shape `[batch_size, d1, ..., dm-1, num_categories]`
Example:
>>> layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
... max_tokens=4, output_mode="count")
>>> layer([[0, 1], [0, 0], [1, 2], [3, 1]])
<tf.Tensor: shape=(4, 4), dtype=float32, numpy=
array([[1., 1., 0., 0.],
[2., 0., 0., 0.],
[0., 1., 1., 0.],
[0., 1., 0., 1.]], dtype=float32)>
"""
pass
`tf.keras.layers.Hashing`
Hashing(PreprocessingLayer):
"""This layer transforms categorical inputs to hashed output.
If input is dense/sparse, then output is dense/sparse."""
def __init__(self, num_bins, salt=None, name=None, **kwargs):
"""Constructs a Hashing layer.
Args:
num_bins: Number of hash bins.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64, with these values
used as an additional input (known as a "salt" in cryptography).
These should be non-zero. Defaults to `None` (in that
case, the FarmHash64 hash function is used). It also supports
tuple/list of 2 unsigned integer numbers, see reference paper for details.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: A single or list of string, int32 or int64 `Tensor`,
`SparseTensor` or `RaggedTensor` of shape `[batch_size, ...,]`
Output shape: An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape
`[batch_size, ...]`. If any input is `RaggedTensor` then output is
`RaggedTensor`, otherwise if any input is `SparseTensor` then output is
`SparseTensor`, otherwise the output is `Tensor`.
Example:
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[0],
[1],
[1],
[2]])>
"""
pass
```
### Alternatives Considered
An alternative is to provide solutions on top of feature columns. This will make user code to be slightly cleaner but far less flexible.
### Performance Implications
End to End benchmark should be same or faster than feature columns implementations.
### Dependencies
This proposal does not add any new dependencies.
### Engineering Impact
These changes will include more layers and thus binary size and build time. It will not impact startup time.
This code can be tested in its own and maintained in its own buildable unit.
### Platforms and Environments
This proposal should work in all platforms and environments.
### Best Practices, Tutorials and Examples
This proposal does not change the best engineering practices.
### Compatibility
No backward compatibility issues.
### User Impact
User facing changes to migrate feature column based Keras modeling to preprocessing layer based Keras modeling, as the example workflow suggests.
## Questions and Meeting Notes
We'd like to gather feedbacks on `IndexLookup`, specifically we propose migrating off from mutually exclusive `num_oov_buckets` and `default_value` and replace with `num_oov_tokens`.
1. Naming for encoding v.s. vectorize: encoding can mean many things, vectorize seems to general. We will go with "CategoryEncoding"
2. "mode" should be "count" or "avg_count", instead of "sum" and "mean".
3. Rename "sparse_combiner" to "mode", which aligns with scikit-learn.
4. Have a 'sparse_out' flag for "CategoryEncoding" layer.
5. Hashing -- we refer to hashing when we mean fingerprinting. Keep using "Hashing" for layer name, but document how it relies on tf.fingerprint, and also provides option for salt.
5. Rename "CategoryLookup" to "IndexLookup"
## Updates on 07/14/20
Mark the RFC as completed, update the layer naming and arguments.
| governance/rfcs/20191212-keras-categorical-inputs.md/0 | {
"file_path": "governance/rfcs/20191212-keras-categorical-inputs.md",
"repo_id": "governance",
"token_count": 9825
} | 11 |
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
The weights for all 16 models are obtained and translated
from TensorFlow checkpoints found at
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md
# Reference
- [MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications](https://arxiv.org/abs/1704.04861)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import warnings
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
BASE_WEIGHT_PATH = ('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.6/')
backend = None
layers = None
models = None
keras_utils = None
def preprocess_input(x, **kwargs):
"""Preprocesses a numpy array encoding a batch of images.
# Arguments
x: a 4D numpy array consists of RGB values within [0, 255].
# Returns
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
**kwargs):
"""Instantiates the MobileNet architecture.
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)`
(with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network. This is known as the
width multiplier in the MobileNet paper.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: depth multiplier for depthwise convolution. This
is called the resolution multiplier in the MobileNet paper.
dropout: dropout rate
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
warnings.warn('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier,
strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier,
strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier,
strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier,
strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1),
padding='same',
name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
x = layers.Activation('softmax', name='act_softmax')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = models.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(model_name,
weight_path,
cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = keras_utils.get_file(model_name,
weight_path,
cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
# Arguments
inputs: Input tensor of shape `(rows, cols, 3)`
(with `channels_last` data format) or
(3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
x = layers.Conv2D(filters, kernel,
padding='valid',
use_bias=False,
strides=strides,
name='conv1')(x)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha,
depth_multiplier=1, strides=(1, 1), block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
# Arguments
inputs: Input tensor of shape `(rows, cols, channels)`
(with `channels_last` data format) or
(channels, rows, cols) (with `channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel.
The total number of depthwise convolution output
channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
block_id: Integer, a unique identification designating
the block number.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(batch, filters, new_rows, new_cols)`
if data_format='channels_first'
or 4D tensor with shape:
`(batch, new_rows, new_cols, filters)`
if data_format='channels_last'.
`rows` and `cols` values might have changed due to stride.
# Returns
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)),
name='conv_pad_%d' % block_id)(inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(x)
x = layers.BatchNormalization(axis=channel_axis,
name='conv_pw_%d_bn' % block_id)(x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
| keras-applications/keras_applications/mobilenet.py/0 | {
"file_path": "keras-applications/keras_applications/mobilenet.py",
"repo_id": "keras-applications",
"token_count": 8482
} | 12 |
import pytest
import numpy as np
from numpy.testing import assert_allclose
# We don't use keras.applications.imagenet_utils here
# because we also test _obtain_input_shape which is not exposed.
from keras_applications import imagenet_utils as utils
from keras import backend
from keras import models
from keras import layers
from keras import utils as keras_utils
def decode_predictions(*args, **kwargs):
kwargs['backend'] = backend
kwargs['utils'] = keras_utils
return utils.decode_predictions(*args, **kwargs)
def preprocess_input(*args, **kwargs):
kwargs['backend'] = backend
return utils.preprocess_input(*args, **kwargs)
def test_preprocess_input():
# Test image batch with float and int image input
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int32')
assert preprocess_input(x).shape == x.shape
assert preprocess_input(xint).shape == xint.shape
out1 = preprocess_input(x, 'channels_last')
out1int = preprocess_input(xint, 'channels_last')
out2 = preprocess_input(np.transpose(x, (0, 3, 1, 2)), 'channels_first')
out2int = preprocess_input(np.transpose(xint, (0, 3, 1, 2)), 'channels_first')
assert_allclose(out1, out2.transpose(0, 2, 3, 1))
assert_allclose(out1int, out2int.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
xint = x.astype('int32')
assert preprocess_input(x).shape == x.shape
assert preprocess_input(xint).shape == xint.shape
out1 = preprocess_input(x, 'channels_last')
out1int = preprocess_input(xint, 'channels_last')
out2 = preprocess_input(np.transpose(x, (2, 0, 1)), 'channels_first')
out2int = preprocess_input(np.transpose(xint, (2, 0, 1)), 'channels_first')
assert_allclose(out1, out2.transpose(1, 2, 0))
assert_allclose(out1int, out2int.transpose(1, 2, 0))
# Test that writing over the input data works predictably
for mode in ['torch', 'tf']:
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int')
x2 = preprocess_input(x, mode=mode)
xint2 = preprocess_input(xint)
assert_allclose(x, x2)
assert xint.astype('float').max() != xint2.max()
# Caffe mode works differently from the others
x = np.random.uniform(0, 255, (2, 10, 10, 3))
xint = x.astype('int')
x2 = preprocess_input(x, data_format='channels_last', mode='caffe')
xint2 = preprocess_input(xint)
assert_allclose(x, x2[..., ::-1])
assert xint.astype('float').max() != xint2.max()
def test_preprocess_input_symbolic():
# Test image batch
x = np.random.uniform(0, 255, (2, 10, 10, 3))
inputs = layers.Input(shape=x.shape[1:])
outputs = layers.Lambda(preprocess_input, output_shape=x.shape[1:])(inputs)
model = models.Model(inputs, outputs)
assert model.predict(x).shape == x.shape
outputs1 = layers.Lambda(
lambda x: preprocess_input(x, 'channels_last'),
output_shape=x.shape[1:])(inputs)
model1 = models.Model(inputs, outputs1)
out1 = model1.predict(x)
x2 = np.transpose(x, (0, 3, 1, 2))
inputs2 = layers.Input(shape=x2.shape[1:])
outputs2 = layers.Lambda(
lambda x: preprocess_input(x, 'channels_first'),
output_shape=x2.shape[1:])(inputs2)
model2 = models.Model(inputs2, outputs2)
out2 = model2.predict(x2)
assert_allclose(out1, out2.transpose(0, 2, 3, 1))
# Test single image
x = np.random.uniform(0, 255, (10, 10, 3))
inputs = layers.Input(shape=x.shape)
outputs = layers.Lambda(preprocess_input, output_shape=x.shape)(inputs)
model = models.Model(inputs, outputs)
assert model.predict(x[np.newaxis])[0].shape == x.shape
outputs1 = layers.Lambda(
lambda x: preprocess_input(x, 'channels_last'),
output_shape=x.shape)(inputs)
model1 = models.Model(inputs, outputs1)
out1 = model1.predict(x[np.newaxis])[0]
x2 = np.transpose(x, (2, 0, 1))
inputs2 = layers.Input(shape=x2.shape)
outputs2 = layers.Lambda(
lambda x: preprocess_input(x, 'channels_first'),
output_shape=x2.shape)(inputs2)
model2 = models.Model(inputs2, outputs2)
out2 = model2.predict(x2[np.newaxis])[0]
assert_allclose(out1, out2.transpose(1, 2, 0))
def test_decode_predictions():
x = np.zeros((2, 1000))
x[0, 372] = 1.0
x[1, 549] = 1.0
outs = decode_predictions(x, top=1)
scores = [out[0][2] for out in outs]
assert scores[0] == scores[1]
# the numbers of columns and ImageNet classes are not identical.
with pytest.raises(ValueError):
decode_predictions(np.ones((2, 100)))
def test_obtain_input_shape():
# input_shape and default_size are not identical.
with pytest.raises(ValueError):
utils._obtain_input_shape(
input_shape=(224, 224, 3),
default_size=299,
min_size=139,
data_format='channels_last',
require_flatten=True,
weights='imagenet')
# Test invalid use cases
for data_format in ['channels_last', 'channels_first']:
# test warning
shape = (139, 139)
if data_format == 'channels_last':
input_shape = shape + (99,)
else:
input_shape = (99,) + shape
with pytest.warns(UserWarning):
utils._obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False,
weights='fake_weights')
# input_shape is smaller than min_size.
shape = (100, 100)
if data_format == 'channels_last':
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with pytest.raises(ValueError):
utils._obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# shape is 1D.
shape = (100,)
if data_format == 'channels_last':
input_shape = shape + (3,)
else:
input_shape = (3,) + shape
with pytest.raises(ValueError):
utils._obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# the number of channels is 5 not 3.
shape = (100, 100)
if data_format == 'channels_last':
input_shape = shape + (5,)
else:
input_shape = (5,) + shape
with pytest.raises(ValueError):
utils._obtain_input_shape(
input_shape=input_shape,
default_size=None,
min_size=139,
data_format=data_format,
require_flatten=False)
# require_flatten=True with dynamic input shape.
with pytest.raises(ValueError):
utils._obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=True)
# test include top
assert utils._obtain_input_shape(
input_shape=(3, 200, 200),
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=True) == (3, 200, 200)
assert utils._obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False) == (None, None, 3)
assert utils._obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=False) == (3, None, None)
assert utils._obtain_input_shape(
input_shape=None,
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False) == (None, None, 3)
assert utils._obtain_input_shape(
input_shape=(150, 150, 3),
default_size=None,
min_size=139,
data_format='channels_last',
require_flatten=False) == (150, 150, 3)
assert utils._obtain_input_shape(
input_shape=(3, None, None),
default_size=None,
min_size=139,
data_format='channels_first',
require_flatten=False) == (3, None, None)
if __name__ == '__main__':
pytest.main([__file__])
| keras-applications/tests/imagenet_utils_test.py/0 | {
"file_path": "keras-applications/tests/imagenet_utils_test.py",
"repo_id": "keras-applications",
"token_count": 3995
} | 13 |
# On Github Issues and Pull Requests
Found a bug? Want to contribute changes to the codebase? Make sure to read this first.
## Update Your Environment
To easily update Keras: `pip install git+https://www.github.com/keras-team/keras.git --upgrade`
To easily update Keras-Contrib: `pip install git+https://www.github.com/keras-team/keras-contrib.git --upgrade`
To easily update Theano: `pip install git+git://github.com/Theano/Theano.git --upgrade`
To update TensorFlow: See [TensorFlow Installation instructions](https://github.com/tensorflow/tensorflow#installation)
## Bug reporting
Your code doesn't work, **and you have determined that the issue lies with Keras-Contrib**? Follow these steps to report a bug.
1. Your bug may already be fixed. Make sure to update to the current Keras master branch and Keras-Contrib master branch, as well as the latest Theano/TensorFlow master branch.
2. [Search for similar issues](https://github.com/keras-team/keras-contrib/issues?utf8=%E2%9C%93&q=is%3Aissue). It's possible somebody has encountered this bug already. Still having a problem? Open an issue on Github to let us know.
3. Make sure you provide us with useful information about your configuration: what OS are you using? What Keras backend are you using? Are you running on GPU? If so, what is your version of Cuda, of cuDNN? What is your GPU?
4. Provide us with a script to reproduce the issue. This script should be runnable as-is and should not require external data download (use randomly generated data if you need to run a model on some test data). We recommend that you use Github Gists to post your code. Any issue that cannot be reproduced is likely to be closed.
5. If possible, take a stab at fixing the bug yourself --if you can!
The more information you provide, the easier it is for us to validate that there is a bug and the faster we'll be able to take action. If you want your issue to be resolved quickly, following the steps above is crucial.
## Pull Requests
We love pull requests. Here's a quick guide:
1. If your PR introduces a change in functionality, make sure you start by opening an issue to discuss whether the change should be made, and how to handle it. This will save you from having your PR closed down the road! Of course, if your PR is a simple bug fix, you don't need to do that.
2. Ensure that your environment (Keras, Keras-Contrib, and your backend) are up to date. See "Update Your Environment". Create a new branch for your changes.
3. Write the code. This is the hard part! If you are adding a layer, advanced activation, or any other feature which has configurable parameters, please ensure that the feature is searializeable (to allow for saving and loading). For details on this aspect, please see the Keras ["Writing Your Own Layer"](https://keras.io/layers/writing-your-own-keras-layers/) guide and the source code for the relevant feature type from both Keras and Keras-Contrib.
4. Make sure any new function or class you introduce has proper docstrings. Make sure any code you touch still has up-to-date docstrings and documentation.
5. Write tests. Your code should have full unit test coverage. If you want to see your PR merged promptly, this is crucial. If your PR is a bug fix, it is advisable to add a new test, which, without your fix in this PR, would have failed.
6. Run our test suite locally. It's easy: within the root Keras-Contrib folder, simply run: `py.test tests/`.
- You will need to install `pytest`, `coveralls`, `pytest-cov`, `pytest-xdist`: `pip install pytest pytest-cov python-coveralls pytest-xdist pep8 pytest-pep8`
7. Make sure all tests are passing:
- with the Theano backend, on Python 2.7 and Python 3.5
- with the TensorFlow backend, on Python 2.7
- **Please Note:** all tests run on top of the very latest Keras master branch.
8. We use PEP8 syntax conventions, but we aren't dogmatic when it comes to line length. Make sure your lines stay reasonably sized, though. To make your life easier, we recommend running a PEP8 linter:
- Install PEP8 packages: `pip install pep8 pytest-pep8 autopep8`
- Run a standalone PEP8 check: `py.test --pep8 -m pep8`
- You can automatically fix some PEP8 error by running: `autopep8 -i --select <errors> <FILENAME>` for example: `autopep8 -i --select E128 tests/keras/backend/test_backends.py`
9. When committing, use appropriate, descriptive commit messages. Make sure that your branch history is not a string of "bug fix", "fix", "oops", etc. When submitting your PR, squash your commits into a single commit with an appropriate commit message, to make sure the project history stays clean and readable. See ['rebase and squash'](http://rebaseandsqua.sh/) for technical help on how to squash your commits.
10. Update the documentation. If introducing new functionality, make sure you include code snippets demonstrating the usage of your new feature.
11. Submit your PR. If your changes have been approved in a previous discussion, and if you have complete (and passing) unit tests, your PR is likely to be merged promptly. Otherwise, well...
## About keras-team/keras and tensorflow.keras
This repo supports both keras-team/keras and tensorflow.keras. The way this is done is by changing all the imports in the code by parsing it. This is checked with travis.ci every time you push a commit in a pull request.
There are a number of reasons why your code would work with keras-team/keras but not with tf.keras. The most common is that you use keras' private API. Since both keras are only similar in behavior with respect to their public API, you should only use this. Otherwise it's likely that the function you are using is not in the same place in tf.keras (or does not even exist at all).
Another gotcha is that when creating custom layers and implementing the `build` function, keras-team/keras expects as `input_shape` a tuple of ints. With tf.keras, `input_shape` is a tuple with `Dimensions` objects. This is likely to make the code incompatible. To solve this problem, you should do:
```python
from keras.layers import Layer
from keras_contrib.utils.test_utils import to_tuple
class MyLayer(Layer):
...
def build(self, input_shape):
input_shape = to_tuple(input_shape)
# now `input_shape` is a tuple of ints or None like in keras-team/keras
...
```
To change all the imports in your code to tf.keras to test compatibility, you can do:
```
python convert_to_tf_keras.py
```
To convert your codebase back to keras-team/keras, do:
```
python convert_to_tf_keras.py --revert
```
Note that you are strongly encouraged to commit your code before in case the parsing would go wrong. To discard all the changes you made since the previous commit:
```
# saves a copy of your current codebase in the git stash and comes back to the previous commit
git stash
git stash pop # get your copy back from the git stash if you need to.
```
## A Note for Contributors
Both Keras-Contrib and Keras operate under the [MIT License](LICENSE). At the discretion of the maintainers of both repositories, code may be moved from Keras-Contrib to Keras and vice versa.
The maintainers will ensure that the proper chain of commits will flow in both directions, with proper attribution of code. Maintainers will also do their best to notify contributors when their work is moved between repositories.
## About the `CODEOWNERS` file
If you add a new feature to keras-contrib, you should add yourself and your file in the `CODEOWNERS` file. Doing so will, in the future, tag you whenever an issue or a pull request about your feature is opened. Be aware that it represents some work, and in addition of being tagged, we would appreciate that you review new pull requests related to your feature.
| keras-contrib/CONTRIBUTING.md/0 | {
"file_path": "keras-contrib/CONTRIBUTING.md",
"repo_id": "keras-contrib",
"token_count": 2100
} | 14 |
{% extends "base.html" %}
| keras-contrib/contrib_docs/theme/main.html/0 | {
"file_path": "keras-contrib/contrib_docs/theme/main.html",
"repo_id": "keras-contrib",
"token_count": 11
} | 15 |
import keras
from keras import backend as K
from keras_contrib.losses.jaccard import jaccard_distance
import numpy as np
# Test and plot
y_pred = np.array([np.arange(-10, 10 + 0.1, 0.1)]).T
y_true = np.zeros(y_pred.shape)
name = 'jaccard_distance_loss'
try:
loss = jaccard_distance_loss(
K.variable(y_true), K.variable(y_pred)
).eval(session=K.get_session())
except Exception as e:
print("error plotting", name, e)
else:
plt.title(name)
plt.plot(y_pred, loss)
plt.show()
print("TYPE |Almost_right |half right |all_wrong")
y_true = np.array([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1., 0.]])
y_pred = np.array([[0, 0, 0.9, 0], [0, 0, 0.1, 0], [1, 1, 0.1, 1.]])
r = jaccard_distance(
K.variable(y_true),
K.variable(y_pred),
).eval(session=K.get_session())
print('jaccard_distance_loss', r)
assert r[0] < r[1]
assert r[1] < r[2]
r = keras.losses.binary_crossentropy(
K.variable(y_true),
K.variable(y_pred),
).eval(session=K.get_session())
print('binary_crossentropy', r)
print('binary_crossentropy_scaled', r / r.max())
assert r[0] < r[1]
assert r[1] < r[2]
"""
TYPE |Almost_right |half right |all_wrong
jaccard_distance_loss [ 0.09900928 0.89108944 3.75000238]
binary_crossentropy [ 0.02634021 0.57564634 12.53243446]
binary_crossentropy_scaled [ 0.00210176 0.04593252 1. ]
"""
| keras-contrib/examples/jaccard_loss.py/0 | {
"file_path": "keras-contrib/examples/jaccard_loss.py",
"repo_id": "keras-contrib",
"token_count": 633
} | 16 |
import numpy as np
from keras.callbacks import Callback
from keras import backend as K
class DeadReluDetector(Callback):
"""Reports the number of dead ReLUs after each training epoch
ReLU is considered to be dead if it did not fire once for entire training set
# Arguments
x_train: Training dataset to check whether or not neurons fire
verbose: verbosity mode
True means that even a single dead neuron triggers a warning message
False means that only significant number of dead neurons (10% or more)
triggers a warning message
"""
def __init__(self, x_train, verbose=False):
super(DeadReluDetector, self).__init__()
self.x_train = x_train
self.verbose = verbose
self.dead_neurons_share_threshold = 0.1
@staticmethod
def is_relu_layer(layer):
# Should work for all layers with relu
# activation. Tested for Dense and Conv2D
return layer.get_config().get('activation', None) == 'relu'
def get_relu_activations(self):
model_input = self.model.input
is_multi_input = isinstance(model_input, list)
if not is_multi_input:
model_input = [model_input]
funcs = {}
for index, layer in enumerate(self.model.layers):
if not layer.get_weights():
continue
funcs[index] = K.function(model_input
+ [K.learning_phase()], [layer.output])
if is_multi_input:
list_inputs = []
list_inputs.extend(self.x_train)
list_inputs.append(1.)
else:
list_inputs = [self.x_train, 1.]
layer_outputs = {}
for index, func in funcs.items():
layer_outputs[index] = func(list_inputs)[0]
for layer_index, layer_activations in layer_outputs.items():
if self.is_relu_layer(self.model.layers[layer_index]):
layer_name = self.model.layers[layer_index].name
# layer_weight is a list [W] (+ [b])
layer_weight = self.model.layers[layer_index].get_weights()
# with kernel and bias, the weights are saved as a list [W, b].
# If only weights, it is [W]
if type(layer_weight) is not list:
raise ValueError("'Layer_weight' should be a list, "
"but was {}".format(type(layer_weight)))
# there are no weights for current layer; skip it
# this is only legitimate if layer is "Activation"
if len(layer_weight) == 0:
continue
layer_weight_shape = np.shape(layer_weight[0])
yield [layer_index,
layer_activations,
layer_name,
layer_weight_shape]
def on_epoch_end(self, epoch, logs={}):
for relu_activation in self.get_relu_activations():
layer_index = relu_activation[0]
activation_values = relu_activation[1]
layer_name = relu_activation[2]
layer_weight_shape = relu_activation[3]
shape_act = activation_values.shape
weight_len = len(layer_weight_shape)
act_len = len(shape_act)
# should work for both Conv and Flat
if K.image_data_format() == 'channels_last':
# features in last axis
axis_filter = -1
else:
# features before the convolution axis, for weight_
# len the input and output have to be subtracted
axis_filter = -1 - (weight_len - 2)
total_featuremaps = shape_act[axis_filter]
axis = []
for i in range(act_len):
if (i != axis_filter) and (i != (len(shape_act) + axis_filter)):
axis.append(i)
axis = tuple(axis)
dead_neurons = np.sum(np.sum(activation_values, axis=axis) == 0)
dead_neurons_share = float(dead_neurons) / float(total_featuremaps)
if ((self.verbose and dead_neurons > 0)
or dead_neurons_share >= self.dead_neurons_share_threshold):
str_warning = ('Layer {} (#{}) has {} '
'dead neurons ({:.2%})!').format(layer_name,
layer_index,
dead_neurons,
dead_neurons_share)
print(str_warning)
| keras-contrib/keras_contrib/callbacks/dead_relu_detector.py/0 | {
"file_path": "keras-contrib/keras_contrib/callbacks/dead_relu_detector.py",
"repo_id": "keras-contrib",
"token_count": 2347
} | 17 |
from keras import backend as K
from keras.layers import Layer
class Swish(Layer):
""" Swish (Ramachandranet al., 2017)
# Input shape
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
# Output shape
Same shape as the input.
# Arguments
beta: float >= 0. Scaling factor
if set to 1 and trainable set to False (default),
Swish equals the SiLU activation (Elfwing et al., 2017)
trainable: whether to learn the scaling factor during training or not
# References
- [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
- [Sigmoid-weighted linear units for neural network function
approximation in reinforcement learning](https://arxiv.org/abs/1702.03118)
"""
def __init__(self, beta=1.0, trainable=False, **kwargs):
super(Swish, self).__init__(**kwargs)
self.supports_masking = True
self.beta = beta
self.trainable = trainable
def build(self, input_shape):
self.scaling_factor = K.variable(self.beta,
dtype=K.floatx(),
name='scaling_factor')
if self.trainable:
self._trainable_weights.append(self.scaling_factor)
super(Swish, self).build(input_shape)
def call(self, inputs, mask=None):
return inputs * K.sigmoid(self.scaling_factor * inputs)
def get_config(self):
config = {'beta': self.get_weights()[0] if self.trainable else self.beta,
'trainable': self.trainable}
base_config = super(Swish, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
| keras-contrib/keras_contrib/layers/advanced_activations/swish.py/0 | {
"file_path": "keras-contrib/keras_contrib/layers/advanced_activations/swish.py",
"repo_id": "keras-contrib",
"token_count": 807
} | 18 |
from .ftml import FTML
from .padam import Padam
from .yogi import Yogi
from .lars import LARS
# aliases
ftml = FTML
lars = LARS
| keras-contrib/keras_contrib/optimizers/__init__.py/0 | {
"file_path": "keras-contrib/keras_contrib/optimizers/__init__.py",
"repo_id": "keras-contrib",
"token_count": 48
} | 19 |
import pytest
from keras_contrib.utils.test_utils import layer_test
from keras_contrib.layers import Swish
@pytest.mark.parametrize('trainable', [False, True])
def test_swish(trainable):
layer_test(Swish, kwargs={'beta': 1.0, 'trainable': trainable},
input_shape=(2, 3, 4))
if __name__ == '__main__':
pytest.main([__file__])
| keras-contrib/tests/keras_contrib/layers/advanced_activations/test_swish.py/0 | {
"file_path": "keras-contrib/tests/keras_contrib/layers/advanced_activations/test_swish.py",
"repo_id": "keras-contrib",
"token_count": 144
} | 20 |
from keras_core import activations
from keras_core import applications
from keras_core import backend
from keras_core import constraints
from keras_core import datasets
from keras_core import initializers
from keras_core import layers
from keras_core import models
from keras_core import ops
from keras_core import optimizers
from keras_core import regularizers
from keras_core import utils
from keras_core.backend import KerasTensor
from keras_core.layers import Input
from keras_core.layers import Layer
from keras_core.models import Functional
from keras_core.models import Model
from keras_core.models import Sequential
from keras_core.version import __version__
| keras-core/keras_core/__init__.py/0 | {
"file_path": "keras-core/keras_core/__init__.py",
"repo_id": "keras-core",
"token_count": 180
} | 21 |
import warnings
from keras_core import backend
from keras_core import layers
from keras_core.api_export import keras_core_export
from keras_core.applications import imagenet_utils
from keras_core.models import Functional
from keras_core.ops import operation_utils
from keras_core.utils import file_utils
BASE_WEIGHT_PATH = (
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/"
)
@keras_core_export(
[
"keras_core.applications.mobilenet_v2.MobileNetV2",
"keras_core.applications.MobileNetV2",
]
)
def MobileNetV2(
input_shape=None,
alpha=1.0,
include_top=True,
weights="imagenet",
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
):
"""Instantiates the MobileNetV2 architecture.
MobileNetV2 is very similar to the original MobileNet,
except that it uses inverted residual blocks with
bottlenecking features. It has a drastically lower
parameter count than the original MobileNet.
MobileNets support any input size greater
than 32 x 32, with larger image sizes
offering better performance.
Reference:
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](
https://arxiv.org/abs/1801.04381) (CVPR 2018)
This function returns a Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNetV2, call
`keras_core.applications.mobilenet_v2.preprocess_input`
on your inputs before passing them to the model.
`mobilenet_v2.preprocess_input` will scale input pixels between -1 and 1.
Args:
input_shape: Optional shape tuple, only to be specified if `include_top`
is `False` (otherwise the input shape has to be `(224, 224, 3)`
(with `"channels_last"` data format) or `(3, 224, 224)`
(with `"channels_first"` data format).
It should have exactly 3 inputs channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would
be one valid value. Defaults to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper.
- If `alpha < 1.0`, proportionally decreases the number
of filters in each layer.
- If `alpha > 1.0`, proportionally increases the number
of filters in each layer.
- If `alpha == 1`, default number of filters from the paper
are used at each layer. Defaults to `1.0`.
include_top: Boolean, whether to include the fully-connected layer
at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization), `"imagenet"`
(pre-training on ImageNet), or the path to the weights file
to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model. `input_tensor` is useful
for sharing inputs between multiple different networks.
Defaults to `None`.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into,
only to be specified if `include_top` is `True`, and if
no `weights` argument is specified. Defaults to `1000`.
classifier_activation: A `str` or callable. The activation function
to use on the "top" layer. Ignored unless `include_top=True`.
Set `classifier_activation=None` to return the logits of the "top"
layer. When loading pretrained weights, `classifier_activation`
can only be `None` or `"softmax"`.
Returns:
A model instance.
"""
if not (weights in {"imagenet", None} or file_utils.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded. "
f"Received `weights={weights}`"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
'If using `weights="imagenet"` with `include_top` '
f"as true, `classes` should be 1000. Received `classes={classes}`"
)
# Determine proper input shape and default size.
# If both input_shape and input_tensor are used, they should match
if input_shape is not None and input_tensor is not None:
try:
is_input_t_tensor = backend.is_keras_tensor(input_tensor)
except ValueError:
try:
is_input_t_tensor = backend.is_keras_tensor(
operation_utils.get_source_inputs(input_tensor)
)
except ValueError:
raise ValueError(
f"input_tensor: {input_tensor}"
"is not type input_tensor. "
f"Received `type(input_tensor)={type(input_tensor)}`"
)
if is_input_t_tensor:
if backend.image_data_format() == "channels_first":
if input_tensor.shape[1] != input_shape[1]:
raise ValueError(
"input_shape[1] must equal shape(input_tensor)[1] "
"when `image_data_format` is `channels_first`; "
"Received `input_tensor.shape="
f"{input_tensor.shape}`"
f", `input_shape={input_shape}`"
)
else:
if input_tensor.shape[2] != input_shape[1]:
raise ValueError(
"input_tensor.shape[2] must equal input_shape[1]; "
"Received `input_tensor.shape="
f"{input_tensor.shape}`, "
f"`input_shape={input_shape}`"
)
else:
raise ValueError(
"input_tensor is not a Keras tensor; "
f"Received `input_tensor={input_tensor}`"
)
# If input_shape is None, infer shape from input_tensor.
if input_shape is None and input_tensor is not None:
try:
backend.is_keras_tensor(input_tensor)
except ValueError:
raise ValueError(
"input_tensor must be a valid Keras tensor type; "
f"Received {input_tensor} of type {type(input_tensor)}"
)
if input_shape is None and not backend.is_keras_tensor(input_tensor):
default_size = 224
elif input_shape is None and backend.is_keras_tensor(input_tensor):
if backend.image_data_format() == "channels_first":
rows = input_tensor.shape[2]
cols = input_tensor.shape[3]
else:
rows = input_tensor.shape[1]
cols = input_tensor.shape[2]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
# If input_shape is None and no input_tensor
elif input_shape is None:
default_size = 224
# If input_shape is not None, assume default size.
else:
if backend.image_data_format() == "channels_first":
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if backend.image_data_format() == "channels_last":
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == "imagenet":
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError(
"If imagenet weights are being loaded, "
"alpha must be one of `0.35`, `0.50`, `0.75`, "
"`1.0`, `1.3` or `1.4` only;"
f" Received `alpha={alpha}`"
)
if rows != cols or rows not in [96, 128, 160, 192, 224]:
rows = 224
warnings.warn(
"`input_shape` is undefined or non-square, "
"or `rows` is not in [96, 128, 160, 192, 224]. "
"Weights for input shape (224, 224) will be "
"loaded as the default.",
stacklevel=2,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
first_block_filters = _make_divisible(32 * alpha, 8)
x = layers.Conv2D(
first_block_filters,
kernel_size=3,
strides=(2, 2),
padding="same",
use_bias=False,
name="Conv1",
)(img_input)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name="bn_Conv1"
)(x)
x = layers.ReLU(6.0, name="Conv1_relu")(x)
x = _inverted_res_block(
x, filters=16, alpha=alpha, stride=1, expansion=1, block_id=0
)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=2, expansion=6, block_id=1
)
x = _inverted_res_block(
x, filters=24, alpha=alpha, stride=1, expansion=6, block_id=2
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=2, expansion=6, block_id=3
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=4
)
x = _inverted_res_block(
x, filters=32, alpha=alpha, stride=1, expansion=6, block_id=5
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=2, expansion=6, block_id=6
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=7
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=8
)
x = _inverted_res_block(
x, filters=64, alpha=alpha, stride=1, expansion=6, block_id=9
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=10
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=11
)
x = _inverted_res_block(
x, filters=96, alpha=alpha, stride=1, expansion=6, block_id=12
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=2, expansion=6, block_id=13
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=14
)
x = _inverted_res_block(
x, filters=160, alpha=alpha, stride=1, expansion=6, block_id=15
)
x = _inverted_res_block(
x, filters=320, alpha=alpha, stride=1, expansion=6, block_id=16
)
# no alpha applied to last conv as stated in the paper:
# if the width multiplier is greater than 1 we increase the number of output
# channels.
if alpha > 1.0:
last_block_filters = _make_divisible(1280 * alpha, 8)
else:
last_block_filters = 1280
x = layers.Conv2D(
last_block_filters, kernel_size=1, use_bias=False, name="Conv_1"
)(x)
x = layers.BatchNormalization(
axis=channel_axis, epsilon=1e-3, momentum=0.999, name="Conv_1_bn"
)(x)
x = layers.ReLU(6.0, name="out_relu")(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes, activation=classifier_activation, name="predictions"
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account any potential predecessors of
# `input_tensor`.
if input_tensor is not None:
inputs = operation_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Functional(inputs, x, name=f"mobilenetv2_{alpha:0.2f}_{rows}")
# Load weights.
if weights == "imagenet":
if include_top:
model_name = (
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
+ str(float(alpha))
+ "_"
+ str(rows)
+ ".h5"
)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = file_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
else:
model_name = (
"mobilenet_v2_weights_tf_dim_ordering_tf_kernels_"
+ str(float(alpha))
+ "_"
+ str(rows)
+ "_no_top"
+ ".h5"
)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = file_utils.get_file(
model_name, weight_path, cache_subdir="models"
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
"""Inverted ResNet block."""
channel_axis = 1 if backend.image_data_format() == "channels_first" else -1
in_channels = inputs.shape[channel_axis]
pointwise_conv_filters = int(filters * alpha)
# Ensure the number of filters on the last 1x1 convolution is divisible by
# 8.
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = f"block_{block_id}_"
if block_id:
# Expand with a pointwise 1x1 convolution.
x = layers.Conv2D(
expansion * in_channels,
kernel_size=1,
padding="same",
use_bias=False,
activation=None,
name=prefix + "expand",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "expand_BN",
)(x)
x = layers.ReLU(6.0, name=prefix + "expand_relu")(x)
else:
prefix = "expanded_conv_"
# Depthwise 3x3 convolution.
if stride == 2:
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, 3), name=prefix + "pad"
)(x)
x = layers.DepthwiseConv2D(
kernel_size=3,
strides=stride,
activation=None,
use_bias=False,
padding="same" if stride == 1 else "valid",
name=prefix + "depthwise",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "depthwise_BN",
)(x)
x = layers.ReLU(6.0, name=prefix + "depthwise_relu")(x)
# Project with a pointwise 1x1 convolution.
x = layers.Conv2D(
pointwise_filters,
kernel_size=1,
padding="same",
use_bias=False,
activation=None,
name=prefix + "project",
)(x)
x = layers.BatchNormalization(
axis=channel_axis,
epsilon=1e-3,
momentum=0.999,
name=prefix + "project_BN",
)(x)
if in_channels == pointwise_filters and stride == 1:
return layers.Add(name=prefix + "add")([inputs, x])
return x
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
@keras_core_export("keras_core.applications.mobilenet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(
x, data_format=data_format, mode="tf"
)
@keras_core_export("keras_core.applications.mobilenet_v2.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode="",
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC,
)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| keras-core/keras_core/applications/mobilenet_v2.py/0 | {
"file_path": "keras-core/keras_core/applications/mobilenet_v2.py",
"repo_id": "keras-core",
"token_count": 8393
} | 22 |
from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import tensorflow as tf
from keras_core import backend
from keras_core import ops
from keras_core import testing
from keras_core.backend.common import keras_tensor
class KerasTensorTest(testing.TestCase):
def test_attributes(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
self.assertEqual(x.dtype, "float32")
self.assertEqual(x.shape, (3,))
self.assertEqual(x.sparse, True)
def test_numpy_methods(self):
x = keras_tensor.KerasTensor(shape=(3, 2), dtype="float32")
# reshape
x = x.reshape((6,))
self.assertEqual(x.shape, (6,))
# expand_dims, squeeze
x = ops.expand_dims(x, -1)
self.assertEqual(x.shape, (6, 1))
x = x.squeeze()
self.assertEqual(x.shape, (6,))
x = ops.expand_dims(x, axis=0)
self.assertEqual(x.shape, (1, 6))
x = x.squeeze(axis=0)
self.assertEqual(x.shape, (6,))
def test_invalid_usage(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32")
with self.assertRaisesRegex(
ValueError, "doesn't have any actual numerical value"
):
np.array(x)
if backend.backend() == "jax":
from jax import numpy as jnp
with self.assertRaisesRegex(
ValueError, "cannot be used as input to a JAX function"
):
jnp.array(x)
with self.assertRaisesRegex(
ValueError, "cannot be used as input to a TensorFlow function"
):
tf.convert_to_tensor(x)
def test_bool(self):
tensor = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
with self.assertRaisesRegex(TypeError, "cannot be used as a boolean."):
bool(tensor)
def test_representation(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
self.assertIn("<KerasTensor shape=(3, 4)", repr(x))
def test_iterating(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
with self.assertRaises(NotImplementedError):
iter(x)
def test_any_symbolic_tensors(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = np.array([1, 2, 3])
self.assertTrue(keras_tensor.any_symbolic_tensors(args=[x, y]))
self.assertFalse(keras_tensor.any_symbolic_tensors(args=[y]))
def test_is_keras_tensor(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
self.assertTrue(keras_tensor.is_keras_tensor(x))
y = np.array([1, 2, 3])
self.assertFalse(keras_tensor.is_keras_tensor(y))
@patch("keras_core.ops.Absolute.symbolic_call")
def test_abs_method(self, mock_symbolic_call):
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
abs_x = abs(x) # this will internally call x.__abs__()
mock_symbolic_call.assert_called_once_with(x)
self.assertEqual(abs_x, mock_tensor)
@patch("keras_core.ops.Negative.symbolic_call")
def test_neg_method(self, mock_method):
self._test_unary_op_method(mock_method, lambda x: -x)
@patch("keras_core.ops.Subtract.symbolic_call")
def test_sub_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x - y)
@patch("keras_core.ops.Multiply.symbolic_call")
def test_mul_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x * y)
@patch("keras_core.ops.Matmul.symbolic_call")
def test_matmul_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x @ y)
@patch("keras_core.ops.Power.symbolic_call")
def test_pow_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x**y)
@patch("keras_core.ops.Mod.symbolic_call")
def test_mod_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x % y)
@patch("keras_core.ops.Less.symbolic_call")
def test_lt_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x < y)
@patch("keras_core.ops.LogicalAnd.symbolic_call")
def test_and_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x & y)
@patch("keras_core.ops.LogicalOr.symbolic_call")
def test_or_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x | y)
@patch("keras_core.ops.GetItem.symbolic_call")
def test_getitem_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x[y])
def _test_unary_op_method(self, mock_method, operator):
mock_tensor = Mock()
mock_method.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
result = operator(x)
mock_method.assert_called_once_with(x)
self.assertEqual(result, mock_tensor)
def _test_binary_op_method(self, mock_method, other, operator):
mock_tensor = Mock()
mock_method.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
result = operator(x, other)
mock_method.assert_called_once_with(x, other)
self.assertEqual(result, mock_tensor)
| keras-core/keras_core/backend/common/keras_tensor_test.py/0 | {
"file_path": "keras-core/keras_core/backend/common/keras_tensor_test.py",
"repo_id": "keras-core",
"token_count": 2687
} | 23 |
import jax.numpy as jnp
from keras_core.backend import config
from keras_core.backend.jax.core import cast
from keras_core.backend.jax.core import convert_to_tensor
def add(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.add(x1, x2)
def bincount(x, weights=None, minlength=0):
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return jnp.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return jnp.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return jnp.stack(bincounts)
return jnp.bincount(x, weights=weights, minlength=minlength)
def einsum(subscripts, *operands, **kwargs):
operands = [convert_to_tensor(x) for x in operands]
return jnp.einsum(subscripts, *operands, **kwargs)
def subtract(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.subtract(x1, x2)
def matmul(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.matmul(x1, x2)
def multiply(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
# `jnp.mean` does not handle low precision (e.g., float16) overflow
# correctly, so we compute with float32 and cast back to the original type.
outputs = jnp.mean(x, axis=axis, keepdims=keepdims, dtype=jnp.float32)
dtype = getattr(x, "dtype", None)
if hasattr(dtype, "name") and "float" in dtype.name:
return cast(outputs, dtype)
else:
return cast(outputs, config.floatx())
def max(x, axis=None, keepdims=False, initial=None):
return jnp.max(x, axis=axis, keepdims=keepdims, initial=initial)
def ones(shape, dtype="float32"):
return jnp.ones(shape, dtype=dtype)
def zeros(shape, dtype="float32"):
return jnp.zeros(shape, dtype=dtype)
def absolute(x):
return jnp.absolute(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
return jnp.all(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
return jnp.any(x, axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
return jnp.amax(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
return jnp.amin(x, axis=axis, keepdims=keepdims)
def append(
x1,
x2,
axis=None,
):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.append(x1, x2, axis=axis)
def arange(start, stop=None, step=1, dtype=None):
if dtype is None:
if hasattr(start, "dtype"):
dtype = start.dtype
elif isinstance(start, int):
dtype = "int32"
else:
dtype = config.floatx()
return jnp.arange(start, stop, step=step, dtype=dtype)
def arccos(x):
return jnp.arccos(x)
def arccosh(x):
return jnp.arccosh(x)
def arcsin(x):
return jnp.arcsin(x)
def arcsinh(x):
return jnp.arcsinh(x)
def arctan(x):
return jnp.arctan(x)
def arctan2(x1, x2):
return jnp.arctan2(x1, x2)
def arctanh(x):
return jnp.arctanh(x)
def argmax(x, axis=None):
return jnp.argmax(x, axis=axis)
def argmin(x, axis=None):
return jnp.argmin(x, axis=axis)
def argsort(x, axis=-1):
return jnp.argsort(x, axis=axis)
def array(x, dtype=None):
return jnp.array(x, dtype=dtype)
def average(x, axis=None, weights=None):
return jnp.average(x, weights=weights, axis=axis)
def broadcast_to(x, shape):
return jnp.broadcast_to(x, shape)
def ceil(x):
return jnp.ceil(x)
def clip(x, x_min, x_max):
return jnp.clip(x, x_min, x_max)
def concatenate(xs, axis=0):
return jnp.concatenate(xs, axis=axis)
def conjugate(x):
return jnp.conjugate(x)
def conj(x):
return conjugate(x)
def copy(x):
return jnp.copy(x)
def cos(x):
return jnp.cos(x)
def cosh(x):
return jnp.cosh(x)
def count_nonzero(x, axis=None):
return jnp.count_nonzero(x, axis=axis)
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
return jnp.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
def cumprod(x, axis=None):
return jnp.cumprod(x, axis=axis)
def cumsum(x, axis=None):
return jnp.cumsum(x, axis=axis)
def diag(x, k=0):
return jnp.diag(x, k=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
return jnp.diagonal(
x,
offset=offset,
axis1=axis1,
axis2=axis2,
)
def digitize(x, bins):
x = convert_to_tensor(x)
bins = convert_to_tensor(bins)
return jnp.digitize(x, bins)
def dot(x, y):
return jnp.dot(x, y)
def empty(shape, dtype="float32"):
return jnp.empty(shape, dtype=dtype)
def equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.equal(x1, x2)
def exp(x):
return jnp.exp(x)
def expand_dims(x, axis):
return jnp.expand_dims(x, axis)
def expm1(x):
return jnp.expm1(x)
def flip(x, axis=None):
return jnp.flip(x, axis=axis)
def floor(x):
return jnp.floor(x)
def full(shape, fill_value, dtype=None):
return jnp.full(shape, fill_value, dtype=dtype)
def full_like(x, fill_value, dtype=None):
return jnp.full_like(x, fill_value, dtype=dtype)
def greater(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.greater(x1, x2)
def greater_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.greater_equal(x1, x2)
def hstack(xs):
return jnp.hstack(xs)
def identity(n, dtype="float32"):
return jnp.identity(n, dtype=dtype)
def imag(x):
return jnp.imag(x)
def isclose(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.isclose(x1, x2)
def isfinite(x):
return jnp.isfinite(x)
def isinf(x):
return jnp.isinf(x)
def isnan(x):
return jnp.isnan(x)
def less(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.less(x1, x2)
def less_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.less_equal(x1, x2)
def linspace(
start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
return jnp.linspace(
start,
stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
def log(x):
return jnp.log(x)
def log10(x):
return jnp.log10(x)
def log1p(x):
return jnp.log1p(x)
def log2(x):
return jnp.log2(x)
def logaddexp(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logaddexp(x1, x2)
def logical_and(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_and(x1, x2)
def logical_not(x):
return jnp.logical_not(x)
def logical_or(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
return jnp.logspace(
start,
stop,
num=num,
endpoint=endpoint,
base=base,
dtype=dtype,
axis=axis,
)
def maximum(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.maximum(x1, x2)
def meshgrid(*x, indexing="xy"):
return jnp.meshgrid(*x, indexing=indexing)
def min(x, axis=None, keepdims=False, initial=None):
return jnp.min(x, axis=axis, keepdims=keepdims, initial=initial)
def minimum(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.minimum(x1, x2)
def mod(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.mod(x1, x2)
def moveaxis(x, source, destination):
return jnp.moveaxis(x, source=source, destination=destination)
def nan_to_num(x):
return jnp.nan_to_num(x)
def ndim(x):
return jnp.ndim(x)
def nonzero(x):
return jnp.nonzero(x)
def not_equal(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.not_equal(x1, x2)
def ones_like(x, dtype=None):
return jnp.ones_like(x, dtype=dtype)
def zeros_like(x, dtype=None):
return jnp.zeros_like(x, dtype=dtype)
def outer(x1, x2):
return jnp.outer(x1, x2)
def pad(x, pad_width, mode="constant"):
return jnp.pad(x, pad_width, mode=mode)
def prod(x, axis=None, keepdims=False, dtype=None):
return jnp.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def ravel(x):
return jnp.ravel(x)
def real(x):
return jnp.real(x)
def reciprocal(x):
return jnp.reciprocal(x)
def repeat(x, repeats, axis=None):
return jnp.repeat(x, repeats, axis=axis)
def reshape(x, new_shape):
return jnp.reshape(x, new_shape)
def roll(x, shift, axis=None):
return jnp.roll(x, shift, axis=axis)
def sign(x):
return jnp.sign(x)
def sin(x):
return jnp.sin(x)
def sinh(x):
return jnp.sinh(x)
def size(x):
return jnp.size(x)
def sort(x, axis=-1):
return jnp.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
return jnp.split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
return jnp.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
return jnp.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
return jnp.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
x = convert_to_tensor(x)
indices = convert_to_tensor(indices)
return jnp.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
return jnp.take_along_axis(x, indices, axis=axis)
def tan(x):
return jnp.tan(x)
def tanh(x):
return jnp.tanh(x)
def tensordot(x1, x2, axes=2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.tensordot(x1, x2, axes=axes)
def round(x, decimals=0):
return jnp.round(x, decimals=decimals)
def tile(x, repeats):
return jnp.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
return jnp.trace(x, offset=offset, axis1=axis1, axis2=axis2)
def tri(N, M=None, k=0, dtype="float32"):
return jnp.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
return jnp.tril(x, k=k)
def triu(x, k=0):
return jnp.triu(x, k=k)
def vdot(x1, x2):
return jnp.vdot(x1, x2)
def vstack(xs):
return jnp.vstack(xs)
def where(condition, x1, x2):
return jnp.where(condition, x1, x2)
def divide(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.divide(x1, x2)
def true_divide(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.true_divide(x1, x2)
def power(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.power(x1, x2)
def negative(x):
return jnp.negative(x)
def square(x):
return jnp.square(x)
def sqrt(x):
return jnp.sqrt(x)
def squeeze(x, axis=None):
return jnp.squeeze(x, axis=axis)
def transpose(x, axes=None):
x = convert_to_tensor(x)
return jnp.transpose(x, axes=axes)
def var(x, axis=None, keepdims=False):
# `jnp.var` does not handle low precision (e.g., float16) overflow
# correctly, so we compute with float32 and cast back to the original type.
outputs = jnp.var(x, axis=axis, keepdims=keepdims, dtype=jnp.float32)
dtype = getattr(x, "dtype", None)
if hasattr(dtype, "name") and "float" in dtype.name:
return cast(outputs, dtype)
else:
return cast(outputs, config.floatx())
def sum(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
return jnp.sum(x, axis=axis, keepdims=keepdims)
def eye(N, M=None, k=0, dtype="float32"):
return jnp.eye(N, M=M, k=k, dtype=dtype)
def floor_divide(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.floor_divide(x1, x2)
def logical_xor(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return jnp.logical_xor(x1, x2)
| keras-core/keras_core/backend/jax/numpy.py/0 | {
"file_path": "keras-core/keras_core/backend/jax/numpy.py",
"repo_id": "keras-core",
"token_count": 5980
} | 24 |
import contextlib
import os
import numpy as np
import torch
import tree
from keras_core.backend.common import KerasVariable
from keras_core.backend.common import global_state
from keras_core.backend.common import standardize_dtype
from keras_core.backend.common.keras_tensor import KerasTensor
from keras_core.backend.common.stateless_scope import StatelessScope
from keras_core.utils.nest import pack_sequence_as
SUPPORTS_SPARSE_TENSORS = False
# Some operators such as 'aten::_foreach_mul_.Scalar'
# are not currently implemented for the MPS device.
# check https://github.com/pytorch/pytorch/issues/77764.
if (
torch.backends.mps.is_available()
and os.getenv("PYTORCH_ENABLE_MPS_FALLBACK") == "1"
):
DEFAULT_DEVICE = "mps"
elif torch.cuda.is_available():
DEFAULT_DEVICE = "cuda"
else:
DEFAULT_DEVICE = "cpu"
TORCH_DTYPES = {
"float16": torch.float16,
"float32": torch.float32,
"float64": torch.float64,
"uint8": torch.uint8,
"uint16": torch.int32, # TODO: Torch doesn't have `uint16` dtype.
"uint32": torch.int64, # TODO: Torch doesn't have `uint32` dtype.
"int8": torch.int8,
"int16": torch.int16,
"int32": torch.int32,
"int64": torch.int64,
"bfloat16": torch.bfloat16,
"bool": torch.bool,
}
@contextlib.contextmanager
def device_scope(device):
previous_device = global_state.get_global_attribute("torch_device", None)
global_state.set_global_attribute("torch_device", device)
try:
yield
finally:
global_state.set_global_attribute("torch_device", previous_device)
def get_device():
device = global_state.get_global_attribute("torch_device", None)
if device is None:
return DEFAULT_DEVICE
return device
def to_torch_dtype(dtype):
standardized_dtype = TORCH_DTYPES.get(standardize_dtype(dtype), None)
if standardized_dtype is None:
raise ValueError(f"Unsupported dtype for PyTorch: {dtype}")
return standardized_dtype
class Variable(KerasVariable):
def _initialize(self, value):
self._value = torch.nn.Parameter(
convert_to_tensor(value, dtype=self._dtype),
requires_grad=self.trainable,
).to(get_device())
def _direct_assign(self, value):
with torch.no_grad():
self.value.copy_(value)
def _convert_to_tensor(self, value, dtype=None):
return convert_to_tensor(value, dtype=dtype)
# Overload native accessor.
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
args = [
arg.value if isinstance(arg, KerasVariable) else arg for arg in args
]
if kwargs is None:
kwargs = {}
kwargs = {
key: value.value if isinstance(value, KerasVariable) else value
for key, value in kwargs.items()
}
return func(*args, **kwargs)
def __array__(self, dtype=None):
value = convert_to_numpy(self.value)
if dtype:
return value.astype(dtype)
return value
@property
def value(self):
value = super().value
# Create and use a symbolic tensor stub in symbolic calls.
if get_device() == "meta" and str(value.device) != "meta":
return torch.empty(
size=value.shape,
dtype=value.dtype,
device="meta",
)
return value
def __eq__(self, other):
try:
return super().__eq__(other)
except Exception:
return False
def convert_to_tensor(x, dtype=None, sparse=False):
if sparse:
raise ValueError("`sparse=True` is not supported with torch backend")
if is_tensor(x):
device = get_device()
if x.device != device:
x = x.to(device)
if dtype is None:
return x
return x.to(to_torch_dtype(dtype))
if isinstance(x, Variable):
# TorchDynamo has bugs supporting nn.Parameter type check.
# Return it directly instead of pass it to the rest of the logic in the
# function.
return x.value
if isinstance(x, int):
return torch.as_tensor(x, dtype=torch.int32, device=get_device())
if isinstance(x, float):
return torch.as_tensor(x, dtype=torch.float32, device=get_device())
# Convert to np in case of any array-like that is not list or tuple.
if not isinstance(x, (list, tuple)):
x = np.array(x)
elif len(x) > 0 and any(isinstance(x1, torch.Tensor) for x1 in x):
# Handle list or tuple of torch tensors
return torch.stack([convert_to_tensor(x1) for x1 in x])
if isinstance(x, np.ndarray):
if x.dtype == np.uint32:
# Torch backend does not support uint32.
x = x.astype(np.int64)
dtype = dtype or x.dtype
dtype = to_torch_dtype(dtype)
return torch.as_tensor(x, dtype=dtype, device=get_device())
def convert_to_numpy(x):
def transform(x):
if is_tensor(x):
if x.requires_grad:
x = x.detach()
# Tensor has to be moved to CPU before converting to numpy.
if x.is_cuda or x.is_mps:
x = x.cpu()
return np.array(x)
if isinstance(x, (list, tuple)):
return np.array([transform(e) for e in x])
return transform(x)
def is_tensor(x):
return torch.is_tensor(x)
def shape(x):
return x.shape
def cast(x, dtype):
dtype = to_torch_dtype(dtype)
if isinstance(x, KerasVariable):
x = x.value
if is_tensor(x):
if x.dtype == dtype:
return x
else:
return x.to(dtype)
return convert_to_tensor(x, dtype)
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
def has_none_shape(x):
"""Check for if a `KerasTensor` has dynamic shape."""
if isinstance(x, KerasTensor):
return None in x.shape
return False
def convert_keras_tensor_to_torch(x, fill_value=None):
"""Convert `KerasTensor`s to `torch.Tensor`s."""
if isinstance(x, KerasTensor):
shape = list(x.shape)
if fill_value:
for i, e in enumerate(shape):
if e is None:
shape[i] = fill_value
return torch.ones(
size=shape,
dtype=TORCH_DTYPES[x.dtype],
device=get_device(),
)
return x
def convert_torch_to_keras_tensor(x):
"""Convert `torch.Tensor`s to `KerasTensor`s."""
if is_tensor(x):
return KerasTensor(x.shape, standardize_dtype(x.dtype))
return x
def symbolic_call(fn, args, kwargs, fill_value):
"""Call `fn` to infer output shape and dtype."""
try:
# First try instantiating all tensors on the `"meta"` device,
# which should give a "zero flop" way to trace shape, but does
# not have universal support with torch operations.
with device_scope("meta"):
meta_args, meta_kwargs = tree.map_structure(
lambda x: convert_keras_tensor_to_torch(x, fill_value),
(args, kwargs),
)
return fn(*meta_args, **meta_kwargs)
except:
with device_scope(DEFAULT_DEVICE):
# If the `"meta"` device placement fails, fall back to tracing
# eagerly with tensors on the default device. This will be
# more robust, but more expensive.
eager_args, eager_kwargs = tree.map_structure(
lambda x: convert_keras_tensor_to_torch(x, fill_value),
(args, kwargs),
)
return fn(*eager_args, **eager_kwargs)
with StatelessScope(), torch.no_grad():
outputs = symbolic_call(fn, args, kwargs, fill_value=83)
none_in_shape = any(map(has_none_shape, tree.flatten((args, kwargs))))
if none_in_shape:
outputs_1 = outputs
outputs_2 = symbolic_call(fn, args, kwargs, fill_value=89)
flat_out_1 = tree.flatten(outputs_1)
flat_out_2 = tree.flatten(outputs_2)
flat_out = []
for x1, x2 in zip(flat_out_1, flat_out_2):
shape = list(x1.shape)
for i, e in enumerate(x2.shape):
if e != shape[i]:
shape[i] = None
flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype)))
outputs = pack_sequence_as(outputs_1, flat_out)
output_spec = tree.map_structure(convert_torch_to_keras_tensor, outputs)
return output_spec
def cond(pred, true_fn, false_fn):
# When symbolic execution, take pred as true.
if get_device() == "meta":
return true_fn()
if pred:
return true_fn()
return false_fn()
def vectorized_map(function, elements):
return torch.vmap(function)(elements)
def scatter(indices, values, shape):
indices = convert_to_tensor(indices)
values = convert_to_tensor(values)
zeros = torch.zeros(shape, dtype=values.dtype, device=get_device())
index_length = indices.shape[-1]
value_shape = shape[index_length:]
indices = torch.reshape(indices, [-1, index_length])
values = torch.reshape(values, [-1] + list(value_shape))
for i in range(indices.shape[0]):
index = indices[i]
zeros[tuple(index)] += values[i]
return zeros
def scatter_update(inputs, indices, updates):
inputs = convert_to_tensor(inputs)
indices = convert_to_tensor(indices, dtype="int64")
updates = convert_to_tensor(updates)
indices = torch.transpose(indices, 0, 1)
inputs[tuple(indices)] = updates
return inputs
def slice(inputs, start_indices, shape):
shape_dtype = to_torch_dtype("int64")
inputs = convert_to_tensor(inputs)
start_indices = convert_to_tensor(start_indices).to(shape_dtype)
shape = convert_to_tensor(shape).to(shape_dtype)
python_slice = __builtins__["slice"]
slices = [
python_slice(start_index, start_index + length)
for start_index, length in zip(start_indices, shape)
]
return inputs[slices]
def slice_update(inputs, start_indices, updates):
shape_dtype = to_torch_dtype("int64")
inputs = convert_to_tensor(inputs)
start_indices = convert_to_tensor(start_indices).to(shape_dtype)
updates = convert_to_tensor(updates)
python_slice = __builtins__["slice"]
slices = [
python_slice(start_index, start_index + update_length)
for start_index, update_length in zip(start_indices, updates.shape)
]
outputs = torch.clone(inputs)
outputs[slices] = updates
return outputs
def while_loop(
cond,
body,
loop_vars,
maximum_iterations=None,
):
current_iter = 0
iteration_check = (
lambda iter: maximum_iterations is None or iter < maximum_iterations
)
loop_vars = tuple([convert_to_tensor(v) for v in loop_vars])
while cond(*loop_vars) and iteration_check(current_iter):
loop_vars = body(*loop_vars)
if not isinstance(loop_vars, (list, tuple)):
loop_vars = (loop_vars,)
loop_vars = tuple(loop_vars)
current_iter += 1
return loop_vars
def fori_loop(lower, upper, body_fun, init_val):
val = init_val
for i in range(lower, upper):
val = body_fun(i, val)
return val
def stop_gradient(variable):
# We can't use `.requires_grad_(False)` here since it only
# works when the tensor is a leaf node in the graph.
return variable.detach()
def unstack(x, num=None, axis=0):
return x.unbind(axis)
| keras-core/keras_core/backend/torch/core.py/0 | {
"file_path": "keras-core/keras_core/backend/torch/core.py",
"repo_id": "keras-core",
"token_count": 5318
} | 25 |
from keras_core import backend
from keras_core import ops
from keras_core.api_export import keras_core_export
@keras_core_export("keras_core.constraints.Constraint")
class Constraint:
"""Base class for weight constraints.
A `Constraint` instance works like a stateless function.
Users who subclass this
class should override the `__call__()` method, which takes a single
weight parameter and return a projected version of that parameter
(e.g. normalized or clipped). Constraints can be used with various Keras
layers via the `kernel_constraint` or `bias_constraint` arguments.
Here's a simple example of a non-negative weight constraint:
>>> class NonNegative(keras_core.constraints.Constraint):
...
... def __call__(self, w):
... return w * ops.cast(ops.greater_equal(w, 0.), dtype=w.dtype)
>>> weight = ops.convert_to_tensor((-1.0, 1.0))
>>> NonNegative()(weight)
[0., 1.]
Usage in a layer:
>>> keras_core.layers.Dense(4, kernel_constraint=NonNegative())
"""
def __call__(self, w):
"""Applies the constraint to the input weight variable.
By default, the inputs weight variable is not modified.
Users should override this method to implement their own projection
function.
Args:
w: Input weight variable.
Returns:
Projected variable (by default, returns unmodified inputs).
"""
return w
def get_config(self):
"""Returns a Python dict of the object config.
A constraint config is a Python dictionary (JSON-serializable) that can
be used to reinstantiate the same object.
Returns:
Python dict containing the configuration of the constraint object.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates a weight constraint from a configuration dictionary.
Example:
```python
constraint = UnitNorm()
config = constraint.get_config()
constraint = UnitNorm.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
A `keras_core.constraints.Constraint` instance.
"""
return cls(**config)
@keras_core_export(
["keras_core.constraints.MaxNorm", "keras_core.constraints.max_norm"]
)
class MaxNorm(Constraint):
"""MaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have a norm less than or equal to a desired value.
Also available via the shortcut function `keras_core.constraints.max_norm`.
Args:
max_value: the maximum norm value for the incoming weights.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, max_value=2, axis=0):
self.max_value = max_value
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
desired = ops.clip(norms, 0, self.max_value)
return w * (desired / (backend.epsilon() + norms))
def get_config(self):
return {"max_value": self.max_value, "axis": self.axis}
@keras_core_export(
["keras_core.constraints.NonNeg", "keras_core.constraints.non_neg"]
)
class NonNeg(Constraint):
"""Constrains the weights to be non-negative."""
def __call__(self, w):
w = backend.convert_to_tensor(w)
return w * ops.cast(ops.greater_equal(w, 0.0), dtype=w.dtype)
@keras_core_export(
["keras_core.constraints.UnitNorm", "keras_core.constraints.unit_norm"]
)
class UnitNorm(Constraint):
"""Constrains the weights incident to each hidden unit to have unit norm.
Args:
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, axis=0):
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
return w / (
backend.epsilon()
+ ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
)
def get_config(self):
return {"axis": self.axis}
@keras_core_export(
["keras_core.constraints.MinMaxNorm", "keras_core.constraints.min_max_norm"]
)
class MinMaxNorm(Constraint):
"""MinMaxNorm weight constraint.
Constrains the weights incident to each hidden unit
to have the norm between a lower bound and an upper bound.
Args:
min_value: the minimum norm for the incoming weights.
max_value: the maximum norm for the incoming weights.
rate: rate for enforcing the constraint: weights will be
rescaled to yield
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.
Effectively, this means that rate=1.0 stands for strict
enforcement of the constraint, while rate<1.0 means that
weights will be rescaled at each step to slowly move
towards a value inside the desired interval.
axis: integer, axis along which to calculate weight norms.
For instance, in a `Dense` layer the weight matrix
has shape `(input_dim, output_dim)`,
set `axis` to `0` to constrain each weight vector
of length `(input_dim,)`.
In a `Conv2D` layer with `data_format="channels_last"`,
the weight tensor has shape
`(rows, cols, input_depth, output_depth)`,
set `axis` to `[0, 1, 2]`
to constrain the weights of each filter tensor of size
`(rows, cols, input_depth)`.
"""
def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
self.min_value = min_value
self.max_value = max_value
self.rate = rate
self.axis = axis
def __call__(self, w):
w = backend.convert_to_tensor(w)
norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
desired = (
self.rate * ops.clip(norms, self.min_value, self.max_value)
+ (1 - self.rate) * norms
)
return w * (desired / (backend.epsilon() + norms))
def get_config(self):
return {
"min_value": self.min_value,
"max_value": self.max_value,
"rate": self.rate,
"axis": self.axis,
}
| keras-core/keras_core/constraints/constraints.py/0 | {
"file_path": "keras-core/keras_core/constraints/constraints.py",
"repo_id": "keras-core",
"token_count": 3134
} | 26 |
import pytest
from keras_core import layers
from keras_core import ops
from keras_core import testing
class ExampleWrapper(layers.Wrapper):
"""Simple Wrapper subclass."""
def call(self, inputs, **kwargs):
return ops.cast(self.layer(inputs, **kwargs), self.compute_dtype)
class WrapperTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_wrapper_basics(self):
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2),
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2, activity_regularizer="l2"),
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.Dense(2),
"activity_regularizer": "l2",
},
input_shape=(2, 3),
expected_output_shape=(2, 2),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=1,
supports_masking=False,
)
self.run_layer_test(
ExampleWrapper,
init_kwargs={
"layer": layers.BatchNormalization(),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=2,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
| keras-core/keras_core/layers/core/wrapper_test.py/0 | {
"file_path": "keras-core/keras_core/layers/core/wrapper_test.py",
"repo_id": "keras-core",
"token_count": 1177
} | 27 |
from keras_core import constraints
from keras_core import initializers
from keras_core import ops
from keras_core import regularizers
from keras_core.api_export import keras_core_export
from keras_core.backend import standardize_dtype
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
@keras_core_export("keras_core.layers.BatchNormalization")
class BatchNormalization(Layer):
"""Layer that normalizes its inputs.
Batch normalization applies a transformation that maintains the mean output
close to 0 and the output standard deviation close to 1.
Importantly, batch normalization works differently during training and
during inference.
**During training** (i.e. when using `fit()` or when calling the layer/model
with the argument `training=True`), the layer normalizes its output using
the mean and standard deviation of the current batch of inputs. That is to
say, for each channel being normalized, the layer returns
`gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`, where:
- `epsilon` is small constant (configurable as part of the constructor
arguments)
- `gamma` is a learned scaling factor (initialized as 1), which
can be disabled by passing `scale=False` to the constructor.
- `beta` is a learned offset factor (initialized as 0), which
can be disabled by passing `center=False` to the constructor.
**During inference** (i.e. when using `evaluate()` or `predict()` or when
calling the layer/model with the argument `training=False` (which is the
default), the layer normalizes its output using a moving average of the
mean and standard deviation of the batches it has seen during training. That
is to say, it returns
`gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.
`self.moving_mean` and `self.moving_var` are non-trainable variables that
are updated each time the layer in called in training mode, as such:
- `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
- `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`
As such, the layer will only normalize its inputs during inference
*after having been trained on data that has similar statistics as the
inference data*.
Args:
axis: Integer, the axis that should be normalized
(typically the features axis). For instance, after a `Conv2D` layer
with `data_format="channels_first"`, use `axis=1`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If `True`, add offset of `beta` to normalized tensor.
If `False`, `beta` is ignored.
scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
When the next layer is linear this can be disabled
since the scaling will be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: Optional constraint for the beta weight.
gamma_constraint: Optional constraint for the gamma weight.
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
- `training=True`: The layer will normalize its inputs using
the mean and variance of the current batch of inputs.
- `training=False`: The layer will normalize its inputs using
the mean and variance of its moving statistics, learned during
training.
Reference:
- [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).
**About setting `layer.trainable = False` on a `BatchNormalization` layer:**
The meaning of setting `layer.trainable = False` is to freeze the layer,
i.e. its internal state will not change during training:
its trainable weights will not be updated
during `fit()` or `train_on_batch()`, and its state updates will not be run.
Usually, this does not necessarily mean that the layer is run in inference
mode (which is normally controlled by the `training` argument that can
be passed when calling a layer). "Frozen state" and "inference mode"
are two separate concepts.
However, in the case of the `BatchNormalization` layer, **setting
`trainable = False` on the layer means that the layer will be
subsequently run in inference mode** (meaning that it will use
the moving mean and the moving variance to normalize the current batch,
rather than using the mean and variance of the current batch).
Note that:
- Setting `trainable` on an model containing other layers will recursively
set the `trainable` value of all inner layers.
- If the value of the `trainable` attribute is changed after calling
`compile()` on a model, the new value doesn't take effect for this model
until `compile()` is called again.
"""
def __init__(
self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer="zeros",
gamma_initializer="ones",
moving_mean_initializer="zeros",
moving_variance_initializer="ones",
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs,
):
super().__init__(**kwargs)
self.axis = int(axis)
self.momentum = float(momentum)
self.epsilon = float(epsilon)
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.moving_mean_initializer = initializers.get(moving_mean_initializer)
self.moving_variance_initializer = initializers.get(
moving_variance_initializer
)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
def build(self, input_shape):
shape = (input_shape[self.axis],)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
)
self.moving_mean = self.add_weight(
shape=shape,
name="moving_mean",
initializer=self.moving_mean_initializer,
trainable=False,
)
self.moving_variance = self.add_weight(
shape=shape,
name="moving_variance",
initializer=self.moving_variance_initializer,
trainable=False,
)
self.input_spec = InputSpec(
ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}
)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
self._reduction_axes = reduction_axes
self.built = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, training=None, mask=None):
input_dtype = standardize_dtype(inputs.dtype)
if input_dtype in ("float16", "bfloat16"):
# BN is prone to overflowing for float16/bfloat16 inputs, so we opt
# out BN for mixed precision.
inputs = ops.cast(inputs, "float32")
broadcast_shape = [1] * len(inputs.shape)
broadcast_shape[self.axis] = inputs.shape[self.axis]
if training and self.trainable:
mean, variance = ops.moments(
inputs, axes=self._reduction_axes, keepdims=True
)
moving_mean = ops.cast(self.moving_mean, inputs.dtype)
moving_variance = ops.cast(self.moving_variance, inputs.dtype)
self.moving_mean.assign(
ops.cast(
moving_mean * self.momentum
+ ops.squeeze(mean, self._reduction_axes)
* (1.0 - self.momentum),
inputs.dtype,
)
)
self.moving_variance.assign(
ops.cast(
moving_variance * self.momentum
+ ops.squeeze(variance, self._reduction_axes)
* (1.0 - self.momentum),
inputs.dtype,
)
)
else:
moving_mean = ops.cast(self.moving_mean, inputs.dtype)
moving_variance = ops.cast(self.moving_variance, inputs.dtype)
moving_mean = ops.reshape(moving_mean, broadcast_shape)
moving_variance = ops.reshape(moving_variance, broadcast_shape)
mean = moving_mean
variance = moving_variance
inv = ops.rsqrt(variance + self.epsilon)
if self.scale:
gamma = ops.reshape(self.gamma, broadcast_shape)
gamma = ops.cast(gamma, inputs.dtype)
inv = inv * gamma
res = -mean * inv
if self.center:
beta = ops.reshape(self.beta, broadcast_shape)
beta = ops.cast(beta, inputs.dtype)
res = res + beta
# Note: Folding BatchNormalization depends on the precise order of ops
# that are generated by the expression below
outputs = inputs * inv + res
return ops.cast(outputs, input_dtype)
def get_config(self):
base_config = super().get_config()
config = {
"axis": self.axis,
"momentum": self.momentum,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": initializers.serialize(self.beta_initializer),
"gamma_initializer": initializers.serialize(self.gamma_initializer),
"moving_mean_initializer": initializers.serialize(
self.moving_mean_initializer
),
"moving_variance_initializer": initializers.serialize(
self.moving_variance_initializer
),
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
"beta_constraint": constraints.serialize(self.beta_constraint),
"gamma_constraint": constraints.serialize(self.gamma_constraint),
}
return {**base_config, **config}
| keras-core/keras_core/layers/normalization/batch_normalization.py/0 | {
"file_path": "keras-core/keras_core/layers/normalization/batch_normalization.py",
"repo_id": "keras-core",
"token_count": 4762
} | 28 |
from keras_core import backend
from keras_core import ops
from keras_core.layers.input_spec import InputSpec
from keras_core.layers.layer import Layer
from keras_core.ops.operation_utils import compute_pooling_output_shape
from keras_core.utils import argument_validation
class BasePooling(Layer):
"""Base pooling layer."""
def __init__(
self,
pool_size,
strides,
pool_dimensions,
pool_mode="max",
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(name=name, **kwargs)
self.pool_size = argument_validation.standardize_tuple(
pool_size, pool_dimensions, "pool_size"
)
strides = pool_size if strides is None else strides
self.strides = argument_validation.standardize_tuple(
strides, pool_dimensions, "strides", allow_zero=True
)
self.pool_mode = pool_mode
self.padding = padding
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=pool_dimensions + 2)
def call(self, inputs):
if self.pool_mode == "max":
return ops.max_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
elif self.pool_mode == "average":
return ops.average_pool(
inputs,
pool_size=self.pool_size,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
)
else:
raise ValueError(
"`pool_mode` must be either 'max' or 'average'. Received: "
f"{self.pool_mode}."
)
def compute_output_shape(self, input_shape):
return compute_pooling_output_shape(
input_shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def get_config(self):
config = super().get_config()
config.update(
{
"pool_size": self.pool_size,
"padding": self.padding,
"strides": self.strides,
"data_format": self.data_format,
}
)
return config
| keras-core/keras_core/layers/pooling/base_pooling.py/0 | {
"file_path": "keras-core/keras_core/layers/pooling/base_pooling.py",
"repo_id": "keras-core",
"token_count": 1225
} | 29 |
import pytest
from absl.testing import parameterized
from keras_core import backend
from keras_core import layers
from keras_core import testing
class ReshapeTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
[
{"testcase_name": "dense", "sparse": False},
{"testcase_name": "sparse", "sparse": True},
]
)
@pytest.mark.requires_trainable_backend
def test_reshape(self, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors.")
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (8,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, 4)},
input_shape=(3, 8),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1, 1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8, 1),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (1, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 1, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (-1,)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 8),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
self.run_layer_test(
layers.Reshape,
init_kwargs={"target_shape": (2, -1)},
input_shape=(3, 2, 4),
input_sparse=sparse,
expected_output_shape=(3, 2, 4),
expected_output_sparse=sparse,
run_training_check=not sparse,
)
def test_reshape_with_dynamic_batch_size(self):
input_layer = layers.Input(shape=(2, 4))
reshaped = layers.Reshape((8,))(input_layer)
self.assertEqual(reshaped.shape, (None, 8))
def test_reshape_sets_static_shape(self):
input_layer = layers.Input(batch_shape=(2, None))
reshaped = layers.Reshape((3, 5))(input_layer)
# Also make sure the batch dim is not lost after reshape.
self.assertEqual(reshaped.shape, (2, 3, 5))
| keras-core/keras_core/layers/reshaping/reshape_test.py/0 | {
"file_path": "keras-core/keras_core/layers/reshaping/reshape_test.py",
"repo_id": "keras-core",
"token_count": 1693
} | 30 |
from keras_core import backend
from keras_core.api_export import keras_core_export
from keras_core.backend.common import global_state
@keras_core_export(
[
"keras_core.mixed_precision.DTypePolicy",
"keras_core.mixed_precision.Policy",
]
)
class DTypePolicy:
"""A dtype policy for a Keras layer.
A dtype policy determines a layer's computation and variable dtypes. Each
layer has a policy. Policies can be passed to the `dtype` argument of layer
constructors, or a global policy can be set with
`keras_core.mixed_precision.set_dtype_policy`.
Args:
name: The policy name, which determines the compute and variable dtypes.
Can be any dtype name, such as `"float32"` or `"float64"`,
which causes both the compute and variable dtypes
will be that dtype.
Can also be the string `"mixed_float16"` or `"mixed_bfloat16"`,
which causes the compute dtype to be `float16` or `bfloat16`
and the variable dtype to be `float32`.
Typically you only need to interact with dtype policies when using mixed
precision, which is the use of float16 or bfloat16 for computations and
float32 for variables. This is why the term `mixed_precision` appears in the
API name. Mixed precision can be enabled by passing `"mixed_float16"` or
`"mixed_bfloat16"` to `keras_core.mixed_precision.set_dtype_policy()`.
>>> keras_core.mixed_precision.set_dtype_policy("mixed_float16")
>>> layer1 = keras_core.layers.Dense(10)
>>> layer1.dtype_policy # layer1 will automatically use mixed precision
<DTypePolicy "mixed_float16">
>>> # Can optionally override layer to use float32
>>> # instead of mixed precision.
>>> layer2 = keras_core.layers.Dense(10, dtype="float32")
>>> layer2.dtype_policy
<DTypePolicy "float32">
>>> # Set policy back to initial float32.
>>> keras_core.mixed_precision.set_dtype_policy('float32')
In the example above, passing `dtype="float32"` to the layer is
equivalent to passing
`dtype=keras_core.mixed_precision.DTypePolicy("float32")`.
In general, passing a dtype policy name to a layer is equivalent
to passing the corresponding policy, so it is never necessary
to explicitly construct a `DTypePolicy` object.
"""
def __init__(self, name):
if not isinstance(name, str):
raise TypeError(
"'name' must be a string, such as 'mixed_float16'. "
f"Received: name={name} (of type {type(name)})"
)
self._name = name
self._compute_dtype, self._variable_dtype = self._parse_name(name)
# TODO: check that the current hardware supports the provided
# dtype policy and raise/warn otherwise.
def _parse_name(self, name):
"""Parses a `DTypePolicy` name into a compute and variable dtype.
Args:
name: The name of the policy.
Returns:
The `(compute_dtype, variable_dtype)` pair.
"""
if name == "mixed_float16":
return "float16", "float32"
elif name == "mixed_bfloat16":
return "bfloat16", "float32"
try:
dtype = backend.standardize_dtype(name)
return dtype, dtype
except ValueError:
raise ValueError(
f"Cannot convert '{name}' to a mixed precision DTypePolicy."
" Valid policies include 'mixed_float16', 'mixed_bfloat16', "
"and the name of any dtype such as 'float32'."
)
@property
def variable_dtype(self):
"""The variable dtype of this policy.
This is the dtype layers will create their variables in, unless a layer
explicitly chooses a different dtype. If this is different than
`DTypePolicy.compute_dtype`, Layers will cast variables to
the compute dtype to avoid type errors.
Variable regularizers are run in the variable dtype, not the compute
dtype.
Returns:
The variable dtype of this policy, as a string.
"""
return self._variable_dtype
@property
def compute_dtype(self):
"""The compute dtype of this policy.
This is the dtype layers will do their computations in. Typically layers
output tensors with the compute dtype as well.
Note that even if the compute dtype is float16 or bfloat16, hardware
devices may not do individual adds, multiplies, and other fundamental
operations in float16 or bfloat16, but instead may do some of them in
float32 for numeric stability. The compute dtype is the dtype of the
inputs and outputs of the ops that the layer executes.
Internally, many ops will do certain internal calculations in
float32 or some other device-internal intermediate format with higher
precision than float16/bfloat16, to increase numeric stability.
Returns:
The compute dtype of this policy, as a string.
"""
return self._compute_dtype
@property
def name(self):
"""Returns the name of this policy."""
return self._name
def __repr__(self):
return f'<DTypePolicy "{self._name}">'
def get_config(self):
return {"name": self.name}
@classmethod
def from_config(cls, config):
return cls(**config)
@keras_core_export(
[
"keras_core.mixed_precision.set_dtype_policy",
"keras_core.mixed_precision.set_global_policy",
]
)
def set_dtype_policy(policy):
"""Sets the default dtype policy globally.
Example:
>>> keras_core.mixed_precision.set_dtype_policy("mixed_float16")
"""
if not isinstance(policy, DTypePolicy):
if isinstance(policy, str):
policy = DTypePolicy(policy)
else:
raise ValueError(
"Invalid `policy` argument. "
"Expected the string name of a policy "
"(such as 'mixed_float16') or a `DTypePolicy` "
f"instance. Received: policy={policy} "
f"(of type {type(policy)})"
)
global_state.set_global_attribute("dtype_policy", policy)
@keras_core_export(
[
"keras_core.mixed_precision.dtype_policy",
"keras_core.mixed_precision.global_policy",
]
)
def dtype_policy():
"""Returns the current default dtype policy object."""
policy = global_state.get_global_attribute("dtype_policy", None)
if policy is None:
policy = DTypePolicy(backend.floatx())
set_dtype_policy(policy)
return policy
| keras-core/keras_core/mixed_precision/dtype_policy.py/0 | {
"file_path": "keras-core/keras_core/mixed_precision/dtype_policy.py",
"repo_id": "keras-core",
"token_count": 2656
} | 31 |
from keras_core.testing.test_case import TestCase
| keras-core/keras_core/testing/__init__.py/0 | {
"file_path": "keras-core/keras_core/testing/__init__.py",
"repo_id": "keras-core",
"token_count": 15
} | 32 |
import numpy as np
import tensorflow as tf
from keras_core import testing
from keras_core.trainers.data_adapters import tf_dataset_adapter
class TestTFDatasetAdapter(testing.TestCase):
def test_basic_flow(self):
x = tf.random.normal((34, 4))
y = tf.random.normal((34, 2))
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(base_ds)
self.assertEqual(adapter.num_batches, 3)
self.assertEqual(adapter.batch_size, None)
self.assertEqual(adapter.has_partial_batch, None)
self.assertEqual(adapter.partial_batch_size, None)
gen = adapter.get_numpy_iterator()
for i, batch in enumerate(gen):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, "float32")
if i < 2:
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
else:
self.assertEqual(bx.shape, (2, 4))
self.assertEqual(by.shape, (2, 2))
ds = adapter.get_tf_dataset()
for i, batch in enumerate(ds):
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.dtype, "float32")
if i < 2:
self.assertEqual(tuple(bx.shape), (16, 4))
self.assertEqual(tuple(by.shape), (16, 2))
else:
self.assertEqual(tuple(bx.shape), (2, 4))
self.assertEqual(tuple(by.shape), (2, 2))
def _test_class_weights(self, target_encoding="int"):
x = np.random.random((4, 2))
if target_encoding == "int":
y = np.array([[0], [1], [2], [3]], dtype="int64")
else:
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
class_weight = {
0: 0.1,
1: 0.2,
2: 0.3,
3: 0.4,
}
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
self.assertEqual(len(batch), 3)
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_class_weights_int_targets(self):
self._test_class_weights(target_encoding="int")
def test_class_weights_categorical_targets(self):
self._test_class_weights(target_encoding="categorical")
def test_num_batches(self):
dataset = tf.data.Dataset.range(42)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, 42)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertEqual(adapter.num_batches, 42)
# Test for Infiniate Cardinality
dataset = tf.data.Dataset.range(42)
dataset = dataset.repeat()
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.INFINITE_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
# Test for Unknown Cardinality
dataset = dataset.filter(lambda x: True)
cardinality = int(dataset.cardinality())
self.assertEqual(cardinality, tf.data.UNKNOWN_CARDINALITY)
adapter = tf_dataset_adapter.TFDatasetAdapter(dataset)
self.assertIsNone(adapter.num_batches)
def test_invalid_dataset_type(self):
with self.assertRaisesRegex(
ValueError, "Expected argument `dataset` to be a tf.data.Dataset"
):
invalid_data = "This is not a tf.data.Dataset"
tf_dataset_adapter.TFDatasetAdapter(invalid_data)
def test_class_weight_and_sample_weight_together(self):
x = np.random.random((4, 2))
y = np.array([[0], [1], [2], [3]], dtype="int64")
sw = np.array([0.5, 0.5, 0.5, 0.5])
base_ds = tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(16)
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
with self.assertRaisesRegex(
ValueError,
"You cannot `class_weight` and `sample_weight` at the same time.",
):
tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
def test_different_y_shapes_with_class_weight(self):
x = np.random.random((4, 2))
y = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
dtype="float32",
)
base_ds = tf.data.Dataset.from_tensor_slices((x, y)).batch(16)
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
y_sparse = np.array([0, 1, 2, 3], dtype="int64")
base_ds = tf.data.Dataset.from_tensor_slices((x, y_sparse)).batch(16)
adapter = tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
gen = adapter.get_numpy_iterator()
for batch in gen:
_, _, bw = batch
self.assertAllClose(bw, [0.1, 0.2, 0.3, 0.4])
def test_nested_y_with_class_weight(self):
x = np.random.random((4, 2))
# Define two target outputs, y1 and y2, for the dataset
y1 = np.array([0, 1, 2, 3], dtype="int64")
y2 = np.array([0, 1, 2, 3], dtype="int64")
# Create a tf.data Dataset from the input data and two target outputs
base_ds = tf.data.Dataset.from_tensor_slices((x, (y1, y2))).batch(16)
# Define class weights for potential classes in the output
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
with self.assertRaisesRegex(
ValueError,
"`class_weight` is only supported for Models with a single output.",
):
tf_dataset_adapter.TFDatasetAdapter(
base_ds, class_weight=class_weight
)
def test_class_weights_map_fn_with_sample_weight(self):
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
class_weights_map_fn = tf_dataset_adapter.make_class_weight_map_fn(
class_weight
)
x = np.array([[0.5, 0.5], [0.5, 0.5]])
y = np.array([[1, 0], [0, 1]])
sw = np.array([1.0, 1.0])
with self.assertRaisesRegex(
ValueError,
"You cannot `class_weight` and `sample_weight` at the same time.",
):
class_weights_map_fn(x, y, sw)
def test_class_weights_map_fn_nested_y(self):
class_weight = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
class_weights_map_fn = tf_dataset_adapter.make_class_weight_map_fn(
class_weight
)
x = np.array([[0.5, 0.5]])
y1 = np.array([1])
y2 = np.array([0])
with self.assertRaisesRegex(
ValueError,
"`class_weight` is only supported for Models with a single output.",
):
class_weights_map_fn(x, (y1, y2))
| keras-core/keras_core/trainers/data_adapters/tf_dataset_adapter_test.py/0 | {
"file_path": "keras-core/keras_core/trainers/data_adapters/tf_dataset_adapter_test.py",
"repo_id": "keras-core",
"token_count": 3983
} | 33 |
from keras_core.api_export import keras_core_export
from keras_core.layers import Layer
@keras_core_export("keras_core.layers.TorchModuleWrapper")
class TorchModuleWrapper(Layer):
"""Torch module wrapper layer.
`TorchModuleWrapper` is a wrapper class that can turn any
`torch.nn.Module` into a Keras layer, in particular by making its
parameters trackable by Keras.
Args:
module: `torch.nn.Module` instance. If it's a `LazyModule`
instance, then its parameters must be initialized before
passing the instance to `TorchModuleWrapper` (e.g. by calling
it once).
name: The name of the layer (string).
Examples:
Here's an example of how the `TorchModuleWrapper` can be used with vanilla
PyTorch modules.
```python
import torch.nn as nn
import torch.nn.functional as F
import keras_core
from keras_core.layers import TorchModuleWrapper
class Classifier(keras_core.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Wrap `torch.nn.Module`s with `TorchModuleWrapper`
# if they contain parameters
self.conv1 = TorchModuleWrapper(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))
)
self.conv2 = TorchModuleWrapper(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))
)
self.pool = nn.MaxPool2d(kernel_size=(2, 2))
self.flatten = nn.Flatten()
self.dropout = nn.Dropout(p=0.5)
self.fc = TorchModuleWrapper(nn.Linear(1600, 10))
def call(self, inputs):
x = F.relu(self.conv1(inputs))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = self.flatten(x)
x = self.dropout(x)
x = self.fc(x)
return F.softmax(x, dim=1)
model = Classifier()
model.build((1, 28, 28))
print("Output shape:", model(torch.ones(1, 1, 28, 28).to("cuda")).shape)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"]
)
model.fit(train_loader, epochs=5)
```
"""
def __init__(self, module, name=None):
super().__init__(name=name)
import torch.nn as nn
if (
isinstance(module, nn.modules.lazy.LazyModuleMixin)
and module.has_uninitialized_params()
):
raise ValueError(
"LazyModules are not supported unless they "
"are already initialized. "
f"Received uninitialized LazyModule: module={module}"
)
from keras_core.backend.torch.core import get_device
self.module = module.to(get_device())
self._track_module_parameters()
def parameters(self, recurse=True):
return self.module.parameters(recurse=recurse)
def _track_module_parameters(self):
from keras_core.backend.torch import Variable
for param in self.module.parameters():
variable = Variable(
initializer=param, trainable=param.requires_grad
)
variable._value = param
self._track_variable(variable)
self.built = True
def call(self, *args, **kwargs):
return self.module.forward(*args, **kwargs)
| keras-core/keras_core/utils/torch_utils.py/0 | {
"file_path": "keras-core/keras_core/utils/torch_utils.py",
"repo_id": "keras-core",
"token_count": 1593
} | 34 |
#!/bin/bash
SETUP="
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import keras_cv.point_cloud
num_points = 200000
num_boxes = 1000
box_dimension = 20.0
def get_points_boxes():
points = tf.random.uniform(
shape=[num_points, 2], minval=0, maxval=box_dimension, dtype=tf.float32
)
points_z = 5.0 * tf.ones(shape=[num_points, 1], dtype=tf.float32)
points = tf.concat([points, points_z], axis=-1)
boxes_x = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_y = tf.random.uniform(
shape=[num_boxes, 1],
minval=0,
maxval=box_dimension - 1.0,
dtype=tf.float32,
)
boxes_dx = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dx = tf.math.minimum(box_dimension - boxes_x, boxes_dx)
boxes_dy = tf.random.uniform(
shape=[num_boxes, 1], minval=0, maxval=5.0, dtype=tf.float32
)
boxes_dy = tf.math.minimum(box_dimension - boxes_y, boxes_dy)
boxes_z = 5.0 * tf.ones([num_boxes, 1], dtype=tf.float32)
boxes_dz = 3.0 * tf.ones([num_boxes, 1], dtype=tf.float32)
boxes_angle = tf.zeros([num_boxes, 1], dtype=tf.float32)
boxes = tf.concat(
[boxes_x, boxes_y, boxes_z, boxes_dx, boxes_dy, boxes_dz, boxes_angle],
axis=-1,
)
return points, boxes
points, boxes = get_points_boxes();
"
echo "----------------------------------------"
echo "benchmark_within_any_box3d"
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d(points, boxes)"
echo "----------------------------------------"
echo benchmark_within_any_box3d_v2
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d_v2(points, boxes)"
echo "----------------------------------------"
echo benchmark_within_any_box3d_v3
python -m timeit -s "$SETUP" \
"keras_cv.point_cloud.is_within_any_box3d_v3(points, boxes)"
| keras-cv/benchmarks/custom_ops/within_any_box.sh/0 | {
"file_path": "keras-cv/benchmarks/custom_ops/within_any_box.sh",
"repo_id": "keras-cv",
"token_count": 885
} | 35 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras_cv import bounding_box
from keras_cv.layers import RandomRotation
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
H_AXIS = -3
W_AXIS = -2
class OldRandomRotation(BaseImageAugmentationLayer):
"""A preprocessing layer which randomly rotates images during training.
This layer will apply random rotations to each image, filling empty space
according to `fill_mode`.
By default, random rotations are only applied during training.
At inference time, the layer does nothing. If you need to apply random
rotations at inference time, set `training` to True when calling the layer.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Arguments:
factor: a float represented as fraction of 2 Pi, or a tuple of size 2
representing lower and upper bound for rotating clockwise and
counter-clockwise. A positive values means rotating counter clock-wise,
while a negative value means clock-wise. When represented as a single
float, this value is used for both the upper and lower bound. For
instance, `factor=(-0.2, 0.3)` results in an output rotation by a random
amount in the range `[-20% * 2pi, 30% * 2pi]`. `factor=0.2` results in
an output rotating by a random amount in the range
`[-20% * 2pi, 20% * 2pi]`.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
bounding_box_format: The format of bounding boxes of input dataset. Refer
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
segmentation_classes: an optional integer with the number of classes in
the input segmentation mask. Required iff augmenting data with sparse
(non one-hot) segmentation masks. Include the background class in this
count (e.g. for segmenting dog vs background, this should be set to 2).
"""
def __init__(
self,
factor,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
bounding_box_format=None,
segmentation_classes=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.lower = factor[0]
self.upper = factor[1]
else:
self.lower = -factor
self.upper = factor
if self.upper < self.lower:
raise ValueError(
"Factor cannot have negative values, " "got {}".format(factor)
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
self.bounding_box_format = bounding_box_format
self.segmentation_classes = segmentation_classes
def get_random_transformation(self, **kwargs):
min_angle = self.lower * 2.0 * np.pi
max_angle = self.upper * 2.0 * np.pi
angle = self._random_generator.uniform(
shape=[1], minval=min_angle, maxval=max_angle
)
return {"angle": angle}
def augment_image(self, image, transformation, **kwargs):
return self._rotate_image(image, transformation)
def _rotate_image(self, image, transformation):
image = preprocessing_utils.ensure_tensor(image, self.compute_dtype)
original_shape = image.shape
image = tf.expand_dims(image, 0)
image_shape = tf.shape(image)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
angle = transformation["angle"]
output = preprocessing_utils.transform(
image,
preprocessing_utils.get_rotation_matrix(angle, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
output = tf.squeeze(output, 0)
output.set_shape(original_shape)
return output
def augment_bounding_boxes(
self, bounding_boxes, transformation, image=None, **kwargs
):
if self.bounding_box_format is None:
raise ValueError(
"`RandomRotation()` was called with bounding boxes, "
"but no `bounding_box_format` was specified in the "
"constructor. Please specify a bounding box format in the "
"constructor. i.e. "
"`RandomRotation(bounding_box_format='xyxy')`"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="xyxy",
images=image,
)
image_shape = tf.shape(image)
h = image_shape[H_AXIS]
w = image_shape[W_AXIS]
# origin coordinates, all the points on the image are rotated around
# this point
origin_x, origin_y = tf.cast(w / 2, dtype=self.compute_dtype), tf.cast(
h / 2, dtype=self.compute_dtype
)
angle = transformation["angle"]
angle = -angle
# calculate coordinates of all four corners of the bounding box
boxes = bounding_boxes["boxes"]
point = tf.stack(
[
tf.stack([boxes[:, 0], boxes[:, 1]], axis=1),
tf.stack([boxes[:, 2], boxes[:, 1]], axis=1),
tf.stack([boxes[:, 2], boxes[:, 3]], axis=1),
tf.stack([boxes[:, 0], boxes[:, 3]], axis=1),
],
axis=1,
)
# point_x : x coordinates of all corners of the bounding box
point_x = tf.gather(point, [0], axis=2)
# point_y : y coordinates of all corners of the bounding box
point_y = tf.gather(point, [1], axis=2)
# rotated bounding box coordinates
# new_x : new position of x coordinates of corners of bounding box
new_x = (
origin_x
+ tf.multiply(
tf.cos(angle), tf.cast((point_x - origin_x), dtype=tf.float32)
)
- tf.multiply(
tf.sin(angle), tf.cast((point_y - origin_y), dtype=tf.float32)
)
)
# new_y : new position of y coordinates of corners of bounding box
new_y = (
origin_y
+ tf.multiply(
tf.sin(angle), tf.cast((point_x - origin_x), dtype=tf.float32)
)
+ tf.multiply(
tf.cos(angle), tf.cast((point_y - origin_y), dtype=tf.float32)
)
)
# rotated bounding box coordinates
out = tf.concat([new_x, new_y], axis=2)
# find readjusted coordinates of bounding box to represent it in corners
# format
min_coordinates = tf.math.reduce_min(out, axis=1)
max_coordinates = tf.math.reduce_max(out, axis=1)
boxes = tf.concat([min_coordinates, max_coordinates], axis=1)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes,
bounding_box_format="xyxy",
images=image,
)
# coordinates cannot be float values, it is casted to int32
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="xyxy",
target=self.bounding_box_format,
dtype=self.compute_dtype,
images=image,
)
return bounding_boxes
def augment_label(self, label, transformation, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
# If segmentation_classes is specified, we have a dense segmentation
# mask. We therefore one-hot encode before rotation to avoid bad
# interpolation during the rotation transformation. We then make the
# mask sparse again using tf.argmax.
if self.segmentation_classes:
one_hot_mask = tf.one_hot(
tf.squeeze(segmentation_mask, axis=-1),
self.segmentation_classes,
)
rotated_one_hot_mask = self._rotate_image(
one_hot_mask, transformation
)
rotated_mask = tf.argmax(rotated_one_hot_mask, axis=-1)
return tf.expand_dims(rotated_mask, axis=-1)
else:
if segmentation_mask.shape[-1] == 1:
raise ValueError(
"Segmentation masks must be one-hot encoded, or "
"RandomRotate must be initialized with "
"`segmentation_classes`. `segmentation_classes` was not "
f"specified, and mask has shape {segmentation_mask.shape}"
)
rotated_mask = self._rotate_image(segmentation_mask, transformation)
# Round because we are in one-hot encoding, and we may have
# pixels with ambiguous value due to floating point math for
# rotation.
return tf.round(rotated_mask)
def get_config(self):
config = {
"factor": self.factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"bounding_box_format": self.bounding_box_format,
"segmentation_classes": self.segmentation_classes,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class RandomRotationTest(tf.test.TestCase):
def test_consistency_with_old_implementation_bounding_boxes(self):
input_image = np.random.random((2, 20, 20, 3)).astype(np.float32)
bboxes = {
"boxes": tf.ragged.constant(
[[[2, 2, 4, 4], [1, 1, 3, 3]], [[2, 2, 4, 4]]],
dtype=tf.float32,
),
"classes": tf.ragged.constant(
[[0, 1], [0]],
dtype=tf.float32,
),
}
input = {
"images": input_image,
"bounding_boxes": bboxes,
}
layer = RandomRotation(factor=(0.5, 0.5), bounding_box_format="xyxy")
old_layer = OldRandomRotation(
factor=(0.5, 0.5), bounding_box_format="xyxy"
)
output = layer(input, training=True)
old_output = old_layer(input, training=True)
self.assertAllClose(output["images"], old_output["images"])
self.assertAllClose(
output["bounding_boxes"]["classes"],
old_output["bounding_boxes"]["classes"],
)
self.assertAllClose(
output["bounding_boxes"]["boxes"].to_tensor(),
old_output["bounding_boxes"]["boxes"].to_tensor(),
)
def test_consistency_with_old_implementation_segmentation_masks(self):
num_classes = 10
input_image = np.random.random((2, 20, 20, 3)).astype(np.float32)
masks = np.random.randint(2, size=(2, 20, 20, 1)) * (num_classes - 1)
input = {
"images": input_image,
"segmentation_masks": masks,
}
layer = RandomRotation(
factor=(0.5, 0.5),
segmentation_classes=num_classes,
)
old_layer = OldRandomRotation(
factor=(0.5, 0.5),
segmentation_classes=num_classes,
)
output = layer(input, training=True)
old_output = old_layer(input, training=True)
self.assertAllClose(output["images"], old_output["images"])
self.assertAllClose(
output["segmentation_masks"], old_output["segmentation_masks"]
)
if __name__ == "__main__":
# Run benchmark
(x_train, _), _ = keras.datasets.cifar10.load_data()
x_train = x_train.astype(np.float32)
num_images = [100, 200, 500, 1000]
num_classes = 10
results = {}
aug_candidates = [RandomRotation, OldRandomRotation]
aug_args = {"factor": 0.5}
for aug in aug_candidates:
# Eager Mode
c = aug.__name__
layer = aug(**aug_args)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
layer(x_train[:n_images])
t0 = time.time()
r1 = layer(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# Graph Mode
c = aug.__name__ + " Graph Mode"
layer = aug(**aug_args)
@tf.function()
def apply_aug(inputs):
return layer(inputs)
runtimes = []
print(f"Timing {c}")
for n_images in num_images:
# warmup
apply_aug(x_train[:n_images])
t0 = time.time()
r1 = apply_aug(x_train[:n_images])
t1 = time.time()
runtimes.append(t1 - t0)
print(f"Runtime for {c}, n_images={n_images}: {t1-t0}")
results[c] = runtimes
# XLA Mode
# cannot run tf.raw_ops.ImageProjectiveTransformV3 on XLA
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison.png")
# So we can actually see more relevant margins
del results[aug_candidates[1].__name__]
plt.figure()
for key in results:
plt.plot(num_images, results[key], label=key)
plt.xlabel("Number images")
plt.ylabel("Runtime (seconds)")
plt.legend()
plt.savefig("comparison_no_old_eager.png")
# Run unit tests
tf.test.main()
| keras-cv/benchmarks/vectorized_random_rotation.py/0 | {
"file_path": "keras-cv/benchmarks/vectorized_random_rotation.py",
"repo_id": "keras-cv",
"token_count": 7228
} | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.