| text (stringlengths 5–261k) | id (stringlengths 16–106) | metadata (dict) | __index_level_0__ (int64 0–266) |
---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Title: Benchmarking a KerasCV model against ImageNetV2
Author: [DavidLandup0](https://github.com/DavidLandup0)
Date created: 2022/12/14
Last modified: 2022/12/14
Description: Use KerasCV architectures and benchmark them against ImageNetV2
from TensorFlow Datasets
"""
import sys
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import flags
from tensorflow import keras
from keras_cv import models
flags.DEFINE_string(
"model_name", None, "The name of the model in KerasCV.models to use."
)
flags.DEFINE_boolean(
"include_rescaling",
True,
"Whether to include rescaling or not at the start of the model.",
)
flags.DEFINE_string(
"model_kwargs",
"{}",
"Keyword argument dictionary to pass to the constructor of the model being"
" evaluated.",
)
flags.DEFINE_integer(
"batch_size",
32,
"The batch size for the evaluation set.",
)
flags.DEFINE_string(
"weights",
"imagenet",
"The path to the weights to load for the model.",
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
model = models.__dict__[FLAGS.model_name]
model = model(
include_rescaling=FLAGS.include_rescaling,
include_top=True,
num_classes=1000,
input_shape=(224, 224, 3),
weights=FLAGS.weights,
**eval(FLAGS.model_kwargs),
)
model.compile(
"adam",
"sparse_categorical_crossentropy",
metrics=["accuracy", keras.metrics.SparseTopKCategoricalAccuracy(5)],
)
def preprocess_image(img, label):
img = tf.image.resize(img, (224, 224))
img = tf.cast(img, tf.float32)
return img, label
# TODO: Include imagenet_val and imagenet_real as well and report
# results for all three.
(test_set), info = tfds.load(
"imagenet_v2", split=["test"], as_supervised=True, with_info=True
)
test_set = (
test_set[0]
.shuffle(info.splits["test"].num_examples)
.map(preprocess_image)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
# TODO: Create a nicer report, including inference time,
# model size, etc.
loss, acc, top_5 = model.evaluate(test_set, verbose=0)
print(
f"Benchmark results:\n{'='*25}\n{FLAGS.model_name} achieves: \n - Top-1 "
f"Accuracy: {acc*100} \n - Top-5 Accuracy: {top_5*100} \non ImageNetV2 "
"with setup:"
)
print(
f"- model_name: {FLAGS.model_name}\n"
f"- include_rescaling: {FLAGS.include_rescaling}\n"
f"- batch_size: {FLAGS.batch_size}\n"
f"- weights: {FLAGS.weights}\n"
f"- model_kwargs: {FLAGS.model_kwargs}\n"
)
| keras-cv/examples/benchmarking/imagenet_v2.py/0 | {
"file_path": "keras-cv/examples/benchmarking/imagenet_v2.py",
"repo_id": "keras-cv",
"token_count": 1125
} | 37 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""cut_mix_demo.py shows how to use the CutMix preprocessing layer.
Operates on the oxford_flowers102 dataset. In this script the flowers
are loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
import tensorflow as tf
from keras_cv import layers
def main():
cutmix = layers.CutMix()
ds = demo_utils.load_oxford_dataset()
ds = ds.map(cutmix, num_parallel_calls=tf.data.AUTOTUNE)
demo_utils.visualize_dataset(ds)
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/cut_mix_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/cut_mix_demo.py",
"repo_id": "keras-cv",
"token_count": 342
} | 38 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import.
import torch
del torch
except ImportError:
pass
# isort:off
from keras_cv import version_check
version_check.check_tf_version()
# isort:on
from keras_cv import bounding_box
from keras_cv import callbacks
from keras_cv import datasets
from keras_cv import layers
from keras_cv import losses
from keras_cv import metrics
from keras_cv import models
from keras_cv import training
from keras_cv import utils
from keras_cv import visualization
from keras_cv.core import ConstantFactorSampler
from keras_cv.core import FactorSampler
from keras_cv.core import NormalFactorSampler
from keras_cv.core import UniformFactorSampler
from keras_cv.version_utils import __version__
from keras_cv.version_utils import version
| keras-cv/keras_cv/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/__init__.py",
"repo_id": "keras-cv",
"token_count": 410
} | 39 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains functions to compute ious of bounding boxes."""
import math
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
def _compute_area(box):
"""Computes area for bounding boxes
Args:
box: [N, 4] or [batch_size, N, 4] float Tensor, either batched
or unbatched boxes.
Returns:
a float Tensor of [N] or [batch_size, N]
"""
y_min, x_min, y_max, x_max = ops.split(box[..., :4], 4, axis=-1)
return ops.squeeze((y_max - y_min) * (x_max - x_min), axis=-1)
def _compute_intersection(boxes1, boxes2):
"""Computes intersection area between two sets of boxes.
Args:
boxes1: [N, 4] or [batch_size, N, 4] float Tensor boxes.
boxes2: [M, 4] or [batch_size, M, 4] float Tensor boxes.
Returns:
a [N, M] or [batch_size, N, M] float Tensor.
"""
y_min1, x_min1, y_max1, x_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
y_min2, x_min2, y_max2, x_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
boxes2_rank = len(boxes2.shape)
perm = [1, 0] if boxes2_rank == 2 else [0, 2, 1]
# [N, M] or [batch_size, N, M]
intersect_ymax = ops.minimum(y_max1, ops.transpose(y_max2, perm))
intersect_ymin = ops.maximum(y_min1, ops.transpose(y_min2, perm))
intersect_xmax = ops.minimum(x_max1, ops.transpose(x_max2, perm))
intersect_xmin = ops.maximum(x_min1, ops.transpose(x_min2, perm))
intersect_height = intersect_ymax - intersect_ymin
intersect_width = intersect_xmax - intersect_xmin
zeros_t = ops.cast(0, intersect_height.dtype)
intersect_height = ops.maximum(zeros_t, intersect_height)
intersect_width = ops.maximum(zeros_t, intersect_width)
return intersect_height * intersect_width
@keras_cv_export("keras_cv.bounding_box.compute_iou")
def compute_iou(
boxes1,
boxes2,
bounding_box_format,
use_masking=False,
mask_val=-1,
images=None,
image_shape=None,
):
"""Computes a lookup table vector containing the ious for a given set boxes.
The lookup vector is to be indexed by [`boxes1_index`,`boxes2_index`] if
boxes are unbatched and by [`batch`, `boxes1_index`,`boxes2_index`] if the
boxes are batched.
Users can pass `boxes1` and `boxes2` with different ranks. For example:
1) `boxes1`: [batch_size, M, 4], `boxes2`: [batch_size, N, 4] -> return
[batch_size, M, N].
2) `boxes1`: [batch_size, M, 4], `boxes2`: [N, 4] -> return
[batch_size, M, N]
3) `boxes1`: [M, 4], `boxes2`: [batch_size, N, 4] -> return
[batch_size, M, N]
4) `boxes1`: [M, 4], `boxes2`: [N, 4] -> return [M, N]
Args:
boxes1: a list of bounding boxes in 'corners' format. Can be batched or
unbatched.
boxes2: a list of bounding boxes in 'corners' format. Can be batched or
unbatched.
bounding_box_format: a case-insensitive string which is one of `"xyxy"`,
`"rel_xyxy"`, `"xyWH"`, `"center_xyWH"`, `"yxyx"`, `"rel_yxyx"`.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
use_masking: whether masking will be applied. This will mask all `boxes1`
or `boxes2` that have values less than 0 in all its 4 dimensions.
Defaults to `False`.
mask_val: int value used to mask the returned IoUs when `use_masking` is
True. Defaults to -1.
images: (Optional) image tensor(s), forwarded to
`keras_cv.bounding_box.convert_format()` when converting the box
format.
image_shape: (Optional) the shape of the images, an alternative to
passing `images`.
Returns:
iou_lookup_table: a vector containing the pairwise ious of boxes1 and
boxes2.
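For example, a minimal sketch with unbatched `"xyxy"` boxes (assumes
`keras_cv` and NumPy as `np` are imported):
```python
boxes1 = np.array([[0, 0, 10, 10], [5, 5, 10, 10]], "float32")
boxes2 = np.array([[0, 0, 10, 10], [14, 14, 15, 15]], "float32")
iou = keras_cv.bounding_box.compute_iou(
    boxes1, boxes2, bounding_box_format="xyxy"
)
# `iou` has shape [2, 2]; iou[0, 0] is ~1.0 (identical boxes) and
# iou[0, 1] is 0.0 (disjoint boxes).
```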
""" # noqa: E501
boxes1_rank = len(boxes1.shape)
boxes2_rank = len(boxes2.shape)
if boxes1_rank not in [2, 3]:
raise ValueError(
"compute_iou() expects boxes1 to be batched, or to be unbatched. "
f"Received len(boxes1.shape)={boxes1_rank}, "
f"len(boxes2.shape)={boxes2_rank}. Expected either "
"len(boxes1.shape)=2 AND or len(boxes1.shape)=3."
)
if boxes2_rank not in [2, 3]:
raise ValueError(
"compute_iou() expects boxes2 to be batched, or to be unbatched. "
f"Received len(boxes1.shape)={boxes1_rank}, "
f"len(boxes2.shape)={boxes2_rank}. Expected either "
"len(boxes2.shape)=2 AND or len(boxes2.shape)=3."
)
target_format = "yxyx"
if bounding_box.is_relative(bounding_box_format):
target_format = bounding_box.as_relative(target_format)
boxes1 = bounding_box.convert_format(
boxes1,
source=bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
boxes2 = bounding_box.convert_format(
boxes2,
source=bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
intersect_area = _compute_intersection(boxes1, boxes2)
boxes1_area = _compute_area(boxes1)
boxes2_area = _compute_area(boxes2)
boxes2_area_rank = len(boxes2_area.shape)
boxes2_axis = 1 if (boxes2_area_rank == 2) else 0
boxes1_area = ops.expand_dims(boxes1_area, axis=-1)
boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)
union_area = boxes1_area + boxes2_area - intersect_area
res = ops.divide(intersect_area, union_area + keras.backend.epsilon())
if boxes1_rank == 2:
perm = [1, 0]
else:
perm = [0, 2, 1]
if not use_masking:
return res
mask_val_t = ops.cast(mask_val, res.dtype) * ops.ones_like(res)
boxes1_mask = ops.less(ops.max(boxes1, axis=-1, keepdims=True), 0.0)
boxes2_mask = ops.less(ops.max(boxes2, axis=-1, keepdims=True), 0.0)
background_mask = ops.logical_or(
boxes1_mask, ops.transpose(boxes2_mask, perm)
)
iou_lookup_table = ops.where(background_mask, mask_val_t, res)
return iou_lookup_table
@keras_cv_export("keras_cv.bounding_box.compute_ciou")
def compute_ciou(boxes1, boxes2, bounding_box_format):
"""
Computes the Complete IoU (CIoU) between two bounding boxes or between
two batches of bounding boxes.
CIoU loss is an extension of GIoU loss, which further improves the IoU
optimization for object detection. CIoU loss not only penalizes the
bounding box coordinates but also considers the aspect ratio and center
distance of the boxes. The length of the last dimension should be 4 to
represent the bounding boxes.
Args:
boxes1 (tensor): tensor representing the first bounding box with
shape (..., 4).
boxes2 (tensor): tensor representing the second bounding box with
shape (..., 4).
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the [KerasCV bounding box
documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
Returns:
tensor: The CIoU distance between the two bounding boxes.
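For example, a minimal sketch with two single-box inputs in `"xyxy"` format
(assumes `keras_cv` and NumPy as `np` are imported):
```python
boxes1 = np.array([[0, 0, 10, 10]], "float32")
boxes2 = np.array([[5, 5, 10, 10]], "float32")
ciou = keras_cv.bounding_box.compute_ciou(boxes1, boxes2, "xyxy")
# `ciou` has shape [1]; its value is lower than the plain IoU of the same
# pair because the offset between the box centers is penalized.
```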
"""
target_format = "xyxy"
if bounding_box.is_relative(bounding_box_format):
target_format = bounding_box.as_relative(target_format)
boxes1 = bounding_box.convert_format(
boxes1, source=bounding_box_format, target=target_format
)
boxes2 = bounding_box.convert_format(
boxes2, source=bounding_box_format, target=target_format
)
x_min1, y_min1, x_max1, y_max1 = ops.split(boxes1[..., :4], 4, axis=-1)
x_min2, y_min2, x_max2, y_max2 = ops.split(boxes2[..., :4], 4, axis=-1)
width_1 = x_max1 - x_min1
height_1 = y_max1 - y_min1 + keras.backend.epsilon()
width_2 = x_max2 - x_min2
height_2 = y_max2 - y_min2 + keras.backend.epsilon()
intersection_area = ops.maximum(
ops.minimum(x_max1, x_max2) - ops.maximum(x_min1, x_min2), 0
) * ops.maximum(
ops.minimum(y_max1, y_max2) - ops.maximum(y_min1, y_min2), 0
)
union_area = (
width_1 * height_1
+ width_2 * height_2
- intersection_area
+ keras.backend.epsilon()
)
iou = ops.squeeze(
ops.divide(intersection_area, union_area + keras.backend.epsilon()),
axis=-1,
)
convex_width = ops.maximum(x_max1, x_max2) - ops.minimum(x_min1, x_min2)
convex_height = ops.maximum(y_max1, y_max2) - ops.minimum(y_min1, y_min2)
convex_diagonal_squared = ops.squeeze(
convex_width**2 + convex_height**2 + keras.backend.epsilon(),
axis=-1,
)
centers_distance_squared = ops.squeeze(
((x_min1 + x_max1) / 2 - (x_min2 + x_max2) / 2) ** 2
+ ((y_min1 + y_max1) / 2 - (y_min2 + y_max2) / 2) ** 2,
axis=-1,
)
v = ops.squeeze(
ops.power(
(4 / math.pi**2)
* (ops.arctan(width_2 / height_2) - ops.arctan(width_1 / height_1)),
2,
),
axis=-1,
)
alpha = v / (v - iou + (1 + keras.backend.epsilon()))
return iou - (
centers_distance_squared / convex_diagonal_squared + v * alpha
)
| keras-cv/keras_cv/bounding_box/iou.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/iou.py",
"repo_id": "keras-cv",
"token_count": 4178
} | 40 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import keras_cv
from keras_cv.callbacks import PyCOCOCallback
from keras_cv.metrics.coco.pycoco_wrapper import METRIC_NAMES
from keras_cv.models.object_detection.__test_utils__ import (
_create_bounding_box_dataset,
)
from keras_cv.tests.test_case import TestCase
class PyCOCOCallbackTest(TestCase):
@pytest.mark.large # Fit is slow, so mark these large.
def test_model_fit_retinanet(self):
model = keras_cv.models.RetinaNet(
num_classes=10,
bounding_box_format="xywh",
backbone=keras_cv.models.CSPDarkNetTinyBackbone(),
)
# all metric formats must match
model.compile(
optimizer="adam",
box_loss="smoothl1",
classification_loss="focal",
)
train_ds = _create_bounding_box_dataset(
bounding_box_format="xyxy", use_dictionary_box_format=True
)
val_ds = _create_bounding_box_dataset(
bounding_box_format="xyxy", use_dictionary_box_format=True
)
def dict_to_tuple(inputs):
return inputs["images"], inputs["bounding_boxes"]
train_ds = train_ds.map(dict_to_tuple)
val_ds = val_ds.map(dict_to_tuple)
callback = PyCOCOCallback(
validation_data=val_ds,
bounding_box_format="xyxy",
)
history = model.fit(train_ds, callbacks=[callback])
self.assertAllInSet(
[f"val_{metric}" for metric in METRIC_NAMES], history.history.keys()
)
@pytest.mark.skip(
reason="Causing OOMs on GitHub actions. This is not a user facing API "
"and will be replaced in a matter of weeks, so we shouldn't "
"invest engineering resources into working around the OOMs here."
)
def test_model_fit_rcnn(self):
model = keras_cv.models.FasterRCNN(
num_classes=10,
bounding_box_format="xywh",
)
model.compile(
optimizer="adam",
box_loss="Huber",
classification_loss="SparseCategoricalCrossentropy",
rpn_box_loss="Huber",
rpn_classification_loss="BinaryCrossentropy",
)
train_ds = _create_bounding_box_dataset(
bounding_box_format="yxyx", use_dictionary_box_format=True
)
eval_ds = _create_bounding_box_dataset(
bounding_box_format="yxyx", use_dictionary_box_format=True
)
callback = PyCOCOCallback(
validation_data=eval_ds,
bounding_box_format="yxyx",
)
history = model.fit(train_ds, callbacks=[callback])
self.assertAllInSet(
[f"val_{metric}" for metric in METRIC_NAMES], history.history.keys()
)
| keras-cv/keras_cv/callbacks/pycoco_callback_test.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/pycoco_callback_test.py",
"repo_id": "keras-cv",
"token_count": 1469
} | 41 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <vector>
#include "keras_cv/custom_ops/box_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace kerascv {
namespace {
class PairwiseIoUOp : public OpKernel {
public:
explicit PairwiseIoUOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& a = ctx->input(0);
const Tensor& b = ctx->input(1);
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
errors::InvalidArgument("In[0] must be a matrix, but get ",
a.shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
errors::InvalidArgument("In[0] must be a matrix, but get ",
b.shape().DebugString()));
OP_REQUIRES(ctx, 7 == a.dim_size(1),
errors::InvalidArgument("Matrix size-incompatible: In[0]: ",
a.shape().DebugString()));
OP_REQUIRES(ctx, 7 == b.dim_size(1),
errors::InvalidArgument("Matrix size-incompatible: In[1]: ",
b.shape().DebugString()));
const int n_a = a.dim_size(0);
const int n_b = b.dim_size(0);
Tensor* iou_a_b = nullptr;
OP_REQUIRES_OK(
ctx, ctx->allocate_output("iou", TensorShape({n_a, n_b}), &iou_a_b));
auto t_iou_a_b = iou_a_b->matrix<float>();
std::vector<box::Upright3DBox> box_a = box::ParseBoxesFromTensor(a);
std::vector<box::Upright3DBox> box_b = box::ParseBoxesFromTensor(b);
for (int i_a = 0; i_a < n_a; ++i_a) {
for (int i_b = 0; i_b < n_b; ++i_b) {
t_iou_a_b(i_a, i_b) = box_a[i_a].IoU(box_b[i_b]);
}
}
}
};
REGISTER_KERNEL_BUILDER(Name("KcvPairwiseIou3D").Device(DEVICE_CPU),
PairwiseIoUOp);
} // namespace
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/kernels/pairwise_iou_kernel.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/kernels/pairwise_iou_kernel.cc",
"repo_id": "keras-cv",
"token_count": 1196
} | 42 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class NonMaxSuppressionTest(TestCase):
def test_confidence_threshold(self):
boxes = np.random.uniform(low=0, high=1, size=(2, 5, 4))
classes = ops.expand_dims(
np.array(
[[0.1, 0.1, 0.4, 0.9, 0.5], [0.7, 0.5, 0.3, 0.0, 0.0]],
"float32",
),
axis=-1,
)
nms = layers.NonMaxSuppression(
bounding_box_format="yxyx",
from_logits=False,
iou_threshold=1.0,
confidence_threshold=0.45,
max_detections=2,
)
outputs = nms(boxes, classes)
self.assertAllClose(
outputs["boxes"], [boxes[0][-2:, ...], boxes[1][:2, ...]]
)
self.assertAllClose(outputs["classes"], [[0.0, 0.0], [0.0, 0.0]])
self.assertAllClose(outputs["confidence"], [[0.9, 0.5], [0.7, 0.5]])
def test_max_detections(self):
boxes = np.random.uniform(low=0, high=1, size=(2, 5, 4))
classes = ops.expand_dims(
np.array(
[[0.1, 0.1, 0.4, 0.5, 0.9], [0.7, 0.5, 0.3, 0.0, 0.0]],
"float32",
),
axis=-1,
)
nms = layers.NonMaxSuppression(
bounding_box_format="yxyx",
from_logits=False,
iou_threshold=1.0,
confidence_threshold=0.1,
max_detections=1,
)
outputs = nms(boxes, classes)
self.assertAllClose(
outputs["boxes"], [boxes[0][-1:, ...], boxes[1][:1, ...]]
)
self.assertAllClose(outputs["classes"], [[0.0], [0.0]])
self.assertAllClose(outputs["confidence"], [[0.9], [0.7]])
| keras-cv/keras_cv/layers/object_detection/non_max_suppression_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/non_max_suppression_test.py",
"repo_id": "keras-cv",
"token_count": 1152
} | 43 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
EPSILON = 1e-4
def compute_feature_map_ref_xyz(
voxel_size,
spatial_size,
global_xyz,
):
"""Computes the offset xyz locations for each feature map pixel.
Args:
voxel_size: voxel size.
spatial_size: the x, y, z boundary of voxels.
global_xyz: [B, 3] tensor
Returns:
[B, H, W, Z, 3] offset locations for each feature map pixel in global
coordinate.
"""
voxel_spatial_size = compute_voxel_spatial_size(spatial_size, voxel_size)
voxel_coord_meshgrid = np.mgrid[
0 : voxel_spatial_size[0],
0 : voxel_spatial_size[1],
0 : voxel_spatial_size[2],
]
voxel_coord = np.concatenate(voxel_coord_meshgrid[..., np.newaxis], axis=-1)
# [H, W, Z, 3]
# [3]
voxel_origin = compute_voxel_origin(spatial_size, voxel_size)
# [H, W, Z, 3]
voxel_coord = voxel_coord + voxel_origin
# [H, W, Z, 3]
ref = ops.cast(voxel_coord * np.array(voxel_size), global_xyz.dtype)
# [1, H, W, Z, 3] + [B, 1, 1, 1, 3] -> [B, H, W, Z, 3]
ref = ops.expand_dims(ref, axis=0) + ops.expand_dims(
ops.expand_dims(ops.expand_dims(global_xyz, axis=1), axis=1), axis=1
)
return ref
def compute_voxel_spatial_size(spatial_size, voxel_size):
"""Computes how many voxels in each dimension are needed.
Args:
spatial_size: max/min range in each dim in global coordinate frame.
voxel_size: voxel size.
Returns:
voxel_spatial_size: voxel spatial size.
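For example (a minimal sketch; `spatial_size` is interleaved as
[x_min, x_max, y_min, y_max, z_min, z_max]):
compute_voxel_spatial_size((-10.0, 10.0, -10.0, 10.0, -2.0, 2.0), (0.5, 0.5, 1.0))
returns [40, 40, 4].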
"""
dim = len(voxel_size)
# Compute the range as x_range = xmax - xmin, ymax - ymin, zmax - zmin
voxel_spatial_size_float = [
spatial_size[2 * i + 1] - spatial_size[2 * i] for i in range(dim)
]
# voxel_dim_x / x_range
voxel_spatial_size_float = [
i / j for i, j in zip(voxel_spatial_size_float, voxel_size)
]
voxel_spatial_size_int = [
math.ceil(v - EPSILON) for v in voxel_spatial_size_float
]
return voxel_spatial_size_int
def compute_voxel_origin(
spatial_size,
voxel_size,
):
"""Computes voxel origin.
Args:
spatial_size: max/min range in each dim in the global coordinate frame.
voxel_size: voxel size.
Returns:
voxel_origin: [dim] the voxel origin.
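For example, with the interleaved [x_min, x_max, y_min, y_max, z_min, z_max]
convention, compute_voxel_origin((-10.0, 10.0, -10.0, 10.0, -2.0, 2.0),
(0.5, 0.5, 1.0)) returns [-20., -20., -2.], i.e. the minimum corner
expressed in voxel units.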
"""
voxel_origin = spatial_size[::2]
voxel_origin = np.array(
[o / v for o, v in zip(voxel_origin, voxel_size)], "float32"
)
voxel_origin = np.round(voxel_origin)
return voxel_origin
def point_to_voxel_coord(point_xyz, voxel_size, dtype=tf.int32):
"""Computes the voxel coord given points.
A voxel with coordinate x covers the interval
[(x - 0.5) * voxel_size, (x + 0.5) * voxel_size)
in the coordinate system of the input point_xyz.
Args:
point_xyz: [..., dim] point xyz coordinates.
voxel_size: voxel size.
dtype: the output dtype.
Returns:
voxelized coordinates.
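For example (a minimal sketch):
point_to_voxel_coord(tf.constant([[1.2, -0.4, 3.0]]), voxel_size=(0.5, 0.5, 0.5))
returns [[2, -1, 6]], and voxel_coord_to_point maps that result back to
[[1.0, -0.5, 3.0]].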
"""
with tf.name_scope("point_to_voxel_coord"):
point_voxelized = point_xyz / tf.constant(
voxel_size, dtype=point_xyz.dtype
)
assert dtype.is_integer or dtype.is_floating, f"{dtype}"
# Note: tf.round casts float to the nearest integer. If the float is
# 0.5, it casts it to the nearest even integer.
point_voxelized_round = tf.math.round(point_voxelized)
if dtype.is_floating:
assert dtype == point_xyz.dtype, f"{dtype}"
return point_voxelized_round
return tf.cast(point_voxelized_round, dtype=dtype)
def voxel_coord_to_point(voxel_coord, voxel_size, dtype=tf.float32):
"""Convert voxel coord to expected point in the original coordinate system.
This is the reverse of point_to_voxel_coord.
Args:
voxel_coord: [..., dim] int tensors for coordinate of each voxel.
voxel_size: voxel size.
dtype: output point data type.
Returns:
point coordinates.
"""
with tf.name_scope("voxel_coord_to_point"):
# This simply computes voxel_coord * voxel_size.
if voxel_coord.dtype != dtype:
voxel_coord = tf.cast(voxel_coord, dtype=dtype)
return voxel_coord * tf.constant(voxel_size, dtype=dtype)
def get_yaw_rotation(yaw, name=None):
"""Gets a rotation matrix given yaw only.
Args:
yaw: rotation about the z-axis (yaw), in radians. This tensor can be any
shape except an empty one.
name: the op name.
Returns:
A rotation tensor with the same data type as the input. Its shape is
[input_shape, 3, 3].
"""
with tf.name_scope("GetYawRotation"):
cos_yaw = tf.cos(yaw)
sin_yaw = tf.sin(yaw)
ones = tf.ones_like(yaw)
zeros = tf.zeros_like(yaw)
return tf.stack(
[
tf.stack([cos_yaw, -1.0 * sin_yaw, zeros], axis=-1),
tf.stack([sin_yaw, cos_yaw, zeros], axis=-1),
tf.stack([zeros, zeros, ones], axis=-1),
],
axis=-2,
)
def inv_loc(rot, loc):
"""Invert a location.
rot and loc can form a transform matrix between two frames.
R = rot, L = loc
R*R' = I
R * new_loc + L = 0  =>  new_loc = -R' * L
Args:
rot: [..., 3, 3] rotation matrix.
loc: [..., 3] location matrix.
Returns:
[..., 3] new location matrix.
"""
new_loc = -1.0 * tf.linalg.matmul(
rot, loc[..., tf.newaxis], transpose_a=True
)
return tf.squeeze(new_loc, axis=-1)
def _has_rank(tensor, expected_rank):
"""Syntactic sugar for asserting that tensor has the expected rank.
Internal usages for keras_cv libraries only.
"""
if tensor.shape.ndims is not None and isinstance(expected_rank, int):
assert tensor.shape.ndims == expected_rank, (
"Ranks did not match, got %d, " "expected %d"
) % (tensor.shape.ndims, expected_rank)
return tensor
def _pad_or_trim_to(x, shape, pad_val=0, pad_after_contents=True):
"""Pad and slice x to the given shape.
This is branched from Lingvo
https://github.com/tensorflow/lingvo/blob/master/lingvo/core/py_utils.py.
Internal usages for keras_cv libraries only.
Args:
x: A tensor.
shape: The shape of the returned tensor.
pad_val: An int or float used to pad x.
pad_after_contents: Whether to pad and trim after the original contents of
each dimension.
Returns:
'x' is padded with pad_val and sliced so that the result has the given
shape.
Raises:
ValueError: if shape is a tf.TensorShape and not fully defined.
"""
if isinstance(shape, (list, tuple)):
expected_rank = len(shape)
elif isinstance(shape, tf.TensorShape):
if not shape.is_fully_defined():
raise ValueError(
"shape %s padding %s must be fully defined." % (shape, x)
)
expected_rank = shape.rank
else:
shape = _has_rank(shape, 1)
expected_rank = tf.size(shape)
x = _has_rank(x, expected_rank)
pad = shape - tf.minimum(tf.shape(x), shape)
zeros = tf.zeros_like(pad)
if pad_after_contents:
# If dim_i is less than shape[i], pads after contents.
paddings = tf.stack([zeros, pad], axis=1)
# If dim_i is larger than shape[i], we slice [0:shape[i]] for dim_i.
slice_begin = zeros
else:
# If dim_i is less than shape[i], pads before contents.
paddings = tf.stack([pad, zeros], axis=1)
# If dim-i is larger than shape[i], we slice [dim_i - shape[i]:dim_i]
# for dim_i.
slice_begin = tf.shape(x) + pad - shape
x = tf.pad(x, paddings, constant_values=pad_val)
x = tf.slice(x, slice_begin, shape)
return tf.reshape(x, shape)
| keras-cv/keras_cv/layers/object_detection_3d/voxel_utils.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/voxel_utils.py",
"repo_id": "keras-cv",
"token_count": 3715
} | 44 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.tests.test_case import TestCase
num_classes = 10
class CutMixTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = CutMix(seed=1)
outputs = layer(
{
"images": xs,
"labels": ys_labels,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_labels.shape, (2, 10))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_cut_mix_call_results_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No label should remain exactly at its original one-hot value
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_cut_mix_call_results_one_channel_with_labels(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 1)), tf.ones((4, 4, 1))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No label should remain exactly at its original one-hot value
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_cut_mix_call_results_with_dense_encoded_segmentation_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 1)), tf.ones((4, 4, 1))],
axis=0,
),
tf.float32,
)
layer = CutMix(seed=1)
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# At least some pixels should be replaced in the images
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# At least some pixels should be replaced in the segmentation_masks
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 1.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 2.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 1.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 2.0)
)
def test_cut_mix_call_results_with_one_hot_encoded_segmentation_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = CutMix(seed=1)
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# At least some pixels should be replaced in the images
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# At least some pixels should be replaced in the segmentation_masks
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[0][:, :, 2]) == 1.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[0][:, :, 2]) == 0.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[1][:, :, 1]) == 1.0
)
)
self.assertTrue(
np.any(
ops.convert_to_numpy(ys_segmentation_masks[1][:, :, 1]) == 0.0
)
)
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = CutMix(seed=1)
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# At least some pixels should be replaced in the CutMix operation
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
# No label should remain exactly at its original one-hot value
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix received a single image to `call`"
):
_ = layer(inputs)
def test_int_labels(self):
xs = tf.ones((2, 512, 512, 3))
ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
inputs = {"images": xs, "labels": ys}
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix received labels with type"
):
_ = layer(inputs)
def test_image_input(self):
xs = tf.ones((2, 512, 512, 3))
layer = CutMix()
with self.assertRaisesRegexp(
ValueError, "CutMix expects inputs in a dictionary with format"
):
_ = layer(xs)
| keras-cv/keras_cv/layers/preprocessing/cut_mix_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/cut_mix_test.py",
"repo_id": "keras-cv",
"token_count": 4731
} | 45 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.tests.test_case import TestCase
class PosterizationTest(TestCase):
rng = tf.random.Generator.from_non_deterministic_state()
def test_raises_error_on_invalid_bits_parameter(self):
invalid_values = [-1, 0, 9, 24]
for value in invalid_values:
with self.assertRaises(ValueError):
Posterization(bits=value, value_range=[0, 1])
def test_raises_error_on_invalid_value_range(self):
invalid_ranges = [(1,), [1, 2, 3]]
for value_range in invalid_ranges:
with self.assertRaises(ValueError):
Posterization(bits=1, value_range=value_range)
def test_single_image(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(224, 224, 3), maxval=256)
expected_output = self._calc_expected_output(dummy_input, bits=bits)
layer = Posterization(bits=bits, value_range=[0, 255])
output = layer(dummy_input)
self.assertAllEqual(output, expected_output)
def _get_random_bits(self):
return int(
self.rng.uniform(shape=(), minval=1, maxval=9, dtype=tf.int32)
)
def test_single_image_rescaled(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(224, 224, 3), maxval=1.0)
expected_output = (
self._calc_expected_output(dummy_input * 255, bits=bits) / 255
)
layer = Posterization(bits=bits, value_range=[0, 1])
output = layer(dummy_input)
self.assertAllClose(output, expected_output)
def test_batched_input(self):
bits = self._get_random_bits()
dummy_input = self.rng.uniform(shape=(2, 224, 224, 3), maxval=256)
expected_output = []
for image in dummy_input:
expected_output.append(self._calc_expected_output(image, bits=bits))
expected_output = tf.stack(expected_output)
layer = Posterization(bits=bits, value_range=[0, 255])
output = layer(dummy_input)
self.assertAllEqual(output, expected_output)
@pytest.mark.tf_only
def test_works_with_xla(self):
dummy_input = self.rng.uniform(shape=(2, 224, 224, 3))
layer = Posterization(bits=4, value_range=[0, 1])
@tf.function(jit_compile=True)
def apply(x):
return layer(x)
apply(dummy_input)
@staticmethod
def _calc_expected_output(image, bits):
"""Posterization in numpy, based on Albumentations:
The algorithm is basically:
1. create a lookup table of all possible input pixel values to pixel
values after posterize
2. map each pixel in the input through the created lookup table.
Source:
https://github.com/albumentations-team/albumentations/blob/89a675cbfb2b76f6be90e7049cd5211cb08169a5/albumentations/augmentations/functional.py#L407
"""
dtype = image.dtype
image = tf.cast(image, tf.uint8)
lookup_table = np.arange(0, 256, dtype=np.uint8)
mask = ~np.uint8(2 ** (8 - bits) - 1)
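# e.g. bits=4 -> mask == 0b11110000, so a pixel value of 200 maps to 192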
lookup_table &= mask
return tf.cast(lookup_table[image], dtype)
| keras-cv/keras_cv/layers/preprocessing/posterization_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/posterization_test.py",
"repo_id": "keras-cv",
"token_count": 1586
} | 46 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomColorDegeneration")
class RandomColorDegeneration(VectorizedBaseImageAugmentationLayer):
"""Randomly performs the color degeneration operation on given images.
The color degeneration operation first converts an image to gray scale,
then back to color. It then takes a weighted average between the original
image and the degenerated image. This makes colors appear duller.
Args:
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image colors are degenerated. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 uses the degenerated result
entirely. Values between 0 and 1 result in linear interpolation
between the original image and the fully degenerated image.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
seed: Integer. Used to create a random seed.
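Usage (a minimal sketch; the `factor` value is illustrative):
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
color_degen = keras_cv.layers.RandomColorDegeneration(factor=0.5)
augmented_images = color_degen(images)
```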
"""
def __init__(
self,
factor,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.factor = preprocessing.parse_factor(
factor,
)
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
return self.factor(
shape=(batch_size, 1, 1, 1), dtype=self.compute_dtype
)
def augment_images(self, images, transformations=None, **kwargs):
degenerates = tf.image.grayscale_to_rgb(
tf.image.rgb_to_grayscale(images)
)
result = preprocessing.blend(images, degenerates, transformations)
return result
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
image, transformations=transformation, **kwargs
)
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "seed": self.seed})
return config
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_color_degeneration.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_color_degeneration.py",
"repo_id": "keras-cv",
"token_count": 1455
} | 47 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomHue")
class RandomHue(VectorizedBaseImageAugmentationLayer):
"""Randomly adjusts the hue on given images.
This layer will randomly increase/reduce the hue for the input RGB
images.
The image hue is adjusted by converting the image(s) to HSV and rotating the
hue channel (H) by delta. The image is then converted back to RGB.
Args:
factor: A tuple of two floats, a single float or
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image hue is impacted. `factor=0.0` makes this layer perform a
no-op operation, while a value of 1.0 performs the most aggressive
hue adjustment available. If a tuple is used, a `factor` is
sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
value_range: the range of values the incoming images will have.
Represented as a two number tuple written [low, high]. This is
typically either `[0, 1]` or `[0, 255]` depending on how your
preprocessing pipeline is set up.
seed: Integer. Used to create a random seed.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_hue = keras_cv.layers.RandomHue(factor=0.5, value_range=(0, 255))
augmented_images = random_hue(images)
```
"""
def __init__(self, factor, value_range, seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
self.factor = preprocessing_utils.parse_factor(
factor,
)
self.value_range = value_range
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
invert = self._random_generator.uniform((batch_size,), 0, 1, tf.float32)
invert = tf.where(
invert > 0.5, -tf.ones_like(invert), tf.ones_like(invert)
)
# We must scale self.factor() to the range [-0.5, 0.5]. The hue channel of
# an HSV image lies in [0, 1], so a shift of 0.5 wraps the hue halfway
# around the color wheel, i.e. a 180 degree rotation.
return invert * self.factor(shape=(batch_size,)) * 0.5
def augment_ragged_image(self, image, transformation, **kwargs):
return self.augment_images(
images=image, transformations=transformation, **kwargs
)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing_utils.transform_value_range(
images, self.value_range, (0, 1), dtype=self.compute_dtype
)
adjust_factors = tf.cast(transformations, images.dtype)
# broadcast
adjust_factors = adjust_factors[..., tf.newaxis, tf.newaxis]
# tf.image.rgb_to_hsv expects float images in the range [0, 1]
images = tf.image.rgb_to_hsv(images)
h_channel = images[..., 0] + adjust_factors
h_channel = tf.where(h_channel > 1.0, h_channel - 1.0, h_channel)
h_channel = tf.where(h_channel < 0.0, h_channel + 1.0, h_channel)
images = tf.stack([h_channel, images[..., 1], images[..., 2]], axis=-1)
images = tf.image.hsv_to_rgb(images)
# RandomHue is one of the rare KPLs that needs to clip
images = tf.clip_by_value(images, 0, 1)
images = preprocessing_utils.transform_value_range(
images, (0, 1), self.value_range, dtype=self.compute_dtype
)
return images
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_hue.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_hue.py",
"repo_id": "keras-cv",
"token_count": 2117
} | 48 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RepeatedAugmentation")
class RepeatedAugmentation(BaseImageAugmentationLayer):
"""RepeatedAugmentation augments each image in a batch multiple times.
This technique exists to emulate the behavior of stochastic gradient descent
within the context of mini-batch gradient descent. When training large
vision models, choosing a large batch size can introduce too much noise into
aggregated gradients causing the overall batch's gradients to be less
effective than gradients produced using smaller gradients.
RepeatedAugmentation handles this by re-using the same image multiple times
within a batch creating correlated samples.
This layer increases your batch size by a factor of `len(augmenters)`.
Args:
augmenters: the augmenters to use to augment the image
shuffle: whether to shuffle the result. Essential when using an
asynchronous distribution strategy such as ParameterServerStrategy.
Usage:
List of identical augmenters:
```python
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[cv_layers.RandAugment(value_range=(0, 255))] * 8
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
# outputs now has a batch size of 64 because there are 8 augmenters
```
List of distinct augmenters:
```python
repeated_augment = cv_layers.RepeatedAugmentation(
augmenters=[
cv_layers.RandAugment(value_range=(0, 255)),
cv_layers.RandomFlip(),
]
)
inputs = {
"images": tf.ones((8, 512, 512, 3)),
"labels": tf.ones((8,)),
}
outputs = repeated_augment(inputs)
```
References:
- [DEIT implementation](https://github.com/facebookresearch/deit/blob/ee8893c8063f6937fec7096e47ba324c206e22b9/samplers.py#L8)
- [Original publication](https://openaccess.thecvf.com/content_CVPR_2020/papers/Hoffer_Augment_Your_Batch_Improving_Generalization_Through_Instance_Repetition_CVPR_2020_paper.pdf)
""" # noqa: E501
def __init__(self, augmenters, shuffle=True, **kwargs):
super().__init__(**kwargs)
self.augmenters = augmenters
self.shuffle = shuffle
def _batch_augment(self, inputs):
if "bounding_boxes" in inputs:
raise ValueError(
"RepeatedAugmentation() does not yet support bounding box "
"labels."
)
augmenter_outputs = [augmenter(inputs) for augmenter in self.augmenters]
outputs = {}
for k in inputs.keys():
outputs[k] = tf.concat(
[output[k] for output in augmenter_outputs], axis=0
)
if not self.shuffle:
return outputs
return self.shuffle_outputs(outputs)
def shuffle_outputs(self, result):
indices = tf.range(
start=0, limit=tf.shape(result["images"])[0], dtype=tf.int32
)
indices = tf.random.shuffle(indices)
for key in result:
result[key] = tf.gather(result[key], indices)
return result
def _augment(self, inputs):
raise ValueError(
"RepeatedAugmentation() only works in batched mode. If "
"you would like to create batches from a single image, use "
"`x = tf.expand_dims(x, axis=0)` on your input images and labels."
)
def get_config(self):
config = super().get_config()
config.update({"augmenters": self.augmenters, "shuffle": self.shuffle})
return config
@classmethod
def from_config(cls, config):
if config["augmenters"] and isinstance(config["augmenters"][0], dict):
config["augmenters"] = keras.utils.deserialize_keras_object(
config["augmenters"]
)
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/repeated_augmentation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/repeated_augmentation.py",
"repo_id": "keras-cv",
"token_count": 1814
} | 49 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.layers import preprocessing_3d
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
TEST_CONFIGURATIONS = [
(
"FrustrumRandomDroppingPoints",
preprocessing_3d.FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
),
),
(
"FrustrumRandomPointFeatureNoise",
preprocessing_3d.FrustumRandomPointFeatureNoise(
r_distance=10,
theta_width=np.pi,
phi_width=1.5 * np.pi,
max_noise_level=0.5,
),
),
(
"GlobalRandomDroppingPoints",
preprocessing_3d.GlobalRandomDroppingPoints(drop_rate=0.5),
),
(
"GlobalRandomFlip",
preprocessing_3d.GlobalRandomFlip(),
),
(
"GlobalRandomRotation",
preprocessing_3d.GlobalRandomRotation(
max_rotation_angle_x=1.0,
max_rotation_angle_y=1.0,
max_rotation_angle_z=1.0,
),
),
(
"GlobalRandomScaling",
preprocessing_3d.GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
),
),
(
"GlobalRandomTranslation",
preprocessing_3d.GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
),
),
(
"RandomDropBox",
preprocessing_3d.RandomDropBox(
label_index=1, max_drop_bounding_boxes=4
),
),
]
def convert_to_model_format(inputs):
point_clouds = {
"point_xyz": inputs["point_clouds"][..., :3],
"point_feature": inputs["point_clouds"][..., 3:-1],
"point_mask": tf.cast(inputs["point_clouds"][..., -1], tf.bool),
}
boxes = {
"boxes": inputs["bounding_boxes"][..., :7],
"classes": inputs["bounding_boxes"][..., 7],
"difficulty": inputs["bounding_boxes"][..., -1],
"mask": tf.cast(inputs["bounding_boxes"][..., 8], tf.bool),
}
return {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
@pytest.mark.skip(
reason="values are not matching because of changes to random.py"
)
class InputFormatTest(TestCase):
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_equivalent_results_with_model_format(self, layer):
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 9)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
tf.random.set_seed(123)
outputs_with_legacy_format = convert_to_model_format(layer(inputs))
tf.random.set_seed(123)
outputs_with_model_format = layer(convert_to_model_format(inputs))
self.assertAllClose(
outputs_with_legacy_format, outputs_with_model_format
)
| keras-cv/keras_cv/layers/preprocessing_3d/input_format_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/input_format_test.py",
"repo_id": "keras-cv",
"token_count": 1706
} | 50 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_translation import (
GlobalRandomTranslation,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomTranslationTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=0.0, y_stddev=0.0, z_stddev=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=1.0, y_stddev=1.0, z_stddev=1.0
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomTranslation(
x_stddev=0.0, y_stddev=0.0, z_stddev=0.0
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation_test.py",
"repo_id": "keras-cv",
"token_count": 1138
} | 51 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.StochasticDepth")
class StochasticDepth(keras.layers.Layer):
"""
Implements the Stochastic Depth layer. It randomly drops residual branches
in residual architectures. It is used as a drop-in replacement for addition
operation. Note that this layer DOES NOT drop a residual block across
individual samples but across the entire batch.
Reference:
- [Deep Networks with Stochastic Depth](https://arxiv.org/abs/1603.09382)
        - Docstring taken from [stochastic_depth.py](https://tinyurl.com/mr3y2af6)
Args:
rate: float, the probability of the residual branch being dropped.
Usage:
`StochasticDepth` can be used in a residual network as follows:
```python
# (...)
input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
residual = keras.layers.Conv2D(1, 1)(input)
output = keras_cv.layers.StochasticDepth()([input, residual])
# (...)
```
At train time, StochasticDepth returns:
$$
x[0] + b_l * x[1],
$$
    where $b_l$ is a random Bernoulli variable with probability
    $P(b_l = 1) = (1 - rate)$. At test time, StochasticDepth rescales
    the activations of the residual branch based on the drop rate ($rate$):
$$
x[0] + (1 - rate) * x[1]
$$
""" # noqa: E501
def __init__(self, rate=0.5, **kwargs):
super().__init__(**kwargs)
self.rate = rate
self.survival_probability = 1.0 - self.rate
def call(self, x, training=None):
if len(x) != 2:
raise ValueError(
f"""Input must be a list of length 2. """
f"""Got input with length={len(x)}."""
)
shortcut, residual = x
b_l = keras.backend.random_bernoulli([], p=self.survival_probability)
if training:
return shortcut + b_l * residual
else:
return shortcut + self.survival_probability * residual
def get_config(self):
config = {"rate": self.rate}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/regularization/stochastic_depth.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/stochastic_depth.py",
"repo_id": "keras-cv",
"token_count": 1047
} | 52 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv.losses.ciou_loss import CIoULoss
from keras_cv.tests.test_case import TestCase
class CIoUTest(TestCase):
def test_output_shape(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
ciou_loss = CIoULoss(bounding_box_format="xywh")
self.assertAllEqual(ciou_loss(y_true, y_pred).shape, ())
def test_output_shape_reduction_none(self):
y_true = np.random.uniform(size=(2, 2, 4), low=0, high=10)
y_pred = np.random.uniform(size=(2, 2, 4), low=0, high=20)
ciou_loss = CIoULoss(bounding_box_format="xyxy", reduction="none")
self.assertAllEqual(
[2, 2],
ciou_loss(y_true, y_pred).shape,
)
def test_output_shape_relative_formats(self):
y_true = [
[0.0, 0.0, 0.1, 0.1],
[0.0, 0.0, 0.2, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.2, 0.3, 0.3],
]
y_pred = [
[0.0, 0.0, 0.5, 0.6],
[0.0, 0.0, 0.7, 0.3],
[0.4, 0.5, 0.5, 0.6],
[0.2, 0.1, 0.3, 0.3],
]
ciou_loss = CIoULoss(bounding_box_format="rel_xyxy")
self.assertAllEqual(ciou_loss(y_true, y_pred).shape, ())
@parameterized.named_parameters(
("xyxy", "xyxy"),
("rel_xyxy", "rel_xyxy"),
)
def test_output_value(self, name):
y_true = [
[0, 0, 1, 1],
[0, 0, 2, 3],
[4, 5, 3, 6],
[2, 2, 3, 3],
]
y_pred = [
[0, 0, 5, 6],
[0, 0, 7, 3],
[4, 5, 5, 6],
[2, 1, 3, 3],
]
expected_loss = 1.03202
ciou_loss = CIoULoss(bounding_box_format="xyxy")
if name == "rel_xyxy":
scale_factor = 1 / 640.0
y_true = np.array(y_true) * scale_factor
y_pred = np.array(y_pred) * scale_factor
self.assertAllClose(
ciou_loss(y_true, y_pred), expected_loss, atol=0.005
)
| keras-cv/keras_cv/losses/ciou_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/ciou_loss_test.py",
"repo_id": "keras-cv",
"token_count": 1383
} | 53 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CSPDarkNet model utils for KerasCV.
Reference:
- [YoloV3 Paper](https://arxiv.org/abs/1804.02767)
- [YoloV3 implementation](https://github.com/ultralytics/yolov3)
"""
from keras_cv.backend import keras
def DarknetConvBlock(
filters, kernel_size, strides, use_bias=False, activation="silu", name=None
):
"""The basic conv block used in Darknet. Applies Conv2D followed by a
BatchNorm.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            height and width of the 2D convolution window. Can be a single
            integer to specify the same value for both dimensions.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the height and width. Can be a single
            integer to specify the same value for both dimensions.
use_bias: Boolean, whether the layer uses a bias vector.
activation: the activation applied after the BatchNorm layer. One of
"silu", "relu" or "leaky_relu", defaults to "silu".
name: the prefix for the layer names used in the block.
"""
if name is None:
name = f"conv_block{keras.backend.get_uid('conv_block')}"
model_layers = [
keras.layers.Conv2D(
filters,
kernel_size,
strides,
padding="same",
use_bias=use_bias,
name=name + "_conv",
),
keras.layers.BatchNormalization(name=name + "_bn"),
]
if activation == "silu":
model_layers.append(
keras.layers.Lambda(lambda x: keras.activations.silu(x))
)
elif activation == "relu":
model_layers.append(keras.layers.ReLU())
elif activation == "leaky_relu":
model_layers.append(keras.layers.LeakyReLU(0.1))
return keras.Sequential(model_layers, name=name)
def ResidualBlocks(filters, num_blocks, name=None):
"""A residual block used in DarkNet models, repeated `num_blocks` times.
Args:
        filters: Integer, the dimensionality of the output space (i.e. the
            number of output filters used in the blocks).
num_blocks: number of times the residual connections are repeated
name: the prefix for the layer names used in the block.
Returns:
a function that takes an input Tensor representing a ResidualBlock.
"""
if name is None:
name = f"residual_block{keras.backend.get_uid('residual_block')}"
def apply(x):
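        # Downsample with a strided 3x3 conv, then apply `num_blocks` residual
        # units of a 1x1 (channel-reducing) conv followed by a 3x3 conv.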
x = DarknetConvBlock(
filters,
kernel_size=3,
strides=2,
activation="leaky_relu",
name=f"{name}_conv1",
)(x)
for i in range(1, num_blocks + 1):
residual = x
x = DarknetConvBlock(
filters // 2,
kernel_size=1,
strides=1,
activation="leaky_relu",
name=f"{name}_conv{2*i}",
)(x)
x = DarknetConvBlock(
filters,
kernel_size=3,
strides=1,
activation="leaky_relu",
name=f"{name}_conv{2*i + 1}",
)(x)
if i == num_blocks:
x = keras.layers.Add(name=f"{name}_out")([residual, x])
else:
x = keras.layers.Add(name=f"{name}_add_{i}")([residual, x])
return x
return apply
def SpatialPyramidPoolingBottleneck(
filters,
hidden_filters=None,
kernel_sizes=(5, 9, 13),
activation="silu",
name=None,
):
"""Spatial pyramid pooling layer used in YOLOv3-SPP
Args:
        filters: Integer, the dimensionality of the output space (i.e. the
            number of output filters used in the blocks).
hidden_filters: Integer, the dimensionality of the intermediate
bottleneck space (i.e. the number of output filters in the
bottleneck convolution). If None, it will be equal to filters.
Defaults to None.
kernel_sizes: A list or tuple representing all the pool sizes used for
the pooling layers, defaults to (5, 9, 13).
activation: Activation for the conv layers, defaults to "silu".
name: the prefix for the layer names used in the block.
Returns:
        a function that takes an input Tensor representing a
        SpatialPyramidPoolingBottleneck.
"""
if name is None:
name = f"spp{keras.backend.get_uid('spp')}"
if hidden_filters is None:
hidden_filters = filters
def apply(x):
x = DarknetConvBlock(
hidden_filters,
kernel_size=1,
strides=1,
activation=activation,
name=f"{name}_conv1",
)(x)
x = [x]
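        # Pool the projected features with several kernel sizes in parallel,
        # keeping the original tensor; all branches are concatenated below.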
for kernel_size in kernel_sizes:
x.append(
keras.layers.MaxPooling2D(
kernel_size,
strides=1,
padding="same",
name=f"{name}_maxpool_{kernel_size}",
)(x[0])
)
x = keras.layers.Concatenate(name=f"{name}_concat")(x)
x = DarknetConvBlock(
filters,
kernel_size=1,
strides=1,
activation=activation,
name=f"{name}_conv2",
)(x)
return x
return apply
def DarknetConvBlockDepthwise(
filters, kernel_size, strides, activation="silu", name=None
):
"""The depthwise conv block used in CSPDarknet.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the final convolution).
        kernel_size: An integer or tuple/list of 2 integers, specifying the
            height and width of the 2D convolution window. Can be a single
            integer to specify the same value for both dimensions.
        strides: An integer or tuple/list of 2 integers, specifying the strides
            of the convolution along the height and width. Can be a single
            integer to specify the same value for both dimensions.
activation: the activation applied after the final layer. One of "silu",
"relu" or "leaky_relu", defaults to "silu".
name: the prefix for the layer names used in the block.
"""
if name is None:
name = f"conv_block{keras.backend.get_uid('conv_block')}"
model_layers = [
keras.layers.DepthwiseConv2D(
kernel_size, strides, padding="same", use_bias=False
),
keras.layers.BatchNormalization(),
]
if activation == "silu":
model_layers.append(
keras.layers.Lambda(lambda x: keras.activations.swish(x))
)
elif activation == "relu":
model_layers.append(keras.layers.ReLU())
elif activation == "leaky_relu":
model_layers.append(keras.layers.LeakyReLU(0.1))
model_layers.append(
DarknetConvBlock(
filters, kernel_size=1, strides=1, activation=activation
)
)
return keras.Sequential(model_layers, name=name)
@keras.saving.register_keras_serializable(package="keras_cv")
class CrossStagePartial(keras.layers.Layer):
"""A block used in Cross Stage Partial Darknet.
Args:
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the final convolution).
num_bottlenecks: an integer representing the number of blocks added in
the layer bottleneck.
residual: a boolean representing whether the value tensor before the
bottleneck should be added to the output of the bottleneck as a
residual, defaults to True.
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block, defaults to
False.
activation: the activation applied after the final layer. One of "silu",
"relu" or "leaky_relu", defaults to "silu".
"""
def __init__(
self,
filters,
num_bottlenecks,
residual=True,
use_depthwise=False,
activation="silu",
**kwargs,
):
super().__init__(**kwargs)
self.filters = filters
self.num_bottlenecks = num_bottlenecks
self.residual = residual
self.use_depthwise = use_depthwise
self.activation = activation
hidden_channels = filters // 2
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
self.darknet_conv1 = DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
self.darknet_conv2 = DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
# repeat bottlenecks num_bottleneck times
self.bottleneck_convs = []
for _ in range(num_bottlenecks):
self.bottleneck_convs.append(
DarknetConvBlock(
hidden_channels,
kernel_size=1,
strides=1,
activation=activation,
)
)
self.bottleneck_convs.append(
ConvBlock(
hidden_channels,
kernel_size=3,
strides=1,
activation=activation,
)
)
if self.residual:
self.add = keras.layers.Add()
self.concatenate = keras.layers.Concatenate()
self.darknet_conv3 = DarknetConvBlock(
filters, kernel_size=1, strides=1, activation=activation
)
def call(self, x):
x1 = self.darknet_conv1(x)
x2 = self.darknet_conv2(x)
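        # x1 flows through the stacked bottlenecks (with optional residual
        # adds); x2 is the partial/shortcut branch that is concatenated back
        # in before the final 1x1 conv.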
for i in range(self.num_bottlenecks):
residual = x1
x1 = self.bottleneck_convs[2 * i](x1)
x1 = self.bottleneck_convs[2 * i + 1](x1)
if self.residual:
x1 = self.add([residual, x1])
x1 = self.concatenate([x1, x2])
x = self.darknet_conv3(x1)
return x
def get_config(self):
config = {
"filters": self.filters,
"num_bottlenecks": self.num_bottlenecks,
"residual": self.residual,
"use_depthwise": self.use_depthwise,
"activation": self.activation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def Focus(name=None):
"""A block used in CSPDarknet to focus information into channels of the
image.
    If the shape of a batch of inputs is (batch_size, width, height, channels),
    this layer converts each image into shape (batch_size, width/2, height/2,
    4*channels). See [the original discussion on YoloV5 Focus Layer](https://github.com/ultralytics/yolov5/discussions/3181).
Args:
name: the name for the lambda layer used in the block.
Returns:
a function that takes an input Tensor representing a Focus layer.
""" # noqa: E501
def apply(x):
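        # Space-to-depth style slicing: gather the four pixel parities of
        # every 2x2 patch and stack them along the channel axis.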
return keras.layers.Concatenate(name=name)(
[
x[..., ::2, ::2, :],
x[..., 1::2, ::2, :],
x[..., ::2, 1::2, :],
x[..., 1::2, 1::2, :],
],
)
return apply
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_utils.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_utils.py",
"repo_id": "keras-cv",
"token_count": 5595
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetV1 model preset configurations."""
backbone_presets_no_weights = {
"efficientnetv1_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 4050716,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b0",
},
"efficientnetv1_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 6576704,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b1",
},
"efficientnetv1_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 7770034,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b2",
},
"efficientnetv1_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 10785960,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b3",
},
"efficientnetv1_b4": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.4` and `depth_coefficient=1.8`."
),
"params": 17676984,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b4",
},
"efficientnetv1_b5": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.6` and `depth_coefficient=2.2`."
),
"params": 28517360,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b5",
},
"efficientnetv1_b6": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.8` and `depth_coefficient=2.6`."
),
"params": 40965800,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b6",
},
"efficientnetv1_b7": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=2.0` and `depth_coefficient=3.1`."
),
"params": 64105488,
"official_name": "EfficientNetV1",
"path": "efficientnetv1",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetv1_b7",
},
}
backbone_presets_with_weights = {}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 2340
} | 55 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone import (
MobileNetV3Backbone,
)
from keras_cv.models.backbones.mobilenet_v3.mobilenet_v3_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """MobileNetV3Backbone model with {num_layers} layers.
References:
- [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)
- [Based on the Original keras.applications MobileNetv3](https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet_v3.py)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1 / 255)`
layer. Defaults to True.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.MobileNetV3SmallBackbone")
class MobileNetV3SmallBackbone(MobileNetV3Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MobileNetV3Backbone.from_preset("mobilenet_v3_small", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mobilenet_v3_small_imagenet": copy.deepcopy(
backbone_presets["mobilenet_v3_small_imagenet"]
),
}
@classproperty
    def presets_with_weights(cls):
        """Dictionary of preset names and configurations that include
        weights."""
return cls.presets
@keras_cv_export("keras_cv.models.MobileNetV3LargeBackbone")
class MobileNetV3LargeBackbone(MobileNetV3Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MobileNetV3Backbone.from_preset("mobilenet_v3_large", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mobilenet_v3_large_imagenet": copy.deepcopy(
backbone_presets["mobilenet_v3_large_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
setattr(
MobileNetV3LargeBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MobileNetV3Large", num_layers="28"),
)
setattr(
MobileNetV3SmallBackbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MobileNetV3Small", num_layers="14"),
)
| keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mobilenet_v3/mobilenet_v3_aliases.py",
"repo_id": "keras-cv",
"token_count": 1817
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class ResNetV2BackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
def test_valid_call(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = ResNet50V2Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ResNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = ResNet50V2Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v2_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, ResNetV2Backbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = ResNet50V2Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 2048),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
# ResNet50 model
model = ResNetV2Backbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[3, 4, 6, 3],
stackwise_strides=[1, 2, 2, 2],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 2048))
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2314
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.feature_extractor.clip.clip_encoder import CLIPEncoder
from keras_cv.models.feature_extractor.clip.clip_encoder import get_initializer
class CLIPPatchingAndEmbedding(keras.layers.Layer):
def __init__(
self, width, patch_size, input_resolution, output_dim, **kwargs
):
super().__init__(**kwargs)
self.conv1 = keras.layers.Conv2D(
filters=width,
kernel_size=patch_size,
strides=patch_size,
padding="valid",
use_bias=False,
data_format="channels_last",
kernel_initializer=get_initializer(0.02),
name="patch_embed.embedding",
)
self.width = width
self.input_resolution = input_resolution
self.patch_size = patch_size
self.num_patches = ops.power(
(self.input_resolution // self.patch_size), 2
)
self.class_embedding_initializer = get_initializer(
ops.power(self.width, -0.5) * 0.02
)
self.output_dim = output_dim
def build(self, input_shape):
super().build(input_shape)
self.conv1.build(input_shape)
self.class_embedding = self.add_weight(
shape=((self.width,)),
initializer=self.class_embedding_initializer,
name="patch_embed.class_embedding",
)
self.positional_embedding = self.add_weight(
shape=(
(
(self.input_resolution // self.patch_size) ** 2 + 1,
self.width,
)
),
trainable=True,
name="patch_embed.positional_embedding",
)
def call(self, x):
batch_size = ops.shape(x)[0]
patch_embeddings = self.conv1(x) # shape = [*, grid, grid, channel]
patch_embeddings = ops.reshape(
patch_embeddings, (batch_size, self.num_patches, -1)
)
class_embeds = ops.broadcast_to(
self.class_embedding, (batch_size, 1, self.width)
)
embeddings = ops.concatenate(
[class_embeds, patch_embeddings], axis=1
) # shape = [*, grid ** 2 + 1, width]
positional_embedding = self.positional_embedding
embeddings = embeddings + positional_embedding
return embeddings
def get_config(self):
config = super().get_config()
config.update(
{
"width": self.width,
"patch_size": self.patch_size,
"input_resolution": self.input_resolution,
"output_dim": self.output_dim,
}
)
return config
class CLIPImageEncoder(keras.Model):
def __init__(
self,
input_resolution,
patch_size,
width,
num_layers,
heads,
output_dim,
**kwargs,
):
super().__init__(
**kwargs,
)
self.input_resolution = input_resolution
self.width = width
self.patch_size = patch_size
self.output_dim = output_dim
self.heads = heads
self.num_layers = num_layers
self.embeddings = CLIPPatchingAndEmbedding(
width=self.width,
patch_size=self.patch_size,
input_resolution=self.input_resolution,
output_dim=self.output_dim,
name="clip_patch_embedding",
)
self.pre_norm = keras.layers.LayerNormalization(
epsilon=1e-5, name="ln_1"
)
self.encoder = CLIPEncoder(
self.width,
self.num_layers,
self.heads,
name="clip_encoder",
)
self.post_norm = keras.layers.LayerNormalization(
epsilon=1e-5, name="ln_2"
)
self.image_projector = keras.layers.Dense(
output_dim, name="vision_projector", use_bias=False
)
def build(self, input_shape):
super().build(input_shape)
self.embeddings.build(input_shape)
self.pre_norm.build([None, None, self.width])
self.encoder.build(None)
self.post_norm.build([None, self.width])
self.image_projector.build([None, None, self.width])
def call(self, image):
x = self.embeddings(image)
x = self.pre_norm(x)
x = self.encoder(x)
x = self.post_norm(x[:, 0, :])
image_projected_embeddings = self.image_projector(x)
return image_projected_embeddings
def get_config(self):
config = super().get_config()
config.update(
            {
                "input_resolution": self.input_resolution,
                "patch_size": self.patch_size,
                "width": self.width,
                "num_layers": self.num_layers,
"heads": self.heads,
"output_dim": self.output_dim,
}
)
return config
| keras-cv/keras_cv/models/feature_extractor/clip/clip_image_model.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_image_model.py",
"repo_id": "keras-cv",
"token_count": 2681
} | 58 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv8 Backbone presets."""
backbone_presets_no_weights = {
"yolo_v8_xs_backbone": {
"metadata": {
"description": "An extra small YOLOV8 backbone",
"params": 1277680,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xs_backbone/2",
},
"yolo_v8_s_backbone": {
"metadata": {
"description": "A small YOLOV8 backbone",
"params": 5089760,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_s_backbone/2",
},
"yolo_v8_m_backbone": {
"metadata": {
"description": "A medium YOLOV8 backbone",
"params": 11872464,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_m_backbone/2",
},
"yolo_v8_l_backbone": {
"metadata": {
"description": "A large YOLOV8 backbone",
"params": 19831744,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_l_backbone/2",
},
"yolo_v8_xl_backbone": {
"metadata": {
"description": "An extra large YOLOV8 backbone",
"params": 30972080,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xl_backbone/2",
},
}
backbone_presets_with_weights = {
"yolo_v8_xs_backbone_coco": {
"metadata": {
"description": (
"An extra small YOLOV8 backbone pretrained on COCO"
),
"params": 1277680,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xs_backbone_coco/2", # noqa: E501
},
"yolo_v8_s_backbone_coco": {
"metadata": {
"description": ("A small YOLOV8 backbone pretrained on COCO"),
"params": 5089760,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_s_backbone_coco/2", # noqa: E501
},
"yolo_v8_m_backbone_coco": {
"metadata": {
"description": ("A medium YOLOV8 backbone pretrained on COCO"),
"params": 11872464,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_m_backbone_coco/2", # noqa: E501
},
"yolo_v8_l_backbone_coco": {
"metadata": {
"description": ("A large YOLOV8 backbone pretrained on COCO"),
"params": 19831744,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_l_backbone_coco/2", # noqa: E501
},
"yolo_v8_xl_backbone_coco": {
"metadata": {
"description": (
"An extra large YOLOV8 backbone pretrained on COCO"
),
"params": 30972080,
"official_name": "YOLOV8",
"path": "yolo_v8",
},
"kaggle_handle": "kaggle://keras/yolov8/keras/yolo_v8_xl_backbone_coco/2", # noqa: E501
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 2152
} | 59 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models import MiTBackbone
from keras_cv.models.segmentation.segformer.segformer_presets import ( # noqa: E501
presets,
)
from keras_cv.models.segmentation.segformer.segformer_presets import ( # noqa: E501
presets_with_weights,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
from keras_cv.utils.train import get_feature_extractor
@keras_cv_export(
["keras_cv.models.SegFormer", "keras_cv.models.segmentation.SegFormer"]
)
class SegFormer(Task):
"""A Keras model implementing the SegFormer architecture for semantic
segmentation.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) # noqa: E501
- [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/segmentation/segformer) # noqa: E501
Args:
backbone: `keras.Model`. The backbone network for the model that is
used as a feature extractor for the SegFormer encoder.
It is *intended* to be used only with the MiT backbone model which
was created specifically for SegFormers. It should either be a
`keras_cv.models.backbones.backbone.Backbone` or a `tf.keras.Model`
that implements the `pyramid_level_inputs` property with keys
"P2", "P3", "P4", and "P5" and layer names as
values.
num_classes: int, the number of classes for the detection model,
including the background class.
projection_filters: int, number of filters in the
convolution layer projecting the concatenated features into
            a segmentation map. Defaults to `256`.
Examples:
Using the class with a `backbone`:
```python
    import numpy as np
    from tensorflow import keras
    import keras_cv
images = np.ones(shape=(1, 96, 96, 3))
labels = np.zeros(shape=(1, 96, 96, 1))
backbone = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet")
model = keras_cv.models.segmentation.SegFormer(
num_classes=1, backbone=backbone,
)
# Evaluate model
model(images)
# Train model
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.fit(images, labels, epochs=3)
```
"""
def __init__(
self,
backbone,
num_classes,
projection_filters=256,
**kwargs,
):
if not isinstance(backbone, keras.layers.Layer) or not isinstance(
backbone, keras.Model
):
raise ValueError(
"Argument `backbone` must be a `keras.layers.Layer` instance "
f" or `keras.Model`. Received instead "
f"backbone={backbone} (of type {type(backbone)})."
)
inputs = backbone.input
feature_extractor = get_feature_extractor(
backbone, list(backbone.pyramid_level_inputs.values())
)
# Multi-level dictionary
features = list(feature_extractor(inputs).values())
# Get H and W of level one output
_, H, W, _ = features[0].shape
# Project all multi-level outputs onto the same dimensionality
# and feature map shape
multi_layer_outs = []
for feature_dim, feature in zip(backbone.embedding_dims, features):
out = keras.layers.Dense(
projection_filters, name=f"linear_{feature_dim}"
)(feature)
out = keras.layers.Resizing(H, W, interpolation="bilinear")(out)
multi_layer_outs.append(out)
# Concat now-equal feature maps
concatenated_outs = keras.layers.Concatenate(axis=3)(
multi_layer_outs[::-1]
)
# Fuse concatenated features into a segmentation map
seg = keras.Sequential(
[
keras.layers.Conv2D(
filters=projection_filters, kernel_size=1, use_bias=False
),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
]
)(concatenated_outs)
seg = keras.layers.Dropout(0.1)(seg)
seg = keras.layers.Conv2D(
filters=num_classes, kernel_size=1, activation="softmax"
)(seg)
output = keras.layers.Resizing(
height=inputs.shape[1],
width=inputs.shape[2],
interpolation="bilinear",
)(seg)
super().__init__(
inputs=inputs,
outputs=output,
**kwargs,
)
self.num_classes = num_classes
self.projection_filters = projection_filters
self.backbone = backbone
def get_config(self):
config = super().get_config()
config.update(
{
"num_classes": self.num_classes,
"projection_filters": self.projection_filters,
"backbone": keras.saving.serialize_keras_object(self.backbone),
}
)
return config
@classmethod
def from_preset(
cls,
preset,
num_classes,
load_weights=None,
input_shape=None,
**kwargs,
):
aliases = {
"segformer_b0": "mit_b0",
"segformer_b1": "mit_b1",
"segformer_b2": "mit_b2",
"segformer_b3": "mit_b3",
"segformer_b4": "mit_b4",
"segformer_b5": "mit_b5",
}
if preset in aliases:
preset = aliases[preset]
return super().from_preset(
preset,
load_weights=load_weights,
num_classes=num_classes,
input_shape=input_shape,
**kwargs,
)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(presets_with_weights)
@classproperty
def backbone_presets(cls):
return copy.deepcopy(MiTBackbone.presets)
| keras-cv/keras_cv/models/segmentation/segformer/segformer.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segformer/segformer.py",
"repo_id": "keras-cv",
"token_count": 3095
} | 60 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.stable_diffusion.padded_conv2d import PaddedConv2D
class AttentionBlock(keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dim
self.norm = keras.layers.GroupNormalization(epsilon=1e-5)
self.q = PaddedConv2D(output_dim, 1)
self.k = PaddedConv2D(output_dim, 1)
self.v = PaddedConv2D(output_dim, 1)
self.proj_out = PaddedConv2D(output_dim, 1)
def call(self, inputs):
x = self.norm(inputs)
q, k, v = self.q(x), self.k(x), self.v(x)
# Compute attention
shape = ops.shape(q)
h, w, c = shape[1], shape[2], shape[3]
q = ops.reshape(q, (-1, h * w, c)) # b, hw, c
k = ops.transpose(k, (0, 3, 1, 2))
k = ops.reshape(k, (-1, c, h * w)) # b, c, hw
y = q @ k
y = y * 1 / ops.sqrt(ops.cast(c, self.compute_dtype))
y = keras.activations.softmax(y)
# Attend to values
v = ops.transpose(v, (0, 3, 1, 2))
v = ops.reshape(v, (-1, c, h * w))
y = ops.transpose(y, (0, 2, 1))
x = v @ y
x = ops.transpose(x, (0, 2, 1))
x = ops.reshape(x, (-1, h, w, c))
return self.proj_out(x) + inputs
| keras-cv/keras_cv/models/stable_diffusion/attention_block.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/attention_block.py",
"repo_id": "keras-cv",
"token_count": 855
} | 61 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""IoU3D using a custom TF op."""
from keras_cv.utils.resource_loader import LazySO
keras_cv_custom_ops = LazySO("custom_ops/_keras_cv_custom_ops.so")
def iou_3d(y_true, y_pred):
"""Implements IoU computation for 3D upright rotated bounding boxes.
Note that this is implemented using a custom TensorFlow op. If you don't
have KerasCV installed with custom ops, calling this will fail.
Boxes should have the format CENTER_XYZ_DXDYDZ_PHI. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Sample Usage:
```python
y_true = [[0, 0, 0, 2, 2, 2, 0], [1, 1, 1, 2, 2, 2, 3 * math.pi / 4]]
y_pred = [[1, 1, 1, 2, 2, 2, math.pi / 4], [1, 1, 1, 2, 2, 2, 0]]
iou_3d(y_true, y_pred)
```
"""
return keras_cv_custom_ops.ops.kcv_pairwise_iou3d(y_true, y_pred)
| keras-cv/keras_cv/ops/iou_3d.py/0 | {
"file_path": "keras-cv/keras_cv/ops/iou_3d.py",
"repo_id": "keras-cv",
"token_count": 543
} | 62 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import optimizers
from keras_cv.losses import SimCLRLoss
from keras_cv.models import ResNet50V2Backbone
from keras_cv.tests.test_case import TestCase
from keras_cv.training import SimCLRAugmenter
from keras_cv.training import SimCLRTrainer
# TODO(jbischof): revisit "extra_large" tag once development resumes.
# These tests are currently some of the slowest in our repo.
@pytest.mark.extra_large
class SimCLRTrainerTest(TestCase):
def test_train_without_probing(self):
simclr_without_probing = SimCLRTrainer(
self.build_encoder(),
augmenter=SimCLRAugmenter(value_range=(0, 255)),
)
images = tf.random.uniform((10, 512, 512, 3))
simclr_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
simclr_without_probing.fit(images)
def build_encoder(self):
return keras.Sequential(
[
ResNet50V2Backbone(include_rescaling=False),
layers.GlobalAveragePooling2D(name="avg_pool"),
]
)
| keras-cv/keras_cv/training/contrastive/simclr_trainer_test.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/simclr_trainer_test.py",
"repo_id": "keras-cv",
"token_count": 676
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.backend import keras
def scale_loss_for_distribution(loss_value):
"""Scales and returns the given loss value by the number of replicas."""
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
if num_replicas > 1:
loss_value *= 1.0 / num_replicas
return loss_value
def convert_inputs_to_tf_dataset(
x=None, y=None, sample_weight=None, batch_size=None
):
if sample_weight is not None:
raise ValueError(
"Contrastive trainers do not yet support `sample_weight`."
)
if isinstance(x, tf.data.Dataset):
if y is not None or batch_size is not None:
            raise ValueError(
                "When `x` is a `tf.data.Dataset`, please do not "
                "provide a value for `y` or `batch_size`. "
                f"Got `y={y}`, `batch_size={batch_size}`."
            )
return x
# batch_size defaults to 32, as it does in fit().
batch_size = batch_size or 32
# Parse inputs
inputs = x
if y is not None:
inputs = (x, y)
# Construct tf.data.Dataset
dataset = tf.data.Dataset.from_tensor_slices(inputs)
if batch_size is not None:
dataset = dataset.batch(batch_size)
return dataset
def get_feature_extractor(model, layer_names, output_keys=None):
"""Create a feature extractor model with augmented output.
This method produces a new `keras.Model` with the same input signature
as the source but with the layers in `layer_names` as the output.
This is useful for downstream tasks that require more output than the
final layer of the backbone.
Args:
model: keras.Model. The source model.
layer_names: list of strings. Names of layers to include in the
output signature.
output_keys: optional, list of strings. Key to use for each layer in
the model's output dictionary.
Returns:
`keras.Model` which has dict as outputs.
"""
if not output_keys:
output_keys = layer_names
items = zip(output_keys, layer_names)
outputs = {key: model.get_layer(name).output for key, name in items}
return keras.Model(inputs=model.inputs, outputs=outputs)
| keras-cv/keras_cv/utils/train.py/0 | {
"file_path": "keras-cv/keras_cv/utils/train.py",
"repo_id": "keras-cv",
"token_count": 1036
} | 64 |
[metadata]
license_files = LICENSE
description_file = README.md
version = attr: keras_cv.__version__
[tool:pytest]
filterwarnings =
error
ignore::DeprecationWarning
ignore::ImportWarning
ignore::RuntimeWarning
ignore::PendingDeprecationWarning
ignore::FutureWarning
[flake8]
max-line-length = 80
per-file-ignores =
./keras_cv/__init__.py:E402, F401
./examples/**/*:E402
**/__init__.py:F401
ignore =
# Conflicts with black
E203
# defaults flake8 ignores
E121,E123,E126,E226,E24,E704,W503,W504
# Function name should be lowercase
N802
# lowercase ... imported as non lowercase
# Useful to ignore for "import keras.backend as K"
N812
# do not use bare 'except'
E722
# Escape characters check.
# Conflict with pytest error message regex.
W605
# Ignore for tf.cond lambda
E731
| keras-cv/setup.cfg/0 | {
"file_path": "keras-cv/setup.cfg",
"repo_id": "keras-cv",
"token_count": 343
} | 65 |
# Getting started with the Keras Sequential model
The `Sequential` model is a linear stack of layers.
You can create a `Sequential` model by passing a list of layer instances to the constructor:
```python
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential([
Dense(32, input_shape=(784,)),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
```
You can also simply add layers via the `.add()` method:
```python
model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
```
----
## Specifying the input shape
The model needs to know what input shape it should expect.
For this reason, the first layer in a `Sequential` model (and only the first, because the following layers can do automatic shape inference) needs to receive information about its input shape.
There are several ways to do this:
- Pass an `input_shape` argument to the first layer. This is a shape tuple (a tuple of integers or `None` entries, where `None` indicates that any positive integer may be expected).
- Some 2D layers, such as `Dense`, support the specification of their input shape via the argument `input_dim`; similarly, some 3D layers support the arguments `input_dim` and `input_length`.
- If you ever need to specify a fixed batch size for your inputs (this is useful for stateful recurrent networks), you can pass a `batch_size` argument to a layer. If you pass both `batch_size=32` and `input_shape=(6, 8)`, the layer will then expect every batch of inputs to have the shape `(32, 6, 8)` (see the sketch after the examples below).
As such, the following snippets are strictly equivalent:
```python
model = Sequential()
model.add(Dense(32, input_shape=(784,)))
```
```python
model = Sequential()
model.add(Dense(32, input_dim=784))
```
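For the `batch_size` case described above, a minimal sketch (assuming only the standard Keras layer keyword arguments) would look like the following; the model will then expect every batch of inputs to have shape `(32, 6, 8)`:
```python
model = Sequential()
model.add(Dense(32, input_shape=(6, 8), batch_size=32))
```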
----
## Compilation
Before training a model, you need to configure its learning process, which is done via the `compile` method. It receives three arguments:
- An optimizer. This could be the string identifier of an existing optimizer (such as `rmsprop` or `adagrad`), or an instance of the `Optimizer` class. See: [optimizers](/optimizers).
- A loss function. This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as `categorical_crossentropy` or `mse`), or it can be an objective function. See: [losses](/losses).
- A list of metrics. For a classification problem you will want to set this to `metrics=['accuracy']`. A metric can be the string identifier of an existing metric or a custom metric function you define yourself.
```python
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# For a binary classification problem
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# For a mean squared error regression problem
model.compile(optimizer='rmsprop',
loss='mse')
# Define a custom metric
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
```
----
## Training
Keras models are trained on Numpy arrays of input data and labels. To train a model, you will typically use the `fit` function. [Read its documentation here](/models/sequential).
```python
# For a single-input model with 2 classes (binary classification):
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, labels, epochs=10, batch_size=32)
```
```python
# For a single-input model with 10 classes (categorical classification):
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Generate dummy data
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(10, size=(1000, 1))
# Convert labels to categorical one-hot encoding
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
```
----
## Examples
Here are a few examples to get you started!
In the [examples folder](https://github.com/keras-team/keras/tree/master/examples) you will also find example models for real datasets:
- CIFAR10 small images classification: Convolutional Neural Network (CNN) with realtime data augmentation
- IMDB movie review sentiment classification: LSTM over sequences of words
- Reuters newswires topic classification: Multilayer Perceptron (MLP)
- MNIST handwritten digits classification: MLP & CNN
- Character-level text generation with LSTM
...and more.
### Multilayer Perceptron (MLP) for multi-class classification:
```python
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# In the first layer, you must specify the expected input data shape: here, 20-dimensional vectors.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### MLP for binary classification:
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
# Generate dummy data
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### VGG-like convnet
```python
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
# Generate dummy data
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# 入力: サイズが100x100で3チャンネルをもつ画像 -> (100, 100, 3) のテンソル
# それぞれのlayerで3x3の畳み込み処理を適用している
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
```
### LSTMを用いた系列データ分類:
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
model = Sequential()
model.add(Embedding(max_features, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### 1D Convolutionを用いた系列データ分類:
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(seq_length, 100)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### Stacked LSTMを用いた系列データ分類
このモデルは,3つのLSTM layerを繋げ,より高次の時系列表現を学習できるような設計となっています.
最初の2つのLSTMは出力系列をすべて出力しています.
しかし,最後のLSTMは最後のステップの状態のみを出力しており,データの次元が落ちています(入力系列を一つのベクトルにしているようなものです).
<img src="https://keras.io/img/regular_stacked_lstm.png" alt="stacked LSTM" style="width: 300px;"/>
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
num_classes = 10
# 想定する入力データshape: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # 32次元のベクトルのsequenceを出力する
model.add(LSTM(32, return_sequences=True)) # 32次元のベクトルのsequenceを出力する
model.add(LSTM(32)) # 32次元のベクトルを一つ出力する
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 疑似訓練データを生成する
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.random((1000, num_classes))
# 疑似検証データを生成する
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.random((100, num_classes))
model.fit(x_train, y_train,
batch_size=64, epochs=5,
validation_data=(x_val, y_val))
```
### 同じようなStacked LSTMを"stateful"にする
Stateful recurrent modelは,バッチを処理して得られた内部状態を,次のバッチの内部状態の初期値として再利用するモデルです.
これにより,計算量を抑えたまま,より長い系列を処理できるようになります.
[FAQにもstateful RNNsについての情報があります](/getting-started/faq/#how-can-i-use-stateful-rnns)
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32
# 想定している入力バッチshape: (batch_size, timesteps, data_dim)
# 注意: ネットワークがstatefulであるため,batch_input_shapeを完全に指定して与えなければなりません
# バッチkのi番目のサンプルは,バッチk-1のi番目のサンプルの次の時系列となります.
model = Sequential()
model.add(LSTM(32, return_sequences=True, stateful=True,
batch_input_shape=(batch_size, timesteps, data_dim)))
model.add(LSTM(32, return_sequences=True, stateful=True))
model.add(LSTM(32, stateful=True))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 疑似訓練データを生成
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))
# 疑似検証データを生成
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random((batch_size * 3, num_classes))
model.fit(x_train, y_train,
batch_size=batch_size, epochs=5, shuffle=False,
validation_data=(x_val, y_val))
```
| keras-docs-ja/sources/getting-started/sequential-model-guide.md/0 | {
"file_path": "keras-docs-ja/sources/getting-started/sequential-model-guide.md",
"repo_id": "keras-docs-ja",
"token_count": 6628
} | 66 |
## 損失関数の利用方法
損失関数(目的関数や最適化スコア関数とも呼ばれます)はモデルをコンパイルする際に必要なパラメータの1つです:
```python
model.compile(loss='mean_squared_error', optimizer='sgd')
```
```python
from keras import losses
model.compile(loss=losses.mean_squared_error, optimizer='sgd')
```
既存の損失関数の名前を引数に与えるか,各データ点に対してスカラを返し,以下の2つの引数を取るTensorFlow/Theanoのシンボリック関数を与えることができます:
- __y_true__: 正解ラベル.TensorFlow/Theano テンソル
- __y_pred__: 予測値.y_trueと同じshapeのTensorFlow/Theano テンソル
実際に最適化される目的関数値は全データ点における出力の平均です.
このような関数の実装例に関しては,[losses source](https://github.com/keras-team/keras/blob/master/keras/losses.py)を参照してください.
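例えば,以下は平均二乗誤差に相当する独自の損失関数を定義して`compile`に渡す簡単な例です(モデルとデータは説明用の仮のものです).
```python
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

def my_mean_squared_error(y_true, y_pred):
    # 各データ点に対してスカラを返します
    return K.mean(K.square(y_pred - y_true), axis=-1)

# 説明用の簡単なモデルとダミーデータ
model = Sequential()
model.add(Dense(1, input_dim=10))
model.compile(optimizer='sgd', loss=my_mean_squared_error)

x = np.random.random((100, 10))
y = np.random.random((100, 1))
model.fit(x, y, epochs=1, batch_size=32)
```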
## 利用可能な損失関数
### mean_squared_error
```python
keras.losses.mean_squared_error(y_true, y_pred)
```
---
### mean_absolute_error
```python
keras.losses.mean_absolute_error(y_true, y_pred)
```
---
### mean_absolute_percentage_error
```python
keras.losses.mean_absolute_percentage_error(y_true, y_pred)
```
---
### mean_squared_logarithmic_error
```python
keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
```
---
### squared_hinge
```python
keras.losses.squared_hinge(y_true, y_pred)
```
---
### hinge
```python
keras.losses.hinge(y_true, y_pred)
```
---
### categorical_hinge
```python
keras.losses.categorical_hinge(y_true, y_pred)
```
---
### logcosh
```python
keras.losses.logcosh(y_true, y_pred)
```
予測誤差のハイパボリックコサインの対数.
`log(cosh(x))`は`x`が小さければ`(x ** 2) / 2`とほぼ等しくなり,`x`が大きければ`abs(x) - log(2)`とほぼ等しくなります.つまり'logcosh'は平均二乗誤差とほぼ同じように働きます.しかし,時折ある乱雑な誤った予測にそれほど強く影響されません.
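この性質はNumPyで簡単に確認できます(値は説明用の例です).
```python
import numpy as np

x = 0.1
print(np.log(np.cosh(x)), x ** 2 / 2)          # 小さいxではほぼ一致します
x = 10.0
print(np.log(np.cosh(x)), abs(x) - np.log(2))  # 大きいxでもほぼ一致します
```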
---
### categorical_crossentropy
```python
keras.losses.categorical_crossentropy(y_true, y_pred)
```
---
### sparse_categorical_crossentropy
```python
keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
```
---
### binary_crossentropy
```python
keras.losses.binary_crossentropy(y_true, y_pred)
```
---
### kullback_leibler_divergence
```python
keras.losses.kullback_leibler_divergence(y_true, y_pred)
```
---
### poisson
```python
keras.losses.poisson(y_true, y_pred)
```
---
### cosine_proximity
```python
keras.losses.cosine_proximity(y_true, y_pred)
```
---
__NOTE__: `categorical_crossentropy`を使う場合,目的値はカテゴリカルにしなければいけません.(例.もし10クラスなら,サンプルに対する目的値は,サンプルのクラスに対応する次元の値が1,それ以外が0の10次元のベクトルです).
*整数の目的値からカテゴリカルな目的値に*変換するためには,Keras utilityの`to_categorical`を使えます.
```python
from keras.utils import to_categorical
categorical_labels = to_categorical(int_labels, num_classes=None)
```
`sparse_categorical_crossentropy`を使う場合,目的値は整数値にしなければいけません.もしカテゴリカルな目的値を持つ場合,`categorical_crossentropy`を使えます.
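例えば,整数ラベルをそのまま使う簡単な例です(データは説明用のダミーです).
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

data = np.random.random((1000, 100))
int_labels = np.random.randint(10, size=(1000, 1))  # 整数のままのラベル

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(data, int_labels, epochs=1, batch_size=32)
```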
| keras-docs-ja/sources/losses.md/0 | {
"file_path": "keras-docs-ja/sources/losses.md",
"repo_id": "keras-docs-ja",
"token_count": 1649
} | 67 |
# 깃헙 이슈와 풀 리퀘스트에 관해서
버그를 찾으셨나요? 새로이 제안할 기능이 있습니까? 코드베이스에 기여를 하고 싶으신가요? 이 안내문을 먼저 읽어주십시오.
## 버그 리포트
작성하신 코드가 작동하지 않는게 케라스쪽의 문제라고 확신하시나요? 버그 리포트를 하려면 다음의 절차를 따라주십시오.
1. 버그가 벌써 수정되었을 수도 있습니다. 먼저 가장 최신의 케라스 마스터 브랜치와 Theano/TensorFlow/CNTK 마스터 브랜치로 업데이트하십시오.
Theano를 쉽게 업데이트 하려면: `pip install git+git://github.com/Theano/Theano.git --upgrade`
2. 비슷한 이슈를 찾아보십시오. 이슈를 찾을 때 `is:open`을 지워서 해결된 문제도 찾아보십시오. 다른 누군가 이미 이 버그를 해결했을 수도 있습니다. 케라스의 [FAQ](http://keras.io/faq/)를 확인하는 것도 잊지 마십시오. 아직도 문제가 해결되지 않았나요? 깃헙에 이슈를 열어 저희에게 알려주십시오.
3. 반드시 구성에 대해서 자세히 말씀해주십시오: 어떤 OS를 사용하십니까? 케라스 백엔드는 어떤 것을 사용하십니까? GPU에 돌리고 계시나요? 그렇다면 Cuda와 cuDNN의 버전은 어떻게 됩니까? GPU는 어느 제품을 사용하십니까?
4. 이슈를 재현할 스크립트를 제공해 주십시오. 스크립트는 그 자체로 작동해야 하며 외부 데이터 다운로드를 필요로 해서는 안됩니다(모델을 어느 테스트 데이터에 작동시켜야 한다면 임의로 만들어진 데이터를 사용해주십시오). 코드를 게시할 때는 Github Gists를 사용할 것을 추천드립니다. 재현 불가능한 이슈는 닫히게 됩니다.
5. 가능하다면 버그를 스스로 고쳐보십시오 --가능하다면요!
더 많은 정보를 제공해 주실수록, 저희가 버그의 유무를 확인하기 쉬워지며 더 빨리 조치를 취할 수 있습니다. 이슈가 빨리 해결되길 바라신다면, 위의 절차를 따라주시는 것이 중요합니다.
---
## 새 기능을 신청하려면
깃헙 이슈를 사용해서 케라스에 추가됐으면 하는 기능이나 케라스 API에 대한 변경사항을 요구할 수 있습니다.
1. 원하시는 기능과 왜 그 기능이 중요한지에 대해서 분명하고 자세하게 설명해주십시오. 저희는 소수가 아닌 다수의 사용자에게 유용한 기능을 고려한다는 점을 명심해 주십시오. 만약 특정 사용자만을 고려한다면, 애드온 라이브러리를 작성하시는 것을 추천드립니다. 케라스는 API와 코드베이스가 지나치게 방대해지는 것을 피하려고 합니다.
2. 염두하시는 API를 보여주고 새 기능의 사용처를 입증하는 코드를 제공해주십시오. 물론 이 시점에서 진짜 코드를 작성하실 필요까지는 없습니다!
3. 기능에 대해서 논의 후 풀 리퀘스트를 넣을 수도 있습니다. 가능하면 코드를 작성해 주십시오. 시간은 적고 일은 많기에, 코드를 작성해 주시면 프로세스를 좀 더 빠르게 진행할 수 있습니다.
---
## 케라스에 기여하려면
[케라스 게시판입니다](https://github.com/keras-team/keras/projects/1). 여기에 현재 주요한 이슈와 추가할 기능을 게시합니다. 케라스에 기여하려면 이 게시판에서 시작하시면 됩니다.
---
## 풀 리퀘스트
**풀 리퀘스트는 어디에 제출합니까?**
1. **케라스 개선과 버그해결** [케라스 `master` 브랜치](https://github.com/keras-team/keras/tree/master)로 가시면 됩니다.
2. **새로운 기능** [Requests for Contributions](https://github.com/keras-team/keras/projects/1)에 게시된 케라스의 코어에 관련한 새로운 기능이 아니라면, 레이어나 데이터셋에 관한 새로운 기능에 관련해서는 [keras-contrib](https://github.com/farizrahman4u/keras-contrib)로 가시면 됩니다.
(버그 해결, 설명서 개선, 혹은 새로운 기능 추가가 아닌) **코딩 스타일**에 관련된 풀 리퀘스트는 거부될 가능성이 높습니다.
개선사항을 제출하는 빠른 가이드라인입니다:
1. 기능상 변경사항에 관련된 PR의 경우, 디자인 설명서를 먼저 작성하고 케라스 메일 리스트에 전송해서 변경이 필요한지 여부와, 어떻게 변경할지를 논의해야합니다. 이 작업은 PR을 진행하고 나서 PR이 거절될 확률을 줄여줍니다! 물론 간단한 버그 수정에 관한 PR이라면 그럴 필요까지는 없습니다. 디자인 설명서를 작성하고 제출하는 절차는 다음과 같습니다:
- [Google Doc template](https://docs.google.com/document/d/1ZXNfce77LDW9tFAj6U5ctaJmI5mT7CQXOFMEAZo-mAA/edit#)을 복사해서 새로운 Google doc에 옮겨주세요.
- 내용을 채워 주십시오. 반드시 코드 예시를 넣어 주시기 바랍니다. 코드를 삽입하려면 [CodePretty](https://chrome.google.com/webstore/detail/code-pretty/igjbncgfgnfpbnifnnlcmjfbnidkndnh?hl=en)와 같은 Google Doc 익스텐션을 사용하시면 됩니다 (다른 비슷한 익스텐션도 사용가능합니다).
- 공유 셋팅을 "링크가 있는 모든 사용자는 댓글을 달 수 있습니다"로 설정해주십시오.
- 저희가 알기 쉽도록 `[API DESIGN REVIEW]`(전부 대문자)로 시작하는 제목을 달아 문서를 `[email protected]`으로 보내주십시오.
- 댓글을 기다렸다가 댓글이 달리면 대답해 주십시오. 필요한대로 제안안을 수정해주시면 됩니다.
- 제안안은 궁극적으로 거부되거나 승인됩니다. 승인되면 PR을 보내시거나 다른 이들에게 PR을 작성할 것을 부탁하시면 됩니다.
2. 코드를 작성하십시오 (아니면 다른 이들이 작성토록 해주십시오). 여기가 어려운 부분입니다!
3. 새로 만드신 함수나 클래스는 제대로 된 독스트링을 갖춰야 합니다. 작업하신 모든 코드에는 최신화된 독스트링과 사용설명서가 있어야 합니다. **독스트링 스타일을 지켜주십시오.** 특히 MarkDown 포맷으로 작업해야 하고, (가능한 경우) `Arguments`, `Returns`, `Raises`의 섹션이 있어야 합니다. 코드베이스에 있는 다른 독스트링을 참고해 주십시오.
4. 테스트를 작성해 주십시오. 작성하신 코드는 전 부분에 걸쳐 유닛 테스트를 갖춰야 합니다. 이는 PR을 신속하게 병합하려면 꼭 필요합니다.
5. 저희 테스트슈트를 로컬컴퓨터에 작동시키십시오. 어렵지 않습니다: 케라스 폴더에서, 다음을 실행하면 됩니다: `py.test tests/`.
- 또한 테스트 필요요소도 설치하셔야 합니다: `pip install -e .[tests]`.
6. 다음 조건에서 테스트가 전부 통과되는지 확인해 주십시오:
- Theano 백엔드, Python 2.7과 Python 3.6. Theano가 개발 버전인지 확인하십시오.
- TensorFlow 백엔드, Python 2.7과 Python 3.6. TensorFlow가 개발 버전인지 확인하십시오.
- CNTK 백엔드, Python 2.7과 Python 3.6. CNTK가 개발 버전인지 확인하십시오.
7. 저희는 PEP8 문법 규칙을 사용합니다만, 각 라인의 길이에 대해서는 크게 신경쓰지 않습니다. 그래도 각 라인의 길이가 적당한지는 확인해 주십시오. 보다 손쉬운 방법으로, PEP8 linter를 사용하시는 것을 추천드립니다:
- PEP8 packages를 설치합니다: `pip install pep8 pytest-pep8 autopep8`
- 독립형 PEP8 check을 실행합니다: `py.test --pep8 -m pep8`
- 몇몇 PEP8 에러는 다음을 실행해서 자동적으로 수정할 수 있습니다: `autopep8 -i --select <errors> <FILENAME>` 예를 들면: `autopep8 -i --select E128 tests/keras/backend/test_backends.py`
8. 커밋 시 적절하고 자세한 커밋 메시지를 첨부해 주십시오.
9. 사용설명서를 최신화 시켜주십시오. 새로운 기능을 도입한다면, 그 새로운 기능의 사용처를 보여주는 코드를 포함시켜주시기 바랍니다.
10. PR을 제출하십시오. 이전 논의에서 요구하신 변경사항이 승인되었거나 적절한 독스트링/사용설명서와 함께 완전한 유닛테스트를 갖추셨다면, PR이 신속하게 병합될 가능성이 높습니다.
---
## 예시를 새로 더하려면
케라스 소스코드에 기여하지 않더라도 명료하고 강력한 케라스 어플리케이션을 만드셨다면, 범례 컬렉션에 추가하시는 것을 고려해주십시오. [기존의 범례](https://github.com/keras-team/keras/tree/master/examples)에서 케라스 문법에 부합하는 코드를 보시고 동일한 스타일로 스크립트를 작성해 주시기 바랍니다.
| keras-docs-ko/sources/contributing.md/0 | {
"file_path": "keras-docs-ko/sources/contributing.md",
"repo_id": "keras-docs-ko",
"token_count": 7240
} | 68 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L69)</span>
### MaxPooling1D
```python
keras.layers.MaxPooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last')
```
시계열 데이터에 대한 최대값 풀링<sub>max pooling</sub>.
__인자__
- __pool_size__: `int`. 최대값 풀링 창<sub>window</sub>의 크기.
- __strides__: `int` 또는 `None`. 차원을 축소할 정도. 스트라이드.
예: 2는 입력 값을 반으로 줄입니다.
`None`일 경우, 기본값으로 `pool_size`을 사용합니다.
- __padding__: `str`. `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`. `'channels_last'`(기본값) 또는 `'channels_first'`.
입력 인자의 순서.
`'channels_last'`는 `(batch, steps, features)`, `'channels_first'`는 `(batch, features, steps)` 형태를 의미합니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, steps)`
형태의 3D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, downsampled_steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, downsampled_steps)`
형태의 3D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L217)</span>
### MaxPooling2D
```python
keras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)
```
공간 데이터에 대한 최대값 풀링.
__인자__
- __pool_size__: `int` 또는 2개의 `int`로 이루어진 튜플.
(가로, 세로)의 차원을 축소할 정도.
예: (2, 2)는 2D 입력값을 각 차원에서 반으로 축소합니다.
`int` 하나만 설정된 경우, 두 차원에 동일한 창 크기를 사용합니다.
- __strides__: `int`, 2개의 `int`로 이루어진 튜플 또는 `None`. 차원을 축소할 정도. 스트라이드.
`None`인 경우 기본값으로 `pool_size`를 사용합니다.
- __padding__: `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서.
`'channels_last'`는 `(batch, rows, cols, channels)`, `'channels_first'`는
`(batch, channels, rows, cols)` 형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, rows, cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, rows, cols)`
형태의 4D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, pooled_rows, pooled_cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, pooled_rows, pooled_cols)`
형태의 4D 텐서.
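__예시__
다음은 MaxPooling2D가 출력 형태를 어떻게 바꾸는지 보여주는 간단한 예시입니다(입력 크기는 임의로 가정한 값입니다).
```python
from keras.models import Sequential
from keras.layers import MaxPooling2D

model = Sequential()
model.add(MaxPooling2D(pool_size=(2, 2), input_shape=(24, 24, 3)))
model.summary()  # 출력 형태: (None, 12, 12, 3)
```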
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L386)</span>
### MaxPooling3D
```python
keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None)
```
(공간 혹은 시공간) 3D 데이터에 대한 최대값 풀링.
__인자__
- __pool_size__: `int` 또는 3개의 `int`로 이루어진 튜플.
(dim1, dim2, dim3)의 차원을 축소할 정도.
예: (2, 2, 2)는 3D 입력값을 각 차원에서 반으로 축소합니다.
`int` 하나만 설정된 경우, 세 차원에 동일한 창 크기를 사용합니다.
- __strides__: `int` 또는 3개의 `int`로 이루어진 튜플 또는 `None`. 차원을 축소할 정도. 스트라이드.
`None`인 경우 기본값으로 `pool_size`를 사용합니다.
- __padding__: `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서. `'channels_last'`는
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
, `'channels_first'`는
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태의 5D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
형태의 5D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L117)</span>
### AveragePooling1D
```python
keras.layers.AveragePooling1D(pool_size=2, strides=None, padding='valid', data_format='channels_last')
```
시계열 데이터에 대한 평균 풀링.
__인자__
- __pool_size__: `int`. 평균 풀링 창의 크기.
- __strides__: `int` 또는 `None`. 차원을 축소할 정도. 스트라이드.
예: 2는 입력값을 반으로 축소합니다.
`None`인 경우, 기본값으로 `pool_size`을 사용합니다.
- __padding__: `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`.
`'channels_last'`(기본값) 또는 `'channels_first'`.
입력값의 형태.
`'channels_last'`는 `(batch, steps, features)`, `'channels_first'`는
`(batch, features, steps)` 형태를 의미합니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, steps)`
형태의 3D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, downsampled_steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, downsampled_steps)`
형태의 3D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L272)</span>
### AveragePooling2D
```python
keras.layers.AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)
```
공간 데이터에 대한 평균 풀링.
__인자__
- __pool_size__: `int` 또는 2개의 `int`로 이루어진 튜플.
(가로, 세로)의 차원을 축소할 정도.
예: (2, 2)는 2D 입력값을 각 차원에서 반으로 축소합니다.
`int` 하나만 설정된 경우, 두 차원에 동일한 창 크기를 사용합니다.
- __strides__: `int` 또는 2개의 `int`로 이루어진 튜플 또는 `None`. 차원을 축소할 정도. 스트라이드.
`None`인 경우 기본값으로 `pool_size`를 사용합니다.
- __padding__: `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서.
`'channels_last'`는 `(batch, rows, cols, channels)`, `'channels_first'`는
`(batch, channels, rows, cols)` 형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, rows, cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, rows, cols)`
형태의 4D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, pooled_rows, pooled_cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, pooled_rows, pooled_cols)`
형태의 4D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L436)</span>
### AveragePooling3D
```python
keras.layers.AveragePooling3D(pool_size=(2, 2, 2), strides=None, padding='valid', data_format=None)
```
(공간 혹은 시공간) 3D 데이터에 대한 평균 풀링.
__인자__
- __pool_size__: `int` 또는 3개의 `int`로 이루어진 튜플.
(dim1, dim2, dim3)의 차원을 축소할 정도.
예: (2, 2, 2)는 3D 입력값을 각 차원에서 반으로 축소합니다.
`int` 하나만 설정된 경우, 세 차원에 동일한 창 크기를 사용합니다.
- __strides__: `int` 또는 3개의 `int`로 이루어진 튜플 또는 `None`. 차원을 축소할 정도. 스트라이드.
`None`인 경우 기본값으로 `pool_size`를 사용합니다.
- __padding__: `'valid'` 또는 `'same'`(대소문자 무시).
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서. `'channels_last'`는
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
, `'channels_first'`는
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태의 5D 텐서.
__출력 형태__
- `data_format='channels_last'`이면
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
형태의 5D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L557)</span>
### GlobalMaxPooling1D
```python
keras.layers.GlobalMaxPooling1D(data_format='channels_last')
```
시계열 데이터에 대한 전역 최대값 풀링.
__인자__
- __data_format__: `str`.
`'channels_last'`(기본값) 또는 `'channels_first'`.
입력값의 형태.
`'channels_last'`는 `(batch, steps, features)`, `'channels_first'`는
`(batch, features, steps)` 형태를 의미합니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, steps)`
형태의 3D 텐서.
__출력 형태__
`(batch_size, features)`
형태의 2D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L511)</span>
### GlobalAveragePooling1D
```python
keras.layers.GlobalAveragePooling1D(data_format='channels_last')
```
시계열 데이터에 대한 전역 평균 풀링.
__인자__
- __data_format__: `str`.
`'channels_last'`(기본값) 또는 `'channels_first'`.
입력값의 형태.
`'channels_last'`는 `(batch, steps, features)`, `'channels_first'`는
`(batch, features, steps)` 형태를 의미합니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, steps, features)`
형태의 3D 텐서.
- `data_format='channels_first'`이면
`(batch_size, features, steps)`
형태의 3D 텐서.
__출력 형태__
`(batch_size, features)`
형태의 2D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L647)</span>
### GlobalMaxPooling2D
```python
keras.layers.GlobalMaxPooling2D(data_format=None)
```
공간 데이터에 대한 전역 최대값 풀링.
__인자__
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서.
`'channels_last'`는 `(batch, rows, cols, channels)`, `'channels_first'`는
`(batch, channels, rows, cols)` 형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, rows, cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, rows, cols)`
형태의 4D 텐서.
__출력 형태__
`(batch_size, channels)`
형태의 2D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L612)</span>
### GlobalAveragePooling2D
```python
keras.layers.GlobalAveragePooling2D(data_format=None)
```
공간 데이터에 대한 전역 평균 풀링.
__인자__
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서.
`'channels_last'`는 `(batch, rows, cols, channels)`, `'channels_first'`는
`(batch, channels, rows, cols)` 형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, rows, cols, channels)`
형태의 4D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, rows, cols)`
형태의 4D 텐서.
__출력 형태__
`(batch_size, channels)`
형태의 2D 텐서.
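__예시__
다음은 합성곱 층의 출력을 GlobalAveragePooling2D로 2D 텐서로 줄이는 간단한 예시입니다(형태는 임의로 가정한 값입니다).
```python
from keras.models import Sequential
from keras.layers import Conv2D, GlobalAveragePooling2D, Dense

model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(GlobalAveragePooling2D())  # 출력 형태: (None, 16)
model.add(Dense(10, activation='softmax'))
model.summary()
```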
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L742)</span>
### GlobalMaxPooling3D
```python
keras.layers.GlobalMaxPooling3D(data_format=None)
```
(공간 혹은 시공간) 3D 데이터에 대한 전역 최대값 풀링
__인자__
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서. `'channels_last'`는
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
, `'channels_first'`는
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태의 5D 텐서.
__출력 형태__
`(batch_size, channels)`
형태의 2D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/pooling.py#L707)</span>
### GlobalAveragePooling3D
```python
keras.layers.GlobalAveragePooling3D(data_format=None)
```
(공간 혹은 시공간) 3D 데이터에 대한 전역 평균 풀링
__인자__
- __data_format__: `str`.
`'channels_last'` 또는 `'channels_first'`.
입력 인자의 순서. `'channels_last'`는
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`,
`'channels_first'`는 `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태를 의미합니다.
기본 설정은 `~/.keras/keras.json`의
`image_data_format`에서 설정할 수 있습니다.
따로 변경하지 않으면, 기본 설정은 `'channels_last'`입니다.
__입력 형태__
- `data_format='channels_last'`이면
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
형태의 5D 텐서.
- `data_format='channels_first'`이면
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
형태의 5D 텐서.
__출력 형태__
`(batch_size, channels)`
형태의 2D 텐서.
| keras-docs-ko/sources/layers/pooling.md/0 | {
"file_path": "keras-docs-ko/sources/layers/pooling.md",
"repo_id": "keras-docs-ko",
"token_count": 9803
} | 69 |
## 모델 시각화
케라스는(`graphviz`를 사용해서) 케라스 모델을 그래프로 그리기 위한 유틸리티 함수를 제공합니다.
아래 예시는 모델의 그래프를 그려주고 그 결과를 파일로 저장합니다:
```python
from keras.utils import plot_model
plot_model(model, to_file='model.png')
```
`plot_model`은 네 가지 인자를 전달받습니다:
- `show_shapes`(기본값은 `False`)는 결과의 형태을 그래프에 나타낼 것인지 조정합니다.
- `show_layer_names`(기본값은 `True`)는 층의 이름을 그래프에 나타낼 것인지 조정합니다.
- `expand_nested`(기본값은 `False`)는 중첩된 모델을 그래프상에서 클러스터로 확장할 것인지 조정합니다.
- `dpi`(기본값은 96)는 이미지 dpi를 조정합니다.
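예를 들어, 위의 인자들을 함께 사용하면 다음과 같습니다(모델과 파일 이름은 임의의 예시입니다).
```python
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import plot_model

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))

plot_model(model, to_file='model_shapes.png',
           show_shapes=True, show_layer_names=True,
           expand_nested=False, dpi=96)
```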
또한 직접 `pydot.Graph` 오브젝트를 만들어 사용할 수도 있습니다.
예를 들어 IPython notebook에 나타내려면:
```python
from IPython.display import SVG
from keras.utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
## 학습 히스토리 시각화
케라스 `Model`의 `fit()` 메소드는 `History` 오브젝트를 반환합니다. `History.history` 속성<sub>attribute</sub>은 각 에폭마다 계산된 학습 손실 및 평가 지표가 순서대로 기록된 딕셔너리입니다. 검증 데이터를 적용한 경우에는 해당 손실 및 지표도 함께 기록됩니다. 아래는 `matplotlib`을 사용하여 학습 및 검증의 손실과 정확도 그래프를 그리는 예시입니다.
```python
import matplotlib.pyplot as plt
history = model.fit(x, y, validation_split=0.25, epochs=50, batch_size=16, verbose=1)
# 학습 정확도와 검증 정확도를 그래프로 나타냅니다.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# 학습 손실값과 검증 손실값을 그래프로 나타냅니다.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
```
| keras-docs-ko/sources/visualization.md/0 | {
"file_path": "keras-docs-ko/sources/visualization.md",
"repo_id": "keras-docs-ko",
"token_count": 1533
} | 70 |
# 应用 Applications
Keras 的应用模块(keras.applications)提供了带有预训练权值的深度学习模型,这些模型可以用来进行预测、特征提取和微调(fine-tuning)。
当你初始化一个预训练模型时,会自动下载权重到 `~/.keras/models/` 目录下。
## 可用的模型
### 在 ImageNet 上预训练过的用于图像分类的模型:
- [Xception](#xception)
- [VGG16](#vgg16)
- [VGG19](#vgg19)
- [ResNet, ResNetV2](#resnet)
- [InceptionV3](#inceptionv3)
- [InceptionResNetV2](#inceptionresnetv2)
- [MobileNet](#mobilenet)
- [MobileNetV2](#mobilenetv2)
- [DenseNet](#densenet)
- [NASNet](#nasnet)
所有的这些架构都兼容所有的后端 (TensorFlow, Theano 和 CNTK),并且会在实例化时,根据 Keras 配置文件`〜/.keras/keras.json` 中设置的图像数据格式构建模型。举个例子,如果你设置 `image_data_format=channels_last`,则加载的模型将按照 TensorFlow 的维度顺序来构造,即「高度-宽度-深度」(Height-Width-Depth) 的顺序。
注意:
- 对于 `Keras < 2.2.0`,Xception 模型仅适用于 TensorFlow,因为它依赖于 `SeparableConvolution` 层。
- 对于 `Keras < 2.1.5`,MobileNet 模型仅适用于 TensorFlow,因为它依赖于 `DepthwiseConvolution` 层。
-----
## 图像分类模型的使用示例
### 使用 ResNet50 进行 ImageNet 分类
```python
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# 将结果解码为元组列表 (class, description, probability)
# (一个列表代表批次中的一个样本)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
```
### 使用 VGG16 提取特征
```python
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
model = VGG16(weights='imagenet', include_top=False)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
```
### 从VGG19 的任意中间层中抽取特征
```python
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output)
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block4_pool_features = model.predict(x)
```
### 在新类上微调 InceptionV3
```python
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras import backend as K
# 构建不带分类器的预训练模型
base_model = InceptionV3(weights='imagenet', include_top=False)
# 添加全局平均池化层
x = base_model.output
x = GlobalAveragePooling2D()(x)
# 添加一个全连接层
x = Dense(1024, activation='relu')(x)
# 添加一个分类器,假设我们有200个类
predictions = Dense(200, activation='softmax')(x)
# 构建我们需要训练的完整模型
model = Model(inputs=base_model.input, outputs=predictions)
# 首先,我们只训练顶部的几层(随机初始化的层)
# 锁住所有 InceptionV3 的卷积层
for layer in base_model.layers:
layer.trainable = False
# 编译模型(一定要在锁层以后操作)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# 在新的数据集上训练几代
model.fit_generator(...)
# 现在顶层应该训练好了,让我们开始微调 Inception V3 的卷积层。
# 我们会锁住底下的几层,然后训练其余的顶层。
# 让我们看看每一层的名字和层号,看看我们应该锁多少层呢:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# 我们选择训练最上面的两个 Inception block
# 也就是说锁住前面249层,然后放开之后的层。
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
# 我们需要重新编译模型,才能使上面的修改生效
# 让我们设置一个很低的学习率,使用 SGD 来微调
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')
# 我们继续训练模型,这次我们训练最后两个 Inception block
# 和两个全连接层
model.fit_generator(...)
```
### 通过自定义输入张量构建 InceptionV3
```python
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input
# 这也可能是不同的 Keras 模型或层的输出
input_tensor = Input(shape=(224, 224, 3)) # 假定 K.image_data_format() == 'channels_last'
model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=True)
```
-----
# 模型概览
| 模型 | 大小 | Top-1 准确率 | Top-5 准确率 | 参数数量 | 深度 |
| ----- | ----: | --------------: | --------------: | ----------: | -----: |
| [Xception](#xception) | 88 MB | 0.790 | 0.945 | 22,910,480 | 126 |
| [VGG16](#vgg16) | 528 MB | 0.713 | 0.901 | 138,357,544 | 23 |
| [VGG19](#vgg19) | 549 MB | 0.713 | 0.900 | 143,667,240 | 26 |
| [ResNet50](#resnet) | 98 MB | 0.749 | 0.921 | 25,636,712 | - |
| [ResNet101](#resnet) | 171 MB | 0.764 | 0.928 | 44,707,176 | - |
| [ResNet152](#resnet) | 232 MB | 0.766 | 0.931 | 60,419,944 | - |
| [ResNet50V2](#resnet) | 98 MB | 0.760 | 0.930 | 25,613,800 | - |
| [ResNet101V2](#resnet) | 171 MB | 0.772 | 0.938 | 44,675,560 | - |
| [ResNet152V2](#resnet) | 232 MB | 0.780 | 0.942 | 60,380,648 | - |
| [InceptionV3](#inceptionv3) | 92 MB | 0.779 | 0.937 | 23,851,784 | 159 |
| [InceptionResNetV2](#inceptionresnetv2) | 215 MB | 0.803 | 0.953 | 55,873,736 | 572 |
| [MobileNet](#mobilenet) | 16 MB | 0.704 | 0.895 | 4,253,864 | 88 |
| [MobileNetV2](#mobilenetv2) | 14 MB | 0.713 | 0.901 | 3,538,984 | 88 |
| [DenseNet121](#densenet) | 33 MB | 0.750 | 0.923 | 8,062,504 | 121 |
| [DenseNet169](#densenet) | 57 MB | 0.762 | 0.932 | 14,307,880 | 169 |
| [DenseNet201](#densenet) | 80 MB | 0.773 | 0.936 | 20,242,984 | 201 |
| [NASNetMobile](#nasnet) | 23 MB | 0.744 | 0.919 | 5,326,716 | - |
| [NASNetLarge](#nasnet) | 343 MB | 0.825 | 0.960 | 88,949,818 | - |
Top-1 准确率和 Top-5 准确率都是在 ImageNet 验证集上的结果。
Depth 表示网络的拓扑深度。这包括激活层,批标准化层等。
-----
## Xception
```python
keras.applications.xception.Xception(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
在 ImageNet 上预训练的 Xception V1 模型。
在 ImageNet 上,该模型取得了验证集 top1 0.790 和 top5 0.945 的准确率。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 299x299。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效(否则输入形状必须是 `(299, 299, 3)`,因为预训练模型是以这个大小训练的)。它必须拥有 3 个输入通道,且宽高必须不小于 71。例如 `(150, 150, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个 2D 张量。
- `'max'` 代表全局最大池化。
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)
__License__
预训练权值由我们自己训练而来,基于 MIT license 发布。
-----
## VGG16
```python
keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
VGG16 模型,权值由 ImageNet 训练而来。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last` (高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(244, 244, 3)`(对于 `channels_last` 数据格式),或者 `(3, 244, 244)`(对于 `channels_first` 数据格式)。它必须拥有 3 个输入通道,且宽高必须不小于 32。例如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化。
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556):如果在研究中使用了VGG,请引用该论文。
__License__
预训练权值由 [VGG at Oxford](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) 发布的预训练权值移植而来,基于 [Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)。
-----
## VGG19
```python
keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
VGG19 模型,权值由 ImageNet 训练而来。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(224, 224, 3)`(对于 `channels_last` 数据格式),或者 `(3, 224, 224)`(对于 `channels_first` 数据格式)。它必须拥有 3 个输入通道,且宽高必须不小于 32。例如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积层的输出,该输出是一个四维张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积层后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556):如果在研究中使用了VGG,请引用该论文。
__License__
预训练权值由 [VGG at Oxford](http://www.robots.ox.ac.uk/~vgg/research/very_deep/) 发布的预训练权值移植而来,基于 [Creative Commons Attribution License](https://creativecommons.org/licenses/by/4.0/)。
-----
## ResNet
```python
keras.applications.resnet.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet101(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet.ResNet152(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet50V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet101V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.resnet_v2.ResNet152V2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
ResNet, ResNetV2 模型,权值由 ImageNet 训练而来。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(244, 244, 3)`(对于 `channels_last` 数据格式),或者 `(3, 244, 244)`(对于 `channels_first` 数据格式)。它必须拥有 3 个输入通道,且宽高必须不小于 32。例如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- `ResNet`: [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
- `ResNetV2`: [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027)
__License__
预训练权值由以下提供:
- `ResNet`: [The original repository of Kaiming He](https://github.com/KaimingHe/deep-residual-networks) under the [MIT license](https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE).
- `ResNetV2`: [Facebook](https://github.com/facebook/fb.resnet.torch) under the [BSD license](https://github.com/facebook/fb.resnet.torch/blob/master/LICENSE).
-----
## InceptionV3
```python
keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
Inception V3 模型,权值由 ImageNet 训练而来。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 299x299。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(299, 299, 3)`(对于 `channels_last` 数据格式),或者 `(3, 299, 299)`(对于 `channels_first` 数据格式)。它必须拥有 3 个输入通道,且宽高必须不小于 139。例如 `(150, 150, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化。
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
__License__
预训练权值基于 [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)。
-----
## InceptionResNetV2
```python
keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
Inception-ResNet V2 模型,权值由 ImageNet 训练而来。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 299x299。
__参数__
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(299, 299, 3)`(对于 `channels_last` 数据格式),或者 `(3, 299, 299)`(对于 `channels_first` 数据格式)。它必须拥有 3 个输入通道,且宽高必须不小于 139。例如 `(150, 150, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
__License__
预训练权值基于 [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)。
-----
## MobileNet
```python
keras.applications.mobilenet.MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
在 ImageNet 上预训练的 MobileNet 模型。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则输入形状必须是 `(224, 224, 3)`(`channels_last` 格式)或 `(3, 224, 224)`(`channels_first` 格式)。它必须为 3 个输入通道,且宽高必须不小于 32,比如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __alpha__: 控制网络的宽度:
- 如果 `alpha` < 1.0,则同比例减少每层的滤波器个数。
- 如果 `alpha` > 1.0,则同比例增加每层的滤波器个数。
- 如果 `alpha` = 1,使用论文默认的滤波器个数
- __depth_multiplier__: depthwise 卷积的深度乘子(也称为分辨率乘子)
- __dropout__: dropout 概率
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(比如 `layers.Input()` 输出的 tensor)。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
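下面是一个使用 `alpha` 构建更小网络的简单示意(权值与输入尺寸均为示例假设):
```python
from keras.applications.mobilenet import MobileNet

# alpha=0.5:每层的滤波器个数减半,得到更小、更快的模型
model = MobileNet(input_shape=(224, 224, 3), alpha=0.5,
                  weights=None, classes=1000)
model.summary()
```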
__参考文献__
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
__License__
预训练权值基于 [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)。
-----
## DenseNet
```python
keras.applications.densenet.DenseNet121(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet169(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
keras.applications.densenet.DenseNet201(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
```
在 ImageNet 上预训练的 DenseNet 模型。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __blocks__: 四个 Dense Layers 的 block 数量。
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(比如 `layers.Input()` 输出的 tensor)。
- input_shape: 可选,输入尺寸元组,仅当 `include_top=False` 时有效(不然输入形状必须是 `(224, 224, 3)` (`channels_last` 格式)或 `(3, 224, 224)` (`channels_first` 格式),因为预训练模型是以这个大小训练的)。它必须为 3 个输入通道,且宽高必须不小于 32,比如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积层的输出,该输出是一个四维张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积层后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化.
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 对象。
__参考文献__
- [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) (CVPR 2017 Best Paper Award)
__License__
预训练权值基于 [BSD 3-clause License](https://github.com/liuzhuang13/DenseNet/blob/master/LICENSE)。
-----
## NASNet
```python
keras.applications.nasnet.NASNetLarge(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
keras.applications.nasnet.NASNetMobile(input_shape=None, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
在 ImageNet 上预训练的神经结构搜索网络模型(NASNet)。
NASNetLarge 模型默认的输入尺寸是 331x331,NASNetMobile 模型默认的输入尺寸是 224x224。
__参数__
- __input_shape__: 可选,输入尺寸元组,仅当 `include_top=False` 时有效,否则对于 NASNetMobile 模型来说,输入形状必须是 `(224, 224, 3)`(`channels_last` 格式)或 `(3, 224, 224)`(`channels_first` 格式),对于 NASNetLarge 来说,输入形状必须是 `(331, 331, 3)` (`channels_last` 格式)或 `(3, 331, 331)`(`channels_first` 格式)。它必须为 3 个输入通道,且宽高必须不小于 32,比如 `(200, 200, 3)` 是一个合法的输入尺寸。
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化, `'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(比如 `layers.Input()` 输出的 tensor)。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积层的输出,该输出是一个四维张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积层后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `Model` 实例。
__参考文献__
- [Learning Transferable Architectures for Scalable Image Recognition](https://arxiv.org/abs/1707.07012)
__License__
预训练权值基于 [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE)。
## MobileNetV2
```python
keras.applications.mobilenet_v2.MobileNetV2(input_shape=None, alpha=1.0, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000)
```
在 ImageNet 上预训练的 MobileNetV2 模型。
该模型可同时构建于 `channels_first` (通道,高度,宽度) 和 `channels_last`(高度,宽度,通道)两种输入维度顺序。
模型默认输入尺寸是 224x224。
__参数__
- __input_shape__: 可选尺寸元组,当你想使用输入图像分辨率不为 (224, 224, 3) 的模型时指定。它必须恰好有 3 个输入通道。如果你想从 input_tensor 推断 input_shape,也可以忽略这个选项。如果你选择同时包含 input_tensor 和 input_shape,那么如果匹配的话会使用 input_shape,如果不匹配会抛出错误。例如,`(160, 160, 3)` 是一个有效的值。
- __alpha__: 控制网络的宽度。这在 MobileNetV2 论文中被称作宽度乘子。
- 如果 `alpha` < 1.0,则同比例减少每层的滤波器个数。
- 如果 `alpha` > 1.0,则同比例增加每层的滤波器个数。
- 如果 `alpha` = 1,使用论文默认的滤波器个数。
- __depth_multiplier__: depthwise 卷积的深度乘子(也称为分辨率乘子)
- __include_top__: 是否包括顶层的全连接层。
- __weights__: `None` 代表随机初始化,`'imagenet'` 代表加载在 ImageNet 上预训练的权值。
- __input_tensor__: 可选,Keras tensor 作为模型的输入(即 `layers.Input()` 输出的 tensor)。
- __pooling__: 可选,当 `include_top` 为 `False` 时,该参数指定了特征提取时的池化方式。
- `None` 代表不池化,直接输出最后一层卷积块的输出,该输出是一个 4D 张量。
- `'avg'` 代表全局平均池化(GlobalAveragePooling2D),相当于在最后一层卷积块后面再加一层全局平均池化层,输出是一个二维张量。
- `'max'` 代表全局最大池化。
- __classes__: 可选,图片分类的类别数,仅当 `include_top` 为 `True` 并且不加载预训练权值时可用。
__返回__
一个 Keras `model` 实例。
__异常__
__ValueError__: 如果 `weights` 参数非法,或输入尺寸非法,或者当 weights='imagenet' 时 alpha、rows 的取值非法。
__参考文献__
- [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)
__License__
预训练权值基于 [Apache License](https://github.com/tensorflow/models/blob/master/LICENSE).
| keras-docs-zh/sources/applications.md/0 | {
"file_path": "keras-docs-zh/sources/applications.md",
"repo_id": "keras-docs-zh",
"token_count": 16578
} | 71 |
# Keras 实现的 Deep Dreaming。
按以下命令执行该脚本:
```python
python deep_dream.py path_to_your_base_image.jpg prefix_for_results
```
例如:
```python
python deep_dream.py img/mypic.jpg results/dream
```
```python
from __future__ import print_function
from keras.preprocessing.image import load_img, save_img, img_to_array
import numpy as np
import scipy
import argparse
from keras.applications import inception_v3
from keras import backend as K
parser = argparse.ArgumentParser(description='Deep Dreams with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
args = parser.parse_args()
base_image_path = args.base_image_path
result_prefix = args.result_prefix
# 这些是我们尝试最大化激活的层的名称,以及它们在我们试图最大化的最终损失中的权重。
# 你可以调整这些设置以获得新的视觉效果。
settings = {
'features': {
'mixed2': 0.2,
'mixed3': 0.5,
'mixed4': 2.,
'mixed5': 1.5,
},
}
def preprocess_image(image_path):
# 用于打开,调整图片大小并将图片格式化为适当的张量的实用函数。
img = load_img(image_path)
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
img = inception_v3.preprocess_input(img)
return img
def deprocess_image(x):
# 函数将张量转换为有效图像的实用函数。
if K.image_data_format() == 'channels_first':
x = x.reshape((3, x.shape[2], x.shape[3]))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((x.shape[1], x.shape[2], 3))
x /= 2.
x += 0.5
x *= 255.
x = np.clip(x, 0, 255).astype('uint8')
return x
K.set_learning_phase(0)
# 使用我们的占位符构建 InceptionV3 网络。
# 该模型将加载预先训练的 ImageNet 权重。
model = inception_v3.InceptionV3(weights='imagenet',
include_top=False)
dream = model.input
print('Model loaded.')
# 获取每个『关键』层的符号输出(我们为它们指定了唯一的名称)。
layer_dict = dict([(layer.name, layer) for layer in model.layers])
# 定义损失。
loss = K.variable(0.)
for layer_name in settings['features']:
# 将层特征的 L2 范数添加到损失中。
if layer_name not in layer_dict:
raise ValueError('Layer ' + layer_name + ' not found in model.')
coeff = settings['features'][layer_name]
x = layer_dict[layer_name].output
# 我们通过仅涉及损失中的非边界像素来避免边界伪影。
scaling = K.prod(K.cast(K.shape(x), 'float32'))
if K.image_data_format() == 'channels_first':
loss = loss + coeff * K.sum(K.square(x[:, :, 2: -2, 2: -2])) / scaling
else:
loss = loss + coeff * K.sum(K.square(x[:, 2: -2, 2: -2, :])) / scaling
# 计算 dream 即损失的梯度。
grads = K.gradients(loss, dream)[0]
# 标准化梯度。
grads /= K.maximum(K.mean(K.abs(grads)), K.epsilon())
# 设置函数,以检索给定输入图像的损失和梯度的值。
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)
def eval_loss_and_grads(x):
outs = fetch_loss_and_grads([x])
loss_value = outs[0]
grad_values = outs[1]
return loss_value, grad_values
def resize_img(img, size):
img = np.copy(img)
if K.image_data_format() == 'channels_first':
factors = (1, 1,
float(size[0]) / img.shape[2],
float(size[1]) / img.shape[3])
else:
factors = (1,
float(size[0]) / img.shape[1],
float(size[1]) / img.shape[2],
1)
return scipy.ndimage.zoom(img, factors, order=1)
def gradient_ascent(x, iterations, step, max_loss=None):
for i in range(iterations):
loss_value, grad_values = eval_loss_and_grads(x)
if max_loss is not None and loss_value > max_loss:
break
print('..Loss value at', i, ':', loss_value)
x += step * grad_values
return x
"""Process:
- 载入原始图像。
- 定义一系列预处理规模 (即图像尺寸),从最小到最大。
- 将原始图像调整为最小尺寸。
- 对于每个规模,从最小的(即当前的)开始:
- 执行梯度提升
- 将图像放大到下一个比例
- 重新投射在提升时丢失的细节
- 当我们回到原始大小时停止。
为了获得在放大过程中丢失的细节,我们只需将原始图像缩小,放大,然后将结果与(调整大小的)原始图像进行比较即可。
"""
# 把玩这些超参数也可以让你获得新的效果
step = 0.01 # 梯度提升步长
num_octave = 3 # 运行梯度提升的规模数
octave_scale = 1.4 # 规模之间的比
iterations = 20 # 每个规模的提升步数
max_loss = 10.
img = preprocess_image(base_image_path)
if K.image_data_format() == 'channels_first':
original_shape = img.shape[2:]
else:
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
successive_shapes.append(shape)
successive_shapes = successive_shapes[::-1]
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])
for shape in successive_shapes:
print('Processing image shape', shape)
img = resize_img(img, shape)
img = gradient_ascent(img,
iterations=iterations,
step=step,
max_loss=max_loss)
upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
same_size_original = resize_img(original_img, shape)
lost_detail = same_size_original - upscaled_shrunk_original_img
img += lost_detail
shrunk_original_img = resize_img(original_img, shape)
save_img(result_prefix + '.png', deprocess_image(np.copy(img)))
```
| keras-docs-zh/sources/examples/deep_dream.md/0 | {
"file_path": "keras-docs-zh/sources/examples/deep_dream.md",
"repo_id": "keras-docs-zh",
"token_count": 3124
} | 72 |
# 在 MNIST 数据集上训练简单的深度 NN。
20 个轮次后达到 98.40% 的测试准确度
(参数调整有*很大*的空间)。
在 K520 GPU 上,每个轮次 2 秒。
```python
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
batch_size = 128
num_classes = 10
epochs = 20
# 数据,分为训练集和测试集
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# 将类向量转换为二进制类矩阵
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
``` | keras-docs-zh/sources/examples/mnist_mlp.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_mlp.md",
"repo_id": "keras-docs-zh",
"token_count": 790
} | 73 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L200)</span>
### Add
```python
keras.layers.Add()
```
计算输入张量列表的和。
它接受一个张量的列表,
所有的张量必须有相同的输入尺寸,
然后返回一个张量(和输入张量尺寸相同)。
__示例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
# 相当于 added = keras.layers.add([x1, x2])
added = keras.layers.Add()([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L231)</span>
### Subtract
```python
keras.layers.Subtract()
```
计算两个输入张量的差。
它接受一个长度为 2 的张量列表,
两个张量必须有相同的尺寸,然后返回一个值为 (inputs[0] - inputs[1]) 的张量,
输出张量和输入张量尺寸相同。
__示例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
# 相当于 subtracted = keras.layers.subtract([x1, x2])
subtracted = keras.layers.Subtract()([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L268)</span>
### Multiply
```python
keras.layers.Multiply()
```
计算输入张量列表的(逐元素间的)乘积。
它接受一个张量的列表,
所有的张量必须有相同的输入尺寸,
然后返回一个张量(和输入张量尺寸相同)。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L283)</span>
### Average
```python
keras.layers.Average()
```
计算输入张量列表的平均值。
它接受一个张量的列表,
所有的张量必须有相同的输入尺寸,
然后返回一个张量(和输入张量尺寸相同)。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L298)</span>
### Maximum
```python
keras.layers.Maximum()
```
计算输入张量列表的(逐元素间的)最大值。
它接受一个张量的列表,
所有的张量必须有相同的输入尺寸,
然后返回一个张量(和输入张量尺寸相同)。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L313)</span>
### Minimum
```python
keras.layers.Minimum()
```
计算输入张量列表的(逐元素间的)最小值。
它接受一个张量的列表,
所有的张量必须有相同的输入尺寸,
然后返回一个张量(和输入张量尺寸相同)。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L320)</span>
### Concatenate
```python
keras.layers.Concatenate(axis=-1)
```
连接一个输入张量的列表。
它接受一个张量的列表,
除了连接轴之外,其他的尺寸都必须相同,
然后返回一个由所有输入张量连接起来的输出张量。
__参数__
- __axis__: 连接的轴。
- __**kwargs__: 层关键字参数。
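__示例__
下面是一个简单的示意(输入尺寸为示例假设):
```python
import keras

input1 = keras.layers.Input(shape=(16,))
input2 = keras.layers.Input(shape=(32,))
# 沿最后一个轴连接,输出尺寸为 (None, 48)
concatenated = keras.layers.Concatenate(axis=-1)([input1, input2])
out = keras.layers.Dense(4)(concatenated)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```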
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L416)</span>
### Dot
```python
keras.layers.Dot(axes, normalize=False)
```
计算两个张量之间样本的点积。
例如,如果作用于输入尺寸为 `(batch_size, n)` 的两个张量 `a` 和 `b`,
那么输出结果就会是尺寸为 `(batch_size, 1)` 的一个张量。
在这个张量中,每一个条目 `i` 是 `a[i]` 和 `b[i]` 之间的点积。
__参数__
- __axes__: 整数或者整数元组,
一个或者几个进行点积的轴。
- __normalize__: 是否在点积之前对即将进行点积的轴进行 L2 标准化。
如果设置成 `True`,那么输出两个样本之间的余弦相似值。
- __**kwargs__: 层关键字参数。
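__示例__
下面是一个简单的示意(输入尺寸为示例假设):
```python
import keras

x1 = keras.layers.Input(shape=(10,))
x2 = keras.layers.Input(shape=(10,))
# 沿特征轴(第 1 个轴)做点积,输出尺寸为 (None, 1)
dotted = keras.layers.Dot(axes=1)([x1, x2])
model = keras.models.Model(inputs=[x1, x2], outputs=dotted)
```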
----
### add
```python
keras.layers.add(inputs)
```
`Add` 层的函数式接口。
__参数__
- __inputs__: 一个输入张量的列表(列表大小至少为 2)。
- __**kwargs__: 层关键字参数。
__返回__
一个张量,所有输入张量的和。
__示例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
added = keras.layers.add([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
### subtract
```python
keras.layers.subtract(inputs)
```
`Subtract` 层的函数式接口。
__参数__
- __inputs__: 一个列表的输入张量(列表大小准确为 2)。
- __**kwargs__: 层的关键字参数。
__返回__
一个张量,两个输入张量的差。
__示例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
subtracted = keras.layers.subtract([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
### multiply
```python
keras.layers.multiply(inputs)
```
Functional interface to the `Multiply` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the element-wise product of the inputs.
----
### average
```python
keras.layers.average(inputs)
```
Functional interface to the `Average` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the average of the inputs.
----
### maximum
```python
keras.layers.maximum(inputs)
```
Functional interface to the `Maximum` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the element-wise maximum of the inputs.
----
### minimum
```python
keras.layers.minimum(inputs)
```
Functional interface to the `Minimum` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the element-wise minimum of the inputs.
----
### concatenate
```python
keras.layers.concatenate(inputs, axis=-1)
```
Functional interface to the `Concatenate` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __axis__: Concatenation axis.
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the concatenation of the inputs along axis `axis`.
----
### dot
```python
keras.layers.dot(inputs, axes, normalize=False)
```
Functional interface to the `Dot` layer.
__Arguments__
- __inputs__: A list of input tensors (at least 2).
- __axes__: Integer or tuple of integers,
    axis or axes along which to take the dot product.
- __normalize__: Whether to L2-normalize samples along the
    dot product axis before taking the dot product.
    If set to `True`, the output of the dot product
    is the cosine proximity between the two samples.
- __**kwargs__: Standard layer keyword arguments.
__Returns__
A tensor, the dot product of the samples from the inputs.
| keras-docs-zh/sources/layers/merge.md/0 | {
"file_path": "keras-docs-zh/sources/layers/merge.md",
"repo_id": "keras-docs-zh",
"token_count": 4364
} | 74 |
## Usage of regularizers
Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are incorporated into the loss function that the network optimizes.
The penalties are applied on a per-layer basis. The exact API depends on the layer, but the `Dense`, `Conv1D`, `Conv2D` and `Conv3D` layers share a unified API.
These layers expose 3 keyword arguments:
- `kernel_regularizer`: instance of `keras.regularizers.Regularizer`
- `bias_regularizer`: instance of `keras.regularizers.Regularizer`
- `activity_regularizer`: instance of `keras.regularizers.Regularizer`
## Example
```python
from keras import regularizers
model.add(Dense(64, input_dim=64,
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l1(0.01)))
```
## Available regularizers
```python
keras.regularizers.l1(0.)
keras.regularizers.l2(0.)
keras.regularizers.l1_l2(l1=0.01, l2=0.01)
```
## Developing new regularizers
Any function that takes in a weight matrix and returns a loss contribution tensor can be used as a regularizer, e.g.:
```python
from keras import backend as K
def l1_reg(weight_matrix):
return 0.01 * K.sum(K.abs(weight_matrix))
model.add(Dense(64, input_dim=64,
kernel_regularizer=l1_reg))
```
Alternatively, you can write your regularizers in an object-oriented way; see the [keras/regularizers.py](https://github.com/keras-team/keras/blob/master/keras/regularizers.py) module for examples, or the sketch below.
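For illustration, a minimal object-oriented version of the `l1_reg` function above might look like the following sketch. The class name and the `strength` argument are hypothetical, and `model`/`Dense` are assumed to come from the earlier examples; only `__call__` is strictly required, while `get_config` enables serialization:
```python
from keras import backend as K
from keras.regularizers import Regularizer
class L1Strength(Regularizer):
    """Hypothetical object-oriented equivalent of the l1_reg function above."""
    def __init__(self, strength=0.01):
        self.strength = strength
    def __call__(self, weight_matrix):
        # Loss contribution: scaled sum of absolute weight values.
        return self.strength * K.sum(K.abs(weight_matrix))
    def get_config(self):
        # Allows the regularizer to be serialized together with the model.
        return {'strength': self.strength}
model.add(Dense(64, input_dim=64, kernel_regularizer=L1Strength(0.01)))
```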
| keras-docs-zh/sources/regularizers.md/0 | {
"file_path": "keras-docs-zh/sources/regularizers.md",
"repo_id": "keras-docs-zh",
"token_count": 817
} | 75 |
# Call for code example contributions
This is a constantly-updated list of code examples that we're currently interested in.
If you're not sure whether your idea would make a good code example, please ask us first!
---
## Structured data examples featuring Keras Preprocessing Layers (KPL)
E.g. feature hashing, feature indexing with handling of missing values,
mixing numerical, categorical, and text features, doing feature engineering with KPL, etc.
---
## Transformer model for MIDI music generation
[Reference TF/Keras implementation](https://github.com/jason9693/MusicTransformer-tensorflow2.0)
---
## Text-to-image
A text-to-image diffusion model in the style of Imagen, using a frozen BERT encoder from KerasNLP
and a multi-stage diffusion model.
---
## Text-to-speech
[Example TF2/Keras implementation](https://github.com/dathudeptrai/TensorflowTTS)
---
## Learning to rank
[Reference Kaggle competition](https://www.kaggle.com/c/wm-2017-learning-to-rank)
---
## DETR: End-to-End Object Detection with Transformers
- [Reference implementation](https://github.com/facebookresearch/detr)
- [TF/Keras implementation](https://github.com/Visual-Behavior/detr-tensorflow)
---
## 3D image segmentation
---
## Question answering from structured knowledge base and freeform documents
---
## Instance segmentation
- [Tensorflow-YOLACT](https://github.com/leohsuofnthu/Tensorflow-YOLACT)
- [Additional references](https://www.kaggle.com/c/sartorius-cell-instance-segmentation/discussion/278883#1546104)
---
## EEG & MEG signal classification
---
## Text summarization
---
## Audio track separation
---
## Audio style transfer
---
## Timeseries imputation
---
## Customer lifetime value prediction
---
## Keras reproducibility recipes
---
## Standalone Mixture-of-Experts (MoE) layer
MoE layers provide a flexible way to scale deep models to train on larger datasets. The aim of this example should be to show
how to replace regular layers (such as `Dense`, `Conv2D`) with compatible MoE layers. A bare-bones sketch of the idea is included after the references below.
References:
* A relevant paper on MoE: https://arxiv.org/abs/1701.06538
* [Switch Transformers on keras.io](https://keras.io/examples/nlp/text_classification_with_switch_transformer/)
* [Keras implementation of Dense and Conv2D MoE layers](https://github.com/eminorhan/mixture-of-experts)
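For orientation only, a dense (non-sparse) sketch of the idea is shown below. The class name, the softmax gate, and the expert count are illustrative assumptions, not the sparse top-k routing used in the papers above:
```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class DenseMoE(layers.Layer):
    """Hypothetical drop-in replacement for Dense: a softmax gate mixes
    the outputs of several Dense 'experts' (dense routing, not top-k)."""
    def __init__(self, units, num_experts=4, **kwargs):
        super().__init__(**kwargs)
        self.experts = [layers.Dense(units) for _ in range(num_experts)]
        self.gate = layers.Dense(num_experts, activation="softmax")
    def call(self, inputs):
        gate_scores = self.gate(inputs)  # (batch, num_experts)
        # (batch, units, num_experts): every expert applied to every example.
        expert_outputs = tf.stack([expert(inputs) for expert in self.experts], axis=-1)
        # Per-example weighted sum of the expert outputs.
        return tf.reduce_sum(expert_outputs * gate_scores[:, tf.newaxis, :], axis=-1)
# Usage: swap layers.Dense(64) for DenseMoE(64) in an otherwise unchanged model.
model = keras.Sequential([keras.Input(shape=(16,)), DenseMoE(64), layers.Dense(1)])
```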
---
## Guide to report the efficiency of a Keras model
It's often important to report the efficiency of a model. But what factors should be included when reporting the efficiency
of a deep learning model? [The Efficiency Misnomer](https://openreview.net/forum?id=iulEMLYh1uR) paper discusses this thoroughly and provides guidelines for practitioners on how to properly report model efficiency.
The objectives of this guide will include the following:
* What factors to consider when reporting model efficiency?
* How to calculate certain metrics like FLOPS, number of examples a model can process per second (both in training and inference mode), etc.? A minimal throughput-measurement sketch is included below.
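As a rough starting point for the throughput part of the second question, examples-per-second can be measured with nothing beyond the standard Keras API. The toy model, input shape, and batch size below are assumptions chosen purely for illustration:
```python
import time
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
# Toy model and synthetic data, purely for illustration.
model = keras.Sequential(
    [keras.Input(shape=(224, 224, 3)), layers.GlobalAveragePooling2D(), layers.Dense(10)]
)
batch_size = 32
data = np.random.rand(batch_size, 224, 224, 3).astype("float32")
# Warm-up call so one-time graph tracing is not counted.
model.predict(data, verbose=0)
num_batches = 50
start = time.time()
for _ in range(num_batches):
    model.predict(data, verbose=0)
elapsed = time.time() - start
print(f"Inference throughput: {num_batches * batch_size / elapsed:.1f} examples/sec")
```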
| keras-io/call_for_contributions.md/0 | {
"file_path": "keras-io/call_for_contributions.md",
"repo_id": "keras-io",
"token_count": 856
} | 76 |
# WGAN-GP overriding `Model.train_step`
**Author:** [A_K_Nain](https://twitter.com/A_K_Nain)<br>
**Date created:** 2020/05/9<br>
**Last modified:** 2023/08/3<br>
**Description:** Implementation of Wasserstein GAN with Gradient Penalty.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/wgan_gp.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/wgan_gp.py)
---
## Wasserstein GAN (WGAN) with Gradient Penalty (GP)
The original [Wasserstein GAN](https://arxiv.org/abs/1701.07875) leverages the
Wasserstein distance to produce a value function that has better theoretical
properties than the value function used in the original GAN paper. WGAN requires
that the discriminator (aka the critic) lie within the space of 1-Lipschitz
functions. The authors proposed the idea of weight clipping to achieve this
constraint. Though weight clipping works, it can be a problematic way to enforce
the 1-Lipschitz constraint and can cause undesirable behavior, e.g. a very deep WGAN
discriminator (critic) often fails to converge.
The [WGAN-GP](https://arxiv.org/abs/1704.00028) method proposes an
alternative to weight clipping to ensure smooth training. Instead of clipping
the weights, the authors proposed a "gradient penalty" by adding a loss term
that keeps the L2 norm of the discriminator gradients close to 1.
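Written out (this is a restatement of the critic objective from the WGAN-GP paper, not code from this example), the critic is trained to minimize:
```latex
L = \mathbb{E}_{\tilde{x} \sim \mathbb{P}_g}\big[D(\tilde{x})\big]
    - \mathbb{E}_{x \sim \mathbb{P}_r}\big[D(x)\big]
    + \lambda \, \mathbb{E}_{\hat{x} \sim \mathbb{P}_{\hat{x}}}\big[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1)^2\big]
```
where `D` is the critic, `P_r` and `P_g` are the real and generated distributions, `λ` is the penalty weight (the `gp_weight` argument used later in this example), and `x̂` is sampled uniformly along straight lines between pairs of real and generated samples; that last expectation is exactly what the `gradient_penalty` method below implements.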
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import tensorflow as tf
from keras import layers
```
---
## Prepare the Fashion-MNIST data
To demonstrate how to train WGAN-GP, we will be using the
[Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset. Each
sample in this dataset is a 28x28 grayscale image associated with a label from
10 classes (e.g. trouser, pullover, sneaker, etc.)
```python
IMG_SHAPE = (28, 28, 1)
BATCH_SIZE = 512
# Size of the noise vector
noise_dim = 128
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
print(f"Number of examples: {len(train_images)}")
print(f"Shape of the images in the dataset: {train_images.shape[1:]}")
# Reshape each sample to (28, 28, 1) and normalize the pixel values in the [-1, 1] range
train_images = train_images.reshape(train_images.shape[0], *IMG_SHAPE).astype("float32")
train_images = (train_images - 127.5) / 127.5
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
29515/29515 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26421880/26421880 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
5148/5148 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4422102/4422102 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Number of examples: 60000
Shape of the images in the dataset: (28, 28)
```
</div>
---
## Create the discriminator (the critic in the original WGAN)
The samples in the dataset have a (28, 28, 1) shape. Because we will be
using strided convolutions, this can result in a shape with odd dimensions.
For example,
`(28, 28) -> Conv_s2 -> (14, 14) -> Conv_s2 -> (7, 7) -> Conv_s2 -> (3, 3)`.
While performing upsampling in the generator part of the network, we won't get
the same input shape as the original images if we aren't careful. To avoid this,
we will do something much simpler:
- In the discriminator: "zero pad" the input to change the shape to `(32, 32, 1)`
for each sample; and
- In the generator: crop the final output to match the input shape.
```python
def conv_block(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
use_bias=True,
use_bn=False,
use_dropout=False,
drop_value=0.5,
):
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_discriminator_model():
img_input = layers.Input(shape=IMG_SHAPE)
# Zero pad the input to make the input images size to (32, 32, 1).
x = layers.ZeroPadding2D((2, 2))(img_input)
x = conv_block(
x,
64,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
use_bias=True,
activation=layers.LeakyReLU(0.2),
use_dropout=False,
drop_value=0.3,
)
x = conv_block(
x,
128,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=True,
drop_value=0.3,
)
x = conv_block(
x,
256,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=True,
drop_value=0.3,
)
x = conv_block(
x,
512,
kernel_size=(5, 5),
strides=(2, 2),
use_bn=False,
activation=layers.LeakyReLU(0.2),
use_bias=True,
use_dropout=False,
drop_value=0.3,
)
x = layers.Flatten()(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1)(x)
d_model = keras.models.Model(img_input, x, name="discriminator")
return d_model
d_model = get_discriminator_model()
d_model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "discriminator"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ zero_padding2d (<span style="color: #0087ff; text-decoration-color: #0087ff">ZeroPadding2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,664</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">204,928</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">819,456</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,277,312</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,049</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">4,305,409</span> (16.42 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">4,305,409</span> (16.42 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Create the generator
```python
def upsample_block(
x,
filters,
activation,
kernel_size=(3, 3),
strides=(1, 1),
up_size=(2, 2),
padding="same",
use_bn=False,
use_bias=True,
use_dropout=False,
drop_value=0.3,
):
x = layers.UpSampling2D(up_size)(x)
x = layers.Conv2D(
filters, kernel_size, strides=strides, padding=padding, use_bias=use_bias
)(x)
if use_bn:
x = layers.BatchNormalization()(x)
if activation:
x = activation(x)
if use_dropout:
x = layers.Dropout(drop_value)(x)
return x
def get_generator_model():
noise = layers.Input(shape=(noise_dim,))
x = layers.Dense(4 * 4 * 256, use_bias=False)(noise)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = layers.Reshape((4, 4, 256))(x)
x = upsample_block(
x,
128,
layers.LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
x = upsample_block(
x,
64,
layers.LeakyReLU(0.2),
strides=(1, 1),
use_bias=False,
use_bn=True,
padding="same",
use_dropout=False,
)
x = upsample_block(
x, 1, layers.Activation("tanh"), strides=(1, 1), use_bias=False, use_bn=True
)
# At this point, we have an output which has the same shape as the input, (32, 32, 1).
# We will use a Cropping2D layer to make it (28, 28, 1).
x = layers.Cropping2D((2, 2))(x)
g_model = keras.models.Model(noise, x, name="generator")
return g_model
g_model = get_generator_model()
g_model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "generator"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4096</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">524,288</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4096</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,384</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4096</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ reshape (<span style="color: #0087ff; text-decoration-color: #0087ff">Reshape</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ up_sampling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">294,912</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ up_sampling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">73,728</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ up_sampling2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">576</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ activation (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ cropping2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Cropping2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">910,660</span> (3.47 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">902,082</span> (3.44 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">8,578</span> (33.51 KB)
</pre>
---
## Create the WGAN-GP model
Now that we have defined our generator and discriminator, it's time to implement
the WGAN-GP model. We will also override the `train_step` for training.
```python
class WGAN(keras.Model):
def __init__(
self,
discriminator,
generator,
latent_dim,
discriminator_extra_steps=3,
gp_weight=10.0,
):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.d_steps = discriminator_extra_steps
self.gp_weight = gp_weight
def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.d_loss_fn = d_loss_fn
self.g_loss_fn = g_loss_fn
def gradient_penalty(self, batch_size, real_images, fake_images):
"""Calculates the gradient penalty.
This loss is calculated on an interpolated image
and added to the discriminator loss.
"""
# Get the interpolated image
alpha = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
diff = fake_images - real_images
interpolated = real_images + alpha * diff
with tf.GradientTape() as gp_tape:
gp_tape.watch(interpolated)
# 1. Get the discriminator output for this interpolated image.
pred = self.discriminator(interpolated, training=True)
# 2. Calculate the gradients w.r.t to this interpolated image.
grads = gp_tape.gradient(pred, [interpolated])[0]
# 3. Calculate the norm of the gradients.
norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
gp = tf.reduce_mean((norm - 1.0) ** 2)
return gp
def train_step(self, real_images):
if isinstance(real_images, tuple):
real_images = real_images[0]
# Get the batch size
batch_size = tf.shape(real_images)[0]
# For each batch, we are going to perform the
# following steps as laid out in the original paper:
# 1. Train the generator and get the generator loss
# 2. Train the discriminator and get the discriminator loss
# 3. Calculate the gradient penalty
# 4. Multiply this gradient penalty with a constant weight factor
# 5. Add the gradient penalty to the discriminator loss
# 6. Return the generator and discriminator losses as a loss dictionary
# Train the discriminator first. The original paper recommends training
# the discriminator for `x` more steps (typically 5) as compared to
        # one step of the generator. Here we will train it for 3 extra steps
        # (instead of the 5 used in the paper) to reduce the training time.
for i in range(self.d_steps):
# Get the latent vector
random_latent_vectors = tf.random.normal(
shape=(batch_size, self.latent_dim)
)
with tf.GradientTape() as tape:
# Generate fake images from the latent vector
fake_images = self.generator(random_latent_vectors, training=True)
# Get the logits for the fake images
fake_logits = self.discriminator(fake_images, training=True)
# Get the logits for the real images
real_logits = self.discriminator(real_images, training=True)
# Calculate the discriminator loss using the fake and real image logits
d_cost = self.d_loss_fn(real_img=real_logits, fake_img=fake_logits)
# Calculate the gradient penalty
gp = self.gradient_penalty(batch_size, real_images, fake_images)
# Add the gradient penalty to the original discriminator loss
d_loss = d_cost + gp * self.gp_weight
# Get the gradients w.r.t the discriminator loss
d_gradient = tape.gradient(d_loss, self.discriminator.trainable_variables)
# Update the weights of the discriminator using the discriminator optimizer
self.d_optimizer.apply_gradients(
zip(d_gradient, self.discriminator.trainable_variables)
)
# Train the generator
# Get the latent vector
random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))
with tf.GradientTape() as tape:
# Generate fake images using the generator
generated_images = self.generator(random_latent_vectors, training=True)
# Get the discriminator logits for fake images
gen_img_logits = self.discriminator(generated_images, training=True)
# Calculate the generator loss
g_loss = self.g_loss_fn(gen_img_logits)
# Get the gradients w.r.t the generator loss
gen_gradient = tape.gradient(g_loss, self.generator.trainable_variables)
# Update the weights of the generator using the generator optimizer
self.g_optimizer.apply_gradients(
zip(gen_gradient, self.generator.trainable_variables)
)
return {"d_loss": d_loss, "g_loss": g_loss}
```
---
## Create a Keras callback that periodically saves generated images
```python
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=6, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = tf.random.normal(shape=(self.num_img, self.latent_dim))
generated_images = self.model.generator(random_latent_vectors)
generated_images = (generated_images * 127.5) + 127.5
for i in range(self.num_img):
img = generated_images[i].numpy()
img = keras.utils.array_to_img(img)
img.save("generated_img_{i}_{epoch}.png".format(i=i, epoch=epoch))
```
---
## Train the end-to-end model
```python
# Instantiate the optimizer for both networks
# (learning_rate=0.0002, beta_1=0.5 are recommended)
generator_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
discriminator_optimizer = keras.optimizers.Adam(
learning_rate=0.0002, beta_1=0.5, beta_2=0.9
)
# Define the loss functions for the discriminator,
# which should be (fake_loss - real_loss).
# We will add the gradient penalty later to this loss function.
def discriminator_loss(real_img, fake_img):
real_loss = tf.reduce_mean(real_img)
fake_loss = tf.reduce_mean(fake_img)
return fake_loss - real_loss
# Define the loss functions for the generator.
def generator_loss(fake_img):
return -tf.reduce_mean(fake_img)
# Set the number of epochs for training.
epochs = 20
# Instantiate the custom `GANMonitor` Keras callback.
cbk = GANMonitor(num_img=3, latent_dim=noise_dim)
# Get the wgan model
wgan = WGAN(
discriminator=d_model,
generator=g_model,
latent_dim=noise_dim,
discriminator_extra_steps=3,
)
# Compile the wgan model
wgan.compile(
d_optimizer=discriminator_optimizer,
g_optimizer=generator_optimizer,
g_loss_fn=generator_loss,
d_loss_fn=discriminator_loss,
)
# Start training
wgan.fit(train_images, batch_size=BATCH_SIZE, epochs=epochs, callbacks=[cbk])
```
<div class="k-default-codeblock">
```
Epoch 1/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 79s 345ms/step - d_loss: -7.7597 - g_loss: -17.2858 - loss: 0.0000e+00
Epoch 2/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 118ms/step - d_loss: -7.0841 - g_loss: -13.8542 - loss: 0.0000e+00
Epoch 3/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 118ms/step - d_loss: -6.1011 - g_loss: -13.2763 - loss: 0.0000e+00
Epoch 4/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 119ms/step - d_loss: -5.5292 - g_loss: -13.3122 - loss: 0.0000e+00
Epoch 5/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 119ms/step - d_loss: -5.1012 - g_loss: -12.1395 - loss: 0.0000e+00
Epoch 6/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 119ms/step - d_loss: -4.7557 - g_loss: -11.2559 - loss: 0.0000e+00
Epoch 7/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 119ms/step - d_loss: -4.4727 - g_loss: -10.3075 - loss: 0.0000e+00
Epoch 8/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 119ms/step - d_loss: -4.2056 - g_loss: -10.0340 - loss: 0.0000e+00
Epoch 9/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -4.0116 - g_loss: -9.9283 - loss: 0.0000e+00
Epoch 10/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.8050 - g_loss: -9.7392 - loss: 0.0000e+00
Epoch 11/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.6608 - g_loss: -9.4686 - loss: 0.0000e+00
Epoch 12/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 121ms/step - d_loss: -3.4623 - g_loss: -8.9601 - loss: 0.0000e+00
Epoch 13/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.3659 - g_loss: -8.4620 - loss: 0.0000e+00
Epoch 14/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.2486 - g_loss: -7.9598 - loss: 0.0000e+00
Epoch 15/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.1436 - g_loss: -7.5392 - loss: 0.0000e+00
Epoch 16/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -3.0370 - g_loss: -7.3694 - loss: 0.0000e+00
Epoch 17/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -2.9256 - g_loss: -7.6105 - loss: 0.0000e+00
Epoch 18/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -2.8976 - g_loss: -6.5240 - loss: 0.0000e+00
Epoch 19/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -2.7944 - g_loss: -6.6281 - loss: 0.0000e+00
Epoch 20/20
118/118 ━━━━━━━━━━━━━━━━━━━━ 14s 120ms/step - d_loss: -2.7175 - g_loss: -6.5900 - loss: 0.0000e+00
<keras.src.callbacks.history.History at 0x7fc763a8e950>
```
</div>
Display the last generated images:
```python
from IPython.display import Image, display
display(Image("generated_img_0_19.png"))
display(Image("generated_img_1_19.png"))
display(Image("generated_img_2_19.png"))
```



| keras-io/examples/generative/md/wgan_gp.md/0 | {
"file_path": "keras-io/examples/generative/md/wgan_gp.md",
"repo_id": "keras-io",
"token_count": 16518
} | 77 |
<jupyter_start><jupyter_text>Probabilistic Bayesian Neural Networks**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/01/15**Last modified:** 2021/01/15**Description:** Building probabilistic Bayesian neural network models with TensorFlow Probability. IntroductionTaking a probabilistic approach to deep learning allows us to account for *uncertainty*,so that models can assign lower levels of confidence to incorrect predictions.Sources of uncertainty can be found in the data, due to measurement error ornoise in the labels, or the model, due to insufficient data availability forthe model to learn effectively.This example demonstrates how to build basic probabilistic Bayesian neural networksto account for these two types of uncertainty.We use the [TensorFlow Probability](https://www.tensorflow.org/probability) library,which is compatible with the Keras API.This example requires TensorFlow 2.3 or higher.You can install Tensorflow Probability using the following command:```pythonpip install tensorflow-probability``` The datasetWe use the [Wine Quality](https://archive.ics.uci.edu/ml/datasets/wine+quality)dataset, which is available in the [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/wine_quality).We use the red wine subset, which contains 4,898 examples.The dataset has 11 numerical physicochemical features of the wine, and the taskis to predict the wine quality, which is a score between 0 and 10.In this example, we treat this as a regression task.You can install TensorFlow Datasets using the following command:```pythonpip install tensorflow-datasets``` Setup<jupyter_code>import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import tensorflow_probability as tfp<jupyter_output><empty_output><jupyter_text>Create training and evaluation datasetsHere, we load the `wine_quality` dataset using `tfds.load()`, and we convertthe target feature to float. Then, we shuffle the dataset and split it intotraining and test sets. We take the first `train_size` examples as the trainsplit, and the rest as the test split.<jupyter_code>def get_train_and_test_splits(train_size, batch_size=1):
    # We prefetch with a buffer the same size as the dataset because the dataset
# is very small and fits into memory.
dataset = (
tfds.load(name="wine_quality", as_supervised=True, split="train")
.map(lambda x, y: (x, tf.cast(y, tf.float32)))
.prefetch(buffer_size=dataset_size)
.cache()
)
# We shuffle with a buffer the same size as the dataset.
train_dataset = (
dataset.take(train_size).shuffle(buffer_size=train_size).batch(batch_size)
)
test_dataset = dataset.skip(train_size).batch(batch_size)
return train_dataset, test_dataset<jupyter_output><empty_output><jupyter_text>Compile, train, and evaluate the model<jupyter_code>hidden_units = [8, 8]
learning_rate = 0.001
def run_experiment(model, loss, train_dataset, test_dataset):
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate),
loss=loss,
metrics=[keras.metrics.RootMeanSquaredError()],
)
print("Start training the model...")
model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset)
print("Model training finished.")
_, rmse = model.evaluate(train_dataset, verbose=0)
print(f"Train RMSE: {round(rmse, 3)}")
print("Evaluating model performance...")
_, rmse = model.evaluate(test_dataset, verbose=0)
print(f"Test RMSE: {round(rmse, 3)}")<jupyter_output><empty_output><jupyter_text>Create model inputs<jupyter_code>FEATURE_NAMES = [
"fixed acidity",
"volatile acidity",
"citric acid",
"residual sugar",
"chlorides",
"free sulfur dioxide",
"total sulfur dioxide",
"density",
"pH",
"sulphates",
"alcohol",
]
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(1,), dtype=tf.float32
)
return inputs<jupyter_output><empty_output><jupyter_text>Experiment 1: standard neural networkWe create a standard deterministic neural network model as a baseline.<jupyter_code>def create_baseline_model():
inputs = create_model_inputs()
input_values = [value for _, value in sorted(inputs.items())]
features = keras.layers.concatenate(input_values)
features = layers.BatchNormalization()(features)
# Create hidden layers with deterministic weights using the Dense layer.
for units in hidden_units:
features = layers.Dense(units, activation="sigmoid")(features)
# The output is deterministic: a single point estimate.
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model<jupyter_output><empty_output><jupyter_text>Let's split the wine dataset into training and test sets, with 85% and 15% ofthe examples, respectively.<jupyter_code>dataset_size = 4898
batch_size = 256
train_size = int(dataset_size * 0.85)
train_dataset, test_dataset = get_train_and_test_splits(train_size, batch_size)<jupyter_output><empty_output><jupyter_text>Now let's train the baseline model. We use the `MeanSquaredError`as the loss function.<jupyter_code>num_epochs = 100
mse_loss = keras.losses.MeanSquaredError()
baseline_model = create_baseline_model()
run_experiment(baseline_model, mse_loss, train_dataset, test_dataset)<jupyter_output><empty_output><jupyter_text>We take a sample from the test set and use the model to obtain predictions for them.Note that since the baseline model is deterministic, we get a single *point estimate* prediction for each test example, with no information about theuncertainty of the model nor the prediction.<jupyter_code>sample = 10
examples, targets = list(test_dataset.unbatch().shuffle(batch_size * 10).batch(sample))[
0
]
predicted = baseline_model(examples).numpy()
for idx in range(sample):
print(f"Predicted: {round(float(predicted[idx][0]), 1)} - Actual: {targets[idx]}")<jupyter_output><empty_output><jupyter_text>Experiment 2: Bayesian neural network (BNN)The object of the Bayesian approach for modeling neural networks is to capturethe *epistemic uncertainty*, which is uncertainty about the model fitness,due to limited training data.The idea is that, instead of learning specific weight (and bias) *values* in theneural network, the Bayesian approach learns weight *distributions*- from which we can sample to produce an output for a given input -to encode weight uncertainty.Thus, we need to define prior and the posterior distributions of these weights,and the training process is to learn the parameters of these distributions.<jupyter_code># Define the prior weight distribution as Normal of mean=0 and stddev=1.
# Note that, in this example, the we prior distribution is not trainable,
# as we fix its parameters.
def prior(kernel_size, bias_size, dtype=None):
n = kernel_size + bias_size
prior_model = keras.Sequential(
[
tfp.layers.DistributionLambda(
lambda t: tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros(n), scale_diag=tf.ones(n)
)
)
]
)
return prior_model
# Define variational posterior weight distribution as multivariate Gaussian.
# Note that the learnable parameters for this distribution are the means,
# variances, and covariances.
def posterior(kernel_size, bias_size, dtype=None):
n = kernel_size + bias_size
posterior_model = keras.Sequential(
[
tfp.layers.VariableLayer(
tfp.layers.MultivariateNormalTriL.params_size(n), dtype=dtype
),
tfp.layers.MultivariateNormalTriL(n),
]
)
return posterior_model<jupyter_output><empty_output><jupyter_text>We use the `tfp.layers.DenseVariational` layer instead of the standard`keras.layers.Dense` layer in the neural network model.<jupyter_code>def create_bnn_model(train_size):
inputs = create_model_inputs()
features = keras.layers.concatenate(list(inputs.values()))
features = layers.BatchNormalization()(features)
# Create hidden layers with weight uncertainty using the DenseVariational layer.
for units in hidden_units:
features = tfp.layers.DenseVariational(
units=units,
make_prior_fn=prior,
make_posterior_fn=posterior,
kl_weight=1 / train_size,
activation="sigmoid",
)(features)
# The output is deterministic: a single point estimate.
outputs = layers.Dense(units=1)(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model<jupyter_output><empty_output><jupyter_text>The epistemic uncertainty can be reduced as we increase the size of thetraining data. That is, the more data the BNN model sees, the more it is certainabout its estimates for the weights (distribution parameters).Let's test this behaviour by training the BNN model on a small subset ofthe training set, and then on the full training set, to compare the output variances. Train BNN with a small training subset.<jupyter_code>num_epochs = 500
train_sample_size = int(train_size * 0.3)
small_train_dataset = train_dataset.unbatch().take(train_sample_size).batch(batch_size)
bnn_model_small = create_bnn_model(train_sample_size)
run_experiment(bnn_model_small, mse_loss, small_train_dataset, test_dataset)<jupyter_output><empty_output><jupyter_text>Since we have trained a BNN model, the model produces a different output each timewe call it with the same input, since each time a new set of weights are sampledfrom the distributions to construct the network and produce an output.The less certain the model weights are, the more variability (wider range) we willsee in the outputs of the same inputs.<jupyter_code>def compute_predictions(model, iterations=100):
predicted = []
for _ in range(iterations):
predicted.append(model(examples).numpy())
predicted = np.concatenate(predicted, axis=1)
prediction_mean = np.mean(predicted, axis=1).tolist()
prediction_min = np.min(predicted, axis=1).tolist()
prediction_max = np.max(predicted, axis=1).tolist()
prediction_range = (np.max(predicted, axis=1) - np.min(predicted, axis=1)).tolist()
for idx in range(sample):
print(
f"Predictions mean: {round(prediction_mean[idx], 2)}, "
f"min: {round(prediction_min[idx], 2)}, "
f"max: {round(prediction_max[idx], 2)}, "
f"range: {round(prediction_range[idx], 2)} - "
f"Actual: {targets[idx]}"
)
compute_predictions(bnn_model_small)<jupyter_output><empty_output><jupyter_text>Train BNN with the whole training set.<jupyter_code>num_epochs = 500
bnn_model_full = create_bnn_model(train_size)
run_experiment(bnn_model_full, mse_loss, train_dataset, test_dataset)
compute_predictions(bnn_model_full)<jupyter_output><empty_output><jupyter_text>Notice that the model trained with the full training dataset shows smaller range(uncertainty) in the prediction values for the same inputs, compared to the modeltrained with a subset of the training dataset. Experiment 3: probabilistic Bayesian neural networkSo far, the output of the standard and the Bayesian NN models that we built isdeterministic, that is, produces a point estimate as a prediction for a given example.We can create a probabilistic NN by letting the model output a distribution.In this case, the model captures the *aleatoric uncertainty* as well,which is due to irreducible noise in the data, or to the stochastic nature of theprocess generating the data.In this example, we model the output as a `IndependentNormal` distribution,with learnable mean and variance parameters. If the task was classification,we would have used `IndependentBernoulli` with binary classes, and `OneHotCategorical`with multiple classes, to model distribution of the model output.<jupyter_code>def create_probablistic_bnn_model(train_size):
inputs = create_model_inputs()
features = keras.layers.concatenate(list(inputs.values()))
features = layers.BatchNormalization()(features)
# Create hidden layers with weight uncertainty using the DenseVariational layer.
for units in hidden_units:
features = tfp.layers.DenseVariational(
units=units,
make_prior_fn=prior,
make_posterior_fn=posterior,
kl_weight=1 / train_size,
activation="sigmoid",
)(features)
    # Create a probabilistic output (Normal distribution), and use the `Dense` layer
# to produce the parameters of the distribution.
# We set units=2 to learn both the mean and the variance of the Normal distribution.
distribution_params = layers.Dense(units=2)(features)
outputs = tfp.layers.IndependentNormal(1)(distribution_params)
model = keras.Model(inputs=inputs, outputs=outputs)
    return model<jupyter_output><empty_output><jupyter_text>Since the output of the model is a distribution, rather than a point estimate,we use the [negative loglikelihood](https://en.wikipedia.org/wiki/Likelihood_function)as our loss function to compute how likely it is to see the true data (targets) from theestimated distribution produced by the model.<jupyter_code>def negative_loglikelihood(targets, estimated_distribution):
return -estimated_distribution.log_prob(targets)
num_epochs = 1000
prob_bnn_model = create_probablistic_bnn_model(train_size)
run_experiment(prob_bnn_model, negative_loglikelihood, train_dataset, test_dataset)<jupyter_output><empty_output><jupyter_text>Now let's produce an output from the model given the test examples.The output is now a distribution, and we can use its mean and varianceto compute the confidence intervals (CI) of the prediction.<jupyter_code>prediction_distribution = prob_bnn_model(examples)
prediction_mean = prediction_distribution.mean().numpy().tolist()
prediction_stdv = prediction_distribution.stddev().numpy()
# The 95% CI is computed as mean ± (1.96 * stdv)
upper = (prediction_mean + (1.96 * prediction_stdv)).tolist()
lower = (prediction_mean - (1.96 * prediction_stdv)).tolist()
prediction_stdv = prediction_stdv.tolist()
for idx in range(sample):
print(
f"Prediction mean: {round(prediction_mean[idx][0], 2)}, "
f"stddev: {round(prediction_stdv[idx][0], 2)}, "
f"95% CI: [{round(upper[idx][0], 2)} - {round(lower[idx][0], 2)}]"
f" - Actual: {targets[idx]}"
)<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/bayesian_neural_networks.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/bayesian_neural_networks.ipynb",
"repo_id": "keras-io",
"token_count": 5003
} | 78 |
# Approximating non-Function Mappings with Mixture Density Networks
**Author:** [lukewood](https://twitter.com/luke_wood_ml)<br>
**Date created:** 2023/07/15<br>
**Last modified:** 2023/07/15<br>
**Description:** Approximate non one to one mapping using mixture density networks.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/approximating_non_function_mappings.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/approximating_non_function_mappings.py)
---
## Approximating NonFunctions
Neural networks are universal function approximators. Key word: function!
While powerful function approximators, neural networks are not able to
approximate non-functions.
One important restriction to remember about functions - they have one input, one
output!
Neural networks suffer greatly when the training set has multiple values of Y for a single X.
In this guide I'll show you how to approximate the class of non-functions
consisting of mappings from `x -> y` such that multiple `y` may exist for a
given `x`. We'll use a class of neural networks called
"Mixture Density Networks".
I'm going to use the new
[multibackend Keras Core project](https://github.com/keras-team/keras-core) to
build my Mixture Density networks.
Great job to the Keras team on the project - it's awesome to be able to swap
frameworks in one line of code.
Some bad news: I use TensorFlow probability in this guide... so it doesn't
actually work with other backends.
Anyways, let's start by installing dependencies and sorting out imports:
```python
!pip install -q --upgrade tensorflow-probability keras-core
```
```python
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from keras_core import callbacks
import keras_core
import tensorflow as tf
from keras_core import layers
from keras_core import optimizers
from tensorflow_probability import distributions as tfd
```
<div class="k-default-codeblock">
```
Using TensorFlow backend
```
</div>
Next, let's generate a noisy spiral that we're going to attempt to approximate.
I've defined a few functions below to do this:
```python
def normalize(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def create_noisy_spiral(n, jitter_std=0.2, revolutions=2):
angle = np.random.uniform(0, 2 * np.pi * revolutions, [n])
r = angle
x = r * np.cos(angle)
y = r * np.sin(angle)
result = np.stack([x, y], axis=1)
result = result + np.random.normal(scale=jitter_std, size=[n, 2])
result = 5 * normalize(result)
return result
```
Next, let's use this function to construct a sample dataset:
```python
xy = create_noisy_spiral(10000)
x, y = xy[:, 0:1], xy[:, 1:]
plt.scatter(x, y)
plt.show()
```

As you can see, there are multiple possible values of Y for a given
X. A standard neural network will simply learn the mean of these points in
geometric space.
We can quickly show this with a small fully-connected model:
```python
N_HIDDEN = 128
model = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(1),
]
)
```
Let's use mean squared error as well as the Adam optimizer.
These tend to be reasonable prototyping choices:
```python
model.compile(optimizer="adam", loss="mse")
```
We can fit this model quite easily:
```python
model.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[callbacks.EarlyStopping(monitor="val_loss", patience=10)],
)
```
<div class="k-default-codeblock">
```
Epoch 1/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 3s 8ms/step - loss: 2.6971 - val_loss: 1.6366
Epoch 2/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.5672 - val_loss: 1.2341
Epoch 3/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.1751 - val_loss: 1.0113
Epoch 4/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0322 - val_loss: 1.0108
Epoch 5/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0625 - val_loss: 1.0212
Epoch 6/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0290 - val_loss: 1.0022
Epoch 7/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0469 - val_loss: 1.0033
Epoch 8/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0247 - val_loss: 1.0011
Epoch 9/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0313 - val_loss: 0.9997
Epoch 10/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0252 - val_loss: 0.9995
Epoch 11/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0369 - val_loss: 1.0015
Epoch 12/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0203 - val_loss: 0.9958
Epoch 13/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0305 - val_loss: 0.9960
Epoch 14/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0283 - val_loss: 1.0081
Epoch 15/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0331 - val_loss: 0.9943
Epoch 16/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.0244 - val_loss: 1.0021
Epoch 17/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0496 - val_loss: 1.0077
Epoch 18/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0367 - val_loss: 0.9940
Epoch 19/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0201 - val_loss: 0.9927
Epoch 20/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0501 - val_loss: 1.0133
Epoch 21/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0098 - val_loss: 0.9980
Epoch 22/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0195 - val_loss: 0.9907
Epoch 23/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0196 - val_loss: 0.9899
Epoch 24/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0170 - val_loss: 1.0033
Epoch 25/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0169 - val_loss: 0.9963
Epoch 26/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0141 - val_loss: 0.9895
Epoch 27/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0367 - val_loss: 0.9916
Epoch 28/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0301 - val_loss: 0.9991
Epoch 29/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0097 - val_loss: 1.0004
Epoch 30/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0415 - val_loss: 1.0062
Epoch 31/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0186 - val_loss: 0.9888
Epoch 32/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0230 - val_loss: 0.9910
Epoch 33/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0217 - val_loss: 0.9910
Epoch 34/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0180 - val_loss: 0.9945
Epoch 35/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0329 - val_loss: 0.9963
Epoch 36/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0190 - val_loss: 0.9912
Epoch 37/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0341 - val_loss: 0.9894
Epoch 38/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0100 - val_loss: 0.9920
Epoch 39/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0097 - val_loss: 0.9899
Epoch 40/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0216 - val_loss: 0.9948
Epoch 41/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0115 - val_loss: 0.9923
<keras_core.src.callbacks.history.History at 0x12e0b4dd0>
```
</div>
And let's check out the result:
```python
y_pred = model.predict(x)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 851us/step
```
</div>
As expected, the model learns the conditional mean of the `y` values for a
given `x`.
```python
plt.scatter(x, y)
plt.scatter(x, y_pred)
plt.show()
```

---
## Mixture Density Networks
Mixture Density networks can alleviate this problem.
A Mixture density is a class of complicated densities expressible in terms of simpler densities.
They are effectively a weighted sum of many simpler probability distributions.
Mixture Density networks learn to parameterize a mixture density distribution
based on a given training set.
As a practitioner, all you need to know is that Mixture Density Networks solve
the problem of multiple values of Y for a given X.
I'm hoping to add a tool to your kit, but I'm not going to formally explain the
derivation of Mixture Density networks in this guide.
The most important thing to know is that a Mixture Density network learns to
parameterize a mixture density distribution.
This is done by computing a special loss with respect to both the provided
`y_i` label as well as the predicted distribution for the corresponding `x_i`.
This loss function operates by computing the probability that `y_i` would be
drawn from the predicted mixture distribution.
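Concretely, for a one-dimensional target the predicted density is a weighted sum of
Gaussians, `p(y | x) = sum_k pi_k(x) * N(y; mu_k(x), sigma_k(x))`, and the loss is
`-log p(y | x)`. Here is a tiny, purely illustrative NumPy version of that computation
for a single sample and a two-component mixture (the actual loss we implement below
uses TensorFlow Probability):
```python
# Purely illustrative: negative log-likelihood of y under a 1D Gaussian mixture.
def toy_mixture_nll(y, pis, mus, sigmas):
    pdfs = [
        pi * np.exp(-0.5 * ((y - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
        for pi, mu, sigma in zip(pis, mus, sigmas)
    ]
    return -np.log(np.sum(pdfs))
print(toy_mixture_nll(y=1.0, pis=[0.3, 0.7], mus=[0.0, 2.0], sigmas=[1.0, 0.5]))
```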
Let's implement a Mixture density network.
Below, a ton of helper functions are defined based on an old Keras library
[`Keras Mixture Density Network Layer`](https://github.com/cpmpercussion/keras-mdn-layer).
I've adapted the code for use with Keras core.
Let's start writing a Mixture Density Network!
First, we need a special activation function: ELU plus a tiny epsilon.
This helps prevent ELU from outputting 0 which causes NaNs in Mixture Density
Network loss evaluation.
```python
def elu_plus_one_plus_epsilon(x):
return keras_core.activations.elu(x) + 1 + keras_core.backend.epsilon()
```
Next, let's actually define a `MixtureDensityOutput` layer that outputs all values needed
to sample from the learned mixture distribution:
```python
class MixtureDensityOutput(layers.Layer):
def __init__(self, output_dimension, num_mixtures, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dimension
self.num_mix = num_mixtures
self.mdn_mus = layers.Dense(
self.num_mix * self.output_dim, name="mdn_mus"
) # mix*output vals, no activation
self.mdn_sigmas = layers.Dense(
self.num_mix * self.output_dim,
activation=elu_plus_one_plus_epsilon,
name="mdn_sigmas",
) # mix*output vals exp activation
self.mdn_pi = layers.Dense(self.num_mix, name="mdn_pi") # mix vals, logits
def build(self, input_shape):
self.mdn_mus.build(input_shape)
self.mdn_sigmas.build(input_shape)
self.mdn_pi.build(input_shape)
super().build(input_shape)
@property
def trainable_weights(self):
return (
self.mdn_mus.trainable_weights
+ self.mdn_sigmas.trainable_weights
+ self.mdn_pi.trainable_weights
)
@property
def non_trainable_weights(self):
return (
self.mdn_mus.non_trainable_weights
+ self.mdn_sigmas.non_trainable_weights
+ self.mdn_pi.non_trainable_weights
)
def call(self, x, mask=None):
return layers.concatenate(
[self.mdn_mus(x), self.mdn_sigmas(x), self.mdn_pi(x)], name="mdn_outputs"
)
```
Let's construct a Mixture Density Network using our new layer:
```python
OUTPUT_DIMS = 1
N_MIXES = 20
mdn_network = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
MixtureDensityOutput(OUTPUT_DIMS, N_MIXES),
]
)
```
Next, let's implement a custom loss function to train the Mixture Density
Network layer based on the true values and our expected outputs:
```python
def get_mixture_loss_func(output_dim, num_mixes):
def mdn_loss_func(y_true, y_pred):
        # Reshape inputs in case this is used in a TimeDistributed layer
y_pred = tf.reshape(
y_pred,
[-1, (2 * num_mixes * output_dim) + num_mixes],
name="reshape_ypreds",
)
y_true = tf.reshape(y_true, [-1, output_dim], name="reshape_ytrue")
        # Split the inputs into parameters
out_mu, out_sigma, out_pi = tf.split(
y_pred,
num_or_size_splits=[
num_mixes * output_dim,
num_mixes * output_dim,
num_mixes,
],
axis=-1,
name="mdn_coef_split",
)
# Construct the mixture models
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [
tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
for loc, scale in zip(mus, sigs)
]
mixture = tfd.Mixture(cat=cat, components=coll)
loss = mixture.log_prob(y_true)
loss = tf.negative(loss)
loss = tf.reduce_mean(loss)
return loss
return mdn_loss_func
mdn_network.compile(loss=get_mixture_loss_func(OUTPUT_DIMS, N_MIXES), optimizer="adam")
```
Finally, we can call `model.fit()` like any other Keras model.
```python
mdn_network.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[
callbacks.EarlyStopping(monitor="loss", patience=10, restore_best_weights=True),
callbacks.ReduceLROnPlateau(monitor="loss", patience=5),
],
)
```
<div class="k-default-codeblock">
```
Epoch 1/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 20s 89ms/step - loss: 2.5088 - val_loss: 1.6384 - learning_rate: 0.0010
Epoch 2/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.5954 - val_loss: 1.4872 - learning_rate: 0.0010
Epoch 3/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.4818 - val_loss: 1.4026 - learning_rate: 0.0010
Epoch 4/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.3818 - val_loss: 1.3327 - learning_rate: 0.0010
Epoch 5/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.3478 - val_loss: 1.3034 - learning_rate: 0.0010
Epoch 6/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.3045 - val_loss: 1.2684 - learning_rate: 0.0010
Epoch 7/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.2836 - val_loss: 1.2381 - learning_rate: 0.0010
Epoch 8/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 1.2582 - val_loss: 1.2047 - learning_rate: 0.0010
Epoch 9/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.2212 - val_loss: 1.1915 - learning_rate: 0.0010
Epoch 10/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.1907 - val_loss: 1.1903 - learning_rate: 0.0010
Epoch 11/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.1456 - val_loss: 1.0221 - learning_rate: 0.0010
Epoch 12/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 1.0075 - val_loss: 0.9356 - learning_rate: 0.0010
Epoch 13/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.9413 - val_loss: 0.8409 - learning_rate: 0.0010
Epoch 14/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.8646 - val_loss: 0.8717 - learning_rate: 0.0010
Epoch 15/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.8053 - val_loss: 0.8080 - learning_rate: 0.0010
Epoch 16/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.7568 - val_loss: 0.6381 - learning_rate: 0.0010
Epoch 17/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.6638 - val_loss: 0.6175 - learning_rate: 0.0010
Epoch 18/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.5893 - val_loss: 0.5387 - learning_rate: 0.0010
Epoch 19/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.5835 - val_loss: 0.5449 - learning_rate: 0.0010
Epoch 20/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.5137 - val_loss: 0.4536 - learning_rate: 0.0010
Epoch 21/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.4808 - val_loss: 0.4779 - learning_rate: 0.0010
Epoch 22/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4592 - val_loss: 0.4359 - learning_rate: 0.0010
Epoch 23/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4303 - val_loss: 0.4768 - learning_rate: 0.0010
Epoch 24/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4505 - val_loss: 0.4084 - learning_rate: 0.0010
Epoch 25/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4033 - val_loss: 0.3484 - learning_rate: 0.0010
Epoch 26/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3696 - val_loss: 0.4844 - learning_rate: 0.0010
Epoch 27/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3868 - val_loss: 0.3406 - learning_rate: 0.0010
Epoch 28/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3214 - val_loss: 0.2739 - learning_rate: 0.0010
Epoch 29/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3154 - val_loss: 0.3286 - learning_rate: 0.0010
Epoch 30/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2930 - val_loss: 0.2263 - learning_rate: 0.0010
Epoch 31/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2946 - val_loss: 0.2927 - learning_rate: 0.0010
Epoch 32/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2739 - val_loss: 0.2026 - learning_rate: 0.0010
Epoch 33/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2454 - val_loss: 0.2451 - learning_rate: 0.0010
Epoch 34/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2146 - val_loss: 0.1722 - learning_rate: 0.0010
Epoch 35/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2041 - val_loss: 0.2774 - learning_rate: 0.0010
Epoch 36/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2020 - val_loss: 0.1257 - learning_rate: 0.0010
Epoch 37/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1614 - val_loss: 0.1128 - learning_rate: 0.0010
Epoch 38/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1676 - val_loss: 0.1908 - learning_rate: 0.0010
Epoch 39/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.1511 - val_loss: 0.1045 - learning_rate: 0.0010
Epoch 40/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 0.1061 - val_loss: 0.1321 - learning_rate: 0.0010
Epoch 41/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1170 - val_loss: 0.0879 - learning_rate: 0.0010
Epoch 42/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1045 - val_loss: 0.0307 - learning_rate: 0.0010
Epoch 43/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1066 - val_loss: 0.0637 - learning_rate: 0.0010
Epoch 44/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0960 - val_loss: 0.0304 - learning_rate: 0.0010
Epoch 45/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0747 - val_loss: 0.0211 - learning_rate: 0.0010
Epoch 46/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0733 - val_loss: -0.0155 - learning_rate: 0.0010
Epoch 47/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0339 - val_loss: 0.0079 - learning_rate: 0.0010
Epoch 48/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0597 - val_loss: 0.0223 - learning_rate: 0.0010
Epoch 49/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0370 - val_loss: 0.0549 - learning_rate: 0.0010
Epoch 50/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0343 - val_loss: 0.0031 - learning_rate: 0.0010
Epoch 51/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0132 - val_loss: -0.0304 - learning_rate: 0.0010
Epoch 52/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0326 - val_loss: 0.0584 - learning_rate: 0.0010
Epoch 53/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0512 - val_loss: -0.0166 - learning_rate: 0.0010
Epoch 54/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0210 - val_loss: -0.0433 - learning_rate: 0.0010
Epoch 55/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0261 - val_loss: 0.0317 - learning_rate: 0.0010
Epoch 56/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0185 - val_loss: -0.0210 - learning_rate: 0.0010
Epoch 57/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0021 - val_loss: -0.0218 - learning_rate: 0.0010
Epoch 58/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0100 - val_loss: -0.0488 - learning_rate: 0.0010
Epoch 59/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0126 - val_loss: -0.0504 - learning_rate: 0.0010
Epoch 60/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0278 - val_loss: -0.0622 - learning_rate: 0.0010
Epoch 61/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0180 - val_loss: -0.0756 - learning_rate: 0.0010
Epoch 62/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.0198 - val_loss: -0.0427 - learning_rate: 0.0010
Epoch 63/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0129 - val_loss: -0.0483 - learning_rate: 0.0010
Epoch 64/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0221 - val_loss: -0.0379 - learning_rate: 0.0010
Epoch 65/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.0177 - val_loss: -0.0626 - learning_rate: 0.0010
Epoch 66/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0045 - val_loss: -0.0148 - learning_rate: 0.0010
Epoch 67/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0045 - val_loss: -0.0570 - learning_rate: 0.0010
Epoch 68/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0304 - val_loss: -0.0062 - learning_rate: 0.0010
Epoch 69/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0053 - val_loss: -0.0553 - learning_rate: 0.0010
Epoch 70/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0364 - val_loss: -0.1112 - learning_rate: 0.0010
Epoch 71/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0017 - val_loss: -0.0865 - learning_rate: 0.0010
Epoch 72/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0082 - val_loss: -0.1180 - learning_rate: 0.0010
Epoch 73/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0501 - val_loss: -0.1028 - learning_rate: 0.0010
Epoch 74/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0452 - val_loss: -0.0381 - learning_rate: 0.0010
Epoch 75/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0397 - val_loss: -0.0517 - learning_rate: 0.0010
Epoch 76/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0317 - val_loss: -0.1144 - learning_rate: 0.0010
Epoch 77/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0400 - val_loss: -0.1283 - learning_rate: 0.0010
Epoch 78/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0756 - val_loss: -0.0749 - learning_rate: 0.0010
Epoch 79/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0459 - val_loss: -0.1229 - learning_rate: 0.0010
Epoch 80/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0485 - val_loss: -0.0896 - learning_rate: 0.0010
Epoch 81/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.0351 - val_loss: -0.1037 - learning_rate: 0.0010
Epoch 82/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0617 - val_loss: -0.0949 - learning_rate: 0.0010
Epoch 83/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0614 - val_loss: -0.1044 - learning_rate: 0.0010
Epoch 84/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0650 - val_loss: -0.1128 - learning_rate: 0.0010
Epoch 85/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0710 - val_loss: -0.1236 - learning_rate: 0.0010
Epoch 86/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0504 - val_loss: -0.0149 - learning_rate: 0.0010
Epoch 87/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0561 - val_loss: -0.1095 - learning_rate: 0.0010
Epoch 88/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0527 - val_loss: -0.0929 - learning_rate: 0.0010
Epoch 89/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0704 - val_loss: -0.1062 - learning_rate: 0.0010
Epoch 90/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.0386 - val_loss: -0.1433 - learning_rate: 0.0010
Epoch 91/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1129 - val_loss: -0.1698 - learning_rate: 1.0000e-04
Epoch 92/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1210 - val_loss: -0.1696 - learning_rate: 1.0000e-04
Epoch 93/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1315 - val_loss: -0.1663 - learning_rate: 1.0000e-04
Epoch 94/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1207 - val_loss: -0.1696 - learning_rate: 1.0000e-04
Epoch 95/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1208 - val_loss: -0.1606 - learning_rate: 1.0000e-04
Epoch 96/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1157 - val_loss: -0.1728 - learning_rate: 1.0000e-04
Epoch 97/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1367 - val_loss: -0.1691 - learning_rate: 1.0000e-04
Epoch 98/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1237 - val_loss: -0.1740 - learning_rate: 1.0000e-04
Epoch 99/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1271 - val_loss: -0.1593 - learning_rate: 1.0000e-04
Epoch 100/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1358 - val_loss: -0.1738 - learning_rate: 1.0000e-04
Epoch 101/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1260 - val_loss: -0.1669 - learning_rate: 1.0000e-04
Epoch 102/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1184 - val_loss: -0.1660 - learning_rate: 1.0000e-04
Epoch 103/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1221 - val_loss: -0.1740 - learning_rate: 1.0000e-04
Epoch 104/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1207 - val_loss: -0.1498 - learning_rate: 1.0000e-04
Epoch 105/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1210 - val_loss: -0.1695 - learning_rate: 1.0000e-04
Epoch 106/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1264 - val_loss: -0.1477 - learning_rate: 1.0000e-04
Epoch 107/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1217 - val_loss: -0.1717 - learning_rate: 1.0000e-04
Epoch 108/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1182 - val_loss: -0.1748 - learning_rate: 1.0000e-05
Epoch 109/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1394 - val_loss: -0.1757 - learning_rate: 1.0000e-05
Epoch 110/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1363 - val_loss: -0.1762 - learning_rate: 1.0000e-05
Epoch 111/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1292 - val_loss: -0.1765 - learning_rate: 1.0000e-05
Epoch 112/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1330 - val_loss: -0.1737 - learning_rate: 1.0000e-05
Epoch 113/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1341 - val_loss: -0.1769 - learning_rate: 1.0000e-05
Epoch 114/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1318 - val_loss: -0.1771 - learning_rate: 1.0000e-05
Epoch 115/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1285 - val_loss: -0.1756 - learning_rate: 1.0000e-05
Epoch 116/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1211 - val_loss: -0.1764 - learning_rate: 1.0000e-05
Epoch 117/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1434 - val_loss: -0.1755 - learning_rate: 1.0000e-05
Epoch 118/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: -0.1375 - val_loss: -0.1757 - learning_rate: 1.0000e-05
Epoch 119/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1407 - val_loss: -0.1740 - learning_rate: 1.0000e-05
Epoch 120/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1406 - val_loss: -0.1754 - learning_rate: 1.0000e-06
Epoch 121/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1258 - val_loss: -0.1761 - learning_rate: 1.0000e-06
Epoch 122/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1384 - val_loss: -0.1762 - learning_rate: 1.0000e-06
Epoch 123/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1522 - val_loss: -0.1764 - learning_rate: 1.0000e-06
Epoch 124/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1310 - val_loss: -0.1763 - learning_rate: 1.0000e-06
Epoch 125/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1434 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 126/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1329 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 127/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1392 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 128/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1300 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 129/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.1347 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 130/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.1200 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 131/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.1415 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 132/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1270 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 133/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1329 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 134/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1265 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 135/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1329 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 136/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1429 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 137/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1394 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 138/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1315 - val_loss: -0.1763 - learning_rate: 1.0000e-07
Epoch 139/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1253 - val_loss: -0.1763 - learning_rate: 1.0000e-08
Epoch 140/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1346 - val_loss: -0.1763 - learning_rate: 1.0000e-08
Epoch 141/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1418 - val_loss: -0.1763 - learning_rate: 1.0000e-08
Epoch 142/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: -0.1279 - val_loss: -0.1763 - learning_rate: 1.0000e-08
Epoch 143/300
67/67 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: -0.1224 - val_loss: -0.1763 - learning_rate: 1.0000e-08
<keras_core.src.callbacks.history.History at 0x148c20890>
```
</div>
Let's make some predictions!
```python
y_pred_mixture = mdn_network.predict(x)
print(y_pred_mixture.shape)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 811us/step
(10000, 60)
```
</div>
The MDN does not output a single value; instead it outputs values to
parameterize a mixture distribution.
To visualize these outputs, let's sample from the distribution.
Note that sampling is a lossy process.
If you want to preserve all information as part of a greater latent
representation (i.e. for downstream processing), I recommend you simply keep the
distribution parameters in place.
```python
def split_mixture_params(params, output_dim, num_mixes):
mus = params[: num_mixes * output_dim]
sigs = params[num_mixes * output_dim : 2 * num_mixes * output_dim]
pi_logits = params[-num_mixes:]
return mus, sigs, pi_logits
def softmax(w, t=1.0):
e = np.array(w) / t # adjust temperature
e -= e.max() # subtract max to protect from exploding exp values.
e = np.exp(e)
dist = e / np.sum(e)
return dist
def sample_from_categorical(dist):
r = np.random.rand(1) # uniform random number in [0,1]
accumulate = 0
for i in range(0, dist.size):
accumulate += dist[i]
if accumulate >= r:
return i
    # `tf.logging` was removed in TF2; use the module-level logger instead.
    tf.get_logger().info("Error sampling categorical model.")
return -1
def sample_from_output(params, output_dim, num_mixes, temp=1.0, sigma_temp=1.0):
mus, sigs, pi_logits = split_mixture_params(params, output_dim, num_mixes)
pis = softmax(pi_logits, t=temp)
m = sample_from_categorical(pis)
# Alternative way to sample from categorical:
# m = np.random.choice(range(len(pis)), p=pis)
mus_vector = mus[m * output_dim : (m + 1) * output_dim]
sig_vector = sigs[m * output_dim : (m + 1) * output_dim]
scale_matrix = np.identity(output_dim) * sig_vector # scale matrix from diag
cov_matrix = np.matmul(scale_matrix, scale_matrix.T) # cov is scale squared.
cov_matrix = cov_matrix * sigma_temp # adjust for sigma temperature
sample = np.random.multivariate_normal(mus_vector, cov_matrix, 1)
return sample
```
Next, let's use our sampling function:
```python
# Sample from the predicted distributions
y_samples = np.apply_along_axis(
sample_from_output, 1, y_pred_mixture, 1, N_MIXES, temp=1.0
)
```
Finally, we can visualize our network outputs
```python
plt.scatter(x, y, alpha=0.05, color="blue", label="Ground Truth")
plt.scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
plt.show()
```

Beautiful. Love to see it
---
## Conclusions
Neural networks are universal function approximators, but they can only
approximate functions. Mixture Density Networks can approximate arbitrary
one-to-many `x -> y` mappings using some neat probability tricks.
For more examples with `tensorflow_probability`
[start here](https://www.tensorflow.org/probability/examples/Probabilistic_Layers_Regression).
One more pretty graphic for the road:
```python
fig, axs = plt.subplots(1, 3)
fig.set_figheight(3)
fig.set_figwidth(12)
axs[0].set_title("Ground Truth")
axs[0].scatter(x, y, alpha=0.05, color="blue")
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
axs[1].set_title("Normal Model prediction")
axs[1].scatter(x, y_pred, alpha=0.05, color="red")
axs[1].set_xlim(xlim)
axs[1].set_ylim(ylim)
axs[2].scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
axs[2].set_title("Mixture Density Network prediction")
axs[2].set_xlim(xlim)
axs[2].set_ylim(ylim)
plt.show()
```

| keras-io/examples/keras_recipes/md/approximating_non_function_mappings.md/0 | {
"file_path": "keras-io/examples/keras_recipes/md/approximating_non_function_mappings.md",
"repo_id": "keras-io",
"token_count": 16684
} | 79 |
"""
Title: Memory-efficient embeddings for recommendation systems
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/02/15
Last modified: 2023/11/15
Description: Using compositional & mixed-dimension embeddings for memory-efficient recommendation models.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates two techniques for building memory-efficient recommendation models
by reducing the size of the embedding tables, without sacrificing model effectiveness:
1. [Quotient-remainder trick](https://arxiv.org/abs/1909.02107), by Hao-Jun Michael Shi et al.,
which reduces the number of embedding vectors to store, yet produces a unique embedding
vector for each item without explicit definition.
2. [Mixed Dimension embeddings](https://arxiv.org/abs/1909.11810), by Antonio Ginart et al.,
which store embedding vectors with mixed dimensions, where less popular items have
reduced dimension embeddings.
We use the [1M version of the Movielens dataset](https://grouplens.org/datasets/movielens/1m/).
The dataset includes around 1 million ratings from 6,000 users on 4,000 movies.
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
from zipfile import ZipFile
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import layers
from keras.layers import StringLookup
import matplotlib.pyplot as plt
"""
## Prepare the data
## Download and process data
"""
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip")
ZipFile("movielens.zip", "r").extractall()
ratings_data = pd.read_csv(
"ml-1m/ratings.dat",
sep="::",
names=["user_id", "movie_id", "rating", "unix_timestamp"],
)
ratings_data["movie_id"] = ratings_data["movie_id"].apply(lambda x: f"movie_{x}")
ratings_data["user_id"] = ratings_data["user_id"].apply(lambda x: f"user_{x}")
ratings_data["rating"] = ratings_data["rating"].apply(lambda x: float(x))
del ratings_data["unix_timestamp"]
print(f"Number of users: {len(ratings_data.user_id.unique())}")
print(f"Number of movies: {len(ratings_data.movie_id.unique())}")
print(f"Number of ratings: {len(ratings_data.index)}")
"""
## Create train and eval data splits
"""
random_selection = np.random.rand(len(ratings_data.index)) <= 0.85
train_data = ratings_data[random_selection]
eval_data = ratings_data[~random_selection]
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
eval_data.to_csv("eval_data.csv", index=False, sep="|", header=False)
print(f"Train data split: {len(train_data.index)}")
print(f"Eval data split: {len(eval_data.index)}")
print("Train and eval data files are saved.")
"""
## Define dataset metadata and hyperparameters
"""
csv_header = list(ratings_data.columns)
user_vocabulary = list(ratings_data.user_id.unique())
movie_vocabulary = list(ratings_data.movie_id.unique())
target_feature_name = "rating"
learning_rate = 0.001
batch_size = 128
num_epochs = 3
base_embedding_dim = 64
"""
## Train and evaluate the model
"""
def get_dataset_from_csv(csv_file_path, batch_size=128, shuffle=True):
return tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=csv_header,
label_name=target_feature_name,
num_epochs=1,
header=False,
field_delim="|",
shuffle=shuffle,
)
def run_experiment(model):
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.MeanSquaredError(),
metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
)
# Read the training data.
train_dataset = get_dataset_from_csv("train_data.csv", batch_size)
# Read the test data.
eval_dataset = get_dataset_from_csv("eval_data.csv", batch_size, shuffle=False)
# Fit the model with the training data.
history = model.fit(
train_dataset,
epochs=num_epochs,
validation_data=eval_dataset,
)
return history
"""
## Experiment 1: baseline collaborative filtering model
### Implement embedding encoder
"""
def embedding_encoder(vocabulary, embedding_dim, num_oov_indices=0, name=None):
return keras.Sequential(
[
StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=num_oov_indices
),
layers.Embedding(
input_dim=len(vocabulary) + num_oov_indices, output_dim=embedding_dim
),
],
name=f"{name}_embedding" if name else None,
)
"""
### Implement the baseline model
"""
def create_baseline_model():
# Receive the user as an input.
user_input = layers.Input(name="user_id", shape=(), dtype=tf.string)
# Get user embedding.
user_embedding = embedding_encoder(
vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, name="user"
)(user_input)
# Receive the movie as an input.
movie_input = layers.Input(name="movie_id", shape=(), dtype=tf.string)
# Get embedding.
movie_embedding = embedding_encoder(
vocabulary=movie_vocabulary, embedding_dim=base_embedding_dim, name="movie"
)(movie_input)
# Compute dot product similarity between user and movie embeddings.
logits = layers.Dot(axes=1, name="dot_similarity")(
[user_embedding, movie_embedding]
)
# Convert to rating scale.
prediction = keras.activations.sigmoid(logits) * 5
# Create the model.
model = keras.Model(
inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
)
return model
baseline_model = create_baseline_model()
baseline_model.summary()
"""
Notice that the number of trainable parameters is 623,744
"""
history = run_experiment(baseline_model)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "eval"], loc="upper left")
plt.show()
"""
## Experiment 2: memory-efficient model
"""
"""
### Implement Quotient-Remainder embedding as a layer
The Quotient-Remainder technique works as follows. For a set of vocabulary and embedding size
`embedding_dim`, instead of creating a `vocabulary_size X embedding_dim` embedding table,
we create *two* `num_buckets X embedding_dim` embedding tables, where `num_buckets`
is much smaller than `vocabulary_size`.
An embedding for a given item `index` is generated via the following steps:
1. Compute the `quotient_index` as `index // num_buckets`.
2. Compute the `remainder_index` as `index % num_buckets`.
3. Lookup `quotient_embedding` from the first embedding table using `quotient_index`.
4. Lookup `remainder_embedding` from the second embedding table using `remainder_index`.
5. Return `quotient_embedding` * `remainder_embedding`.
This technique not only reduces the number of embedding vectors that need to be stored and trained,
but also generates a *unique* embedding vector of size `embedding_dim` for each item.
Note that `q_embedding` and `r_embedding` can be combined using other operations,
like `Add` and `Concatenate`. A toy illustration of the indexing scheme is shown below.
"""
class QREmbedding(keras.layers.Layer):
def __init__(self, vocabulary, embedding_dim, num_buckets, name=None):
super().__init__(name=name)
self.num_buckets = num_buckets
self.index_lookup = StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
self.q_embeddings = layers.Embedding(
num_buckets,
embedding_dim,
)
self.r_embeddings = layers.Embedding(
num_buckets,
embedding_dim,
)
def call(self, inputs):
# Get the item index.
embedding_index = self.index_lookup(inputs)
# Get the quotient index.
quotient_index = tf.math.floordiv(embedding_index, self.num_buckets)
        # Get the remainder index.
remainder_index = tf.math.floormod(embedding_index, self.num_buckets)
# Lookup the quotient_embedding using the quotient_index.
quotient_embedding = self.q_embeddings(quotient_index)
# Lookup the remainder_embedding using the remainder_index.
remainder_embedding = self.r_embeddings(remainder_index)
# Use multiplication as a combiner operation
return quotient_embedding * remainder_embedding
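"""
As a quick, purely illustrative sanity check of the indexing scheme (these toy values are
not used by the model): with `num_buckets = 3`, a toy vocabulary of 9 items maps onto two
tables of 3 rows each, and every item gets a distinct (quotient, remainder) pair.
"""
for toy_index in range(9):
    print(toy_index, "->", (toy_index // 3, toy_index % 3))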
"""
### Implement Mixed Dimension embedding as a layer
In the mixed dimension embedding technique, we train embedding vectors with full dimensions
for the frequently queried items, while training embedding vectors with *reduced dimensions*
for less frequent items, plus a *projection weights matrix* to bring low dimension embeddings
to the full dimensions.
More precisely, we define *blocks* of items of similar frequencies. For each block,
a `block_vocab_size X block_embedding_dim` embedding table and `block_embedding_dim X full_embedding_dim`
projection weights matrix are created. Note that, if `block_embedding_dim` equals `full_embedding_dim`,
the projection weights matrix becomes an *identity* matrix. Embeddings for a given batch of item
`indices` are generated via the following steps:
1. For each block, lookup the `block_embedding_dim` embedding vectors using `indices`, and
project them to the `full_embedding_dim`.
2. If an item index does not belong to a given block, an out-of-vocabulary embedding is returned.
Each block will return a `batch_size X full_embedding_dim` tensor.
3. A mask is applied to the embeddings returned from each block in order to convert the
out-of-vocabulary embeddings to vector of zeros. That is, for each item in the batch,
a single non-zero embedding vector is returned from all the block embeddings.
4. Embeddings retrieved from the blocks are combined using *sum* to produce the final
`batch_size X full_embedding_dim` tensor. A toy sketch of the projection step is shown below.
"""
class MDEmbedding(keras.layers.Layer):
def __init__(
self, blocks_vocabulary, blocks_embedding_dims, base_embedding_dim, name=None
):
super().__init__(name=name)
self.num_blocks = len(blocks_vocabulary)
# Create vocab to block lookup.
keys = []
values = []
for block_idx, block_vocab in enumerate(blocks_vocabulary):
keys.extend(block_vocab)
values.extend([block_idx] * len(block_vocab))
self.vocab_to_block = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1
)
self.block_embedding_encoders = []
self.block_embedding_projectors = []
# Create block embedding encoders and projectors.
for idx in range(self.num_blocks):
vocabulary = blocks_vocabulary[idx]
embedding_dim = blocks_embedding_dims[idx]
block_embedding_encoder = embedding_encoder(
vocabulary, embedding_dim, num_oov_indices=1
)
self.block_embedding_encoders.append(block_embedding_encoder)
if embedding_dim == base_embedding_dim:
self.block_embedding_projectors.append(layers.Lambda(lambda x: x))
else:
self.block_embedding_projectors.append(
layers.Dense(units=base_embedding_dim)
)
def call(self, inputs):
# Get block index for each input item.
        block_indices = self.vocab_to_block.lookup(inputs)
# Initialize output embeddings to zeros.
embeddings = tf.zeros(shape=(tf.shape(inputs)[0], base_embedding_dim))
# Generate embeddings from blocks.
for idx in range(self.num_blocks):
# Lookup embeddings from the current block.
block_embeddings = self.block_embedding_encoders[idx](inputs)
# Project embeddings to base_embedding_dim.
block_embeddings = self.block_embedding_projectors[idx](block_embeddings)
# Create a mask to filter out embeddings of items that do not belong to the current block.
            mask = tf.expand_dims(tf.cast(block_indices == idx, tf.dtypes.float32), 1)
# Set the embeddings for the items not belonging to the current block to zeros.
block_embeddings = block_embeddings * mask
# Add the block embeddings to the final embeddings.
embeddings += block_embeddings
return embeddings
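"""
As a rough, purely illustrative sketch of the projection step (these toy layers are not
used by the model): a block that stores 16-dimensional embeddings needs a `16 x 64` dense
projection to produce vectors of the full `base_embedding_dim = 64`.
"""
toy_block_encoder = layers.Embedding(input_dim=100, output_dim=16)
toy_projector = layers.Dense(units=64)
toy_item_ids = tf.constant([3, 42, 7])
print(toy_projector(toy_block_encoder(toy_item_ids)).shape)  # (3, 64)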
"""
### Implement the memory-efficient model
In this experiment, we are going to use the **Quotient-Remainder** technique to reduce the
size of the user embeddings, and the **Mixed Dimension** technique to reduce the size of the
movie embeddings.
While in the [paper](https://arxiv.org/abs/1909.11810), an alpha-power rule is used to determine
the dimensions of the embedding of each block, we simply set the number of blocks and the
embedding dimensions of each block based on the histogram visualization of movie popularity.
"""
movie_frequencies = ratings_data["movie_id"].value_counts()
movie_frequencies.hist(bins=10)
"""
You can see that we can group the movies into three blocks, and assign them 64, 32, and 16
embedding dimensions, respectively. Feel free to experiment with a different number of blocks
and dimensions.
"""
sorted_movie_vocabulary = list(movie_frequencies.keys())
movie_blocks_vocabulary = [
sorted_movie_vocabulary[:400], # high popularity movies block
sorted_movie_vocabulary[400:1700], # normal popularity movies block
sorted_movie_vocabulary[1700:], # low popularity movies block
]
movie_blocks_embedding_dims = [64, 32, 16]
user_embedding_num_buckets = len(user_vocabulary) // 50
def create_memory_efficient_model():
# Take the user as an input.
user_input = layers.Input(name="user_id", shape=(), dtype="string")
# Get user embedding.
user_embedding = QREmbedding(
vocabulary=user_vocabulary,
embedding_dim=base_embedding_dim,
num_buckets=user_embedding_num_buckets,
name="user_embedding",
)(user_input)
# Take the movie as an input.
movie_input = layers.Input(name="movie_id", shape=(), dtype="string")
# Get embedding.
movie_embedding = MDEmbedding(
blocks_vocabulary=movie_blocks_vocabulary,
blocks_embedding_dims=movie_blocks_embedding_dims,
base_embedding_dim=base_embedding_dim,
name="movie_embedding",
)(movie_input)
# Compute dot product similarity between user and movie embeddings.
logits = layers.Dot(axes=1, name="dot_similarity")(
[user_embedding, movie_embedding]
)
# Convert to rating scale.
prediction = keras.activations.sigmoid(logits) * 5
# Create the model.
model = keras.Model(
inputs=[user_input, movie_input], outputs=prediction, name="baseline_model"
)
return model
memory_efficient_model = create_memory_efficient_model()
memory_efficient_model.summary()
"""
Notice that the number of trainable parameters is 117,968, which is more than 5x smaller
than the number of parameters in the baseline model.
"""
history = run_experiment(memory_efficient_model)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "eval"], loc="upper left")
plt.show()
| keras-io/examples/keras_recipes/memory_efficient_embeddings.py/0 | {
"file_path": "keras-io/examples/keras_recipes/memory_efficient_embeddings.py",
"repo_id": "keras-io",
"token_count": 5657
} | 80 |
<jupyter_start><jupyter_text>Review Classification using Active Learning**Author:** [Darshan Deshpande](https://twitter.com/getdarshan)**Date created:** 2021/10/29**Last modified:** 2021/10/29**Description:** Demonstrating the advantages of active learning through review classification. IntroductionWith the growth of data-centric Machine Learning, Active Learning has grown in popularityamongst businesses and researchers. Active Learning seeks to progressivelytrain ML models so that the resultant model requires lesser amount of training data toachieve competitive scores.The structure of an Active Learning pipeline involves a classifier and an oracle. Theoracle is an annotator that cleans, selects, labels the data, and feeds it to the modelwhen required. The oracle is a trained individual or a group of individuals thatensure consistency in labeling of new data.The process starts with annotating a small subset of the full dataset and training aninitial model. The best model checkpoint is saved and then tested on a balanced testset. The test set must be carefully sampled because the full training process will bedependent on it. Once we have the initial evaluation scores, the oracle is tasked withlabeling more samples; the number of data points to be sampled is usually determined bythe business requirements. After that, the newly sampled data is added to the trainingset, and the training procedure repeats. This cycle continues until either anacceptable score is reached or some other business metric is met.This tutorial provides a basic demonstration of how Active Learning works bydemonstrating a ratio-based (least confidence) sampling strategy that results in loweroverall false positive and negative rates when compared to a model trained on the entiredataset. This sampling falls under the domain of *uncertainty sampling*, in which newdatasets are sampled based on the uncertainty that the model outputs for thecorresponding label. In our example, we compare our model's false positive and falsenegative rates and annotate the new data based on their ratio.Some other sampling techniques include:1. [Committee sampling](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):Using multiple models to vote for the best data points to be sampled2. [Entropy reduction](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):Sampling according to an entropy threshold, selecting more of the samples that produce the highest entropy score.3. [Minimum margin based sampling](https://arxiv.org/abs/1906.00025v1):Selects data points closest to the decision boundary Importing required libraries<jupyter_code>import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import re
import string
tfds.disable_progress_bar()<jupyter_output><empty_output><jupyter_text>Loading and preprocessing the data We will be using the IMDB reviews dataset for our experiments. This dataset has 50,000 reviews in total, including training and testing splits. We will merge these splits and sample our own, balanced training, validation and testing sets.<jupyter_code>dataset = tfds.load(
"imdb_reviews",
split="train + test",
as_supervised=True,
batch_size=-1,
shuffle_files=False,
)
reviews, labels = tfds.as_numpy(dataset)
print("Total examples:", reviews.shape[0])<jupyter_output><empty_output><jupyter_text>Active learning starts with labeling a subset of data. For the ratio sampling technique that we will be using, we will need well-balanced training, validation and testing splits.<jupyter_code>val_split = 2500
test_split = 2500
train_split = 7500
# Separating the negative and positive samples for manual stratification
x_positives, y_positives = reviews[labels == 1], labels[labels == 1]
x_negatives, y_negatives = reviews[labels == 0], labels[labels == 0]
# Creating training, validation and testing splits
x_val, y_val = (
tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0),
tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0),
)
x_test, y_test = (
tf.concat(
(
x_positives[val_split : val_split + test_split],
x_negatives[val_split : val_split + test_split],
),
0,
),
tf.concat(
(
y_positives[val_split : val_split + test_split],
y_negatives[val_split : val_split + test_split],
),
0,
),
)
x_train, y_train = (
tf.concat(
(
x_positives[val_split + test_split : val_split + test_split + train_split],
x_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
tf.concat(
(
y_positives[val_split + test_split : val_split + test_split + train_split],
y_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
)
# Remaining pool of samples are stored separately. These are only labeled as and when required
x_pool_positives, y_pool_positives = (
x_positives[val_split + test_split + train_split :],
y_positives[val_split + test_split + train_split :],
)
x_pool_negatives, y_pool_negatives = (
x_negatives[val_split + test_split + train_split :],
y_negatives[val_split + test_split + train_split :],
)
# Creating TF Datasets for faster prefetching and parallelization
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
pool_negatives = tf.data.Dataset.from_tensor_slices(
(x_pool_negatives, y_pool_negatives)
)
pool_positives = tf.data.Dataset.from_tensor_slices(
(x_pool_positives, y_pool_positives)
)
print(f"Initial training set size: {len(train_dataset)}")
print(f"Validation set size: {len(val_dataset)}")
print(f"Testing set size: {len(test_dataset)}")
print(f"Unlabeled negative pool: {len(pool_negatives)}")
print(f"Unlabeled positive pool: {len(pool_positives)}")<jupyter_output><empty_output><jupyter_text>Fitting the `TextVectorization` layer Since we are working with text data, we will need to encode the text strings as vectors which would then be passed through an `Embedding` layer. To make this tokenization process faster, we use the `map()` function with its parallelization functionality.<jupyter_code>def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, f"[{re.escape(string.punctuation)}]", ""
)
vectorizer = layers.TextVectorization(
3000, standardize=custom_standardization, output_sequence_length=150
)
# Adapting the dataset
vectorizer.adapt(
train_dataset.map(lambda x, y: x, num_parallel_calls=tf.data.AUTOTUNE).batch(256)
)
def vectorize_text(text, label):
text = vectorizer(text)
return text, label
train_dataset = train_dataset.map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
pool_negatives = pool_negatives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
pool_positives = pool_positives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)
test_dataset = test_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)<jupyter_output><empty_output><jupyter_text>Creating Helper Functions<jupyter_code># Helper function for merging new history objects with older ones
def append_history(losses, val_losses, accuracy, val_accuracy, history):
losses = losses + history.history["loss"]
val_losses = val_losses + history.history["val_loss"]
accuracy = accuracy + history.history["binary_accuracy"]
val_accuracy = val_accuracy + history.history["val_binary_accuracy"]
return losses, val_losses, accuracy, val_accuracy
# Plotter function
def plot_history(losses, val_losses, accuracies, val_accuracies):
plt.plot(losses)
plt.plot(val_losses)
plt.legend(["train_loss", "val_loss"])
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
plt.plot(accuracies)
plt.plot(val_accuracies)
plt.legend(["train_accuracy", "val_accuracy"])
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.show()<jupyter_output><empty_output><jupyter_text>Creating the Model We create a small bidirectional LSTM model. When using Active Learning, you should make sure that the model architecture is capable of overfitting to the initial data. Overfitting gives a strong hint that the model will have enough capacity for future, unseen data.<jupyter_code>def create_model():
model = keras.models.Sequential(
[
layers.Input(shape=(150,)),
layers.Embedding(input_dim=3000, output_dim=128),
layers.Bidirectional(layers.LSTM(32, return_sequences=True)),
layers.GlobalMaxPool1D(),
layers.Dense(20, activation="relu"),
layers.Dropout(0.5),
layers.Dense(1, activation="sigmoid"),
]
)
model.summary()
return model<jupyter_output><empty_output><jupyter_text>Training on the entire dataset To show the effectiveness of Active Learning, we will first train the model on the entire dataset containing 40,000 labeled samples. This model will be used for comparison later.<jupyter_code>def train_full_model(full_train_dataset, val_dataset, test_dataset):
model = create_model()
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# We will save the best model at every epoch and load the best one for evaluation on the test set
history = model.fit(
full_train_dataset.batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[
keras.callbacks.EarlyStopping(patience=4, verbose=1),
keras.callbacks.ModelCheckpoint(
"FullModelCheckpoint.h5", verbose=1, save_best_only=True
),
],
)
# Plot history
plot_history(
history.history["loss"],
history.history["val_loss"],
history.history["binary_accuracy"],
history.history["val_binary_accuracy"],
)
# Loading the best checkpoint
model = keras.models.load_model("FullModelCheckpoint.h5")
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
# Sampling the full train dataset to train on
full_train_dataset = (
train_dataset.concatenate(pool_positives)
.concatenate(pool_negatives)
.cache()
.shuffle(20000)
)
# Training the full model
full_dataset_model = train_full_model(full_train_dataset, val_dataset, test_dataset)<jupyter_output><empty_output><jupyter_text>Training via Active Learning The general process we follow when performing Active Learning is demonstrated below: The pipeline can be summarized in five parts: 1. Sample and annotate a small, balanced training dataset 2. Train the model on this small subset 3. Evaluate the model on a balanced testing set 4. If the model satisfies the business criteria, deploy it in a real-time setting 5. If it doesn't pass the criteria, sample a few more data points according to the ratio of false positives and negatives, add them to the training set and repeat from step 2 till the model passes the tests or till all available data is exhausted. For the code below, we will perform sampling using the following ratio: the number of positive (or negative) samples drawn in each iteration is proportional to the number of positive (or negative) examples misclassified on the test set, divided by the total number of misclassified examples. Active Learning techniques use callbacks extensively for progress tracking. We will be using model checkpointing and early stopping for this example. The `patience` parameter for Early Stopping can help minimize overfitting and the time required. We have set it to `patience=4` for now but since the model is robust, we can increase the patience level if desired. Note: We are not loading the checkpoint after the first training iteration. In my experience working on Active Learning techniques, this helps the model probe the newly formed loss landscape. Even if the model fails to improve in the second iteration, we will still gain insight about the possible future false positive and negative rates. This will help us sample a better set in the next iteration where the model will have a greater chance to improve.<jupyter_code>def train_active_learning_models(
train_dataset,
pool_negatives,
pool_positives,
val_dataset,
test_dataset,
num_iterations=3,
sampling_size=5000,
):
# Creating lists for storing metrics
losses, val_losses, accuracies, val_accuracies = [], [], [], []
model = create_model()
# We will monitor the false positives and false negatives predicted by our model
# These will decide the subsequent sampling ratio for every Active Learning loop
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# Defining checkpoints.
# The checkpoint callback is reused throughout the training since it only saves the best overall model.
checkpoint = keras.callbacks.ModelCheckpoint(
"AL_Model.h5", save_best_only=True, verbose=1
)
# Here, patience is set to 4. This can be set higher if desired.
early_stopping = keras.callbacks.EarlyStopping(patience=4, verbose=1)
print(f"Starting to train with {len(train_dataset)} samples")
# Initial fit with a small subset of the training set
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[checkpoint, early_stopping],
)
# Appending history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
for iteration in range(num_iterations):
# Getting predictions from previously trained model
predictions = model.predict(test_dataset)
# Generating labels from the output probabilities
rounded = tf.where(tf.greater(predictions, 0.5), 1, 0)
        # Evaluating the number of zeros and ones incorrectly classified
_, _, false_negatives, false_positives = model.evaluate(test_dataset, verbose=0)
print("-" * 100)
print(
f"Number of zeros incorrectly classified: {false_negatives}, Number of ones incorrectly classified: {false_positives}"
)
# This technique of Active Learning demonstrates ratio based sampling where
# Number of ones/zeros to sample = Number of ones/zeros incorrectly classified / Total incorrectly classified
if false_negatives != 0 and false_positives != 0:
total = false_negatives + false_positives
sample_ratio_ones, sample_ratio_zeros = (
false_positives / total,
false_negatives / total,
)
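            # Worked example (illustrative): with 300 false positives and 100
            # false negatives, total = 400, so sample_ratio_ones = 0.75 and
            # sample_ratio_zeros = 0.25; the pools below are then sampled as
            # int(ratio * sampling_size).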
# In the case where all samples are correctly predicted, we can sample both classes equally
else:
sample_ratio_ones, sample_ratio_zeros = 0.5, 0.5
print(
f"Sample ratio for positives: {sample_ratio_ones}, Sample ratio for negatives:{sample_ratio_zeros}"
)
# Sample the required number of ones and zeros
sampled_dataset = pool_negatives.take(
int(sample_ratio_zeros * sampling_size)
).concatenate(pool_positives.take(int(sample_ratio_ones * sampling_size)))
# Skip the sampled data points to avoid repetition of sample
pool_negatives = pool_negatives.skip(int(sample_ratio_zeros * sampling_size))
pool_positives = pool_positives.skip(int(sample_ratio_ones * sampling_size))
# Concatenating the train_dataset with the sampled_dataset
train_dataset = train_dataset.concatenate(sampled_dataset).prefetch(
tf.data.AUTOTUNE
)
print(f"Starting training with {len(train_dataset)} samples")
print("-" * 100)
# We recompile the model to reset the optimizer states and retrain the model
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
validation_data=val_dataset,
epochs=20,
callbacks=[
checkpoint,
keras.callbacks.EarlyStopping(patience=4, verbose=1),
],
)
# Appending the history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
# Loading the best model from this training loop
model = keras.models.load_model("AL_Model.h5")
# Plotting the overall history and evaluating the final model
plot_history(losses, val_losses, accuracies, val_accuracies)
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
active_learning_model = train_active_learning_models(
train_dataset, pool_negatives, pool_positives, val_dataset, test_dataset
)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/active_learning_review_classification.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/active_learning_review_classification.ipynb",
"repo_id": "keras-io",
"token_count": 6404
} | 81 |
<jupyter_start><jupyter_text>Pretraining BERT with Hugging Face Transformers **Author:** Sreyan Ghosh **Date created:** 2022/07/01 **Last modified:** 2022/08/27 **Description:** Pretraining BERT using Hugging Face Transformers on NSP and MLM. Introduction BERT (Bidirectional Encoder Representations from Transformers) In the field of computer vision, researchers have repeatedly shown the value of transfer learning — pretraining a neural network model on a known task/dataset, for instance ImageNet classification, and then performing fine-tuning — using the trained neural network as the basis of a new specific-purpose model. In recent years, researchers have shown that a similar technique can be useful in many natural language tasks. BERT makes use of Transformer, an attention mechanism that learns contextual relations between words (or subwords) in a text. In its vanilla form, Transformer includes two separate mechanisms — an encoder that reads the text input and a decoder that produces a prediction for the task. Since BERT’s goal is to generate a language model, only the encoder mechanism is necessary. The detailed workings of Transformer are described in a paper by Google. As opposed to directional models, which read the text input sequentially (left-to-right or right-to-left), the Transformer encoder reads the entire sequence of words at once. Therefore it is considered bidirectional, though it would be more accurate to say that it’s non-directional. This characteristic allows the model to learn the context of a word based on all of its surroundings (left and right of the word). When training language models, a challenge is defining a prediction goal. Many models predict the next word in a sequence (e.g. `"The child came home from _"`), a directional approach which inherently limits context learning. To overcome this challenge, BERT uses two training strategies: Masked Language Modeling (MLM) Before feeding word sequences into BERT, 15% of the words in each sequence are replaced with a `[MASK]` token. The model then attempts to predict the original value of the masked words, based on the context provided by the other, non-masked, words in the sequence. Next Sentence Prediction (NSP) In the BERT training process, the model receives pairs of sentences as input and learns to predict if the second sentence in the pair is the subsequent sentence in the original document. During training, 50% of the inputs are a pair in which the second sentence is the subsequent sentence in the original document, while in the other 50% a random sentence from the corpus is chosen as the second sentence. The assumption is that the random sentence will represent a disconnect from the first sentence. Though Google provides a pretrained BERT checkpoint for English, you may often need to either pretrain the model from scratch for a different language, or do a continued-pretraining to fit the model to a new domain. In this notebook, we pretrain BERT from scratch optimizing both MLM and NSP objectives using 🤗 Transformers on the `WikiText` English dataset loaded from 🤗 Datasets. Setup Installing the requirements<jupyter_code>pip install git+https://github.com/huggingface/transformers.git
pip install datasets
pip install huggingface-hub
pip install nltk<jupyter_output><empty_output><jupyter_text>Importing the necessary libraries<jupyter_code>import nltk
import random
import logging
import tensorflow as tf
from tensorflow import keras
nltk.download("punkt")
# Only log error messages
tf.get_logger().setLevel(logging.ERROR)
# Set random seed
tf.keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Define certain variables<jupyter_code>TOKENIZER_BATCH_SIZE = 256 # Batch-size to train the tokenizer on
TOKENIZER_VOCABULARY = 25000 # Total number of unique subwords the tokenizer can have
BLOCK_SIZE = 128 # Maximum number of tokens in an input sample
NSP_PROB = 0.50 # Probability that the next sentence is the actual next sentence in NSP
SHORT_SEQ_PROB = 0.1 # Probability of generating shorter sequences to minimize the mismatch between pretraining and fine-tuning.
MAX_LENGTH = 512 # Maximum number of tokens in an input sample after padding
MLM_PROB = 0.2 # Probability with which tokens are masked in MLM
TRAIN_BATCH_SIZE = 2 # Batch-size for pretraining the model on
MAX_EPOCHS = 1 # Maximum number of epochs to train the model for
LEARNING_RATE = 1e-4 # Learning rate for training the model
MODEL_CHECKPOINT = "bert-base-cased" # Name of pretrained model from 🤗 Model Hub<jupyter_output><empty_output><jupyter_text>Load the WikiText dataset We now download the `WikiText` language modeling dataset. It is a collection of over100 million tokens extracted from the set of verified "Good" and "Featured" articles onWikipedia.We load the dataset from [🤗 Datasets](https://github.com/huggingface/datasets).For the purpose of demonstration in this notebook, we work with only the `train`split of the dataset. This can be easily done with the `load_dataset` function.<jupyter_code>from datasets import load_dataset
dataset = load_dataset("wikitext", "wikitext-2-raw-v1")<jupyter_output><empty_output><jupyter_text>The dataset just has one column which is the raw text, and this is all we need forpretraining BERT!<jupyter_code>print(dataset)<jupyter_output><empty_output><jupyter_text>Training a new Tokenizer First we train our own tokenizer from scratch on our corpus, so that can wecan use it to train our language model from scratch.But why would you need to train a tokenizer? That's because Transformer models veryoften use subword tokenization algorithms, and they need to be trained to identify theparts of words that are often present in the corpus you are using.The 🤗 Transformers `Tokenizer` (as the name indicates) will tokenize the inputs(including converting the tokens to their corresponding IDs in the pretrained vocabulary)and put it in a format the model expects, as well as generate the other inputs that modelrequires.First we make a list of all the raw documents from the `WikiText` corpus:<jupyter_code>all_texts = [
doc for doc in dataset["train"]["text"] if len(doc) > 0 and not doc.startswith(" =")
]<jupyter_output><empty_output><jupyter_text>Next we make a `batch_iterator` function that will aid us to train our tokenizer.<jupyter_code>def batch_iterator():
for i in range(0, len(all_texts), TOKENIZER_BATCH_SIZE):
        yield all_texts[i : i + TOKENIZER_BATCH_SIZE]<jupyter_output><empty_output><jupyter_text>In this notebook, we train a tokenizer with the exact same algorithms and parameters as an existing one. For instance, we train a new version of the `BERT-CASED` tokenizer on `Wikitext-2` using the same tokenization algorithm. First we need to load the tokenizer we want to use as a model:<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_CHECKPOINT)<jupyter_output><empty_output><jupyter_text>Now we train our tokenizer using the entire `train` split of the `Wikitext-2` dataset.<jupyter_code>tokenizer = tokenizer.train_new_from_iterator(
batch_iterator(), vocab_size=TOKENIZER_VOCABULARY
)<jupyter_output><empty_output><jupyter_text>So now we are done training our new tokenizer! Next we move on to the data pre-processing steps. Data Pre-processing For the sake of demonstrating the workflow, in this notebook we only take small subsets of the entire WikiText `train` and `test` splits.<jupyter_code>dataset["train"] = dataset["train"].select([i for i in range(1000)])
dataset["validation"] = dataset["validation"].select([i for i in range(1000)])<jupyter_output><empty_output><jupyter_text>Before we can feed those texts to our model, we need to pre-process them and get themready for the task. As mentioned earlier, the BERT pretraining task includes two tasksin total, the `NSP` task and the `MLM` task. 🤗 Transformers have an easy to implement`collator` called the `DataCollatorForLanguageModeling`. However, we need to get thedata ready for `NSP` manually.Next we write a simple function called the `prepare_train_features` that helps us inthe pre-processing and is compatible with 🤗 Datasets. To summarize, our pre-processingfunction should:- Get the dataset ready for the NSP task by creating pairs of sentences (A,B), where Beither actually follows A, or B is randomly sampled from somewhere else in the corpus.It should also generate a corresponding label for each pair, which is 1 if B actuallyfollows A and 0 if not.- Tokenize the text dataset into it's corresponding token ids that will be used forembedding look-up in BERT- Create additional inputs for the model like `token_type_ids`, `attention_mask`, etc.<jupyter_code># We define the maximum number of tokens after tokenization that each training sample
# will have
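# (a BERT-style sentence pair adds three special tokens, [CLS], [SEP] and [SEP],
# so with BLOCK_SIZE = 128 this leaves 125 tokens of actual text)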
max_num_tokens = BLOCK_SIZE - tokenizer.num_special_tokens_to_add(pair=True)
def prepare_train_features(examples):
"""Function to prepare features for NSP task
Arguments:
examples: A dictionary with 1 key ("text")
text: List of raw documents (str)
Returns:
examples: A dictionary with 4 keys
            input_ids: List of tokenized, concatenated, and batched
sentences from the individual raw documents (int)
token_type_ids: List of integers (0 or 1) corresponding
            to: 0 for sentence no. 1 and padding, 1 for sentence
no. 2
attention_mask: List of integers (0 or 1) corresponding
to: 1 for non-padded tokens, 0 for padded
next_sentence_label: List of integers (0 or 1) corresponding
to: 1 if the second sentence actually follows the first,
            0 if the sentence is sampled from somewhere else in the corpus
"""
# Remove un-wanted samples from the training set
examples["document"] = [
d.strip() for d in examples["text"] if len(d) > 0 and not d.startswith(" =")
]
# Split the documents from the dataset into it's individual sentences
examples["sentences"] = [
nltk.tokenize.sent_tokenize(document) for document in examples["document"]
]
# Convert the tokens into ids using the trained tokenizer
examples["tokenized_sentences"] = [
[tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sent)) for sent in doc]
for doc in examples["sentences"]
]
# Define the outputs
examples["input_ids"] = []
examples["token_type_ids"] = []
examples["attention_mask"] = []
examples["next_sentence_label"] = []
for doc_index, document in enumerate(examples["tokenized_sentences"]):
current_chunk = [] # a buffer stored current working segments
current_length = 0
i = 0
# We *usually* want to fill up the entire sequence since we are padding
# to `block_size` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pretraining and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `block_size` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < SHORT_SEQ_PROB:
target_seq_length = random.randint(2, max_num_tokens)
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
if len(current_chunk) == 1 or random.random() < NSP_PROB:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = random.randint(
0, len(examples["tokenized_sentences"]) - 1
)
if random_document_index != doc_index:
break
random_document = examples["tokenized_sentences"][
random_document_index
]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
input_ids = tokenizer.build_inputs_with_special_tokens(
tokens_a, tokens_b
)
# add token type ids, 0 for sentence a, 1 for sentence b
token_type_ids = tokenizer.create_token_type_ids_from_sequences(
tokens_a, tokens_b
)
padded = tokenizer.pad(
{"input_ids": input_ids, "token_type_ids": token_type_ids},
padding="max_length",
max_length=MAX_LENGTH,
)
examples["input_ids"].append(padded["input_ids"])
examples["token_type_ids"].append(padded["token_type_ids"])
examples["attention_mask"].append(padded["attention_mask"])
examples["next_sentence_label"].append(1 if is_random_next else 0)
current_chunk = []
current_length = 0
i += 1
# We delete all the un-necessary columns from our dataset
del examples["document"]
del examples["sentences"]
del examples["text"]
del examples["tokenized_sentences"]
return examples
tokenized_dataset = dataset.map(
prepare_train_features, batched=True, remove_columns=["text"], num_proc=1,
)<jupyter_output><empty_output><jupyter_text>For MLM we are going to use the same preprocessing as before for our dataset withone additional step: we randomly mask some tokens (by replacing them by [MASK])and the labels will be adjusted to only include the masked tokens(we don't have to predict the non-masked tokens). If you use a tokenizer you trainedyourself, make sure the [MASK] token is among the special tokens you passed during training!To get the data ready for MLM, we simply use the `collator` called the`DataCollatorForLanguageModeling` provided by the 🤗 Transformers library on our datasetthat is already ready for the NSP task. The `collator` expects certain parameters.We use the default ones from the original BERT paper in this notebook. The`return_tensors='tf'` ensures that we get `tf.Tensor` objects back.<jupyter_code>from transformers import DataCollatorForLanguageModeling
collater = DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm=True, mlm_probability=MLM_PROB, return_tensors="tf"
)<jupyter_output><empty_output><jupyter_text>Next we define our training set with which we train our model. Again, 🤗 Datasetsprovides us with the `to_tf_dataset` method which will help us integrate our dataset withthe `collator` defined above. The method expects certain parameters:- **columns**: the columns which will serve as our independant variables- **label_cols**: the columns which will serve as our labels or dependant variables- **batch_size**: our batch size for training- **shuffle**: whether we want to shuffle our training dataset- **collate_fn**: our collator function<jupyter_code>train = tokenized_dataset["train"].to_tf_dataset(
columns=["input_ids", "token_type_ids", "attention_mask"],
label_cols=["labels", "next_sentence_label"],
batch_size=TRAIN_BATCH_SIZE,
shuffle=True,
collate_fn=collater,
)
validation = tokenized_dataset["validation"].to_tf_dataset(
columns=["input_ids", "token_type_ids", "attention_mask"],
label_cols=["labels", "next_sentence_label"],
batch_size=TRAIN_BATCH_SIZE,
shuffle=True,
collate_fn=collater,
)<jupyter_output><empty_output><jupyter_text>Defining the model To define our model, first we need to define a config which will help us define certainparameters of our model architecture. This includes parameters like number of transformerlayers, number of attention heads, hidden dimension, etc. For this notebook, we tryto define the exact config defined in the original BERT paper.We can easily achieve this using the `BertConfig` class from the 🤗 Transformers library.The `from_pretrained()` method expects the name of a model. Here we define the simplestmodel with which we also trained our model, i.e., `bert-base-cased`.<jupyter_code>from transformers import BertConfig
config = BertConfig.from_pretrained(MODEL_CHECKPOINT)<jupyter_output><empty_output><jupyter_text>For defining our model we use the `TFBertForPreTraining` class from the 🤗 Transformerslibrary. This class internally handles everything starting from defining our model, tounpacking our inputs and calculating the loss. So we need not do anything ourselves exceptdefining the model with the correct `config` we want!<jupyter_code>from transformers import TFBertForPreTraining
model = TFBertForPreTraining(config)<jupyter_output><empty_output><jupyter_text>Now we define our optimizer and compile the model. The loss calculation is handledinternally and so we need not worry about that!<jupyter_code>optimizer = keras.optimizers.Adam(learning_rate=LEARNING_RATE)
model.compile(optimizer=optimizer)<jupyter_output><empty_output><jupyter_text>Finally all steps are done and now we can start training our model!<jupyter_code>model.fit(train, validation_data=validation, epochs=MAX_EPOCHS)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/pretraining_BERT.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/pretraining_BERT.ipynb",
"repo_id": "keras-io",
"token_count": 6588
} | 82 |
# Bidirectional LSTM on IMDB
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2020/05/03<br>
**Last modified:** 2020/05/03<br>
**Description:** Train a 2-layer bidirectional LSTM on the IMDB movie review sentiment classification dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/bidirectional_lstm_imdb.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/bidirectional_lstm_imdb.py)
---
## Setup
```python
import numpy as np
import keras
from keras import layers
max_features = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review
```
---
## Build the model
```python
# Input for variable-length sequences of integers
inputs = keras.Input(shape=(None,), dtype="int32")
# Embed each integer in a 128-dimensional vector
x = layers.Embedding(max_features, 128)(inputs)
# Add 2 bidirectional LSTMs
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x = layers.Bidirectional(layers.LSTM(64))(x)
# Add a classifier
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ embedding (<span style="color: #0087ff; text-decoration-color: #0087ff">Embedding</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,560,000</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ bidirectional (<span style="color: #0087ff; text-decoration-color: #0087ff">Bidirectional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,816</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ bidirectional_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Bidirectional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,816</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">129</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,757,761</span> (10.52 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,757,761</span> (10.52 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Load the IMDB movie review sentiment data
```python
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(
num_words=max_features
)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
# Use pad_sequences to standardize sequence length:
# this will truncate sequences longer than 200 words and zero-pad sequences shorter than 200 words.
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17464789/17464789 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
25000 Training sequences
25000 Validation sequences
```
</div>
---
## Train and evaluate the model
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/bidirectional-lstm-imdb)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/bidirectional_lstm_imdb).
```python
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val))
```
<div class="k-default-codeblock">
```
Epoch 1/2
782/782 ━━━━━━━━━━━━━━━━━━━━ 61s 75ms/step - accuracy: 0.7540 - loss: 0.4697 - val_accuracy: 0.8269 - val_loss: 0.4202
Epoch 2/2
782/782 ━━━━━━━━━━━━━━━━━━━━ 54s 69ms/step - accuracy: 0.9151 - loss: 0.2263 - val_accuracy: 0.8428 - val_loss: 0.3650
<keras.src.callbacks.history.History at 0x7f3efd663850>
```
</div> | keras-io/examples/nlp/md/bidirectional_lstm_imdb.md/0 | {
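
To try the trained model on free-form text, the words first have to be mapped to the same integer ids used by `keras.datasets.imdb`. The sketch below is illustrative only: it assumes the `model`, `max_features` and `maxlen` defined above, and relies on the dataset's default index offset of 3 (indices 0-2 are reserved for padding, sequence-start and out-of-vocabulary markers).

```python
# Illustrative inference sketch (not part of the original example).
word_index = keras.datasets.imdb.get_word_index()


def encode_review(text):
    # Index 1 marks the start of a sequence; real word indices are offset by 3,
    # and unknown or out-of-vocabulary words map to index 2.
    tokens = [1]
    for word in text.lower().split():
        index = word_index.get(word)
        if index is not None and index + 3 < max_features:
            tokens.append(index + 3)
        else:
            tokens.append(2)
    return keras.utils.pad_sequences([tokens], maxlen=maxlen)


score = model.predict(encode_review("this movie was a wonderful surprise"))[0][0]
print(f"Positive sentiment probability: {score:.3f}")
```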
"file_path": "keras-io/examples/nlp/md/bidirectional_lstm_imdb.md",
"repo_id": "keras-io",
"token_count": 2752
} | 83 |
# Semantic Similarity with BERT
**Author:** [Mohamad Merchant](https://twitter.com/mohmadmerchant1)<br>
**Date created:** 2020/08/15<br>
**Last modified:** 2020/08/29<br>
**Description:** Natural Language Inference by fine-tuning BERT model on SNLI Corpus.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/semantic_similarity_with_bert.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/semantic_similarity_with_bert.py)
---
## Introduction
Semantic Similarity is the task of determining how similar
two sentences are, in terms of what they mean.
This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus
to predict sentence semantic similarity with Transformers.
We will fine-tune a BERT model that takes two sentences as inputs
and that outputs a similarity score for these two sentences.
### References
* [BERT](https://arxiv.org/pdf/1810.04805.pdf)
* [SNLI](https://nlp.stanford.edu/projects/snli/)
---
## Setup
Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).
```python
import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
```
---
## Configuration
```python
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]
```
---
## Load the Data
```python
!curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
!tar -xvzf data.tar.gz
```
```python
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {valid_df.shape[0]}")
```
<div class="k-default-codeblock">
```
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 11.1M 100 11.1M 0 0 5231k 0 0:00:02 0:00:02 --:--:-- 5231k
SNLI_Corpus/
SNLI_Corpus/snli_1.0_dev.csv
SNLI_Corpus/snli_1.0_train.csv
SNLI_Corpus/snli_1.0_test.csv
Total train samples : 100000
Total validation samples: 10000
Total test samples: 10000
```
</div>
Dataset Overview:
- sentence1: The premise caption that was supplied to the author of the pair.
- sentence2: The hypothesis caption that was written by the author of the pair.
- similarity: This is the label chosen by the majority of annotators.
Where no majority exists, the label "-" is used (we will skip such samples here).
Here are the "similarity" label values in our dataset:
- Contradiction: The sentences share no similarity.
- Entailment: The sentences have similar meaning.
- Neutral: The sentences are unrelated; the second neither follows from nor contradicts the first.
Let's look at one sample from the dataset:
```python
print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")
```
<div class="k-default-codeblock">
```
Sentence1: A person on a horse jumps over a broken down airplane.
Sentence2: A person is at a diner, ordering an omelette.
Similarity: contradiction
```
</div>
---
## Preprocessing
```python
# We have some NaN entries in our train data, we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)
```
<div class="k-default-codeblock">
```
Number of missing values
similarity 0
sentence1 0
sentence2 3
dtype: int64
```
</div>
Distribution of our training targets.
```python
print("Train Target Distribution")
print(train_df.similarity.value_counts())
```
<div class="k-default-codeblock">
```
Train Target Distribution
entailment 33384
contradiction 33310
neutral 33193
- 110
Name: similarity, dtype: int64
```
</div>
Distribution of our validation targets.
```python
print("Validation Target Distribution")
print(valid_df.similarity.value_counts())
```
<div class="k-default-codeblock">
```
Validation Target Distribution
entailment 3329
contradiction 3278
neutral 3235
- 158
Name: similarity, dtype: int64
```
</div>
The value "-" appears as part of our training and validation targets.
We will skip these samples.
```python
train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
```
One-hot encode training, validation, and test labels.
```python
train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)
```
---
## Create a custom data generator
```python
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
Returns:
        Tuples `([input_ids, attention_mask, token_type_ids], labels)`
        (or just `[input_ids, attention_mask, token_type_ids]`
if `include_targets=False`)
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
        # We will use the bert-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
# Retrieves the batch of index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
# With BERT tokenizer's batch_encode_plus batch of both the sentences are
# encoded together and separated by [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)
```
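
A quick way to sanity-check the generator is to pull a single batch and inspect its shapes. This snippet is illustrative only; the shapes assume `batch_size=32` and `max_length=128` as configured above.

```python
sample_features, sample_labels = BertSemanticDataGenerator(
    train_df[["sentence1", "sentence2"]].values.astype("str"), y_train
)[0]
print([f.shape for f in sample_features])  # [(32, 128), (32, 128), (32, 128)]
print(sample_labels.shape)  # (32, 3)
```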
---
## Build the model
```python
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
# Attention masks indicates to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
bert_output = bert_model.bert(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
sequence_output = bert_output.last_hidden_state
pooled_output = bert_output.pooler_output
# Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()
```
<div class="k-default-codeblock">
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=433.0, style=ProgressStyle(description_…
```
</div>
<div class="k-default-codeblock">
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=536063208.0, style=ProgressStyle(descri…
```
</div>
<div class="k-default-codeblock">
```
Strategy: <tensorflow.python.distribute.mirrored_strategy.MirroredStrategy object at 0x7faf9dc63a90>
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_ids (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
attention_masks (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
token_type_ids (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
tf_bert_model (TFBertModel) ((None, 128, 768), ( 109482240 input_ids[0][0]
attention_masks[0][0]
token_type_ids[0][0]
__________________________________________________________________________________________________
bidirectional (Bidirectional) (None, 128, 128) 426496 tf_bert_model[0][0]
__________________________________________________________________________________________________
global_average_pooling1d (Globa (None, 128) 0 bidirectional[0][0]
__________________________________________________________________________________________________
global_max_pooling1d (GlobalMax (None, 128) 0 bidirectional[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 256) 0 global_average_pooling1d[0][0]
global_max_pooling1d[0][0]
__________________________________________________________________________________________________
dropout_37 (Dropout) (None, 256) 0 concatenate[0][0]
__________________________________________________________________________________________________
dense (Dense) (None, 3) 771 dropout_37[0][0]
==================================================================================================
Total params: 109,909,507
Trainable params: 427,267
Non-trainable params: 109,482,240
__________________________________________________________________________________________________
```
</div>
Create train and validation data generators
```python
train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
```
<div class="k-default-codeblock">
```
HBox(children=(FloatProgress(value=0.0, description='Downloading', max=231508.0, style=ProgressStyle(descripti…
```
</div>
---
## Train the Model
Training is done only for the top layers to perform "feature extraction",
which will allow the model to use the representations of the pretrained model.
```python
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
```
<div class="k-default-codeblock">
```
Epoch 1/2
3121/3121 [==============================] - 666s 213ms/step - loss: 0.6925 - acc: 0.7049 - val_loss: 0.5294 - val_acc: 0.7899
Epoch 2/2
3121/3121 [==============================] - 661s 212ms/step - loss: 0.5917 - acc: 0.7587 - val_loss: 0.4955 - val_acc: 0.8052
```
</div>
---
## Fine-tuning
This step must only be performed after the feature extraction model has
been trained to convergence on the new data.
This is an optional last step where `bert_model` is unfrozen and retrained
with a very low learning rate. This can deliver meaningful improvement by
incrementally adapting the pretrained features to the new data.
```python
# Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
```
<div class="k-default-codeblock">
```
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_ids (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
attention_masks (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
token_type_ids (InputLayer) [(None, 128)] 0
__________________________________________________________________________________________________
tf_bert_model (TFBertModel) ((None, 128, 768), ( 109482240 input_ids[0][0]
attention_masks[0][0]
token_type_ids[0][0]
__________________________________________________________________________________________________
bidirectional (Bidirectional) (None, 128, 128) 426496 tf_bert_model[0][0]
__________________________________________________________________________________________________
global_average_pooling1d (Globa (None, 128) 0 bidirectional[0][0]
__________________________________________________________________________________________________
global_max_pooling1d (GlobalMax (None, 128) 0 bidirectional[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 256) 0 global_average_pooling1d[0][0]
global_max_pooling1d[0][0]
__________________________________________________________________________________________________
dropout_37 (Dropout) (None, 256) 0 concatenate[0][0]
__________________________________________________________________________________________________
dense (Dense) (None, 3) 771 dropout_37[0][0]
==================================================================================================
Total params: 109,909,507
Trainable params: 109,909,507
Non-trainable params: 0
__________________________________________________________________________________________________
```
</div>
## Train the entire model end-to-end
```python
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
```
<div class="k-default-codeblock">
```
Epoch 1/2
3121/3121 [==============================] - 1574s 504ms/step - loss: 0.4698 - accuracy: 0.8181 - val_loss: 0.3787 - val_accuracy: 0.8598
Epoch 2/2
3121/3121 [==============================] - 1569s 503ms/step - loss: 0.3516 - accuracy: 0.8702 - val_loss: 0.3416 - val_accuracy: 0.8757
```
</div>
---
## Evaluate model on the test set
```python
test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
```
<div class="k-default-codeblock">
```
312/312 [==============================] - 55s 177ms/step - loss: 0.3697 - accuracy: 0.8629
[0.3696725070476532, 0.8628805875778198]
```
</div>
---
## Inference on custom sentences
```python
def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data[0])[0]
idx = np.argmax(proba)
proba = f"{proba[idx]: .2f}%"
pred = labels[idx]
return pred, proba
```
Check results on some example sentence pairs.
```python
sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)
```
<div class="k-default-codeblock">
```
('contradiction', ' 0.91%')
```
</div>
Check results on some example sentence pairs.
```python
sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)
```
<div class="k-default-codeblock">
```
('neutral', ' 0.88%')
```
</div>
Check results on some example sentence pairs
```python
sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)
```
<div class="k-default-codeblock">
```
('entailment', ' 0.94%')
```
</div>
Example available on HuggingFace
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/bert-semantic-similarity) | [](https://huggingface.co/spaces/keras-io/bert-semantic-similarity) |
| keras-io/examples/nlp/md/semantic_similarity_with_bert.md/0 | {
"file_path": "keras-io/examples/nlp/md/semantic_similarity_with_bert.md",
"repo_id": "keras-io",
"token_count": 8210
} | 84 |
"""
Title: Parameter-efficient fine-tuning of GPT-2 with LoRA
Author: [Abheesht Sharma](https://github.com/abheesht17/), [Matthew Watson](https://github.com/mattdangerw/)
Date created: 2023/05/27
Last modified: 2023/05/27
Description: Use KerasNLP to fine-tune a GPT-2 LLM with LoRA.
Accelerator: GPU
"""
"""
## Introduction
Large Language Models (LLMs) have been shown to be effective at a variety of NLP
tasks. An LLM is first pre-trained on a large corpus of text in a
self-supervised fashion. Pre-training helps LLMs learn general-purpose knowledge,
such as statistical relationships between words. An LLM can then be fine-tuned
on a downstream task of interest (such as sentiment analysis).
However, LLMs are extremely large in size, and we don't need to train all the
parameters in the model while fine-tuning, especially because datasets on which
the model is fine-tuned are relatively small. Another way of saying this is
that LLMs are over-parametrized for fine-tuning. This is where
[Low-Rank Adaptation (LoRA)](https://arxiv.org/abs/2106.09685) comes in; it
significantly reduces the number of trainable parameters. This results in a
decrease in training time and GPU memory usage, while maintaining the quality
of the outputs.
In this example, we will explain LoRA in technical terms, show how the technical
explanation translates to code, hack KerasNLP's
[GPT-2 model](https://keras.io/api/keras_nlp/models/gpt2/) and fine-tune
it on the next token prediction task using LoRA. We will compare LoRA GPT-2
with a fully fine-tuned GPT-2 in terms of the quality of the generated text,
training time and GPU memory usage.
Note: This example runs on the TensorFlow backend purely for the
`tf.config.experimental.get_memory_info` API to easily plot memory usage.
Outside of the memory usage callback, this example will run on `jax` and `torch`
backends.
"""
"""
## Setup
Before we start implementing the pipeline, let's install and import all the
libraries we need. We'll be using the KerasNLP library.
Secondly, let's enable mixed precision training. This will help us reduce the
training time.
"""
"""shell
pip install -q --upgrade keras-nlp
pip install -q --upgrade keras # Upgrade to Keras 3.
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras_nlp
import keras
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import time
keras.mixed_precision.set_global_policy("mixed_float16")
"""
Let's also define our hyperparameters.
"""
# General hyperparameters
BATCH_SIZE = 32
NUM_BATCHES = 500
EPOCHS = 1 # Can be set to a higher value for better results
MAX_SEQUENCE_LENGTH = 128
MAX_GENERATION_LENGTH = 200
GPT2_PRESET = "gpt2_base_en"
# LoRA-specific hyperparameters
RANK = 4
ALPHA = 32.0
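# In `LoraLayer` below, the LoRA update is scaled by alpha / rank, so these
# defaults give a scale of 32 / 4 = 8.0.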
"""
## Dataset
Let's load a Reddit dataset. We will fine-tune both the GPT-2 model and the
LoRA GPT-2 model on a subset of this dataset. The aim is to produce text similar
in style to Reddit posts.
"""
reddit_ds = tfds.load("reddit_tifu", split="train", as_supervised=True)
"""
The dataset has two fields: `document` and `title`.
"""
for document, title in reddit_ds:
print(document.numpy())
print(title.numpy())
break
"""
We'll now batch the dataset and retain only the `document` field because we are
fine-tuning the model on the next word prediction task. Take a subset
of the dataset for the purpose of this example.
"""
train_ds = (
reddit_ds.map(lambda document, _: document)
.batch(BATCH_SIZE)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
train_ds = train_ds.take(NUM_BATCHES)
"""
## Helper functions
Before we begin fine-tuning the models, let's define a few helper functions and
classes.
"""
"""
### Callback for tracking GPU memory usage
We'll define a custom callback function which tracks GPU memory usage. The
callback function uses TensorFlow's `tf.config.experimental.get_memory_info`
API.
Here, we assume that we are using a single GPU, `GPU:0`.
"""
class GPUMemoryCallback(keras.callbacks.Callback):
def __init__(
self,
target_batches,
print_stats=False,
**kwargs,
):
super().__init__(**kwargs)
self.target_batches = target_batches
self.print_stats = print_stats
self.memory_usage = []
self.labels = []
def _compute_memory_usage(self):
memory_stats = tf.config.experimental.get_memory_info("GPU:0")
# Convert bytes to GB and store in list.
peak_usage = round(memory_stats["peak"] / (2**30), 3)
self.memory_usage.append(peak_usage)
def on_epoch_begin(self, epoch, logs=None):
self._compute_memory_usage()
self.labels.append(f"epoch {epoch} start")
def on_train_batch_begin(self, batch, logs=None):
if batch in self.target_batches:
self._compute_memory_usage()
self.labels.append(f"batch {batch}")
def on_epoch_end(self, epoch, logs=None):
self._compute_memory_usage()
self.labels.append(f"epoch {epoch} end")
"""
### Function for text generation
Here is a helper function to generate text.
"""
def generate_text(model, input_text, max_length=200):
start = time.time()
output = model.generate(input_text, max_length=max_length)
print("\nOutput:")
print(output)
end = time.time()
print(f"Total Time Elapsed: {end - start:.2f}s")
"""
### Define optimizer and loss
We will use AdamW optimizer and cross-entropy loss for training both models.
"""
def get_optimizer_and_loss():
optimizer = keras.optimizers.AdamW(
learning_rate=5e-5,
weight_decay=0.01,
epsilon=1e-6,
global_clipnorm=1.0, # Gradient clipping.
)
# Exclude layernorm and bias terms from weight decay.
optimizer.exclude_from_weight_decay(var_names=["bias"])
optimizer.exclude_from_weight_decay(var_names=["gamma"])
optimizer.exclude_from_weight_decay(var_names=["beta"])
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
return optimizer, loss
"""
## Fine-tune GPT-2
Let's load the model and preprocessor first. We use a sequence length of 128
instead of 1024 (which is the default sequence length). This will limit our
ability to predict long sequences, but will allow us to run this example quickly
on Colab.
"""
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=MAX_SEQUENCE_LENGTH,
)
gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en", preprocessor=preprocessor
)
gpt2_lm.summary()
"""
Initialize the GPU memory tracker callback object, and compile the model. We
use the Adam optimizer with a linearly decaying learning rate.
"""
gpu_memory_callback = GPUMemoryCallback(
target_batches=[5, 10, 25, 50, 100, 150, 200, 300, 400, 500],
print_stats=True,
)
optimizer, loss = get_optimizer_and_loss()
gpt2_lm.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)
"""
We are all set to train the model!
"""
gpt2_lm.fit(train_ds, epochs=EPOCHS, callbacks=[gpu_memory_callback])
gpt2_lm_memory_usage = gpu_memory_callback.memory_usage
"""
As a final step, let's generate some text. We will harness the power of XLA. The
first call to `generate()` will be slow because of XLA compilation, but
subsequent calls will be super-fast. :)
"""
generate_text(gpt2_lm, "I like basketball", max_length=MAX_GENERATION_LENGTH)
generate_text(gpt2_lm, "That Italian restaurant is", max_length=MAX_GENERATION_LENGTH)
"""
## LoRA GPT-2
In this section, we discuss the technical details of LoRA, build a LoRA GPT-2
model, fine-tune it and generate text.
### What exactly is LoRA?
LoRA is a parameter-efficient fine-tuning technique for LLMs. It freezes the
weights of the LLM, and injects trainable rank-decomposition matrices. Let's
understand this more clearly.
Assume we have an `n x n` pre-trained dense layer (or weight matrix), `W0`. We
initialize two dense layers, `A` and `B`, of shapes `n x rank`, and `rank x n`,
respectively. `rank` is much smaller than `n`. In the paper, values between 1
and 4 are shown to work well.
#### LoRA equation
The original equation is `output = W0x + b0`, where `x` is the input, `W0` and
`b0` are the weight matrix and bias terms of the original dense layer (frozen).
The LoRA equation is: `output = W0x + b0 + BAx`, where `A` and `B` are the
rank-decomposition matrices.
LoRA is based on the idea that updates to the weights of the pre-trained
language model have a low "intrinsic rank" since pre-trained language models are
over-parametrized. Predictive performance of full fine-tuning can be replicated
even by constraining `W0`'s updates to low-rank decomposition matrices.
<p align="center">
<img src="https://i.imgur.com/f4TFqMi.png" alt="lora_diagram" height="250"/>
</p>
<br>
#### Number of trainable parameters
Let's do some quick math. Suppose `n` is 768, and `rank` is 4. `W0` has
`768 x 768 = 589,824` parameters, whereas the LoRA layers, `A` and `B` together
have `768 x 4 + 4 x 768 = 6,144` parameters. So, for the dense layer, we go from
`589,824` trainable parameters to `6,144` trainable parameters!
#### Why does LoRA reduce memory footprint?
Even though the total number of parameters increase (since we are adding LoRA
layers), the memory footprint reduces, because the number of trainable
parameters reduces. Let's dive deeper into this.
The memory usage of a model can be split into four parts:
- Model memory: This is the memory required to store the model weights. This
will be slightly higher for LoRA than GPT-2.
- Forward pass memory: This mostly depends on batch size, sequence length, etc.
We keep this constant for both models for a fair comparison.
- Backward pass memory: This is the memory required to store the gradients.
Note that the gradients are computed only for the trainable parameters.
- Optimizer memory: This is the memory required to store the optimizer state.
For example, the Adam optimizer stores the "1st moment vectors" and
"2nd moment vectors" for the trainable parameters.
Since, with LoRA, there is a huge reduction in the number of trainable
parameters, the optimizer memory and the memory required to store the gradients
for LoRA is much less than GPT-2. This is where most of the memory savings
happen.
#### Why is LoRA so popular?
- Reduces GPU memory usage;
- Faster training; and
- No additional inference latency.
### Create LoRA layer
According to the technical description above, let's create a LoRA layer. In
a transformer model, the LoRA layer is created and injected for the query and
value projection matrices. In `keras.layers.MultiHeadAttention`, the query/value
projection layers are `keras.layers.EinsumDense` layers.
"""
import math
class LoraLayer(keras.layers.Layer):
def __init__(
self,
original_layer,
rank=8,
alpha=32,
trainable=False,
**kwargs,
):
# We want to keep the name of this layer the same as the original
# dense layer.
original_layer_config = original_layer.get_config()
name = original_layer_config["name"]
kwargs.pop("name", None)
super().__init__(name=name, trainable=trainable, **kwargs)
self.rank = rank
self.alpha = alpha
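        # As in the original LoRA paper, the low-rank update is scaled by alpha / rank.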
self._scale = alpha / rank
self._num_heads = original_layer_config["output_shape"][-2]
self._hidden_dim = self._num_heads * original_layer_config["output_shape"][-1]
# Layers.
# Original dense layer.
self.original_layer = original_layer
# No matter whether we are training the model or are in inference mode,
# this layer should be frozen.
self.original_layer.trainable = False
# LoRA dense layers.
self.A = keras.layers.Dense(
units=rank,
use_bias=False,
# Note: the original paper mentions that normal distribution was
# used for initialization. However, the official LoRA implementation
# uses "Kaiming/He Initialization".
kernel_initializer=keras.initializers.VarianceScaling(
scale=math.sqrt(5), mode="fan_in", distribution="uniform"
),
trainable=trainable,
name=f"lora_A",
)
# B has the same `equation` and `output_shape` as the original layer.
# `equation = abc,cde->abde`, where `a`: batch size, `b`: sequence
# length, `c`: `hidden_dim`, `d`: `num_heads`,
# `e`: `hidden_dim//num_heads`. The only difference is that in layer `B`,
# `c` represents `rank`.
self.B = keras.layers.EinsumDense(
equation=original_layer_config["equation"],
output_shape=original_layer_config["output_shape"],
kernel_initializer="zeros",
trainable=trainable,
name=f"lora_B",
)
def call(self, inputs):
original_output = self.original_layer(inputs)
if self.trainable:
# If we are fine-tuning the model, we will add LoRA layers' output
# to the original layer's output.
lora_output = self.B(self.A(inputs)) * self._scale
return original_output + lora_output
# If we are in inference mode, we "merge" the LoRA layers' weights into
# the original layer's weights - more on this in the text generation
# section!
return original_output
"""
### Inject LoRA layer into the model
We will now hack the original GPT-2 model and inject LoRA layers into it. Let's
do a couple of things before doing that:
- Delete previous model;
- Reset "peak" GPU memory usage using `tf.config.experimental.reset_memory_stats`;
- Load a new GPT-2 model.
"""
del gpt2_lm
del optimizer
del loss
# This resets "peak" memory usage to "current" memory usage.
tf.config.experimental.reset_memory_stats("GPU:0")
# Load the original model.
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=128,
)
lora_model = keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en",
preprocessor=preprocessor,
)
"""
We will now override the original query/value projection matrices with our
new LoRA layers.
"""
for layer_idx in range(lora_model.backbone.num_layers):
# Change query dense layer.
decoder_layer = lora_model.backbone.get_layer(f"transformer_layer_{layer_idx}")
self_attention_layer = decoder_layer._self_attention_layer
# Allow mutation to Keras layer state.
self_attention_layer._tracker.locked = False
# Change query dense layer.
self_attention_layer._query_dense = LoraLayer(
self_attention_layer._query_dense,
rank=RANK,
alpha=ALPHA,
trainable=True,
)
# Change value dense layer.
self_attention_layer._value_dense = LoraLayer(
self_attention_layer._value_dense,
rank=RANK,
alpha=ALPHA,
trainable=True,
)
"""
Let's now do a forward pass to make sure we still have a valid chain of
computation.
"""
lora_model(preprocessor(["LoRA is very useful for quick LLM finetuning"])[0])
pass
"""
Freeze the entire LLM, only the LoRA layers should be trainable.
"""
for layer in lora_model._flatten_layers():
lst_of_sublayers = list(layer._flatten_layers())
if len(lst_of_sublayers) == 1: # "leaves of the model"
if layer.name in ["lora_A", "lora_B"]:
layer.trainable = True
else:
layer.trainable = False
"""
Print the model's summary and see if the number of non-trainable parameters and
total parameters are correct.
In a previous section, we had calculated the number of parameters associated with
the LoRA layers to be 6,144. The total trainable parameters in the model should
be `num_layers * (query, value) * 6,144 = 12 * 2 * 6,144 = 147,456`. The
number of non-trainable parameters should be the same as the total number of
parameters in the original GPT-2 model, which is `124,439,808`.
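If you'd rather check these numbers programmatically, a quick sketch along these lines
should do it (it simply sums the sizes of the model's weight tensors):

```python
def count_params(weights):
    return sum(int(tf.size(w)) for w in weights)

print("Trainable parameters:", count_params(lora_model.trainable_weights))
print("Non-trainable parameters:", count_params(lora_model.non_trainable_weights))
```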
"""
lora_model.summary()
"""
### Fine-tune LoRA GPT-2
Now that we have hacked and verified the LoRA GPT-2 model, let's train it!
"""
gpu_memory_callback = GPUMemoryCallback(
target_batches=[5, 10, 25, 50, 100, 150, 200, 300, 400, 500],
print_stats=True,
)
optimizer, loss = get_optimizer_and_loss()
lora_model.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)
lora_model.fit(
train_ds,
epochs=EPOCHS,
callbacks=[gpu_memory_callback],
)
lora_model_memory_usage = gpu_memory_callback.memory_usage
"""
And we are done fine-tuning the model! Before we generate text, let's compare
the training time and memory usage of the two models. The training time of GPT-2
on a 16 GB Tesla T4 (Colab) is 7 minutes, and for LoRA, it is 5 minutes, a 30%
decrease. The memory usage of LoRA GPT-2 is roughly 35% lower than that of GPT-2.
"""
plt.bar(
["GPT-2", "LoRA GPT-2"],
[max(gpt2_lm_memory_usage), max(lora_model_memory_usage)],
color=["red", "blue"],
)
plt.xlabel("Time")
plt.ylabel("GPU Memory Usage (in GB)")
plt.title("GPU Memory Usage Comparison")
plt.legend()
plt.show()
"""
### Merge weights and generate text!
One of the biggest advantages of LoRA over other adapter methods is that it
does not incur any additional inference latency. Let's understand why.
Recall our LoRA equation: `output = W0x + b0 + BAx`. We can rewrite this as:
`output = (W0 + BA)x + b0 = Wx + b0`, where `W = W0 + BA`. This means that if
we merge the weights of the original model and the adapter, we will be essentially
doing the same computation as the original model!
"""
for layer_idx in range(lora_model.backbone.num_layers):
self_attention_layer = lora_model.backbone.get_layer(
f"transformer_layer_{layer_idx}"
)._self_attention_layer
# Merge query dense layer.
query_lora_layer = self_attention_layer._query_dense
A_weights = query_lora_layer.A.kernel # (768, 1) (a, b)
B_weights = query_lora_layer.B.kernel # (1, 12, 64) (b, c, d)
increment_weights = tf.einsum("ab,bcd->acd", A_weights, B_weights) * (ALPHA / RANK)
query_lora_layer.original_layer.kernel.assign_add(increment_weights)
# Merge value dense layer.
value_lora_layer = self_attention_layer._value_dense
A_weights = value_lora_layer.A.kernel # (768, 1) (a, b)
B_weights = value_lora_layer.B.kernel # (1, 12, 64) (b, c, d)
increment_weights = tf.einsum("ab,bcd->acd", A_weights, B_weights) * (ALPHA / RANK)
value_lora_layer.original_layer.kernel.assign_add(increment_weights)
"""
We are now all set to generate text with our LoRA model :).
"""
# Freezing weights not necessary during generation since no weights are updated.
generate_text(lora_model, "I like basketball", max_length=MAX_GENERATION_LENGTH)
generate_text(
lora_model, "That Italian restaurant is", max_length=MAX_GENERATION_LENGTH
)
"""
And we're all done!
"""
| keras-io/examples/nlp/parameter_efficient_finetuning_of_gpt2_with_lora.py/0 | {
"file_path": "keras-io/examples/nlp/parameter_efficient_finetuning_of_gpt2_with_lora.py",
"repo_id": "keras-io",
"token_count": 6788
} | 85 |
<jupyter_start><jupyter_text>FeatureSpace advanced use cases**Author:** [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)**Date created:** 2023/07/01**Last modified:** 2023/07/01**Description:** How to use FeatureSpace for advanced preprocessing use cases. IntroductionThis example is an extension of the[Structured data classification with FeatureSpace](https://keras.io/examples/structured_data/structured_data_classification_with_feature_space/)code example, and here we will extend it to cover more complex usecases of the [`keras.utils.FeatureSpace`](https://keras.io/api/utils/feature_space/)preprocessing utility, like feature hashing, feature crosses, handling missing values andintegrating [Keras preprocessing layers](https://keras.io/guides/preprocessing_layers/)with FeatureSpace.The general task still is structured data classification (also known as tabular dataclassification) using a data that includes numerical features, integer categoricalfeatures, and string categorical features. The dataset[Our dataset](https://archive.ics.uci.edu/dataset/222/bank+marketing) is provided by aPortuguese banking institution.It's a CSV file with 4119 rows. Each row contains information about marketing campaignsbased on phone calls, and each column describes an attribute of the client. We use thefeatures to predict whether the client subscribed ('yes') or not ('no') to the product(bank term deposit).Here's the description of each feature:Column| Description| Feature Type------|------------|-------------Age | Age of the client | NumericalJob | Type of job | CategoricalMarital | Marital status | CategoricalEducation | Education level of the client | CategoricalDefault | Has credit in default? | CategoricalHousing | Has housing loan? | CategoricalLoan | Has personal loan? | CategoricalContact | Contact communication type | CategoricalMonth | Last contact month of year | CategoricalDay_of_week | Last contact day of the week | CategoricalDuration | Last contact duration, in seconds | NumericalCampaign | Number of contacts performed during this campaign and for this client | NumericalPdays | Number of days that passed by after the client was last contacted from a previous campaign | NumericalPrevious | Number of contacts performed before this campaign and for this client | NumericalPoutcome | Outcome of the previous marketing campaign | CategoricalEmp.var.rate | Employment variation rate | NumericalCons.price.idx | Consumer price index | NumericalCons.conf.idx | Consumer confidence index | NumericalEuribor3m | Euribor 3 month rate | NumericalNr.employed | Number of employees | NumericalY | Has the client subscribed a term deposit? | Target**Important note regarding the feature `duration`**: this attribute highly affects theoutput target (e.g., if duration=0 then y='no'). Yet, the duration is not known before acall is performed. Also, after the end of the call y is obviously known. Thus, this inputshould only be included for benchmark purposes and should be discarded if the intentionis to have a realistic predictive model. For this reason we will drop it. Setup<jupyter_code>import pandas as pd
import tensorflow as tf
from pathlib import Path
from zipfile import ZipFile
from tensorflow.keras.utils import FeatureSpace<jupyter_output><empty_output><jupyter_text>Load the dataLet's download the data and load it into a Pandas dataframe:<jupyter_code>data_url = "https://archive.ics.uci.edu/static/public/222/bank+marketing.zip"
data_zipped_path = tf.keras.utils.get_file("bank_marketing.zip", data_url, extract=True)
keras_datasets_path = Path(data_zipped_path).parents[0]
with ZipFile(f"{keras_datasets_path}/bank-additional.zip", "r") as zip:
# Extract files
zip.extractall(path=keras_datasets_path)
dataframe = pd.read_csv(
f"{keras_datasets_path}/bank-additional/bank-additional.csv", sep=";"
)<jupyter_output><empty_output><jupyter_text>We will create a new feature `previously_contacted` to be able to demonstrate some usefulpreprocessing techniques, this feature is based on `pdays`. According to the datasetinformation if `pdays = 999` it means that the client was not previously contacted, solet's create a feature to capture that.<jupyter_code># Droping `duration` to avoid target leak
dataframe.drop("duration", axis=1, inplace=True)
# Creating the new feature `previously_contacted`
dataframe["previously_contacted"] = dataframe["pdays"].map(
lambda x: 0 if x == 999 else 1
)<jupyter_output><empty_output><jupyter_text>The dataset includes 4119 samples with 21 columns per sample (20 features, plus thetarget label), here's a preview of a few samples:<jupyter_code>print(f"Dataframe shape: {dataframe.shape}")
display(dataframe.head())<jupyter_output><empty_output><jupyter_text>The column, "y", indicates whether the client has subscribed a term deposit or not. Train/validation splitLet's split the data into a training and validation set:<jupyter_code>valid_dataframe = dataframe.sample(frac=0.2, random_state=0)
train_dataframe = dataframe.drop(valid_dataframe.index)
print(
f"Using {len(train_dataframe)} samples for training and "
f"{len(valid_dataframe)} for validation"
)<jupyter_output><empty_output><jupyter_text>Generating TF datasetsLet's generate[`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) objectsfor each dataframe, since our target column `y` is a string we also need to encode it asan integer to be able to train our model with it. To achieve this we will create a`StringLookup` layer that will map the strings "no" and "yes" into "0" and "1"respectively.<jupyter_code>label_lookup = tf.keras.layers.StringLookup(
# the order here is important since the first index will be encoded as 0
vocabulary=["no", "yes"],
num_oov_indices=0,
)
def encode_label(x, y):
encoded_y = label_lookup(y)
return x, encoded_y
def dataframe_to_dataset(dataframe):
dataframe = dataframe.copy()
labels = dataframe.pop("y")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.map(encode_label, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(buffer_size=len(dataframe))
return ds
train_ds = dataframe_to_dataset(train_dataframe)
valid_ds = dataframe_to_dataset(valid_dataframe)<jupyter_output><empty_output><jupyter_text>Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of featuresand `target` is the value `0` or `1`:<jupyter_code>for x, y in dataframe_to_dataset(train_dataframe).take(1):
print(f"Input: {x}")
print(f"Target: {y}")<jupyter_output><empty_output><jupyter_text>PreprocessingUsually our data is not on the proper or best format for modeling, this is why most ofthe time we need to do some kind of preprocessing on the features to make them compatiblewith the model or to extract the most of them for the task. We need to do thispreprocessing step for training but but at inference we also need to make sure that thedata goes through the same process, this where a utility like `FeatureSpace` shines, wecan define all the preprocessing once and re-use it at different stages of our system.Here we will see how to use `FeatureSpace` to perform more complex transformations andits flexibility, then combine everything together into a single component to preprocessdata for our model. The `FeatureSpace` utility learns how to process the data by using the `adapt()` functionto learn from it, this requires a dataset containing only feature, so let's create ittogether with a utility function to show the preprocessing example in practice:<jupyter_code>train_ds_with_no_labels = train_ds.map(lambda x, _: x)
def example_feature_space(dataset, feature_space, feature_names):
feature_space.adapt(dataset)
for x in dataset.take(1):
inputs = {feature_name: x[feature_name] for feature_name in feature_names}
preprocessed_x = feature_space(inputs)
print(f"Input: {[{k:v.numpy()} for k, v in inputs.items()]}")
print(
f"Preprocessed output: {[{k:v.numpy()} for k, v in preprocessed_x.items()]}"
)<jupyter_output><empty_output><jupyter_text>Feature hashing **Feature hashing** means hashing or encoding a set of values into a defined number ofbins, in this case we have `campaign` (number of contacts performed during this campaignand for a client) which is a numerical feature that can assume a varying range of valuesand we will hash it into 4 bins, this means that any possible value of the originalfeature will be placed into one of those possible 4 bins. The output here can be aone-hot encoded vector or a single number.<jupyter_code>feature_space = FeatureSpace(
features={
"campaign": FeatureSpace.integer_hashed(num_bins=4, output_mode="one_hot")
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["campaign"])<jupyter_output><empty_output><jupyter_text>**Feature hashing** can also be used for string features.<jupyter_code>feature_space = FeatureSpace(
features={
"education": FeatureSpace.string_hashed(num_bins=3, output_mode="one_hot")
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["education"])<jupyter_output><empty_output><jupyter_text>For numerical features we can get a similar behavior by using the `float_discretized`option, the main difference between this and `integer_hashed` is that with the former webin the values while keeping some numerical relationship (close values will likely beplaced at the same bin) while the later (hashing) we cannot guarantee that those numberswill be hashed into the same bin, it depends on the hashing function.<jupyter_code>feature_space = FeatureSpace(
features={"age": FeatureSpace.float_discretized(num_bins=3, output_mode="one_hot")},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["age"])<jupyter_output><empty_output><jupyter_text>Feature indexing **Indexing** a string feature essentially means creating a discrete numericalrepresentation for it, this is especially important for string features since most modelsonly accept numerical features. This transformation will place the string values intodifferent categories. The output here can be a one-hot encoded vector or a single number.Note that by specifying `num_oov_indices=1` we leave one spot at our output vector forOOV (out of vocabulary) values this is an important tool to handle missing or unseenvalues after the training (values that were not seen during the `adapt()` step)<jupyter_code>feature_space = FeatureSpace(
features={
"default": FeatureSpace.string_categorical(
num_oov_indices=1, output_mode="one_hot"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["default"])<jupyter_output><empty_output><jupyter_text>We also can do **feature indexing** for integer features, this can be quite important forsome datasets where categorical features are replaced by numbers, for instance featureslike `sex` or `gender` where values like (`1 and 0`) do not have a numerical relationshipbetween them, they are just different categories, this behavior can be perfectly capturedby this transformation.On this dataset we can use the feature that we created `previously_contacted`. For thiscase we want to explicitly set `num_oov_indices=0`, the reason is that we only expect twopossible values for the feature, anything else would be either wrong input or an issuewith the data creation, for this reason we would probably just want the code to throw anerror so that we can be aware of the issue and fix it.<jupyter_code>feature_space = FeatureSpace(
features={
"previously_contacted": FeatureSpace.integer_categorical(
num_oov_indices=0, output_mode="one_hot"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["previously_contacted"])<jupyter_output><empty_output><jupyter_text>Feature crosses (mixing features of diverse types)With **crosses** we can do feature interactions between an arbitrary number of featuresof mixed types as long as they are categorical features, you can think of instead ofhaving a feature {'age': 20} and another {'job': 'entrepreneur'} we can have{'age_X_job': 20_entrepreneur}, but with `FeatureSpace` and **crosses** we can applyspecific preprocessing to each individual feature and to the feature cross itself. Thisoption can be very powerful for specific use cases, here might be a good option since agecombined with job can have different meanings for the banking domain.We will cross `age` and `job` and hash the combination output of them into a vectorrepresentation of size 8. The output here can be a one-hot encoded vector or a singlenumber.Sometimes the combination of multiple features can result into on a super large featurespace, think about crossing someone's ZIP code with its last name, the possibilitieswould be in the thousands, that is why the `crossing_dim` parameter is so important itlimits the output dimension of the cross feature.Note that the combination of possible values of the 6 bins of `age` and the 12 values of`job` would be 72, so by choosing `crossing_dim = 8` we are choosing to constrain theoutput vector.<jupyter_code>feature_space = FeatureSpace(
features={
"age": FeatureSpace.integer_hashed(num_bins=6, output_mode="one_hot"),
"job": FeatureSpace.string_categorical(
num_oov_indices=0, output_mode="one_hot"
),
},
crosses=[
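        # 6 hashed age bins x 12 job categories = 72 possible combinations, hashed down to a crossing_dim of 8.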
FeatureSpace.cross(
feature_names=("age", "job"),
crossing_dim=8,
output_mode="one_hot",
)
],
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["age", "job"])<jupyter_output><empty_output><jupyter_text>FeatureSpace using a Keras preprocessing layerTo be a really flexible and extensible feature we cannot only rely on those pre-definedtransformation, we must be able to re-use other transformations from the Keras/TensorFlowecosystem and customize our own, this is why `FeatureSpace` is also designed to work with[Keras preprocessing layers](https://keras.io/guides/preprocessing_layers/), this way wecan use sophisticated data transformations provided by the framework, you can even createyour own custom Keras preprocessing layers and use it in the same way.Here we are going to use the[`tf.keras.layers.TextVectorization`](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization/textvectorization-class)preprocessing layer to create a TF-IDFfeature from our data. Note that this feature is not a really good use case for TF-IDF,this is just for demonstration purposes.<jupyter_code>custom_layer = tf.keras.layers.TextVectorization(output_mode="tf_idf")
feature_space = FeatureSpace(
features={
"education": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["education"])<jupyter_output><empty_output><jupyter_text>Configuring the final `FeatureSpace`Now that we know how to use `FeatureSpace` for more complex use cases let's pick the onesthat looks more useful for this task and create the final `FeatureSpace` component.To configure how each feature should be preprocessed,we instantiate a `keras.utils.FeatureSpace`, and wepass to it a dictionary that maps the name of our featuresto the feature transformation function.<jupyter_code>feature_space = FeatureSpace(
features={
# Categorical features encoded as integers
"previously_contacted": FeatureSpace.integer_categorical(num_oov_indices=0),
# Categorical features encoded as string
"marital": FeatureSpace.string_categorical(num_oov_indices=0),
"education": FeatureSpace.string_categorical(num_oov_indices=0),
"default": FeatureSpace.string_categorical(num_oov_indices=0),
"housing": FeatureSpace.string_categorical(num_oov_indices=0),
"loan": FeatureSpace.string_categorical(num_oov_indices=0),
"contact": FeatureSpace.string_categorical(num_oov_indices=0),
"month": FeatureSpace.string_categorical(num_oov_indices=0),
"day_of_week": FeatureSpace.string_categorical(num_oov_indices=0),
"poutcome": FeatureSpace.string_categorical(num_oov_indices=0),
# Categorical features to hash and bin
"job": FeatureSpace.string_hashed(num_bins=3),
# Numerical features to hash and bin
"pdays": FeatureSpace.integer_hashed(num_bins=4),
# Numerical features to normalize and bin
"age": FeatureSpace.float_discretized(num_bins=4),
# Numerical features to normalize
"campaign": FeatureSpace.float_normalized(),
"previous": FeatureSpace.float_normalized(),
"emp.var.rate": FeatureSpace.float_normalized(),
"cons.price.idx": FeatureSpace.float_normalized(),
"cons.conf.idx": FeatureSpace.float_normalized(),
"euribor3m": FeatureSpace.float_normalized(),
"nr.employed": FeatureSpace.float_normalized(),
},
# Specify feature cross with a custom crossing dim.
crosses=[
FeatureSpace.cross(feature_names=("age", "job"), crossing_dim=8),
FeatureSpace.cross(
feature_names=("default", "housing", "loan"), crossing_dim=6
),
FeatureSpace.cross(
feature_names=("poutcome", "previously_contacted"), crossing_dim=2
),
],
output_mode="concat",
)<jupyter_output><empty_output><jupyter_text>Adapt the `FeatureSpace` to the training dataBefore we start using the `FeatureSpace` to build a model, we haveto adapt it to the training data. During `adapt()`, the `FeatureSpace` will:- Index the set of possible values for categorical features.- Compute the mean and variance for numerical features to normalize.- Compute the value boundaries for the different bins for numerical features todiscretize.- Any other kind of preprocessing required by custom layers.Note that `adapt()` should be called on a `tf.data.Dataset` which yields dictsof feature values -- no labels.But first let's batch the datasets<jupyter_code>train_ds = train_ds.batch(32)
valid_ds = valid_ds.batch(32)
train_ds_with_no_labels = train_ds.map(lambda x, _: x)
feature_space.adapt(train_ds_with_no_labels)<jupyter_output><empty_output><jupyter_text>At this point, the `FeatureSpace` can be called on a dict of raw feature values, andbecause we set `output_mode="concat"` it will return a single concatenate vector for eachsample, combining encoded features and feature crosses.<jupyter_code>for x, _ in train_ds.take(1):
preprocessed_x = feature_space(x)
print(f"preprocessed_x shape: {preprocessed_x.shape}")
print(f"preprocessed_x sample: \n{preprocessed_x[0]}")<jupyter_output><empty_output><jupyter_text>Saving the `FeatureSpace`At this point we can choose to save our `FeatureSpace` component, this have manyadvantages like re-using it on different experiments that use the same model, saving timeif you need to re-run the preprocessing step, and mainly for model deployment, where byloading it you can be sure that you will be applying the same preprocessing steps don'tmatter the device or environment, this is a great way to reduce[training/servingskew](https://developers.google.com/machine-learning/guides/rules-of-mltraining-serving_skew).<jupyter_code>feature_space.save("myfeaturespace.keras")<jupyter_output><empty_output><jupyter_text>Preprocessing with `FeatureSpace` as part of the tf.data pipelineWe will opt to use our component asynchronously by making it part of the tf.datapipeline, as noted at the[previous guide](https://keras.io/examples/structured_data/structured_data_classification_with_feature_space/)This enables asynchronous parallel preprocessing of the data on CPU before ithits the model. Usually, this is always the right thing to do during training.Let's create a training and validation dataset of preprocessed batches:<jupyter_code>preprocessed_train_ds = train_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
preprocessed_valid_ds = valid_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>ModelWe will take advantage of our `FeatureSpace` component to build the model, as we want themodel to be compatible with our preprocessing function, let's use the the `FeatureSpace`feature map as the input of our model.<jupyter_code>encoded_features = feature_space.get_encoded_features()
print(encoded_features)<jupyter_output><empty_output><jupyter_text>This model is quite trivial only for demonstration purposes so don't pay too muchattention to the architecture.<jupyter_code>x = tf.keras.layers.Dense(64, activation="relu")(encoded_features)
x = tf.keras.layers.Dropout(0.5)(x)
output = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs=encoded_features, outputs=output)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>TrainingLet's train our model for 20 epochs. Note that feature preprocessing is happening as partof the tf.data pipeline, not as part of the model.<jupyter_code>model.fit(
preprocessed_train_ds, validation_data=preprocessed_valid_ds, epochs=20, verbose=2
)<jupyter_output><empty_output><jupyter_text>Inference on new data with the end-to-end modelNow, we can build our inference model (which includes the `FeatureSpace`) to makepredictions based on dicts of raw features values, as follows: Loading the `FeatureSpace`First let's load the `FeatureSpace` that we saved a few moment ago, this can be quitehandy if you train a model but want to do inference at different time, possibly using adifferent device or environment.<jupyter_code>loaded_feature_space = tf.keras.models.load_model("myfeaturespace.keras")<jupyter_output><empty_output><jupyter_text>Building the inference end-to-end modelTo build the inference model we need both the feature input map and the preprocessingencoded Keras tensors.<jupyter_code>dict_inputs = loaded_feature_space.get_inputs()
encoded_features = loaded_feature_space.get_encoded_features()
print(encoded_features)
print(dict_inputs)
outputs = model(encoded_features)
inference_model = tf.keras.Model(inputs=dict_inputs, outputs=outputs)
sample = {
"age": 30,
"job": "blue-collar",
"marital": "married",
"education": "basic.9y",
"default": "no",
"housing": "yes",
"loan": "no",
"contact": "cellular",
"month": "may",
"day_of_week": "fri",
"campaign": 2,
"pdays": 999,
"previous": 0,
"poutcome": "nonexistent",
"emp.var.rate": -1.8,
"cons.price.idx": 92.893,
"cons.conf.idx": -46.2,
"euribor3m": 1.313,
"nr.employed": 5099.1,
"previously_contacted": 0,
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = inference_model.predict(input_dict)
print(
f"This particular client has a {100 * predictions[0][0]:.2f}% probability "
"of subscribing a term deposit, as evaluated by our model."
)<jupyter_output><empty_output> | keras-io/examples/structured_data/ipynb/feature_space_advanced.ipynb/0 | {
"file_path": "keras-io/examples/structured_data/ipynb/feature_space_advanced.ipynb",
"repo_id": "keras-io",
"token_count": 7298
} | 86 |
"""
Title: Image classification with ConvMixer
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/10/12
Last modified: 2021/10/12
Description: An all-convolutional network applied to patches of images.
Accelerator: GPU
Converted to Keras 3 by: [Md Awsafur Rahman](https://awsaf49.github.io)
"""
"""
## Introduction
Vision Transformers (ViT; [Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) extract
small patches from the input images, linearly project them, and then apply the
Transformer ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) blocks. The application
of ViTs to image recognition tasks is quickly becoming a promising area of research,
because ViTs eliminate the need to have strong inductive biases (such as convolutions) for
modeling locality. This presents them as a general computation primitive capable of
learning just from the training data with minimal inductive priors. ViTs
yield great downstream performance when trained with proper regularization, data
augmentation, and relatively large datasets.
In the [Patches Are All You Need](https://openreview.net/pdf?id=TVHS5Y4dNvM) paper (note:
at
the time of writing, it is a submission to the ICLR 2022 conference), the authors extend
the idea of using patches to train an all-convolutional network and demonstrate
competitive results. Their architecture, namely **ConvMixer**, uses recipes from recent
isotropic architectures like ViT and MLP-Mixer
([Tolstikhin et al.](https://arxiv.org/abs/2105.01601)), such as using the same
depth and resolution across different layers in the network, residual connections,
and so on.
In this example, we will implement the ConvMixer model and demonstrate its performance on
the CIFAR-10 dataset.
"""
"""
## Imports
"""
import keras
from keras import layers
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
"""
## Hyperparameters
To keep run time short, we will train the model for only 10 epochs. To focus on
the core ideas of ConvMixer, we will not use other training-specific elements like
RandAugment ([Cubuk et al.](https://arxiv.org/abs/1909.13719)). If you are interested in
learning more about those details, please refer to the
[original paper](https://openreview.net/pdf?id=TVHS5Y4dNvM).
"""
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 128
num_epochs = 10
"""
## Load the CIFAR-10 dataset
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
val_split = 0.1
val_indices = int(len(x_train) * val_split)
new_x_train, new_y_train = x_train[val_indices:], y_train[val_indices:]
x_val, y_val = x_train[:val_indices], y_train[:val_indices]
print(f"Training data samples: {len(new_x_train)}")
print(f"Validation data samples: {len(x_val)}")
print(f"Test data samples: {len(x_test)}")
"""
## Prepare `tf.data.Dataset` objects
Our data augmentation pipeline is different from what the authors used for the CIFAR-10
dataset, which is fine for the purpose of the example.
Note that it's OK to use **TF APIs for data I/O and preprocessing** with other backends
(jax, torch), since TensorFlow is a feature-complete framework when it comes to data preprocessing.
"""
image_size = 32
auto = tf.data.AUTOTUNE
augmentation_layers = [
keras.layers.RandomCrop(image_size, image_size),
keras.layers.RandomFlip("horizontal"),
]
def augment_images(images):
for layer in augmentation_layers:
images = layer(images, training=True)
return images
def make_datasets(images, labels, is_train=False):
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if is_train:
dataset = dataset.shuffle(batch_size * 10)
dataset = dataset.batch(batch_size)
if is_train:
dataset = dataset.map(
lambda x, y: (augment_images(x), y), num_parallel_calls=auto
)
return dataset.prefetch(auto)
train_dataset = make_datasets(new_x_train, new_y_train, is_train=True)
val_dataset = make_datasets(x_val, y_val)
test_dataset = make_datasets(x_test, y_test)
"""
## ConvMixer utilities
The following figure (taken from the original paper) depicts the ConvMixer model:

ConvMixer is very similar to the MLP-Mixer model, with the following key
differences:
* Instead of using fully-connected layers, it uses standard convolution layers.
* Instead of LayerNorm (which is typical for ViTs and MLP-Mixers), it uses BatchNorm.
Two types of convolution layers are used in ConvMixer. **(1)**: Depthwise convolutions,
for mixing spatial locations of the images, **(2)**: Pointwise convolutions (which follow
the depthwise convolutions), for mixing channel-wise information across the patches.
Another key point is the use of *larger kernel sizes* to allow a larger receptive field.
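For intuition, here is the patch arithmetic for the configuration used below
(`image_size=32`, `patch_size=2`, `filters=256`):

```python
image_size, patch_size, filters = 32, 2, 256
grid_size = image_size // patch_size  # 16 x 16 spatial grid after the patch embedding stem
num_patches = grid_size**2            # 256 patches, each embedded into `filters` channels
print(grid_size, num_patches)         # 16 256
```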
"""
def activation_block(x):
x = layers.Activation("gelu")(x)
return layers.BatchNormalization()(x)
def conv_stem(x, filters: int, patch_size: int):
x = layers.Conv2D(filters, kernel_size=patch_size, strides=patch_size)(x)
return activation_block(x)
def conv_mixer_block(x, filters: int, kernel_size: int):
# Depthwise convolution.
x0 = x
x = layers.DepthwiseConv2D(kernel_size=kernel_size, padding="same")(x)
x = layers.Add()([activation_block(x), x0]) # Residual.
# Pointwise convolution.
x = layers.Conv2D(filters, kernel_size=1)(x)
x = activation_block(x)
return x
def get_conv_mixer_256_8(
image_size=32, filters=256, depth=8, kernel_size=5, patch_size=2, num_classes=10
):
"""ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
The hyperparameter values are taken from the paper.
"""
inputs = keras.Input((image_size, image_size, 3))
x = layers.Rescaling(scale=1.0 / 255)(inputs)
# Extract patch embeddings.
x = conv_stem(x, filters, patch_size)
# ConvMixer blocks.
for _ in range(depth):
x = conv_mixer_block(x, filters, kernel_size)
# Classification block.
x = layers.GlobalAvgPool2D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
return keras.Model(inputs, outputs)
"""
The model used in this experiment is termed **ConvMixer-256/8**, where 256 denotes the
number of channels and 8 denotes the depth. The resulting model only has 0.8 million
parameters.
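If you want to verify the exact count yourself, `keras.Model.count_params()` reports it
once the model has been instantiated:

```python
print(f"{get_conv_mixer_256_8().count_params():,} parameters")
```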
"""
"""
## Model training and evaluation utility
"""
# Code reference:
# https://keras.io/examples/vision/image_classification_with_vision_transformer/.
def run_experiment(model):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
checkpoint_filepath = "/tmp/checkpoint.keras"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=num_epochs,
callbacks=[checkpoint_callback],
)
model.load_weights(checkpoint_filepath)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
return history, model
"""
## Train and evaluate model
"""
conv_mixer_model = get_conv_mixer_256_8()
history, conv_mixer_model = run_experiment(conv_mixer_model)
"""
The gap in training and validation performance can be mitigated by using additional
regularization techniques. Nevertheless, being able to get to ~83% accuracy within 10
epochs with 0.8 million parameters is a strong result.
"""
"""
## Visualizing the internals of ConvMixer
We can visualize the patch embeddings and the learned convolution filters. Recall
that each patch embedding and intermediate feature map have the same number of channels
(256 in this case). This will make our visualization utility easier to implement.
"""
# Code reference: https://bit.ly/3awIRbP.
def visualization_plot(weights, idx=1):
# First, apply min-max normalization to the
    # given weights to avoid isotropic scaling.
p_min, p_max = weights.min(), weights.max()
weights = (weights - p_min) / (p_max - p_min)
# Visualize all the filters.
num_filters = 256
plt.figure(figsize=(8, 8))
for i in range(num_filters):
current_weight = weights[:, :, :, i]
if current_weight.shape[-1] == 1:
current_weight = current_weight.squeeze()
ax = plt.subplot(16, 16, idx)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(current_weight)
idx += 1
# We first visualize the learned patch embeddings.
patch_embeddings = conv_mixer_model.layers[2].get_weights()[0]
visualization_plot(patch_embeddings)
"""
Even though we did not train the network to convergence, we can notice that different
patches show different patterns. Some share similarity with others while some are very
different. These visualizations are more salient with larger image sizes.
Similarly, we can visualize the raw convolution kernels. This can help us understand
the patterns to which a given kernel is receptive.
"""
# First, print the indices of the convolution layers that are not
# pointwise convolutions.
for i, layer in enumerate(conv_mixer_model.layers):
if isinstance(layer, layers.DepthwiseConv2D):
if layer.get_config()["kernel_size"] == (5, 5):
print(i, layer)
idx = 26 # Taking a kernel from the middle of the network.
kernel = conv_mixer_model.layers[idx].get_weights()[0]
kernel = np.expand_dims(kernel.squeeze(), axis=2)
visualization_plot(kernel)
"""
We see that different filters in the kernel have different locality spans, and this
pattern
is likely to evolve with more training.
"""
"""
## Final notes
There's been a recent trend of fusing convolutions with other data-agnostic operations
like self-attention. The following works are along this line of research:
* ConViT ([d'Ascoli et al.](https://arxiv.org/abs/2103.10697))
* CCT ([Hassani et al.](https://arxiv.org/abs/2104.05704))
* CoAtNet ([Dai et al.](https://arxiv.org/abs/2106.04803))
"""
| keras-io/examples/vision/convmixer.py/0 | {
"file_path": "keras-io/examples/vision/convmixer.py",
"repo_id": "keras-io",
"token_count": 3490
} | 87 |
<jupyter_start><jupyter_text>Class Attention Image Transformers with LayerScale**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2022/09/19**Last modified:** 2022/11/21**Description:** Implementing an image transformer equipped with Class Attention and LayerScale. IntroductionIn this tutorial, we implement the CaiT (Class-Attention in Image Transformers)proposed in [Going deeper with Image Transformers](https://arxiv.org/abs/2103.17239) byTouvron et al. Depth scaling, i.e. increasing the model depth for obtaining betterperformance and generalization has been quite successful for convolutional neuralnetworks ([Tan et al.](https://arxiv.org/abs/1905.11946),[Dollár et al.](https://arxiv.org/abs/2103.06877), for example). But applyingthe same model scaling principles toVision Transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)) doesn'ttranslate equally well -- their performance gets saturated quickly with depth scaling.Note that one assumption here is that the underlying pre-training dataset isalways kept fixed when performing model scaling.In the CaiT paper, the authors investigate this phenomenon and propose modifications tothe vanilla ViT (Vision Transformers) architecture to mitigate this problem.The tutorial is structured like so:* Implementation of the individual blocks of CaiT* Collating all the blocks to create the CaiT model* Loading a pre-trained CaiT model* Obtaining prediction results* Visualization of the different attention layers of CaiTThe readers are assumed to be familiar with Vision Transformers already. Here isan implementation of Vision Transformers in Keras:[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/). Imports<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import io
import typing
from urllib.request import urlopen
import matplotlib.pyplot as plt
import numpy as np
import PIL
import keras
from keras import layers
from keras import ops<jupyter_output><empty_output><jupyter_text>The LayerScale layerWe begin by implementing a **LayerScale** layer which is one of the two modificationsproposed in the CaiT paper.When increasing the depth of the ViT models, they meet with optimization instability andeventually don't converge. The residual connections within each Transformer blockintroduce information bottleneck. When there is an increased amount of depth, thisbottleneck can quickly explode and deviate the optimization pathway for the underlyingmodel.The following equations denote where residual connections are added within a Transformerblock: where, **SA** stands for self-attention, **FFN** stands for feed-forward network, and**eta** denotes the LayerNorm operator ([Ba et al.](https://arxiv.org/abs/1607.06450)).LayerScale is formally implemented like so: where, the lambdas are learnable parameters and are initialized with a very small value({0.1, 1e-5, 1e-6}). **diag** represents a diagonal matrix.Intuitively, LayerScale helps control the contribution of the residual branches. Thelearnable parameters of LayerScale are initialized to a small value to let the branchesact like identity functions and then let them figure out the degrees of interactionsduring the training. The diagonal matrix additionally helps control the contributionsof the individual dimensions of the residual inputs as it is applied on a per-channelbasis.The practical implementation of LayerScale is simpler than it might sound.<jupyter_code>class LayerScale(layers.Layer):
"""LayerScale as introduced in CaiT: https://arxiv.org/abs/2103.17239.
Args:
init_values (float): value to initialize the diagonal matrix of LayerScale.
projection_dim (int): projection dimension used in LayerScale.
"""
def __init__(self, init_values: float, projection_dim: int, **kwargs):
super().__init__(**kwargs)
self.gamma = self.add_weight(
shape=(projection_dim,),
initializer=keras.initializers.Constant(init_values),
)
def call(self, x, training=False):
return x * self.gamma<jupyter_output><empty_output><jupyter_text>Stochastic depth layerSince its introduction ([Huang et al.](https://arxiv.org/abs/1603.09382)), StochasticDepth has become a favorite component in almost all modern neural network architectures.CaiT is no exception. Discussing Stochastic Depth is out of scope for this notebook. Youcan refer to [this resource](https://paperswithcode.com/method/stochastic-depth) in caseyou need a refresher.<jupyter_code>class StochasticDepth(layers.Layer):
"""Stochastic Depth layer (https://arxiv.org/abs/1603.09382).
Reference:
https://github.com/rwightman/pytorch-image-models
"""
def __init__(self, drop_prob: float, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prob
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, x, training=False):
if training:
keep_prob = 1 - self.drop_prob
shape = (ops.shape(x)[0],) + (1,) * (len(x.shape) - 1)
random_tensor = keep_prob + ops.random.uniform(
shape, minval=0, maxval=1, seed=self.seed_generator
)
random_tensor = ops.floor(random_tensor)
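            # Scale by 1 / keep_prob so the expected value of the output matches the input.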
return (x / keep_prob) * random_tensor
return x<jupyter_output><empty_output><jupyter_text>Class attentionThe vanilla ViT uses self-attention (SA) layers for modelling how the image patches andthe _learnable_ CLS token interact with each other. The CaiT authors propose to decouplethe attention layers responsible for attending to the image patches and the CLS tokens.When using ViTs for any discriminative tasks (classification, for example), we usuallytake the representations belonging to the CLS token and then pass them to thetask-specific heads. This is as opposed to using something like global average pooling asis typically done in convolutional neural networks.The interactions between the CLS token and other image patches are processed uniformlythrough self-attention layers. As the CaiT authors point out, this setup has got anentangled effect. On one hand, the self-attention layers are responsible for modellingthe image patches. On the other hand, they're also responsible for summarizing themodelled information via the CLS token so that it's useful for the learning objective.To help disentangle these two things, the authors propose to:* Introduce the CLS token at a later stage in the network.* Model the interaction between the CLS token and the representations related to theimage patches through a separate set of attention layers. The authors call this **ClassAttention** (CA).The figure below (taken from the original paper) depicts this idea: This is achieved by treating the CLS token embeddings as the queries in the CA layers.CLS token embeddings and the image patch embeddings are fed as keys as well values.**Note** that "embeddings" and "representations" have been used interchangeably here.<jupyter_code>class ClassAttention(layers.Layer):
"""Class attention as proposed in CaiT: https://arxiv.org/abs/2103.17239.
Args:
projection_dim (int): projection dimension for the query, key, and value
of attention.
num_heads (int): number of attention heads.
dropout_rate (float): dropout rate to be used for dropout in the attention
scores as well as the final projected outputs.
"""
def __init__(
self, projection_dim: int, num_heads: int, dropout_rate: float, **kwargs
):
super().__init__(**kwargs)
self.num_heads = num_heads
head_dim = projection_dim // num_heads
self.scale = head_dim**-0.5
self.q = layers.Dense(projection_dim)
self.k = layers.Dense(projection_dim)
self.v = layers.Dense(projection_dim)
self.attn_drop = layers.Dropout(dropout_rate)
self.proj = layers.Dense(projection_dim)
self.proj_drop = layers.Dropout(dropout_rate)
def call(self, x, training=False):
batch_size, num_patches, num_channels = (
ops.shape(x)[0],
ops.shape(x)[1],
ops.shape(x)[2],
)
# Query projection. `cls_token` embeddings are queries.
q = ops.expand_dims(self.q(x[:, 0]), axis=1)
q = ops.reshape(
q, (batch_size, 1, self.num_heads, num_channels // self.num_heads)
) # Shape: (batch_size, 1, num_heads, dimension_per_head)
q = ops.transpose(q, axes=[0, 2, 1, 3])
scale = ops.cast(self.scale, dtype=q.dtype)
q = q * scale
# Key projection. Patch embeddings as well the cls embedding are used as keys.
k = self.k(x)
k = ops.reshape(
k, (batch_size, num_patches, self.num_heads, num_channels // self.num_heads)
) # Shape: (batch_size, num_tokens, num_heads, dimension_per_head)
k = ops.transpose(k, axes=[0, 2, 3, 1])
# Value projection. Patch embeddings as well the cls embedding are used as values.
v = self.v(x)
v = ops.reshape(
v, (batch_size, num_patches, self.num_heads, num_channels // self.num_heads)
)
v = ops.transpose(v, axes=[0, 2, 1, 3])
# Calculate attention scores between cls_token embedding and patch embeddings.
attn = ops.matmul(q, k)
attn = ops.nn.softmax(attn, axis=-1)
attn = self.attn_drop(attn, training=training)
x_cls = ops.matmul(attn, v)
x_cls = ops.transpose(x_cls, axes=[0, 2, 1, 3])
x_cls = ops.reshape(x_cls, (batch_size, 1, num_channels))
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls, training=training)
return x_cls, attn<jupyter_output><empty_output><jupyter_text>Talking Head AttentionThe CaiT authors use the Talking Head attention([Shazeer et al.](https://arxiv.org/abs/2003.02436))instead of the vanilla scaled dot-product multi-head attention used inthe original Transformer paper([Vaswani et al.](https://papers.nips.cc/paper/7181-attention-is-all-you-need)).They introduce two linear projections before and after the softmaxoperations for obtaining better results.For a more rigorous treatment of the Talking Head attention and the vanilla attentionmechanisms, please refer to their respective papers (linked above).<jupyter_code>class TalkingHeadAttention(layers.Layer):
"""Talking-head attention as proposed in CaiT: https://arxiv.org/abs/2003.02436.
Args:
projection_dim (int): projection dimension for the query, key, and value
of attention.
num_heads (int): number of attention heads.
dropout_rate (float): dropout rate to be used for dropout in the attention
scores as well as the final projected outputs.
"""
def __init__(
self, projection_dim: int, num_heads: int, dropout_rate: float, **kwargs
):
super().__init__(**kwargs)
self.num_heads = num_heads
head_dim = projection_dim // self.num_heads
self.scale = head_dim**-0.5
self.qkv = layers.Dense(projection_dim * 3)
self.attn_drop = layers.Dropout(dropout_rate)
self.proj = layers.Dense(projection_dim)
self.proj_l = layers.Dense(self.num_heads)
self.proj_w = layers.Dense(self.num_heads)
self.proj_drop = layers.Dropout(dropout_rate)
def call(self, x, training=False):
B, N, C = ops.shape(x)[0], ops.shape(x)[1], ops.shape(x)[2]
# Project the inputs all at once.
qkv = self.qkv(x)
# Reshape the projected output so that they're segregated in terms of
# query, key, and value projections.
qkv = ops.reshape(qkv, (B, N, 3, self.num_heads, C // self.num_heads))
# Transpose so that the `num_heads` becomes the leading dimensions.
# Helps to better segregate the representation sub-spaces.
qkv = ops.transpose(qkv, axes=[2, 0, 3, 1, 4])
scale = ops.cast(self.scale, dtype=qkv.dtype)
q, k, v = qkv[0] * scale, qkv[1], qkv[2]
# Obtain the raw attention scores.
attn = ops.matmul(q, ops.transpose(k, axes=[0, 1, 3, 2]))
# Linear projection of the similarities between the query and key projections.
attn = self.proj_l(ops.transpose(attn, axes=[0, 2, 3, 1]))
# Normalize the attention scores.
attn = ops.transpose(attn, axes=[0, 3, 1, 2])
attn = ops.nn.softmax(attn, axis=-1)
# Linear projection on the softmaxed scores.
attn = self.proj_w(ops.transpose(attn, axes=[0, 2, 3, 1]))
attn = ops.transpose(attn, axes=[0, 3, 1, 2])
attn = self.attn_drop(attn, training=training)
# Final set of projections as done in the vanilla attention mechanism.
x = ops.matmul(attn, v)
x = ops.transpose(x, axes=[0, 2, 1, 3])
x = ops.reshape(x, (B, N, C))
x = self.proj(x)
x = self.proj_drop(x, training=training)
return x, attn<jupyter_output><empty_output><jupyter_text>Feed-forward NetworkNext, we implement the feed-forward network which is one of the components within aTransformer block.<jupyter_code>def mlp(x, dropout_rate: float, hidden_units: typing.List[int]):
"""FFN for a Transformer block."""
for idx, units in enumerate(hidden_units):
x = layers.Dense(
units,
activation=ops.nn.gelu if idx == 0 else None,
bias_initializer=keras.initializers.RandomNormal(stddev=1e-6),
)(x)
x = layers.Dropout(dropout_rate)(x)
return x<jupyter_output><empty_output><jupyter_text>Other blocksIn the next two cells, we implement the remaining blocks as standalone functions:* `LayerScaleBlockClassAttention()` which returns a `keras.Model`. It is a Transformer blockequipped with Class Attention, LayerScale, and Stochastic Depth. It operates on the CLSembeddings and the image patch embeddings.* `LayerScaleBlock()` which returns a `keras.model`. It is also a Transformer block thatoperates only on the embeddings of the image patches. It is equipped with LayerScale andStochastic Depth.<jupyter_code>def LayerScaleBlockClassAttention(
projection_dim: int,
num_heads: int,
layer_norm_eps: float,
init_values: float,
mlp_units: typing.List[int],
dropout_rate: float,
sd_prob: float,
name: str,
):
"""Pre-norm transformer block meant to be applied to the embeddings of the
cls token and the embeddings of image patches.
Includes LayerScale and Stochastic Depth.
Args:
projection_dim (int): projection dimension to be used in the
Transformer blocks and patch projection layer.
num_heads (int): number of attention heads.
layer_norm_eps (float): epsilon to be used for Layer Normalization.
init_values (float): initial value for the diagonal matrix used in LayerScale.
mlp_units (List[int]): dimensions of the feed-forward network used in
the Transformer blocks.
dropout_rate (float): dropout rate to be used for dropout in the attention
scores as well as the final projected outputs.
sd_prob (float): stochastic depth rate.
name (str): a name identifier for the block.
Returns:
A keras.Model instance.
"""
x = keras.Input((None, projection_dim))
x_cls = keras.Input((None, projection_dim))
inputs = keras.layers.Concatenate(axis=1)([x_cls, x])
# Class attention (CA).
x1 = layers.LayerNormalization(epsilon=layer_norm_eps)(inputs)
attn_output, attn_scores = ClassAttention(projection_dim, num_heads, dropout_rate)(
x1
)
attn_output = (
LayerScale(init_values, projection_dim)(attn_output)
if init_values
else attn_output
)
attn_output = StochasticDepth(sd_prob)(attn_output) if sd_prob else attn_output
x2 = keras.layers.Add()([x_cls, attn_output])
# FFN.
x3 = layers.LayerNormalization(epsilon=layer_norm_eps)(x2)
x4 = mlp(x3, hidden_units=mlp_units, dropout_rate=dropout_rate)
x4 = LayerScale(init_values, projection_dim)(x4) if init_values else x4
x4 = StochasticDepth(sd_prob)(x4) if sd_prob else x4
outputs = keras.layers.Add()([x2, x4])
return keras.Model([x, x_cls], [outputs, attn_scores], name=name)
def LayerScaleBlock(
projection_dim: int,
num_heads: int,
layer_norm_eps: float,
init_values: float,
mlp_units: typing.List[int],
dropout_rate: float,
sd_prob: float,
name: str,
):
"""Pre-norm transformer block meant to be applied to the embeddings of the
image patches.
Includes LayerScale and Stochastic Depth.
Args:
projection_dim (int): projection dimension to be used in the
Transformer blocks and patch projection layer.
num_heads (int): number of attention heads.
layer_norm_eps (float): epsilon to be used for Layer Normalization.
init_values (float): initial value for the diagonal matrix used in LayerScale.
mlp_units (List[int]): dimensions of the feed-forward network used in
the Transformer blocks.
dropout_rate (float): dropout rate to be used for dropout in the attention
scores as well as the final projected outputs.
sd_prob (float): stochastic depth rate.
name (str): a name identifier for the block.
Returns:
A keras.Model instance.
"""
encoded_patches = keras.Input((None, projection_dim))
# Self-attention.
x1 = layers.LayerNormalization(epsilon=layer_norm_eps)(encoded_patches)
attn_output, attn_scores = TalkingHeadAttention(
projection_dim, num_heads, dropout_rate
)(x1)
attn_output = (
LayerScale(init_values, projection_dim)(attn_output)
if init_values
else attn_output
)
attn_output = StochasticDepth(sd_prob)(attn_output) if sd_prob else attn_output
x2 = layers.Add()([encoded_patches, attn_output])
# FFN.
x3 = layers.LayerNormalization(epsilon=layer_norm_eps)(x2)
x4 = mlp(x3, hidden_units=mlp_units, dropout_rate=dropout_rate)
x4 = LayerScale(init_values, projection_dim)(x4) if init_values else x4
x4 = StochasticDepth(sd_prob)(x4) if sd_prob else x4
outputs = layers.Add()([x2, x4])
return keras.Model(encoded_patches, [outputs, attn_scores], name=name)<jupyter_output><empty_output><jupyter_text>Given all these blocks, we are now ready to collate them into the final CaiT model. Putting the pieces together: The CaiT model<jupyter_code>class CaiT(keras.Model):
"""CaiT model.
Args:
projection_dim (int): projection dimension to be used in the
Transformer blocks and patch projection layer.
patch_size (int): patch size of the input images.
num_patches (int): number of patches after extracting the image patches.
init_values (float): initial value for the diagonal matrix used in LayerScale.
mlp_units: (List[int]): dimensions of the feed-forward network used in
the Transformer blocks.
sa_ffn_layers (int): number of self-attention Transformer blocks.
ca_ffn_layers (int): number of class-attention Transformer blocks.
num_heads (int): number of attention heads.
layer_norm_eps (float): epsilon to be used for Layer Normalization.
dropout_rate (float): dropout rate to be used for dropout in the attention
scores as well as the final projected outputs.
sd_prob (float): stochastic depth rate.
global_pool (str): denotes how to pool the representations coming out of
the final Transformer block.
pre_logits (bool): if set to True then don't add a classification head.
num_classes (int): number of classes to construct the final classification
layer with.
"""
def __init__(
self,
projection_dim: int,
patch_size: int,
num_patches: int,
init_values: float,
mlp_units: typing.List[int],
sa_ffn_layers: int,
ca_ffn_layers: int,
num_heads: int,
layer_norm_eps: float,
dropout_rate: float,
sd_prob: float,
global_pool: str,
pre_logits: bool,
num_classes: int,
**kwargs,
):
if global_pool not in ["token", "avg"]:
raise ValueError(
'Invalid value received for `global_pool`, should be either `"token"` or `"avg"`.'
)
super().__init__(**kwargs)
# Responsible for patchifying the input images and the linearly projecting them.
self.projection = keras.Sequential(
[
layers.Conv2D(
filters=projection_dim,
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
padding="VALID",
name="conv_projection",
kernel_initializer="lecun_normal",
),
layers.Reshape(
target_shape=(-1, projection_dim),
name="flatten_projection",
),
],
name="projection",
)
# CLS token and the positional embeddings.
self.cls_token = self.add_weight(
shape=(1, 1, projection_dim), initializer="zeros"
)
self.pos_embed = self.add_weight(
shape=(1, num_patches, projection_dim), initializer="zeros"
)
# Projection dropout.
self.pos_drop = layers.Dropout(dropout_rate, name="projection_dropout")
# Stochastic depth schedule.
dpr = [sd_prob for _ in range(sa_ffn_layers)]
# Self-attention (SA) Transformer blocks operating only on the image patch
# embeddings.
self.blocks = [
LayerScaleBlock(
projection_dim=projection_dim,
num_heads=num_heads,
layer_norm_eps=layer_norm_eps,
init_values=init_values,
mlp_units=mlp_units,
dropout_rate=dropout_rate,
sd_prob=dpr[i],
name=f"sa_ffn_block_{i}",
)
for i in range(sa_ffn_layers)
]
# Class Attention (CA) Transformer blocks operating on the CLS token and image patch
# embeddings.
self.blocks_token_only = [
LayerScaleBlockClassAttention(
projection_dim=projection_dim,
num_heads=num_heads,
layer_norm_eps=layer_norm_eps,
init_values=init_values,
mlp_units=mlp_units,
dropout_rate=dropout_rate,
name=f"ca_ffn_block_{i}",
sd_prob=0.0, # No Stochastic Depth in the class attention layers.
)
for i in range(ca_ffn_layers)
]
# Pre-classification layer normalization.
self.norm = layers.LayerNormalization(epsilon=layer_norm_eps, name="head_norm")
# Representation pooling for classification head.
self.global_pool = global_pool
# Classification head.
self.pre_logits = pre_logits
self.num_classes = num_classes
if not pre_logits:
self.head = layers.Dense(num_classes, name="classification_head")
def call(self, x, training=False):
# Notice how CLS token is not added here.
x = self.projection(x)
x = x + self.pos_embed
x = self.pos_drop(x)
# SA+FFN layers.
sa_ffn_attn = {}
for blk in self.blocks:
x, attn_scores = blk(x)
sa_ffn_attn[f"{blk.name}_att"] = attn_scores
# CA+FFN layers.
ca_ffn_attn = {}
cls_tokens = ops.tile(self.cls_token, (ops.shape(x)[0], 1, 1))
for blk in self.blocks_token_only:
cls_tokens, attn_scores = blk([x, cls_tokens])
ca_ffn_attn[f"{blk.name}_att"] = attn_scores
x = ops.concatenate([cls_tokens, x], axis=1)
x = self.norm(x)
# Always return the attention scores from the SA+FFN and CA+FFN layers
# for convenience.
if self.global_pool:
x = (
                ops.mean(x[:, 1:], axis=1)
if self.global_pool == "avg"
else x[:, 0]
)
return (
(x, sa_ffn_attn, ca_ffn_attn)
if self.pre_logits
else (self.head(x), sa_ffn_attn, ca_ffn_attn)
)<jupyter_output><empty_output><jupyter_text>Having the SA and CA layers segregated this way helps the model to focus on underlyingobjectives more concretely:* model dependencies in between the image patches* summarize the information from the image patches in a CLS token that can be used forthe task at handNow that we have defined the CaiT model, it's time to test it. We will start by defininga model configuration that will be passed to our `CaiT` class for initialization. Defining Model Configuration<jupyter_code>def get_config(
image_size: int = 224,
patch_size: int = 16,
projection_dim: int = 192,
sa_ffn_layers: int = 24,
ca_ffn_layers: int = 2,
num_heads: int = 4,
mlp_ratio: int = 4,
layer_norm_eps=1e-6,
init_values: float = 1e-5,
dropout_rate: float = 0.0,
sd_prob: float = 0.0,
global_pool: str = "token",
pre_logits: bool = False,
num_classes: int = 1000,
) -> typing.Dict:
"""Default configuration for CaiT models (cait_xxs24_224).
Reference:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/cait.py
"""
config = {}
# Patchification and projection.
config["patch_size"] = patch_size
config["num_patches"] = (image_size // patch_size) ** 2
# LayerScale.
config["init_values"] = init_values
# Dropout and Stochastic Depth.
config["dropout_rate"] = dropout_rate
config["sd_prob"] = sd_prob
# Shared across different blocks and layers.
config["layer_norm_eps"] = layer_norm_eps
config["projection_dim"] = projection_dim
config["mlp_units"] = [
projection_dim * mlp_ratio,
projection_dim,
]
# Attention layers.
config["num_heads"] = num_heads
config["sa_ffn_layers"] = sa_ffn_layers
config["ca_ffn_layers"] = ca_ffn_layers
# Representation pooling and task specific parameters.
config["global_pool"] = global_pool
config["pre_logits"] = pre_logits
config["num_classes"] = num_classes
    return config<jupyter_output><empty_output><jupyter_text>Most of the configuration variables should sound familiar if you already know the ViT architecture. Pay particular attention to `sa_ffn_layers` and `ca_ffn_layers`, which control the number of SA-Transformer blocks and CA-Transformer blocks. You can easily amend this `get_config()` method to instantiate a CaiT model for your own dataset. Model Instantiation<jupyter_code>image_size = 224
num_channels = 3
batch_size = 2
config = get_config()
cait_xxs24_224 = CaiT(**config)
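# Illustrative aside (not part of the original example): the same `get_config()`
# helper can be amended for other datasets. The values below are hypothetical
# placeholders (a 10-class problem at 128x128 resolution), not tuned settings.
custom_config = get_config(image_size=128, num_classes=10)
print("Patches in the hypothetical custom config:", custom_config["num_patches"])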
dummy_inputs = ops.ones((batch_size, image_size, image_size, num_channels))
_ = cait_xxs24_224(dummy_inputs)<jupyter_output><empty_output><jupyter_text>We can successfully perform inference with the model. But what about implementationcorrectness? There are many ways to verify it:* Obtain the performance of the model (given it's been populated with the pre-trainedparameters) on the ImageNet-1k validation set (as the pretraining dataset wasImageNet-1k).* Fine-tune the model on a different dataset.In order to verify that, we will load another instance of the same model that has beenalready populated with the pre-trained parameters. Please refer to[this repository](https://github.com/sayakpaul/cait-tf)(developed by the author of this notebook) for more details.Additionally, the repository provides code to verify model performance on the[ImageNet-1k validation set](https://github.com/sayakpaul/cait-tf/tree/main/i1k_eval)as well as[fine-tuning](https://github.com/sayakpaul/cait-tf/blob/main/notebooks/finetune.ipynb). Load a pretrained model<jupyter_code>model_gcs_path = "gs://tfhub-modules/sayakpaul/cait_xxs24_224/1/uncompressed"
pretrained_model = keras.Sequential(
[keras.layers.TFSMLayer(model_gcs_path, call_endpoint="serving_default")]
)<jupyter_output><empty_output><jupyter_text>Inference utilitiesIn the next couple of cells, we develop preprocessing utilities needed to run inferencewith the pretrained model.<jupyter_code># The preprocessing transformations include center cropping, and normalizing
# the pixel values with the ImageNet-1k training stats (mean and standard deviation).
crop_layer = keras.layers.CenterCrop(image_size, image_size)
norm_layer = keras.layers.Normalization(
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2],
)
def preprocess_image(image, size=image_size):
image = np.array(image)
image_resized = ops.expand_dims(image, 0)
resize_size = int((256 / image_size) * size)
image_resized = ops.image.resize(
image_resized, (resize_size, resize_size), interpolation="bicubic"
)
image_resized = crop_layer(image_resized)
return norm_layer(image_resized).numpy()
def load_image_from_url(url):
image_bytes = io.BytesIO(urlopen(url).read())
image = PIL.Image.open(image_bytes)
preprocessed_image = preprocess_image(image)
return image, preprocessed_image<jupyter_output><empty_output><jupyter_text>Now, we retrieve the ImageNet-1k labels and load them as the model we'reloading was pretrained on the ImageNet-1k dataset.<jupyter_code># ImageNet-1k class labels.
imagenet_labels = (
"https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
)
label_path = keras.utils.get_file(origin=imagenet_labels)
with open(label_path, "r") as f:
lines = f.readlines()
imagenet_labels = [line.rstrip() for line in lines]<jupyter_output><empty_output><jupyter_text>Load an Image<jupyter_code>img_url = "https://i.imgur.com/ErgfLTn.jpg"
image, preprocessed_image = load_image_from_url(img_url)
# https://unsplash.com/photos/Ho93gVTRWW8
plt.imshow(image)
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Obtain Predictions<jupyter_code>outputs = pretrained_model.predict(preprocessed_image)
logits = outputs["output_1"]
ca_ffn_block_0_att = outputs["output_3_ca_ffn_block_0_att"]
ca_ffn_block_1_att = outputs["output_3_ca_ffn_block_1_att"]
predicted_label = imagenet_labels[int(np.argmax(logits))]
print(predicted_label)<jupyter_output><empty_output><jupyter_text>Now that we have obtained the predictions (which appear to be as expected), we canfurther extend our investigation. Following the CaiT authors, we can investigate theattention scores from the attention layers. This helps us to get deeper insights into themodifications introduced in the CaiT paper. Visualizing the Attention LayersWe start by inspecting the shape of the attention weights returned by a Class Attentionlayer.<jupyter_code># (batch_size, nb_attention_heads, num_cls_token, seq_length)
print("Shape of the attention scores from a class attention block:")
print(ca_ffn_block_0_att.shape)<jupyter_output><empty_output><jupyter_text>The shape denotes we have got attention weights for each of the individual attentionheads. They quantify the information about how the CLS token is related to itself and therest of the image patches.Next, we write a utility to:* Visualize what the individual attention heads in the Class Attention layers arefocusing on. This helps us to get an idea of how the _spatial-class relationship_ isinduced in the CaiT model.* Obtain a saliency map from the first Class Attention layer that helps to understand howCA layer aggregates information from the region(s) of interest in the images.This utility is referred from Figures 6 and 7 of the original[CaiT paper](https://arxiv.org/abs/2103.17239). This is also a part of[this notebook](https://github.com/sayakpaul/cait-tf/blob/main/notebooks/classification.ipynb)(developed by the author of this tutorial).<jupyter_code># Reference:
# https://github.com/facebookresearch/dino/blob/main/visualize_attention.py
patch_size = 16
def get_cls_attention_map(
attention_scores,
return_saliency=False,
) -> np.ndarray:
"""
Returns attention scores from a particular attention block.
Args:
attention_scores: the attention scores from the attention block to
visualize.
return_saliency: a boolean flag if set to True also returns the salient
representations of the attention block.
"""
w_featmap = preprocessed_image.shape[2] // patch_size
h_featmap = preprocessed_image.shape[1] // patch_size
nh = attention_scores.shape[1] # Number of attention heads.
# Taking the representations from CLS token.
attentions = attention_scores[0, :, 0, 1:].reshape(nh, -1)
# Reshape the attention scores to resemble mini patches.
attentions = attentions.reshape(nh, w_featmap, h_featmap)
if not return_saliency:
attentions = attentions.transpose((1, 2, 0))
else:
attentions = np.mean(attentions, axis=0)
attentions = (attentions - attentions.min()) / (
attentions.max() - attentions.min()
)
attentions = np.expand_dims(attentions, -1)
# Resize the attention patches to 224x224 (224: 14x16)
attentions = ops.image.resize(
attentions,
size=(h_featmap * patch_size, w_featmap * patch_size),
interpolation="bicubic",
)
return attentions<jupyter_output><empty_output><jupyter_text>In the first CA layer, we notice that the model is focusing solely on the region ofinterest.<jupyter_code>attentions_ca_block_0 = get_cls_attention_map(ca_ffn_block_0_att)
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(13, 13))
img_count = 0
for i in range(attentions_ca_block_0.shape[-1]):
if img_count < attentions_ca_block_0.shape[-1]:
axes[i].imshow(attentions_ca_block_0[:, :, img_count])
axes[i].title.set_text(f"Attention head: {img_count}")
axes[i].axis("off")
img_count += 1
fig.tight_layout()
plt.show()<jupyter_output><empty_output><jupyter_text>Whereas in the second CA layer, the model is trying to focus more on the context thatcontains discriminative signals.<jupyter_code>attentions_ca_block_1 = get_cls_attention_map(ca_ffn_block_1_att)
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(13, 13))
img_count = 0
for i in range(attentions_ca_block_1.shape[-1]):
if img_count < attentions_ca_block_1.shape[-1]:
axes[i].imshow(attentions_ca_block_1[:, :, img_count])
axes[i].title.set_text(f"Attention head: {img_count}")
axes[i].axis("off")
img_count += 1
fig.tight_layout()
plt.show()<jupyter_output><empty_output><jupyter_text>Finally, we obtain the saliency map for the given image.<jupyter_code>saliency_attention = get_cls_attention_map(ca_ffn_block_0_att, return_saliency=True)
image = np.array(image)
image_resized = ops.expand_dims(image, 0)
resize_size = int((256 / 224) * image_size)
image_resized = ops.image.resize(
image_resized, (resize_size, resize_size), interpolation="bicubic"
)
image_resized = crop_layer(image_resized)
plt.imshow(image_resized.numpy().squeeze().astype("int32"))
plt.imshow(saliency_attention.numpy().squeeze(), cmap="cividis", alpha=0.9)
plt.axis("off")
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/cait.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/cait.ipynb",
"repo_id": "keras-io",
"token_count": 13707
} | 88 |
<jupyter_start><jupyter_text>Grad-CAM class activation visualization**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/04/26**Last modified:** 2021/03/07**Description:** How to obtain a class activation heatmap for an image classification model. Adapted from Deep Learning with Python (2017). Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import tensorflow as tf
import keras
# Display
from IPython.display import Image, display
import matplotlib as mpl
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Configurable parametersYou can change these to another model.To get the values for `last_conv_layer_name` use `model.summary()`to see the names of all layers in the model.<jupyter_code>model_builder = keras.applications.xception.Xception
img_size = (299, 299)
preprocess_input = keras.applications.xception.preprocess_input
decode_predictions = keras.applications.xception.decode_predictions
last_conv_layer_name = "block14_sepconv2_act"
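# Illustrative aside (not part of the original example): instead of scanning the full
# `model.summary()` printout, the last few layer names can also be listed
# programmatically from a weight-free copy of the model.
print([layer.name for layer in model_builder(weights=None).layers][-5:])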
# The local path to our target image
img_path = keras.utils.get_file(
"african_elephant.jpg", "https://i.imgur.com/Bvro0YD.png"
)
display(Image(img_path))<jupyter_output><empty_output><jupyter_text>The Grad-CAM algorithm<jupyter_code>def get_img_array(img_path, size):
# `img` is a PIL image of size 299x299
img = keras.utils.load_img(img_path, target_size=size)
# `array` is a float32 Numpy array of shape (299, 299, 3)
array = keras.utils.img_to_array(img)
# We add a dimension to transform our array into a "batch"
# of size (1, 299, 299, 3)
array = np.expand_dims(array, axis=0)
return array
def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
# First, we create a model that maps the input image to the activations
# of the last conv layer as well as the output predictions
grad_model = keras.models.Model(
model.inputs, [model.get_layer(last_conv_layer_name).output, model.output]
)
# Then, we compute the gradient of the top predicted class for our input image
# with respect to the activations of the last conv layer
with tf.GradientTape() as tape:
last_conv_layer_output, preds = grad_model(img_array)
if pred_index is None:
pred_index = tf.argmax(preds[0])
class_channel = preds[:, pred_index]
# This is the gradient of the output neuron (top predicted or chosen)
# with regard to the output feature map of the last conv layer
grads = tape.gradient(class_channel, last_conv_layer_output)
# This is a vector where each entry is the mean intensity of the gradient
# over a specific feature map channel
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
# We multiply each channel in the feature map array
# by "how important this channel is" with regard to the top predicted class
# then sum all the channels to obtain the heatmap class activation
last_conv_layer_output = last_conv_layer_output[0]
heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
heatmap = tf.squeeze(heatmap)
# For visualization purpose, we will also normalize the heatmap between 0 & 1
heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
return heatmap.numpy()<jupyter_output><empty_output><jupyter_text>Let's test-drive it<jupyter_code># Prepare image
img_array = preprocess_input(get_img_array(img_path, size=img_size))
# Make model
model = model_builder(weights="imagenet")
# Remove last layer's softmax
model.layers[-1].activation = None
# Print what the top predicted class is
preds = model.predict(img_array)
print("Predicted:", decode_predictions(preds, top=1)[0])
# Generate class activation heatmap
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
# Display heatmap
plt.matshow(heatmap)
plt.show()<jupyter_output><empty_output><jupyter_text>Create a superimposed visualization<jupyter_code>def save_and_display_gradcam(img_path, heatmap, cam_path="cam.jpg", alpha=0.4):
# Load the original image
img = keras.utils.load_img(img_path)
img = keras.utils.img_to_array(img)
# Rescale heatmap to a range 0-255
heatmap = np.uint8(255 * heatmap)
# Use jet colormap to colorize heatmap
jet = mpl.colormaps["jet"]
# Use RGB values of the colormap
jet_colors = jet(np.arange(256))[:, :3]
jet_heatmap = jet_colors[heatmap]
# Create an image with RGB colorized heatmap
jet_heatmap = keras.utils.array_to_img(jet_heatmap)
jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
jet_heatmap = keras.utils.img_to_array(jet_heatmap)
# Superimpose the heatmap on original image
superimposed_img = jet_heatmap * alpha + img
superimposed_img = keras.utils.array_to_img(superimposed_img)
# Save the superimposed image
superimposed_img.save(cam_path)
# Display Grad CAM
display(Image(cam_path))
save_and_display_gradcam(img_path, heatmap)<jupyter_output><empty_output><jupyter_text>Let's try another imageWe will see how the grad cam explains the model's outputs for a multi-label image. Let'stry an image with a cat and a dog together, and see how the grad cam behaves.<jupyter_code>img_path = keras.utils.get_file(
"cat_and_dog.jpg",
"https://storage.googleapis.com/petbacker/images/blog/2017/dog-and-cat-cover.jpg",
)
display(Image(img_path))
# Prepare image
img_array = preprocess_input(get_img_array(img_path, size=img_size))
# Print what the two top predicted classes are
preds = model.predict(img_array)
print("Predicted:", decode_predictions(preds, top=2)[0])<jupyter_output><empty_output><jupyter_text>We generate class activation heatmap for "chow," the class index is 260<jupyter_code>heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=260)
save_and_display_gradcam(img_path, heatmap)<jupyter_output><empty_output><jupyter_text>We generate class activation heatmap for "egyptian cat," the class index is 285<jupyter_code>heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=285)
save_and_display_gradcam(img_path, heatmap)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/grad_cam.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/grad_cam.ipynb",
"repo_id": "keras-io",
"token_count": 2199
} | 89 |
<jupyter_start><jupyter_text>Low-light image enhancement using MIRNet**Author:** [Soumik Rakshit](http://github.com/soumik12345)**Date created:** 2021/09/11**Last modified:** 2023/07/15**Description:** Implementing the MIRNet architecture for low-light image enhancement. IntroductionWith the goal of recovering high-quality image content from its degraded version, imagerestoration enjoys numerous applications, such as inphotography, security, medical imaging, and remote sensing. In this example, we implement the**MIRNet** model for low-light image enhancement, a fully-convolutional architecture thatlearns an enriched set offeatures that combines contextual information from multiple scales, whilesimultaneously preserving the high-resolution spatial details. References:- [Learning Enriched Features for Real Image Restoration and Enhancement](https://arxiv.org/abs/2003.06792)- [The Retinex Theory of Color Vision](http://www.cnbc.cmu.edu/~tai/cp_papers/E.Land_Retinex_Theory_ScientifcAmerican.pdf)- [Two deterministic half-quadratic regularization algorithms for computed imaging](https://ieeexplore.ieee.org/document/413553) Downloading LOLDatasetThe **LoL Dataset** has been created for low-light image enhancement.It provides 485 images for training and 15 for testing. Each image pair in the datasetconsists of a low-light input image and its corresponding well-exposed reference image.<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import random
import numpy as np
from glob import glob
from PIL import Image, ImageOps
import matplotlib.pyplot as plt
import keras
from keras import layers
import tensorflow as tf
!wget https://huggingface.co/datasets/geekyrakshit/LoL-Dataset/resolve/main/lol_dataset.zip
!unzip -q lol_dataset.zip && rm lol_dataset.zip<jupyter_output><empty_output><jupyter_text>Creating a TensorFlow DatasetWe use 300 image pairs from the LoL Dataset's training set for training,and we use the remaining 185 image pairs for validation.We generate random crops of size `128 x 128` from the image pairs to beused for both training and validation.<jupyter_code>random.seed(10)
IMAGE_SIZE = 128
BATCH_SIZE = 4
MAX_TRAIN_IMAGES = 300
def read_image(image_path):
image = tf.io.read_file(image_path)
image = tf.image.decode_png(image, channels=3)
image.set_shape([None, None, 3])
image = tf.cast(image, dtype=tf.float32) / 255.0
return image
def random_crop(low_image, enhanced_image):
low_image_shape = tf.shape(low_image)[:2]
low_w = tf.random.uniform(
shape=(), maxval=low_image_shape[1] - IMAGE_SIZE + 1, dtype=tf.int32
)
low_h = tf.random.uniform(
shape=(), maxval=low_image_shape[0] - IMAGE_SIZE + 1, dtype=tf.int32
)
low_image_cropped = low_image[
low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
]
enhanced_image_cropped = enhanced_image[
low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
]
# in order to avoid `NONE` during shape inference
low_image_cropped.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
enhanced_image_cropped.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
return low_image_cropped, enhanced_image_cropped
def load_data(low_light_image_path, enhanced_image_path):
low_light_image = read_image(low_light_image_path)
enhanced_image = read_image(enhanced_image_path)
low_light_image, enhanced_image = random_crop(low_light_image, enhanced_image)
return low_light_image, enhanced_image
def get_dataset(low_light_images, enhanced_images):
dataset = tf.data.Dataset.from_tensor_slices((low_light_images, enhanced_images))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
return dataset
train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[:MAX_TRAIN_IMAGES]
train_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[:MAX_TRAIN_IMAGES]
val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[MAX_TRAIN_IMAGES:]
val_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[MAX_TRAIN_IMAGES:]
test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
test_enhanced_images = sorted(glob("./lol_dataset/eval15/high/*"))
train_dataset = get_dataset(train_low_light_images, train_enhanced_images)
val_dataset = get_dataset(val_low_light_images, val_enhanced_images)
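# Illustrative check (not part of the original example): pull a single batch to
# confirm the random crops have the expected (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3) shape.
for low_batch, high_batch in train_dataset.take(1):
    print("Low-light batch:", low_batch.shape, "Enhanced batch:", high_batch.shape)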
print("Train Dataset:", train_dataset.element_spec)
print("Val Dataset:", val_dataset.element_spec)<jupyter_output><empty_output><jupyter_text>MIRNet ModelHere are the main features of the MIRNet model:- A feature extraction model that computes a complementary set of features across multiplespatial scales, while maintaining the original high-resolution features to preserveprecise spatial details.- A regularly repeated mechanism for information exchange, where the features acrossmulti-resolution branches are progressively fused together for improved representationlearning.- A new approach to fuse multi-scale features using a selective kernel networkthat dynamically combines variable receptive fields and faithfully preservesthe original feature information at each spatial resolution.- A recursive residual design that progressively breaks down the input signalin order to simplify the overall learning process, and allows the constructionof very deep networks. Selective Kernel Feature FusionThe Selective Kernel Feature Fusion or SKFF module performs dynamic adjustment ofreceptive fields via two operations: **Fuse** and **Select**. The Fuse operator generatesglobal feature descriptors by combining the information from multi-resolution streams.The Select operator uses these descriptors to recalibrate the feature maps (of differentstreams) followed by their aggregation.**Fuse**: The SKFF receives inputs from three parallel convolution streams carryingdifferent scales of information. We first combine these multi-scale features using anelement-wise sum, on which we apply Global Average Pooling (GAP) across the spatialdimension. Next, we apply a channel- downscaling convolution layer to generate a compactfeature representation which passes through three parallel channel-upscaling convolutionlayers (one for each resolution stream) and provides us with three feature descriptors.**Select**: This operator applies the softmax function to the feature descriptors toobtain the corresponding activations that are used to adaptively recalibrate multi-scalefeature maps. The aggregated features are defined as the sum of product of the correspondingmulti-scale feature and the feature descriptor.<jupyter_code>def selective_kernel_feature_fusion(
multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3
):
channels = list(multi_scale_feature_1.shape)[-1]
combined_feature = layers.Add()(
[multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3]
)
gap = layers.GlobalAveragePooling2D()(combined_feature)
channel_wise_statistics = layers.Reshape((1, 1, channels))(gap)
compact_feature_representation = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(channel_wise_statistics)
feature_descriptor_1 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_2 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_descriptor_3 = layers.Conv2D(
channels, kernel_size=(1, 1), activation="softmax"
)(compact_feature_representation)
feature_1 = multi_scale_feature_1 * feature_descriptor_1
feature_2 = multi_scale_feature_2 * feature_descriptor_2
feature_3 = multi_scale_feature_3 * feature_descriptor_3
aggregated_feature = layers.Add()([feature_1, feature_2, feature_3])
return aggregated_feature<jupyter_output><empty_output><jupyter_text>Dual Attention UnitThe Dual Attention Unit or DAU is used to extract features in the convolutional streams.While the SKFF block fuses information across multi-resolution branches, we also need amechanism to share information within a feature tensor, both along the spatial and thechannel dimensions which is done by the DAU block. The DAU suppresses less usefulfeatures and only allows more informative ones to pass further. This featurerecalibration is achieved by using **Channel Attention** and **Spatial Attention**mechanisms.The **Channel Attention** branch exploits the inter-channel relationships of theconvolutional feature maps by applying squeeze and excitation operations. Given a featuremap, the squeeze operation applies Global Average Pooling across spatial dimensions toencode global context, thus yielding a feature descriptor. The excitation operator passesthis feature descriptor through two convolutional layers followed by the sigmoid gatingand generates activations. Finally, the output of Channel Attention branch is obtained byrescaling the input feature map with the output activations.The **Spatial Attention** branch is designed to exploit the inter-spatial dependencies ofconvolutional features. The goal of Spatial Attention is to generate a spatial attentionmap and use it to recalibrate the incoming features. To generate the spatial attentionmap, the Spatial Attention branch first independently applies Global Average Pooling andMax Pooling operations on input features along the channel dimensions and concatenatesthe outputs to form a resultant feature map which is then passed through a convolutionand sigmoid activation to obtain the spatial attention map. This spatial attention map isthen used to rescale the input feature map.<jupyter_code>class ChannelPooling(layers.Layer):
def __init__(self, axis=-1, *args, **kwargs):
super().__init__(*args, **kwargs)
self.axis = axis
self.concat = layers.Concatenate(axis=self.axis)
def call(self, inputs):
average_pooling = tf.expand_dims(tf.reduce_mean(inputs, axis=-1), axis=-1)
max_pooling = tf.expand_dims(tf.reduce_max(inputs, axis=-1), axis=-1)
return self.concat([average_pooling, max_pooling])
def get_config(self):
config = super().get_config()
        config.update({"axis": self.axis})
        return config
def spatial_attention_block(input_tensor):
compressed_feature_map = ChannelPooling(axis=-1)(input_tensor)
feature_map = layers.Conv2D(1, kernel_size=(1, 1))(compressed_feature_map)
feature_map = keras.activations.sigmoid(feature_map)
return input_tensor * feature_map
def channel_attention_block(input_tensor):
channels = list(input_tensor.shape)[-1]
average_pooling = layers.GlobalAveragePooling2D()(input_tensor)
feature_descriptor = layers.Reshape((1, 1, channels))(average_pooling)
feature_activations = layers.Conv2D(
filters=channels // 8, kernel_size=(1, 1), activation="relu"
)(feature_descriptor)
feature_activations = layers.Conv2D(
filters=channels, kernel_size=(1, 1), activation="sigmoid"
)(feature_activations)
return input_tensor * feature_activations
def dual_attention_unit_block(input_tensor):
channels = list(input_tensor.shape)[-1]
feature_map = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(input_tensor)
feature_map = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
feature_map
)
channel_attention = channel_attention_block(feature_map)
spatial_attention = spatial_attention_block(feature_map)
concatenation = layers.Concatenate(axis=-1)([channel_attention, spatial_attention])
concatenation = layers.Conv2D(channels, kernel_size=(1, 1))(concatenation)
return layers.Add()([input_tensor, concatenation])<jupyter_output><empty_output><jupyter_text>Multi-Scale Residual BlockThe Multi-Scale Residual Block is capable of generating a spatially-precise output bymaintaining high-resolution representations, while receiving rich contextual informationfrom low-resolutions. The MRB consists of multiple (three in this paper)fully-convolutional streams connected in parallel. It allows information exchange acrossparallel streams in order to consolidate the high-resolution features with the help oflow-resolution features, and vice versa. The MIRNet employs a recursive residual design(with skip connections) to ease the flow of information during the learning process. Inorder to maintain the residual nature of our architecture, residual resizing modules areused to perform downsampling and upsampling operations that are used in the Multi-scaleResidual Block.<jupyter_code># Recursive Residual Modules
def down_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
input_tensor
)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.MaxPooling2D()(main_branch)
main_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.MaxPooling2D()(input_tensor)
skip_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
def up_sampling_module(input_tensor):
channels = list(input_tensor.shape)[-1]
main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
input_tensor
)
main_branch = layers.Conv2D(
channels, kernel_size=(3, 3), padding="same", activation="relu"
)(main_branch)
main_branch = layers.UpSampling2D()(main_branch)
main_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(main_branch)
skip_branch = layers.UpSampling2D()(input_tensor)
skip_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(skip_branch)
return layers.Add()([skip_branch, main_branch])
# MRB Block
def multi_scale_residual_block(input_tensor, channels):
# features
level1 = input_tensor
level2 = down_sampling_module(input_tensor)
level3 = down_sampling_module(level2)
# DAU
level1_dau = dual_attention_unit_block(level1)
level2_dau = dual_attention_unit_block(level2)
level3_dau = dual_attention_unit_block(level3)
# SKFF
level1_skff = selective_kernel_feature_fusion(
level1_dau,
up_sampling_module(level2_dau),
up_sampling_module(up_sampling_module(level3_dau)),
)
level2_skff = selective_kernel_feature_fusion(
down_sampling_module(level1_dau),
level2_dau,
up_sampling_module(level3_dau),
)
level3_skff = selective_kernel_feature_fusion(
down_sampling_module(down_sampling_module(level1_dau)),
down_sampling_module(level2_dau),
level3_dau,
)
# DAU 2
level1_dau_2 = dual_attention_unit_block(level1_skff)
level2_dau_2 = up_sampling_module((dual_attention_unit_block(level2_skff)))
level3_dau_2 = up_sampling_module(
up_sampling_module(dual_attention_unit_block(level3_skff))
)
# SKFF 2
skff_ = selective_kernel_feature_fusion(level1_dau_2, level2_dau_2, level3_dau_2)
conv = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(skff_)
return layers.Add()([input_tensor, conv])<jupyter_output><empty_output><jupyter_text>MIRNet Model<jupyter_code>def recursive_residual_group(input_tensor, num_mrb, channels):
conv1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
for _ in range(num_mrb):
conv1 = multi_scale_residual_block(conv1, channels)
conv2 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(conv1)
return layers.Add()([conv2, input_tensor])
def mirnet_model(num_rrg, num_mrb, channels):
input_tensor = keras.Input(shape=[None, None, 3])
x1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
for _ in range(num_rrg):
x1 = recursive_residual_group(x1, num_mrb, channels)
conv = layers.Conv2D(3, kernel_size=(3, 3), padding="same")(x1)
output_tensor = layers.Add()([input_tensor, conv])
return keras.Model(input_tensor, output_tensor)
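# Note (added for clarity): in the call below, `num_rrg` is the number of recursive
# residual groups, `num_mrb` is the number of multi-scale residual blocks inside each
# group, and `channels` is the width of the feature maps flowing through the network.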
model = mirnet_model(num_rrg=3, num_mrb=2, channels=64)<jupyter_output><empty_output><jupyter_text>Training- We train MIRNet using **Charbonnier Loss** as the loss function and **AdamOptimizer** with a learning rate of `1e-4`.- We use **Peak Signal Noise Ratio** or PSNR as a metric which is an expression for theratio between the maximum possible value (power) of a signal and the power of distortingnoise that affects the quality of its representation.<jupyter_code>def charbonnier_loss(y_true, y_pred):
return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3)))
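# Illustrative note (not part of the original example): the Charbonnier loss is a
# smooth approximation of the mean absolute error, sqrt((y_true - y_pred)^2 + eps^2)
# with eps = 1e-3, which keeps the gradient well-defined at zero error.
print(
    "Charbonnier loss for a unit error:",
    float(charbonnier_loss(tf.ones((1, 4, 4, 3)), tf.zeros((1, 4, 4, 3)))),
)  # ~= sqrt(1 + 1e-6), i.e. very close to 1.0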
def peak_signal_noise_ratio(y_true, y_pred):
return tf.image.psnr(y_pred, y_true, max_val=255.0)
optimizer = keras.optimizers.Adam(learning_rate=1e-4)
model.compile(
optimizer=optimizer,
loss=charbonnier_loss,
metrics=[peak_signal_noise_ratio],
)
history = model.fit(
train_dataset,
validation_data=val_dataset,
epochs=50,
callbacks=[
keras.callbacks.ReduceLROnPlateau(
monitor="val_peak_signal_noise_ratio",
factor=0.5,
patience=5,
verbose=1,
min_delta=1e-7,
mode="max",
)
],
)
def plot_history(value, name):
plt.plot(history.history[value], label=f"train_{name.lower()}")
plt.plot(history.history[f"val_{value}"], label=f"val_{name.lower()}")
plt.xlabel("Epochs")
plt.ylabel(name)
plt.title(f"Train and Validation {name} Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss", "Loss")
plot_history("peak_signal_noise_ratio", "PSNR")<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code>def plot_results(images, titles, figure_size=(12, 12)):
fig = plt.figure(figsize=figure_size)
for i in range(len(images)):
fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
_ = plt.imshow(images[i])
plt.axis("off")
plt.show()
def infer(original_image):
image = keras.utils.img_to_array(original_image)
image = image.astype("float32") / 255.0
image = np.expand_dims(image, axis=0)
output = model.predict(image, verbose=0)
output_image = output[0] * 255.0
output_image = output_image.clip(0, 255)
output_image = output_image.reshape(
(np.shape(output_image)[0], np.shape(output_image)[1], 3)
)
output_image = Image.fromarray(np.uint8(output_image))
original_image = Image.fromarray(np.uint8(original_image))
return output_image<jupyter_output><empty_output><jupyter_text>Inference on Test ImagesWe compare the test images from LOLDataset enhanced by MIRNet with imagesenhanced via the `PIL.ImageOps.autocontrast()` function.You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/lowlight-enhance-mirnet)and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/Enhance_Low_Light_Image).<jupyter_code>for low_light_image in random.sample(test_low_light_images, 6):
original_image = Image.open(low_light_image)
enhanced_image = infer(original_image)
plot_results(
[original_image, ImageOps.autocontrast(original_image), enhanced_image],
["Original", "PIL Autocontrast", "MIRNet Enhanced"],
(20, 12),
)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/mirnet.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/mirnet.ipynb",
"repo_id": "keras-io",
"token_count": 6792
} | 90 |
<jupyter_start><jupyter_text>RandAugment for Image Classification for Improved Robustness**Authors:** [Sayak Paul](https://twitter.com/RisingSayak)[Sachin Prasad](https://github.com/sachinprasadhs)**Date created:** 2021/03/13**Last modified:** 2023/12/12**Description:** RandAugment for training an image classification model with improved robustness. Data augmentation is a very useful technique that can help to improve the translationalinvariance of convolutional neural networks (CNN). RandAugment is a stochastic dataaugmentation routine for vision data and was proposed in[RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719).It is composed of strong augmentation transforms like color jitters, Gaussian blurs,saturations, etc. along with more traditional augmentation transforms such asrandom crops.These parameters are tuned for a given dataset and a network architecture. The authors ofRandAugment also provide pseudocode of RandAugment in the original paper (Figure 2).Recently, it has been a key component of works like[Noisy Student Training](https://arxiv.org/abs/1911.04252) and[Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848).It has been also central to thesuccess of [EfficientNets](https://arxiv.org/abs/1905.11946).```pythonpip install keras-cv``` Imports & setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Load the CIFAR10 datasetFor this example, we will be using the[CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(f"Total training examples: {len(x_train)}")
print(f"Total test examples: {len(x_test)}")<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 128
EPOCHS = 1
IMAGE_SIZE = 72<jupyter_output><empty_output><jupyter_text>Initialize `RandAugment` object. Now, we will initialize a `RandAugment` object from KerasCV (`keras_cv.layers.RandAugment`) with the parameters suggested by the RandAugment authors.<jupyter_code>rand_augment = keras_cv.layers.RandAugment(
value_range=(0, 255), augmentations_per_image=3, magnitude=0.8
)<jupyter_output><empty_output><jupyter_text>Create TensorFlow `Dataset` objects<jupyter_code>train_ds_rand = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(BATCH_SIZE * 100)
.batch(BATCH_SIZE)
.map(
lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
num_parallel_calls=AUTO,
)
.map(
lambda x, y: (rand_augment(tf.cast(x, tf.uint8)), y),
num_parallel_calls=AUTO,
)
.prefetch(AUTO)
)
test_ds = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.batch(BATCH_SIZE)
.map(
lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
num_parallel_calls=AUTO,
)
.prefetch(AUTO)
)<jupyter_output><empty_output><jupyter_text>For comparison purposes, let's also define a simple augmentation pipeline consisting ofrandom flips, random rotations, and random zoomings.<jupyter_code>simple_aug = keras.Sequential(
[
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.02),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
]
)
# Now, map the augmentation pipeline to our training dataset
train_ds_simple = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(BATCH_SIZE * 100)
.batch(BATCH_SIZE)
.map(lambda x, y: (simple_aug(x), y), num_parallel_calls=AUTO)
.prefetch(AUTO)
)<jupyter_output><empty_output><jupyter_text>Visualize the dataset augmented with RandAugment<jupyter_code>sample_images, _ = next(iter(train_ds_rand))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("int"))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>You are encouraged to run the above code block a couple of times to see differentvariations. Visualize the dataset augmented with `simple_aug`<jupyter_code>sample_images, _ = next(iter(train_ds_simple))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("int"))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Define a model building utility functionNow, we define a CNN model that is based on the[ResNet50V2 architecture](https://arxiv.org/abs/1603.05027). Also,notice that the network already has a rescaling layer inside it. This eliminates the needto do any separate preprocessing on our dataset and is specifically very useful fordeployment purposes.<jupyter_code>def get_training_model():
resnet50_v2 = keras.applications.ResNet50V2(
weights=None,
include_top=True,
input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
classes=10,
)
model = keras.Sequential(
[
layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
layers.Rescaling(scale=1.0 / 127.5, offset=-1),
resnet50_v2,
]
)
return model
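# Note (added for clarity): the `Rescaling` layer above maps pixel values from
# [0, 255] to [-1, 1], the same convention as ResNet50V2's own `preprocess_input`,
# which is why no separate preprocessing step is applied to the datasets.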
get_training_model().summary()<jupyter_output><empty_output><jupyter_text>We will train this network on two different versions of our dataset:* One augmented with RandAugment.* Another one augmented with `simple_aug`.Since RandAugment is known to enhance the robustness of models to common perturbationsand corruptions, we will also evaluate our models on the CIFAR-10-C dataset, proposed in[Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261)by Hendrycks et al. The CIFAR-10-C datasetconsists of 19 different image corruptions and perturbations (for example speckle noise,fog, Gaussian blur, etc.) that too at varying severity levels. For this example we willbe using the following configuration:[`cifar10_corrupted/saturate_5`](https://www.tensorflow.org/datasets/catalog/cifar10_corruptedcifar10_corruptedsaturate_5).The images from this configuration look like so:In the interest of reproducibility, we serialize the initial random weights of our shallownetwork.<jupyter_code>initial_model = get_training_model()
initial_model.save_weights("initial.weights.h5")<jupyter_output><empty_output><jupyter_text>Train model with RandAugment<jupyter_code>rand_aug_model = get_training_model()
rand_aug_model.load_weights("initial.weights.h5")
rand_aug_model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
rand_aug_model.fit(train_ds_rand, validation_data=test_ds, epochs=EPOCHS)
_, test_acc = rand_aug_model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_acc * 100))<jupyter_output><empty_output><jupyter_text>Train model with `simple_aug`<jupyter_code>simple_aug_model = get_training_model()
simple_aug_model.load_weights("initial.weights.h5")
simple_aug_model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
simple_aug_model.fit(train_ds_simple, validation_data=test_ds, epochs=EPOCHS)
_, test_acc = simple_aug_model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_acc * 100))<jupyter_output><empty_output><jupyter_text>Load the CIFAR-10-C dataset and evaluate performance<jupyter_code># Load and prepare the CIFAR-10-C dataset
# (If it's not already downloaded, it takes ~10 minutes of time to download)
cifar_10_c = tfds.load("cifar10_corrupted/saturate_5", split="test", as_supervised=True)
cifar_10_c = cifar_10_c.batch(BATCH_SIZE).map(
lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
num_parallel_calls=AUTO,
)
# Evaluate `rand_aug_model`
_, test_acc = rand_aug_model.evaluate(cifar_10_c, verbose=0)
print(
"Accuracy with RandAugment on CIFAR-10-C (saturate_5): {:.2f}%".format(
test_acc * 100
)
)
# Evaluate `simple_aug_model`
_, test_acc = simple_aug_model.evaluate(cifar_10_c, verbose=0)
print(
"Accuracy with simple_aug on CIFAR-10-C (saturate_5): {:.2f}%".format(
test_acc * 100
)
)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/randaugment.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/randaugment.ipynb",
"repo_id": "keras-io",
"token_count": 3137
} | 91 |
<jupyter_start><jupyter_text>Video Classification with a CNN-RNN Architecture**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/05/28**Last modified:** 2023/12/08**Description:** Training a video classifier with transfer learning and a recurrent model on the UCF101 dataset. This example demonstrates video classification, an important use-case withapplications in recommendations, security, and so on.We will be using the [UCF101 dataset](https://www.crcv.ucf.edu/data/UCF101.php)to build our video classifier. The dataset consists of videos categorized into differentactions, like cricket shot, punching, biking, etc. This dataset is commonly used tobuild action recognizers, which are an application of video classification.A video consists of an ordered sequence of frames. Each frame contains *spatial*information, and the sequence of those frames contains *temporal* information. To modelboth of these aspects, we use a hybrid architecture that consists of convolutions(for spatial processing) as well as recurrent layers (for temporal processing).Specifically, we'll use a Convolutional Neural Network (CNN) and a Recurrent NeuralNetwork (RNN) consisting of [GRU layers](https://keras.io/api/layers/recurrent_layers/gru/).This kind of hybrid architecture is popularly known as a **CNN-RNN**.This example requires TensorFlow 2.5 or higher, as well as TensorFlow Docs, which can beinstalled using the following command:<jupyter_code>!pip install -q git+https://github.com/tensorflow/docs<jupyter_output><empty_output><jupyter_text>Data collectionIn order to keep the runtime of this example relatively short, we will be using asubsampled version of the original UCF101 dataset. You can refer to[this notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb)to know how the subsampling was done.<jupyter_code>!!wget -q https://github.com/sayakpaul/Action-Recognition-in-TensorFlow/releases/download/v1.0.0/ucf101_top5.tar.gz
!tar xf ucf101_top5.tar.gz<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import os
import keras
from imutils import paths
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import imageio
import cv2
from IPython.display import Image<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>IMG_SIZE = 224
BATCH_SIZE = 64
EPOCHS = 10
MAX_SEQ_LENGTH = 20
NUM_FEATURES = 2048<jupyter_output><empty_output><jupyter_text>Data preparation<jupyter_code>train_df = pd.read_csv("train.csv")
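# Note (added for clarity): MAX_SEQ_LENGTH is the maximum number of frames kept per
# video, and NUM_FEATURES matches the size of the average-pooled feature vector
# produced by the InceptionV3 feature extractor defined below.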
test_df = pd.read_csv("test.csv")
print(f"Total videos for training: {len(train_df)}")
print(f"Total videos for testing: {len(test_df)}")
train_df.sample(10)<jupyter_output><empty_output><jupyter_text>One of the many challenges of training video classifiers is figuring out a way to feed the videos to a network. [This blog post](https://blog.coast.ai/five-video-classification-methods-implemented-in-keras-and-tensorflow-99cad29cc0b5) discusses five such methods. Since a video is an ordered sequence of frames, we could just extract the frames and put them in a 3D tensor. But the number of frames may differ from video to video, which would prevent us from stacking them into batches (unless we use padding). As an alternative, we can **save video frames at a fixed interval until a maximum frame count is reached**. In this example we will do the following:1. Capture the frames of a video.2. Extract frames from the videos until a maximum frame count is reached.3. In cases where a video's frame count is less than the maximum frame count, we will pad the video with zeros.Note that this workflow is identical to [problems involving text sequences](https://developers.google.com/machine-learning/guides/text-classification/). Videos of the UCF101 dataset are [known](https://www.crcv.ucf.edu/papers/UCF101_CRCV-TR-12-01.pdf) to not contain extreme variations in objects and actions across frames. Because of this, it may be okay to only consider a few frames for the learning task. But this approach may not generalize well to other video classification problems. We will be using [OpenCV's `VideoCapture()` method](https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html) to read frames from videos.<jupyter_code># The following two methods are taken from this tutorial:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y : start_y + min_dim, start_x : start_x + min_dim]
def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
return np.array(frames)<jupyter_output><empty_output><jupyter_text>We can use a pre-trained network to extract meaningful features from the extractedframes. The [`Keras Applications`](https://keras.io/api/applications/) module providesa number of state-of-the-art models pre-trained on the [ImageNet-1k dataset](http://image-net.org/).We will be using the [InceptionV3 model](https://arxiv.org/abs/1512.00567) for this purpose.<jupyter_code>def build_feature_extractor():
feature_extractor = keras.applications.InceptionV3(
weights="imagenet",
include_top=False,
pooling="avg",
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
preprocess_input = keras.applications.inception_v3.preprocess_input
inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
preprocessed = preprocess_input(inputs)
outputs = feature_extractor(preprocessed)
return keras.Model(inputs, outputs, name="feature_extractor")
feature_extractor = build_feature_extractor()<jupyter_output><empty_output><jupyter_text>The labels of the videos are strings. Neural networks do not understand string values,so they must be converted to some numerical form before they are fed to the model. Herewe will use the [`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup)layer encode the class labels as integers.<jupyter_code>label_processor = keras.layers.StringLookup(
num_oov_indices=0, vocabulary=np.unique(train_df["tag"])
)
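# Illustrative check (not part of the original example): the layer maps each class
# string to an integer index; here we round-trip the first entry of its own vocabulary.
print(label_processor([label_processor.get_vocabulary()[:1]]))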
print(label_processor.get_vocabulary())<jupyter_output><empty_output><jupyter_text>Finally, we can put all the pieces together to create our data processing utility.<jupyter_code>def prepare_all_videos(df, root_dir):
num_samples = len(df)
video_paths = df["video_name"].values.tolist()
labels = df["tag"].values
labels = keras.ops.convert_to_numpy(label_processor(labels[..., None]))
# `frame_masks` and `frame_features` are what we will feed to our sequence model.
# `frame_masks` will contain a bunch of booleans denoting if a timestep is
# masked with padding or not.
frame_masks = np.zeros(shape=(num_samples, MAX_SEQ_LENGTH), dtype="bool")
frame_features = np.zeros(
shape=(num_samples, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32"
)
# For each video.
for idx, path in enumerate(video_paths):
# Gather all its frames and add a batch dimension.
frames = load_video(os.path.join(root_dir, path))
frames = frames[None, ...]
# Initialize placeholders to store the masks and features of the current video.
temp_frame_mask = np.zeros(
shape=(
1,
MAX_SEQ_LENGTH,
),
dtype="bool",
)
temp_frame_features = np.zeros(
shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32"
)
# Extract features from the frames of the current video.
for i, batch in enumerate(frames):
video_length = batch.shape[0]
length = min(MAX_SEQ_LENGTH, video_length)
for j in range(length):
temp_frame_features[i, j, :] = feature_extractor.predict(
batch[None, j, :], verbose=0,
)
temp_frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked
frame_features[idx,] = temp_frame_features.squeeze()
frame_masks[idx,] = temp_frame_mask.squeeze()
return (frame_features, frame_masks), labels
train_data, train_labels = prepare_all_videos(train_df, "train")
test_data, test_labels = prepare_all_videos(test_df, "test")
print(f"Frame features in train set: {train_data[0].shape}")
print(f"Frame masks in train set: {train_data[1].shape}")<jupyter_output><empty_output><jupyter_text>The above code block will take ~20 minutes to execute depending on the machine it's beingexecuted. The sequence modelNow, we can feed this data to a sequence model consisting of recurrent layers like `GRU`.<jupyter_code># Utility for our sequence model.
def get_sequence_model():
class_vocab = label_processor.get_vocabulary()
frame_features_input = keras.Input((MAX_SEQ_LENGTH, NUM_FEATURES))
mask_input = keras.Input((MAX_SEQ_LENGTH,), dtype="bool")
# Refer to the following tutorial to understand the significance of using `mask`:
# https://keras.io/api/layers/recurrent_layers/gru/
x = keras.layers.GRU(16, return_sequences=True)(
frame_features_input, mask=mask_input
)
x = keras.layers.GRU(8)(x)
x = keras.layers.Dropout(0.4)(x)
x = keras.layers.Dense(8, activation="relu")(x)
output = keras.layers.Dense(len(class_vocab), activation="softmax")(x)
rnn_model = keras.Model([frame_features_input, mask_input], output)
rnn_model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return rnn_model
# Utility for running experiments.
def run_experiment():
filepath = "/tmp/video_classifier/ckpt.weights.h5"
checkpoint = keras.callbacks.ModelCheckpoint(
filepath, save_weights_only=True, save_best_only=True, verbose=1
)
seq_model = get_sequence_model()
history = seq_model.fit(
[train_data[0], train_data[1]],
train_labels,
validation_split=0.3,
epochs=EPOCHS,
callbacks=[checkpoint],
)
seq_model.load_weights(filepath)
_, accuracy = seq_model.evaluate([test_data[0], test_data[1]], test_labels)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
return history, seq_model
_, sequence_model = run_experiment()<jupyter_output><empty_output><jupyter_text>**Note**: To keep the runtime of this example relatively short, we just used a few training examples. This number of training examples is low with respect to the sequence model being used, which has 99,909 trainable parameters. You are encouraged to sample more data from the UCF101 dataset using [the notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb) mentioned above and train the same model. Inference<jupyter_code>def prepare_single_video(frames):
frames = frames[None, ...]
frame_mask = np.zeros(
shape=(
1,
MAX_SEQ_LENGTH,
),
dtype="bool",
)
frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")
for i, batch in enumerate(frames):
video_length = batch.shape[0]
length = min(MAX_SEQ_LENGTH, video_length)
for j in range(length):
frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])
frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked
return frame_features, frame_mask
def sequence_prediction(path):
class_vocab = label_processor.get_vocabulary()
frames = load_video(os.path.join("test", path))
frame_features, frame_mask = prepare_single_video(frames)
probabilities = sequence_model.predict([frame_features, frame_mask])[0]
for i in np.argsort(probabilities)[::-1]:
print(f" {class_vocab[i]}: {probabilities[i] * 100:5.2f}%")
return frames
# This utility is for visualization.
# Referenced from:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def to_gif(images):
converted_images = images.astype(np.uint8)
imageio.mimsave("animation.gif", converted_images, duration=100)
return Image("animation.gif")
test_video = np.random.choice(test_df["video_name"].values.tolist())
print(f"Test video path: {test_video}")
test_frames = sequence_prediction(test_video)
to_gif(test_frames[:MAX_SEQ_LENGTH])<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/video_classification.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/video_classification.ipynb",
"repo_id": "keras-io",
"token_count": 4666
} | 92 |
# Barlow Twins for Contrastive SSL
**Author:** [Abhiraam Eranti](https://github.com/dewball345)<br>
**Date created:** 11/4/21<br>
**Last modified:** 12/20/21<br>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/barlow_twins.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/barlow_twins.py)
**Description:** A Keras implementation of Barlow Twins (contrastive SSL with redundancy reduction).
---
## Introduction
Self-supervised learning (SSL) is a relatively novel technique in which a model
learns from unlabeled data, and is often used when labeled data is scarce or
when there is very little of it. A practical use for SSL is to create
intermediate embeddings that are learned from the data. These embeddings are
based on the dataset itself, with similar images having similar embeddings, and
vice versa. They are then attached to the rest of the model, which uses those
embeddings as information to learn and make predictions effectively.
These embeddings, ideally, should contain as much information and insight about
the data as possible, so that the model can make better predictions. However,
a common problem that arises is that the model creates embeddings that are
redundant. For example, if two images are similar, the model will create
embeddings that are just a string of 1's, or some other value that
contains repeating bits of information. This is no better than a one-hot
encoding or just having one bit as the model’s representations; it defeats the
purpose of the embeddings, as they do not learn as much about the dataset as
possible. Other approaches tackle this problem by carefully configuring the
model so that it avoids producing redundant representations.
Barlow Twins is a new approach to this problem; while other solutions mainly
tackle the first goal of invariance (similar images have similar embeddings),
the Barlow Twins method also prioritizes the goal of reducing redundancy.
It also has the advantage of being much simpler than other methods, and its
model architecture is symmetric, meaning that both twins in the model do the
same thing. It is also near state-of-the-art on ImageNet, even exceeding methods
like SimCLR.
One disadvantage of Barlow Twins is that it is heavily dependent on
augmentation, suffering major performance decreases in accuracy without them.
TL;DR: Barlow Twins creates representations that are:
* Invariant.
* Not redundant, and carry as much info about the dataset as possible.
Also, it is simpler than other methods.
This notebook can train a Barlow Twins model and reach up to
64% validation accuracy on the CIFAR-10 dataset.

### High-Level Theory
The model takes two versions of the same image (with different augmentations) as
input. It then produces a prediction for each of them, creating representations.
These representations are then used to compute a cross-correlation matrix.
Cross-correlation matrix:
```
(pred_1.T @ pred_2) / batch_size
```
The cross-correlation matrix measures the correlation between the output
neurons in the two representations made by the model predictions of the two
augmented versions of data. Ideally, a cross-correlation matrix should look
like an identity matrix if the two images are the same.
When this happens, it means that the representations:
1. Are invariant. The diagonal shows the correlation between each
representation's neurons and its corresponding augmented one. Because the two
versions come from the same image, the diagonal of the matrix should show that
there is a strong correlation between them. If the images were different, the
diagonal values would be low.
2. Do not show signs of redundancy. If a neuron is strongly correlated with
neurons off the diagonal, it carries information that other neurons already
encode, which means the representation is redundant.
Here is a good way to understand this in pseudocode (adapted from the original
paper):
```
c[i][i] = 1
c[i][j] = 0
where:
c is the cross-correlation matrix
i is the index of one representation's neuron
j is the index of the second representation's neuron
```
Taken from the original paper: [Barlow Twins: Self-Supervised Learning via Redundancy
Reduction](https://arxiv.org/abs/2103.03230)
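To make the intuition concrete, here is a minimal NumPy sketch (with made-up toy embeddings,
separate from the model built below) of how the cross-correlation matrix is computed, and why
its diagonal is all 1s when the two views yield identical, normalized representations:
```python
import numpy as np

batch_size, n_features = 8, 4
rng = np.random.default_rng(0)
# Toy embeddings for two augmented views of the same batch.
z_a = rng.normal(size=(batch_size, n_features))
z_b = z_a.copy()  # pretend both views produced identical embeddings

def normalize(z):
    # Standardize each feature (column) to zero mean and unit variance.
    return (z - z.mean(axis=0)) / z.std(axis=0)

# Cross-correlation matrix with shape (n_features, n_features).
c = (normalize(z_a).T @ normalize(z_b)) / batch_size
print(np.round(np.diag(c), 2))  # all 1.0 -> perfect invariance
print(np.round(c, 2))  # off-diagonal entries measure redundancy between features
```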
### References
Paper:
[Barlow Twins: Self-Supervised Learning via Redundancy
Reduction](https://arxiv.org/abs/2103.03230)
Original Implementation:
[facebookresearch/barlowtwins](https://github.com/facebookresearch/barlowtwins)
---
## Setup
```python
!pip install tensorflow-addons
```
```python
import os
# Slightly faster training: roughly a 30 second decrease on the first epoch and a
# 1-2 second decrease in later epoch times. Overall saves approx. 5 min of training time.
# Allocates two threads for a gpu private which allows more operations to be
# done faster
os.environ["TF_GPU_THREAD_MODE"] = "gpu_private"
import tensorflow as tf # framework
from tensorflow import keras # for tf.keras
import tensorflow_addons as tfa # LAMB optimizer and gaussian_blur_2d function
import numpy as np # np.random.random
import matplotlib.pyplot as plt # graphs
import datetime # tensorboard logs naming
# XLA optimization for faster performance(up to 10-15 minutes total time saved)
tf.config.optimizer.set_jit(True)
```
<div class="k-default-codeblock">
```
['Requirement already satisfied: tensorflow-addons in /usr/local/lib/python3.7/dist-packages (0.15.0)',
'Requirement already satisfied: typeguard>=2.7 in /usr/local/lib/python3.7/dist-packages (from tensorflow-addons) (2.7.1)']
```
</div>
---
## Load the CIFAR-10 dataset
```python
[
(train_features, train_labels),
(test_features, test_labels),
] = keras.datasets.cifar10.load_data()
train_features = train_features / 255.0
test_features = test_features / 255.0
```
---
## Necessary Hyperparameters
```python
# Batch size of dataset
BATCH_SIZE = 512
# Width and height of image
IMAGE_SIZE = 32
```
---
## Augmentation Utilities
The Barlow twins algorithm is heavily reliant on
Augmentation. One unique feature of the method is that sometimes, augmentations
probabilistically occur.
**Augmentations**
* *RandomToGrayscale*: randomly applies grayscale to image 20% of the time
* *RandomColorJitter*: randomly applies color jitter 80% of the time
* *RandomFlip*: randomly flips image horizontally 50% of the time
* *RandomResizedCrop*: randomly crops an image to a random size then resizes. This
happens 100% of the time
* *RandomSolarize*: randomly applies solarization to an image 20% of the time
* *RandomBlur*: randomly blurs an image 20% of the time
```python
class Augmentation(keras.layers.Layer):
"""Base augmentation class.
Base augmentation class. Contains the random_execute method.
Methods:
random_execute: method that returns true or false based
on a probability. Used to determine whether an augmentation
will be run.
"""
def __init__(self):
super().__init__()
@tf.function
def random_execute(self, prob: float) -> bool:
"""random_execute function.
Arguments:
prob: a float value from 0-1 that determines the
probability.
Returns:
returns true or false based on the probability.
"""
return tf.random.uniform([], minval=0, maxval=1) < prob
class RandomToGrayscale(Augmentation):
"""RandomToGrayscale class.
RandomToGrayscale class. Randomly makes an image
grayscaled based on the random_execute method. There
is a 20% chance that an image will be grayscaled.
Methods:
call: method that grayscales an image 20% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a grayscaled version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
x = tf.image.rgb_to_grayscale(x)
x = tf.tile(x, [1, 1, 3])
return x
class RandomColorJitter(Augmentation):
"""RandomColorJitter class.
RandomColorJitter class. Randomly adds color jitter to an image.
Color jitter means to add random brightness, contrast,
saturation, and hue to an image. There is a 80% chance that an
image will be randomly color-jittered.
Methods:
call: method that color-jitters an image 80% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Adds color jitter to image, including:
Brightness change by a max-delta of 0.8
Contrast change by a max-delta of 0.8
Saturation change by a max-delta of 0.8
Hue change by a max-delta of 0.2
Originally, the same deltas of the original paper
were used, but a performance boost of almost 2% was found
when doubling them.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a color-jittered version of the image 80% of the time
and the original image 20% of the time.
"""
if self.random_execute(0.8):
x = tf.image.random_brightness(x, 0.8)
x = tf.image.random_contrast(x, 0.4, 1.6)
x = tf.image.random_saturation(x, 0.4, 1.6)
x = tf.image.random_hue(x, 0.2)
return x
class RandomFlip(Augmentation):
"""RandomFlip class.
RandomFlip class. Randomly flips image horizontally. There is a 50%
chance that an image will be randomly flipped.
Methods:
call: method that flips an image 50% of
the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Randomly flips the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a flipped version of the image 50% of the time
and the original image 50% of the time.
"""
if self.random_execute(0.5):
x = tf.image.random_flip_left_right(x)
return x
class RandomResizedCrop(Augmentation):
"""RandomResizedCrop class.
RandomResizedCrop class. Randomly crop an image to a random size,
then resize the image back to the original size.
Attributes:
image_size: The dimension of the image
Methods:
__call__: method that does random resize crop to the image.
"""
def __init__(self, image_size):
super().__init__()
self.image_size = image_size
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Does random resize crop by randomly cropping an image to a random
size 75% - 100% the size of the image. Then resizes it.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a randomly cropped image.
"""
rand_size = tf.random.uniform(
shape=[],
minval=int(0.75 * self.image_size),
maxval=1 * self.image_size,
dtype=tf.int32,
)
crop = tf.image.random_crop(x, (rand_size, rand_size, 3))
crop_resize = tf.image.resize(crop, (self.image_size, self.image_size))
return crop_resize
class RandomSolarize(Augmentation):
"""RandomSolarize class.
RandomSolarize class. Randomly solarizes an image.
Solarization is when pixels accidentally flip to an inverted state.
Methods:
call: method that does random solarization 20% of the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
Randomly solarizes the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a solarized version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
# flips abnormally low pixels to abnormally high pixels
x = tf.where(x < 10, x, 255 - x)
return x
class RandomBlur(Augmentation):
"""RandomBlur class.
RandomBlur class. Randomly blurs an image.
Methods:
call: method that does random blur 20% of the time.
"""
@tf.function
def call(self, x: tf.Tensor) -> tf.Tensor:
"""call function.
        Randomly blurs the image.
Arguments:
x: a tf.Tensor representing the image.
Returns:
returns a blurred version of the image 20% of the time
and the original image 80% of the time.
"""
if self.random_execute(0.2):
s = np.random.random()
return tfa.image.gaussian_filter2d(image=x, sigma=s)
return x
class RandomAugmentor(keras.Model):
"""RandomAugmentor class.
RandomAugmentor class. Chains all the augmentations into
one pipeline.
Attributes:
        image_size: An integer representing the width and height
of the image. Designed to be used for square images.
random_resized_crop: Instance variable representing the
RandomResizedCrop layer.
random_flip: Instance variable representing the
RandomFlip layer.
random_color_jitter: Instance variable representing the
RandomColorJitter layer.
random_blur: Instance variable representing the
RandomBlur layer
random_to_grayscale: Instance variable representing the
RandomToGrayscale layer
random_solarize: Instance variable representing the
RandomSolarize layer
Methods:
call: chains layers in pipeline together
"""
def __init__(self, image_size: int):
super().__init__()
self.image_size = image_size
self.random_resized_crop = RandomResizedCrop(image_size)
self.random_flip = RandomFlip()
self.random_color_jitter = RandomColorJitter()
self.random_blur = RandomBlur()
self.random_to_grayscale = RandomToGrayscale()
self.random_solarize = RandomSolarize()
def call(self, x: tf.Tensor) -> tf.Tensor:
x = self.random_resized_crop(x)
x = self.random_flip(x)
x = self.random_color_jitter(x)
x = self.random_blur(x)
x = self.random_to_grayscale(x)
x = self.random_solarize(x)
x = tf.clip_by_value(x, 0, 1)
return x
bt_augmentor = RandomAugmentor(IMAGE_SIZE)
```
---
## Data Loading
A class that creates the barlow twins' dataset.
The dataset consists of two copies of each image, with each copy receiving different
augmentations.
```python
class BTDatasetCreator:
"""Barlow twins dataset creator class.
BTDatasetCreator class. Responsible for creating the
barlow twins' dataset.
Attributes:
options: tf.data.Options needed to configure a setting
that may improve performance.
seed: random seed for shuffling. Used to synchronize two
augmented versions.
augmentor: augmentor used for augmentation.
Methods:
__call__: creates barlow dataset.
augmented_version: creates 1 half of the dataset.
"""
def __init__(self, augmentor: RandomAugmentor, seed: int = 1024):
self.options = tf.data.Options()
self.options.threading.max_intra_op_parallelism = 1
self.seed = seed
self.augmentor = augmentor
def augmented_version(self, ds: list) -> tf.data.Dataset:
return (
tf.data.Dataset.from_tensor_slices(ds)
.shuffle(1000, seed=self.seed)
.map(self.augmentor, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
.with_options(self.options)
)
def __call__(self, ds: list) -> tf.data.Dataset:
a1 = self.augmented_version(ds)
a2 = self.augmented_version(ds)
return tf.data.Dataset.zip((a1, a2)).with_options(self.options)
augment_versions = BTDatasetCreator(bt_augmentor)(train_features)
```
View examples of dataset.
```python
sample_augment_versions = iter(augment_versions)
def plot_values(batch: tuple):
fig, axs = plt.subplots(3, 3)
fig1, axs1 = plt.subplots(3, 3)
fig.suptitle("Augmentation 1")
fig1.suptitle("Augmentation 2")
a1, a2 = batch
# plots images on both tables
for i in range(3):
for j in range(3):
# CHANGE(add / 255)
axs[i][j].imshow(a1[3 * i + j])
axs[i][j].axis("off")
axs1[i][j].imshow(a2[3 * i + j])
axs1[i][j].axis("off")
plt.show()
plot_values(next(sample_augment_versions))
```


---
## Pseudocode of loss and model
The following sections follow the original author's pseudocode containing both model and
loss functions(see diagram below). Also contains a reference of variables used.

Reference:
```
y_a: first augmented version of original image.
y_b: second augmented version of original image.
z_a: model representation(embeddings) of y_a.
z_b: model representation(embeddings) of y_b.
z_a_norm: normalized z_a.
z_b_norm: normalized z_b.
c: cross correlation matrix.
c_diff: diagonal portion of loss(invariance term).
off_diag: off-diagonal portion of loss(redundancy reduction term).
```
---
## BarlowLoss: barlow twins model's loss function
Barlow Twins uses the cross correlation matrix for its loss. There are two parts to the
loss function:
* ***The invariance term*** (diagonal). This part is used to make the diagonals of the
matrix into 1s. When this is the case, the matrix shows that the images are
correlated (same).
* The loss function subtracts 1 from the diagonal and squares the values.
* ***The redundancy reduction term*** (off-diagonal). Here, the Barlow Twins loss
function aims to make these values zero. As mentioned before, it is redundant if the
representation neurons are correlated with values that are not on the diagonal.
* Off diagonals are squared.
After this, the two parts are summed together.
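Putting the two parts together, the loss from the paper can be written in the same pseudocode style as:
```
loss = sum_i (1 - c[i][i])^2 + lambda * sum_i sum_{j != i} (c[i][j])^2
```
This is what the `BarlowLoss` class below computes.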
```python
class BarlowLoss(keras.losses.Loss):
"""BarlowLoss class.
BarlowLoss class. Creates a loss function based on the cross-correlation
matrix.
Attributes:
batch_size: the batch size of the dataset
lambda_amt: the value for lambda(used in cross_corr_matrix_loss)
Methods:
__init__: gets instance variables
call: gets the loss based on the cross-correlation matrix
make_diag_zeros: Used in calculating off-diagonal section
of loss function; makes diagonals zeros.
cross_corr_matrix_loss: creates loss based on cross correlation
matrix.
"""
def __init__(self, batch_size: int):
"""__init__ method.
Gets the instance variables
Arguments:
batch_size: An integer value representing the batch size of the
dataset. Used for cross correlation matrix calculation.
"""
super().__init__()
self.lambda_amt = 5e-3
self.batch_size = batch_size
def get_off_diag(self, c: tf.Tensor) -> tf.Tensor:
"""get_off_diag method.
Makes the diagonals of the cross correlation matrix zeros.
This is used in the off-diagonal portion of the loss function,
where we take the squares of the off-diagonal values and sum them.
Arguments:
c: A tf.tensor that represents the cross correlation
matrix
Returns:
Returns a tf.tensor which represents the cross correlation
matrix with its diagonals as zeros.
"""
zero_diag = tf.zeros(c.shape[-1])
return tf.linalg.set_diag(c, zero_diag)
def cross_corr_matrix_loss(self, c: tf.Tensor) -> tf.Tensor:
"""cross_corr_matrix_loss method.
Gets the loss based on the cross correlation matrix.
We want the diagonals to be 1's and everything else to be
zeros to show that the two augmented images are similar.
Loss function procedure:
take the diagonal of the cross-correlation matrix, subtract by 1,
and square that value so no negatives.
Take the off-diagonal of the cc-matrix(see get_off_diag()),
square those values to get rid of negatives and increase the value,
and multiply it by a lambda to weight it such that it is of equal
value to the optimizer as the diagonal(there are more values off-diag
then on-diag)
Take the sum of the first and second parts and then sum them together.
Arguments:
c: A tf.tensor that represents the cross correlation
matrix
Returns:
            Returns a scalar tf.Tensor representing the loss computed
            from the cross correlation matrix.
"""
# subtracts diagonals by one and squares them(first part)
c_diff = tf.pow(tf.linalg.diag_part(c) - 1, 2)
# takes off diagonal, squares it, multiplies with lambda(second part)
off_diag = tf.pow(self.get_off_diag(c), 2) * self.lambda_amt
# sum first and second parts together
loss = tf.reduce_sum(c_diff) + tf.reduce_sum(off_diag)
return loss
def normalize(self, output: tf.Tensor) -> tf.Tensor:
"""normalize method.
Normalizes the model prediction.
Arguments:
output: the model prediction.
Returns:
Returns a normalized version of the model prediction.
"""
return (output - tf.reduce_mean(output, axis=0)) / tf.math.reduce_std(
output, axis=0
)
def cross_corr_matrix(self, z_a_norm: tf.Tensor, z_b_norm: tf.Tensor) -> tf.Tensor:
"""cross_corr_matrix method.
Creates a cross correlation matrix from the predictions.
It transposes the first prediction and multiplies this with
the second, creating a matrix with shape (n_dense_units, n_dense_units).
See build_twin() for more info. Then it divides this with the
batch size.
Arguments:
z_a_norm: A normalized version of the first prediction.
z_b_norm: A normalized version of the second prediction.
Returns:
Returns a cross correlation matrix.
"""
return (tf.transpose(z_a_norm) @ z_b_norm) / self.batch_size
def call(self, z_a: tf.Tensor, z_b: tf.Tensor) -> tf.Tensor:
"""call method.
Makes the cross-correlation loss. Uses the CreateCrossCorr
class to make the cross corr matrix, then finds the loss and
returns it(see cross_corr_matrix_loss()).
Arguments:
z_a: The prediction of the first set of augmented data.
z_b: the prediction of the second set of augmented data.
Returns:
Returns a (rank-0) tf.Tensor that represents the loss.
"""
z_a_norm, z_b_norm = self.normalize(z_a), self.normalize(z_b)
c = self.cross_corr_matrix(z_a_norm, z_b_norm)
loss = self.cross_corr_matrix_loss(c)
return loss
```
---
## Barlow Twins' Model Architecture
The model has two parts:
* The encoder network, which is a resnet-34.
* The projector network, which creates the model embeddings.
  * This consists of an MLP with three dense layers; the first two are each
followed by batch normalization and a ReLU activation.
Resnet encoder network implementation:
```python
class ResNet34:
"""Resnet34 class.
Responsible for the Resnet 34 architecture.
Modified from
https://www.analyticsvidhya.com/blog/2021/08/how-to-code-your-resnet-from-scratch-in-tensorflow/#h2_2.
View their website for more information.
"""
def identity_block(self, x, filter):
# copy tensor to variable called x_skip
x_skip = x
# Layer 1
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
x = tf.keras.layers.Activation("relu")(x)
# Layer 2
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
# Add Residue
x = tf.keras.layers.Add()([x, x_skip])
x = tf.keras.layers.Activation("relu")(x)
return x
def convolutional_block(self, x, filter):
# copy tensor to variable called x_skip
x_skip = x
# Layer 1
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same", strides=(2, 2))(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
x = tf.keras.layers.Activation("relu")(x)
# Layer 2
x = tf.keras.layers.Conv2D(filter, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=3)(x)
# Processing Residue with conv(1,1)
x_skip = tf.keras.layers.Conv2D(filter, (1, 1), strides=(2, 2))(x_skip)
# Add Residue
x = tf.keras.layers.Add()([x, x_skip])
x = tf.keras.layers.Activation("relu")(x)
return x
def __call__(self, shape=(32, 32, 3)):
# Step 1 (Setup Input Layer)
x_input = tf.keras.layers.Input(shape)
x = tf.keras.layers.ZeroPadding2D((3, 3))(x_input)
# Step 2 (Initial Conv layer along with maxPool)
x = tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding="same")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding="same")(x)
# Define size of sub-blocks and initial filter size
block_layers = [3, 4, 6, 3]
filter_size = 64
# Step 3 Add the Resnet Blocks
for i in range(4):
if i == 0:
# For sub-block 1 Residual/Convolutional block not needed
for j in range(block_layers[i]):
x = self.identity_block(x, filter_size)
else:
# One Residual/Convolutional Block followed by Identity blocks
# The filter size will go on increasing by a factor of 2
filter_size = filter_size * 2
x = self.convolutional_block(x, filter_size)
for j in range(block_layers[i] - 1):
x = self.identity_block(x, filter_size)
# Step 4 End Dense Network
x = tf.keras.layers.AveragePooling2D((2, 2), padding="same")(x)
x = tf.keras.layers.Flatten()(x)
model = tf.keras.models.Model(inputs=x_input, outputs=x, name="ResNet34")
return model
```
Projector network:
```python
def build_twin() -> keras.Model:
"""build_twin method.
Builds a barlow twins model consisting of an encoder(resnet-34)
and a projector, which generates embeddings for the images
Returns:
returns a barlow twins model
"""
# number of dense neurons in the projector
n_dense_neurons = 5000
# encoder network
resnet = ResNet34()()
last_layer = resnet.layers[-1].output
# intermediate layers of the projector network
n_layers = 2
for i in range(n_layers):
dense = tf.keras.layers.Dense(n_dense_neurons, name=f"projector_dense_{i}")
if i == 0:
x = dense(last_layer)
else:
x = dense(x)
x = tf.keras.layers.BatchNormalization(name=f"projector_bn_{i}")(x)
x = tf.keras.layers.ReLU(name=f"projector_relu_{i}")(x)
x = tf.keras.layers.Dense(n_dense_neurons, name=f"projector_dense_{n_layers}")(x)
model = keras.Model(resnet.input, x)
return model
```
---
## Training Loop Model
See pseudocode for reference.
```python
class BarlowModel(keras.Model):
"""BarlowModel class.
BarlowModel class. Responsible for making predictions and handling
gradient descent with the optimizer.
Attributes:
model: the barlow model architecture.
loss_tracker: the loss metric.
Methods:
train_step: one train step; do model predictions, loss, and
optimizer step.
metrics: Returns metrics.
"""
def __init__(self):
super().__init__()
self.model = build_twin()
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def train_step(self, batch: tf.Tensor) -> tf.Tensor:
"""train_step method.
Do one train step. Make model predictions, find loss, pass loss to
optimizer, and make optimizer apply gradients.
Arguments:
batch: one batch of data to be given to the loss function.
Returns:
Returns a dictionary with the loss metric.
"""
# get the two augmentations from the batch
y_a, y_b = batch
with tf.GradientTape() as tape:
# get two versions of predictions
z_a, z_b = self.model(y_a, training=True), self.model(y_b, training=True)
loss = self.loss(z_a, z_b)
grads_model = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads_model, self.model.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
```
---
## Model Training
* Used the LAMB optimizer, instead of ADAM or SGD.
* Similar to the LARS optimizer used in the paper, and lets the model converge much
faster than other methods.
* Expected training time: 1 hour 30 min. Go and eat a snack or take a nap or something.
```python
# sets up model, optimizer, loss
bm = BarlowModel()
# chose the LAMB optimizer due to high batch sizes. Converged MUCH faster
# than ADAM or SGD
optimizer = tfa.optimizers.LAMB()
loss = BarlowLoss(BATCH_SIZE)
bm.compile(optimizer=optimizer, loss=loss)
# Expected training time: 1 hours 30 min
history = bm.fit(augment_versions, epochs=160)
plt.plot(history.history["loss"])
plt.show()
```
<div class="k-default-codeblock">
```
Epoch 1/160
97/97 [==============================] - 89s 294ms/step - loss: 3480.7588
Epoch 2/160
97/97 [==============================] - 29s 294ms/step - loss: 2163.4197
Epoch 3/160
97/97 [==============================] - 29s 294ms/step - loss: 1939.0248
Epoch 4/160
97/97 [==============================] - 29s 294ms/step - loss: 1810.4800
Epoch 5/160
97/97 [==============================] - 29s 294ms/step - loss: 1725.7401
Epoch 6/160
97/97 [==============================] - 29s 294ms/step - loss: 1658.2261
Epoch 7/160
97/97 [==============================] - 29s 294ms/step - loss: 1592.0747
Epoch 8/160
97/97 [==============================] - 29s 294ms/step - loss: 1545.2579
Epoch 9/160
97/97 [==============================] - 29s 294ms/step - loss: 1509.6631
Epoch 10/160
97/97 [==============================] - 29s 294ms/step - loss: 1484.1141
Epoch 11/160
97/97 [==============================] - 29s 293ms/step - loss: 1456.8615
Epoch 12/160
97/97 [==============================] - 29s 294ms/step - loss: 1430.0315
Epoch 13/160
97/97 [==============================] - 29s 294ms/step - loss: 1418.1147
Epoch 14/160
97/97 [==============================] - 29s 294ms/step - loss: 1385.7473
Epoch 15/160
97/97 [==============================] - 29s 294ms/step - loss: 1362.8176
Epoch 16/160
97/97 [==============================] - 29s 294ms/step - loss: 1353.6069
Epoch 17/160
97/97 [==============================] - 29s 294ms/step - loss: 1331.3687
Epoch 18/160
97/97 [==============================] - 29s 294ms/step - loss: 1323.1509
Epoch 19/160
97/97 [==============================] - 29s 294ms/step - loss: 1309.3015
Epoch 20/160
97/97 [==============================] - 29s 294ms/step - loss: 1303.2418
Epoch 21/160
97/97 [==============================] - 29s 294ms/step - loss: 1278.0450
Epoch 22/160
97/97 [==============================] - 29s 294ms/step - loss: 1272.2640
Epoch 23/160
97/97 [==============================] - 29s 294ms/step - loss: 1259.4225
Epoch 24/160
97/97 [==============================] - 29s 294ms/step - loss: 1246.8461
Epoch 25/160
97/97 [==============================] - 29s 294ms/step - loss: 1235.0269
Epoch 26/160
97/97 [==============================] - 29s 295ms/step - loss: 1228.4196
Epoch 27/160
97/97 [==============================] - 29s 295ms/step - loss: 1220.0851
Epoch 28/160
97/97 [==============================] - 29s 294ms/step - loss: 1208.5876
Epoch 29/160
97/97 [==============================] - 29s 294ms/step - loss: 1203.1449
Epoch 30/160
97/97 [==============================] - 29s 294ms/step - loss: 1199.5155
Epoch 31/160
97/97 [==============================] - 29s 294ms/step - loss: 1183.9818
Epoch 32/160
97/97 [==============================] - 29s 294ms/step - loss: 1173.9989
Epoch 33/160
97/97 [==============================] - 29s 294ms/step - loss: 1171.3789
Epoch 34/160
97/97 [==============================] - 29s 294ms/step - loss: 1160.8230
Epoch 35/160
97/97 [==============================] - 29s 294ms/step - loss: 1159.4148
Epoch 36/160
97/97 [==============================] - 29s 294ms/step - loss: 1148.4250
Epoch 37/160
97/97 [==============================] - 29s 294ms/step - loss: 1138.1802
Epoch 38/160
97/97 [==============================] - 29s 294ms/step - loss: 1135.9139
Epoch 39/160
97/97 [==============================] - 29s 294ms/step - loss: 1126.8186
Epoch 40/160
97/97 [==============================] - 29s 294ms/step - loss: 1119.6173
Epoch 41/160
97/97 [==============================] - 29s 293ms/step - loss: 1113.9358
Epoch 42/160
97/97 [==============================] - 29s 294ms/step - loss: 1106.0131
Epoch 43/160
97/97 [==============================] - 29s 294ms/step - loss: 1104.7386
Epoch 44/160
97/97 [==============================] - 29s 294ms/step - loss: 1097.7909
Epoch 45/160
97/97 [==============================] - 29s 294ms/step - loss: 1091.4229
Epoch 46/160
97/97 [==============================] - 29s 293ms/step - loss: 1082.3530
Epoch 47/160
97/97 [==============================] - 29s 294ms/step - loss: 1081.9459
Epoch 48/160
97/97 [==============================] - 29s 294ms/step - loss: 1078.5864
Epoch 49/160
97/97 [==============================] - 29s 293ms/step - loss: 1075.9255
Epoch 50/160
97/97 [==============================] - 29s 293ms/step - loss: 1070.9954
Epoch 51/160
97/97 [==============================] - 29s 294ms/step - loss: 1061.1058
Epoch 52/160
97/97 [==============================] - 29s 294ms/step - loss: 1055.0126
Epoch 53/160
97/97 [==============================] - 29s 294ms/step - loss: 1045.7827
Epoch 54/160
97/97 [==============================] - 29s 293ms/step - loss: 1047.5338
Epoch 55/160
97/97 [==============================] - 29s 294ms/step - loss: 1043.9012
Epoch 56/160
97/97 [==============================] - 29s 294ms/step - loss: 1044.5902
Epoch 57/160
97/97 [==============================] - 29s 294ms/step - loss: 1038.3389
Epoch 58/160
97/97 [==============================] - 29s 294ms/step - loss: 1032.1195
Epoch 59/160
97/97 [==============================] - 29s 294ms/step - loss: 1026.5962
Epoch 60/160
97/97 [==============================] - 29s 294ms/step - loss: 1018.2954
Epoch 61/160
97/97 [==============================] - 29s 294ms/step - loss: 1014.7681
Epoch 62/160
97/97 [==============================] - 29s 294ms/step - loss: 1007.7906
Epoch 63/160
97/97 [==============================] - 29s 294ms/step - loss: 1012.9134
Epoch 64/160
97/97 [==============================] - 29s 294ms/step - loss: 1009.7881
Epoch 65/160
97/97 [==============================] - 29s 294ms/step - loss: 1003.2436
Epoch 66/160
97/97 [==============================] - 29s 293ms/step - loss: 997.0688
Epoch 67/160
97/97 [==============================] - 29s 294ms/step - loss: 999.1620
Epoch 68/160
97/97 [==============================] - 29s 294ms/step - loss: 993.2636
Epoch 69/160
97/97 [==============================] - 29s 295ms/step - loss: 988.5142
Epoch 70/160
97/97 [==============================] - 29s 294ms/step - loss: 981.5876
Epoch 71/160
97/97 [==============================] - 29s 294ms/step - loss: 978.3053
Epoch 72/160
97/97 [==============================] - 29s 295ms/step - loss: 978.8599
Epoch 73/160
97/97 [==============================] - 29s 294ms/step - loss: 973.7569
Epoch 74/160
97/97 [==============================] - 29s 294ms/step - loss: 971.2402
Epoch 75/160
97/97 [==============================] - 29s 295ms/step - loss: 964.2864
Epoch 76/160
97/97 [==============================] - 29s 294ms/step - loss: 963.4999
Epoch 77/160
97/97 [==============================] - 29s 294ms/step - loss: 959.7264
Epoch 78/160
97/97 [==============================] - 29s 294ms/step - loss: 958.1680
Epoch 79/160
97/97 [==============================] - 29s 295ms/step - loss: 952.0243
Epoch 80/160
97/97 [==============================] - 29s 295ms/step - loss: 947.8354
Epoch 81/160
97/97 [==============================] - 29s 295ms/step - loss: 945.8139
Epoch 82/160
97/97 [==============================] - 29s 294ms/step - loss: 944.9114
Epoch 83/160
97/97 [==============================] - 29s 294ms/step - loss: 940.7040
Epoch 84/160
97/97 [==============================] - 29s 295ms/step - loss: 942.7839
Epoch 85/160
97/97 [==============================] - 29s 295ms/step - loss: 937.4374
Epoch 86/160
97/97 [==============================] - 29s 295ms/step - loss: 934.6262
Epoch 87/160
97/97 [==============================] - 29s 295ms/step - loss: 929.8491
Epoch 88/160
97/97 [==============================] - 29s 294ms/step - loss: 937.7441
Epoch 89/160
97/97 [==============================] - 29s 295ms/step - loss: 927.0290
Epoch 90/160
97/97 [==============================] - 29s 295ms/step - loss: 925.6105
Epoch 91/160
97/97 [==============================] - 29s 294ms/step - loss: 921.6296
Epoch 92/160
97/97 [==============================] - 29s 294ms/step - loss: 925.8184
Epoch 93/160
97/97 [==============================] - 29s 294ms/step - loss: 912.5261
Epoch 94/160
97/97 [==============================] - 29s 295ms/step - loss: 915.6510
Epoch 95/160
97/97 [==============================] - 29s 295ms/step - loss: 909.5853
Epoch 96/160
97/97 [==============================] - 29s 294ms/step - loss: 911.1563
Epoch 97/160
97/97 [==============================] - 29s 295ms/step - loss: 906.8965
Epoch 98/160
97/97 [==============================] - 29s 294ms/step - loss: 902.3696
Epoch 99/160
97/97 [==============================] - 29s 295ms/step - loss: 899.8710
Epoch 100/160
97/97 [==============================] - 29s 294ms/step - loss: 894.1641
Epoch 101/160
97/97 [==============================] - 29s 294ms/step - loss: 895.7336
Epoch 102/160
97/97 [==============================] - 29s 294ms/step - loss: 900.1674
Epoch 103/160
97/97 [==============================] - 29s 294ms/step - loss: 887.2552
Epoch 104/160
97/97 [==============================] - 29s 295ms/step - loss: 893.1448
Epoch 105/160
97/97 [==============================] - 29s 294ms/step - loss: 889.9379
Epoch 106/160
97/97 [==============================] - 29s 295ms/step - loss: 884.9587
Epoch 107/160
97/97 [==============================] - 29s 294ms/step - loss: 880.9834
Epoch 108/160
97/97 [==============================] - 29s 295ms/step - loss: 883.2829
Epoch 109/160
97/97 [==============================] - 29s 294ms/step - loss: 876.6734
Epoch 110/160
97/97 [==============================] - 29s 294ms/step - loss: 873.4252
Epoch 111/160
97/97 [==============================] - 29s 294ms/step - loss: 873.2639
Epoch 112/160
97/97 [==============================] - 29s 295ms/step - loss: 871.0381
Epoch 113/160
97/97 [==============================] - 29s 294ms/step - loss: 866.5417
Epoch 114/160
97/97 [==============================] - 29s 294ms/step - loss: 862.2125
Epoch 115/160
97/97 [==============================] - 29s 294ms/step - loss: 862.8839
Epoch 116/160
97/97 [==============================] - 29s 294ms/step - loss: 861.1781
Epoch 117/160
97/97 [==============================] - 29s 294ms/step - loss: 856.6186
Epoch 118/160
97/97 [==============================] - 29s 294ms/step - loss: 857.3196
Epoch 119/160
97/97 [==============================] - 29s 294ms/step - loss: 858.0576
Epoch 120/160
97/97 [==============================] - 29s 294ms/step - loss: 855.3264
Epoch 121/160
97/97 [==============================] - 29s 294ms/step - loss: 850.6841
Epoch 122/160
97/97 [==============================] - 29s 294ms/step - loss: 849.6420
Epoch 123/160
97/97 [==============================] - 29s 294ms/step - loss: 846.6933
Epoch 124/160
97/97 [==============================] - 29s 295ms/step - loss: 847.4681
Epoch 125/160
97/97 [==============================] - 29s 294ms/step - loss: 838.5893
Epoch 126/160
97/97 [==============================] - 29s 294ms/step - loss: 841.2516
Epoch 127/160
97/97 [==============================] - 29s 295ms/step - loss: 840.6940
Epoch 128/160
97/97 [==============================] - 29s 294ms/step - loss: 840.9053
Epoch 129/160
97/97 [==============================] - 29s 294ms/step - loss: 836.9998
Epoch 130/160
97/97 [==============================] - 29s 294ms/step - loss: 836.6874
Epoch 131/160
97/97 [==============================] - 29s 294ms/step - loss: 835.2166
Epoch 132/160
97/97 [==============================] - 29s 295ms/step - loss: 833.7071
Epoch 133/160
97/97 [==============================] - 29s 294ms/step - loss: 829.0735
Epoch 134/160
97/97 [==============================] - 29s 294ms/step - loss: 830.1376
Epoch 135/160
97/97 [==============================] - 29s 294ms/step - loss: 827.7781
Epoch 136/160
97/97 [==============================] - 29s 294ms/step - loss: 825.4308
Epoch 137/160
97/97 [==============================] - 29s 294ms/step - loss: 823.2223
Epoch 138/160
97/97 [==============================] - 29s 294ms/step - loss: 821.3982
Epoch 139/160
97/97 [==============================] - 29s 294ms/step - loss: 821.0161
Epoch 140/160
97/97 [==============================] - 29s 294ms/step - loss: 816.7703
Epoch 141/160
97/97 [==============================] - 29s 294ms/step - loss: 814.1747
Epoch 142/160
97/97 [==============================] - 29s 294ms/step - loss: 813.5908
Epoch 143/160
97/97 [==============================] - 29s 294ms/step - loss: 814.3353
Epoch 144/160
97/97 [==============================] - 29s 295ms/step - loss: 807.3126
Epoch 145/160
97/97 [==============================] - 29s 294ms/step - loss: 811.9185
Epoch 146/160
97/97 [==============================] - 29s 294ms/step - loss: 808.0939
Epoch 147/160
97/97 [==============================] - 29s 294ms/step - loss: 806.7361
Epoch 148/160
97/97 [==============================] - 29s 294ms/step - loss: 804.6682
Epoch 149/160
97/97 [==============================] - 29s 294ms/step - loss: 801.5149
Epoch 150/160
97/97 [==============================] - 29s 294ms/step - loss: 803.6600
Epoch 151/160
97/97 [==============================] - 29s 294ms/step - loss: 799.9028
Epoch 152/160
97/97 [==============================] - 29s 294ms/step - loss: 801.5812
Epoch 153/160
97/97 [==============================] - 29s 294ms/step - loss: 791.5322
Epoch 154/160
97/97 [==============================] - 29s 294ms/step - loss: 795.5021
Epoch 155/160
97/97 [==============================] - 29s 294ms/step - loss: 795.7894
Epoch 156/160
97/97 [==============================] - 29s 294ms/step - loss: 794.7897
Epoch 157/160
97/97 [==============================] - 29s 294ms/step - loss: 794.8560
Epoch 158/160
97/97 [==============================] - 29s 294ms/step - loss: 791.5762
Epoch 159/160
97/97 [==============================] - 29s 294ms/step - loss: 784.3605
Epoch 160/160
97/97 [==============================] - 29s 294ms/step - loss: 781.7180
```
</div>

---
## Evaluation
**Linear evaluation:** to evaluate the model's performance, we add
a linear dense layer at the end and freeze the main model's weights, only letting the
dense layer be tuned. If the model actually learned something, then the accuracy would
be significantly higher than random chance.
**Accuracy on CIFAR-10**: 64% for this notebook. This is much better than the 10% we get
from random guessing.
```python
# Approx: 64% accuracy with this barlow twins model.
xy_ds = (
tf.data.Dataset.from_tensor_slices((train_features, train_labels))
.shuffle(1000)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
tf.data.Dataset.from_tensor_slices((test_features, test_labels))
.shuffle(1000)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(tf.data.AUTOTUNE)
)
model = keras.models.Sequential(
[
bm.model,
keras.layers.Dense(
10, activation="softmax", kernel_regularizer=keras.regularizers.l2(0.02)
),
]
)
model.layers[0].trainable = False
linear_optimizer = tfa.optimizers.LAMB()
model.compile(
optimizer=linear_optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
model.fit(xy_ds, epochs=35, validation_data=test_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/35
97/97 [==============================] - 12s 84ms/step - loss: 2.9447 - accuracy: 0.2090 - val_loss: 2.3056 - val_accuracy: 0.3741
Epoch 2/35
97/97 [==============================] - 6s 62ms/step - loss: 1.9912 - accuracy: 0.4867 - val_loss: 1.6910 - val_accuracy: 0.5883
Epoch 3/35
97/97 [==============================] - 6s 62ms/step - loss: 1.5476 - accuracy: 0.6278 - val_loss: 1.4605 - val_accuracy: 0.6465
Epoch 4/35
97/97 [==============================] - 6s 62ms/step - loss: 1.3775 - accuracy: 0.6647 - val_loss: 1.3689 - val_accuracy: 0.6644
Epoch 5/35
97/97 [==============================] - 6s 62ms/step - loss: 1.3027 - accuracy: 0.6769 - val_loss: 1.3232 - val_accuracy: 0.6684
Epoch 6/35
97/97 [==============================] - 6s 62ms/step - loss: 1.2574 - accuracy: 0.6820 - val_loss: 1.2905 - val_accuracy: 0.6717
Epoch 7/35
97/97 [==============================] - 6s 63ms/step - loss: 1.2244 - accuracy: 0.6852 - val_loss: 1.2654 - val_accuracy: 0.6742
Epoch 8/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1979 - accuracy: 0.6868 - val_loss: 1.2460 - val_accuracy: 0.6747
Epoch 9/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1754 - accuracy: 0.6884 - val_loss: 1.2247 - val_accuracy: 0.6773
Epoch 10/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1559 - accuracy: 0.6896 - val_loss: 1.2090 - val_accuracy: 0.6770
Epoch 11/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1380 - accuracy: 0.6907 - val_loss: 1.1904 - val_accuracy: 0.6785
Epoch 12/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1223 - accuracy: 0.6915 - val_loss: 1.1796 - val_accuracy: 0.6776
Epoch 13/35
97/97 [==============================] - 6s 62ms/step - loss: 1.1079 - accuracy: 0.6923 - val_loss: 1.1696 - val_accuracy: 0.6785
Epoch 14/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0954 - accuracy: 0.6931 - val_loss: 1.1564 - val_accuracy: 0.6795
Epoch 15/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0841 - accuracy: 0.6939 - val_loss: 1.1454 - val_accuracy: 0.6807
Epoch 16/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0733 - accuracy: 0.6945 - val_loss: 1.1356 - val_accuracy: 0.6810
Epoch 17/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0634 - accuracy: 0.6948 - val_loss: 1.1313 - val_accuracy: 0.6799
Epoch 18/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0535 - accuracy: 0.6957 - val_loss: 1.1208 - val_accuracy: 0.6808
Epoch 19/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0447 - accuracy: 0.6965 - val_loss: 1.1128 - val_accuracy: 0.6813
Epoch 20/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0366 - accuracy: 0.6968 - val_loss: 1.1082 - val_accuracy: 0.6799
Epoch 21/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0295 - accuracy: 0.6968 - val_loss: 1.0971 - val_accuracy: 0.6821
Epoch 22/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0226 - accuracy: 0.6971 - val_loss: 1.0946 - val_accuracy: 0.6799
Epoch 23/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0166 - accuracy: 0.6977 - val_loss: 1.0916 - val_accuracy: 0.6802
Epoch 24/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0103 - accuracy: 0.6980 - val_loss: 1.0823 - val_accuracy: 0.6819
Epoch 25/35
97/97 [==============================] - 6s 62ms/step - loss: 1.0052 - accuracy: 0.6981 - val_loss: 1.0795 - val_accuracy: 0.6804
Epoch 26/35
97/97 [==============================] - 6s 63ms/step - loss: 1.0001 - accuracy: 0.6984 - val_loss: 1.0759 - val_accuracy: 0.6806
Epoch 27/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9947 - accuracy: 0.6992 - val_loss: 1.0699 - val_accuracy: 0.6809
Epoch 28/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9901 - accuracy: 0.6987 - val_loss: 1.0637 - val_accuracy: 0.6821
Epoch 29/35
97/97 [==============================] - 6s 63ms/step - loss: 0.9862 - accuracy: 0.6991 - val_loss: 1.0603 - val_accuracy: 0.6826
Epoch 30/35
97/97 [==============================] - 6s 63ms/step - loss: 0.9817 - accuracy: 0.6994 - val_loss: 1.0582 - val_accuracy: 0.6813
Epoch 31/35
97/97 [==============================] - 6s 63ms/step - loss: 0.9784 - accuracy: 0.6994 - val_loss: 1.0531 - val_accuracy: 0.6826
Epoch 32/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9743 - accuracy: 0.6998 - val_loss: 1.0505 - val_accuracy: 0.6822
Epoch 33/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9711 - accuracy: 0.6996 - val_loss: 1.0506 - val_accuracy: 0.6800
Epoch 34/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9686 - accuracy: 0.6993 - val_loss: 1.0423 - val_accuracy: 0.6828
Epoch 35/35
97/97 [==============================] - 6s 62ms/step - loss: 0.9653 - accuracy: 0.6999 - val_loss: 1.0429 - val_accuracy: 0.6821
<keras.callbacks.History at 0x7f4706ef0090>
```
</div>
---
## Conclusion
* Barlow Twins is a simple and concise method for contrastive and self-supervised
learning.
* With this resnet-34 model architecture, we were able to reach 62-64% validation
accuracy.
---
## Use-Cases of Barlow-Twins(and contrastive learning in General)
* Semi-supervised learning: You can see that this model reached 62-64% accuracy
when it wasn't even trained with the labels. It can be used when you have little labeled
data but a lot of unlabeled data.
* You do Barlow Twins training on the unlabeled data, and then you do secondary training
with the labeled data, as sketched below.
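Below is a minimal sketch of that two-stage workflow. Here `labeled_ds` is a
hypothetical placeholder for your small labeled subset; the encoder is the
pretrained `bm.model` from above, and the head and learning rate are just
illustrative choices.
```python
# Stage 1: Barlow Twins pretraining on the unlabeled images (already done above):
# bm.fit(augment_versions, epochs=160)

# Stage 2: attach a classification head and fine-tune on the labeled subset.
fine_tune_model = keras.models.Sequential(
    [
        bm.model,
        keras.layers.Dense(10, activation="softmax"),
    ]
)
fine_tune_model.layers[0].trainable = True  # fine-tune the encoder, not just the head
fine_tune_model.compile(
    optimizer=keras.optimizers.Adam(1e-4),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
# fine_tune_model.fit(labeled_ds, epochs=10)
```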
---
## Helpful links
* [Paper](https://arxiv.org/abs/2103.03230)
* [Original Pytorch Implementation](https://github.com/facebookresearch/barlowtwins)
* [Sayak Paul's
Implementation](https://colab.research.google.com/github/sayakpaul/Barlow-Twins-TF/blob/main/Barlow_Twins.ipynb#scrollTo=GlWepkM8_prl).
* Thanks to Sayak Paul for his implementation. It helped me with debugging and
comparisons of accuracy and loss.
* [resnet34
implementation](https://www.analyticsvidhya.com/blog/2021/08/how-to-code-your-resnet-from-scratch-in-tensorflow/#h2_2)
* Thanks to Yashowardhan Shinde for writing the article.
| keras-io/examples/vision/md/barlow_twins.md/0 | {
"file_path": "keras-io/examples/vision/md/barlow_twins.md",
"repo_id": "keras-io",
"token_count": 19624
} | 93 |
# Masked image modeling with Autoencoders
**Author:** [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/12/20<br>
**Last modified:** 2021/12/21<br>
**Description:** Implementing Masked Autoencoders for self-supervised pretraining.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/masked_image_modeling.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/masked_image_modeling.py)
---
## Introduction
In deep learning, models with growing **capacity** and **capability** can easily overfit
on large datasets (ImageNet-1K). In the field of natural language processing, the
appetite for data has been **successfully addressed** by self-supervised pretraining.
In the academic paper
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
by He et al., the authors propose a simple yet effective method to pretrain large
vision models (here [ViT Huge](https://arxiv.org/abs/2010.11929)). Inspired from
the pretraining algorithm of BERT ([Devlin et al.](https://arxiv.org/abs/1810.04805)),
they mask patches of an image and, through an autoencoder, predict the masked patches.
In the spirit of "masked language modeling", this pretraining task could be referred
to as "masked image modeling".
In this example, we implement
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
with the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. After
pretraining a scaled down version of ViT, we also implement the linear evaluation
pipeline on CIFAR-10.
This implementation covers (MAE refers to Masked Autoencoder):
- The masking algorithm
- MAE encoder
- MAE decoder
- Evaluation with linear probing
As a reference, we reuse some of the code presented in
[this example](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
---
## Imports
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
import matplotlib.pyplot as plt
import numpy as np
import random
# Setting seeds for reproducibility.
SEED = 42
keras.utils.set_random_seed(SEED)
```
---
## Hyperparameters for pretraining
Please feel free to change the hyperparameters and check your results. The best way to
get an intuition about the architecture is to experiment with it. Our hyperparameters are
heavily inspired by the design guidelines laid out by the authors in
[the original paper](https://arxiv.org/abs/2111.06377).
```python
# DATA
BUFFER_SIZE = 1024
BATCH_SIZE = 256
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10
# OPTIMIZER
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 100
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
PATCH_SIZE = 6 # Size of the patches to be extracted from the input images.
NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2
MASK_PROPORTION = 0.75 # We have found 75% masking to give us the best results.
# ENCODER and DECODER
LAYER_NORM_EPS = 1e-6
ENC_PROJECTION_DIM = 128
DEC_PROJECTION_DIM = 64
ENC_NUM_HEADS = 4
ENC_LAYERS = 6
DEC_NUM_HEADS = 4
DEC_LAYERS = (
2 # The decoder is lightweight but should be reasonably deep for reconstruction.
)
ENC_TRANSFORMER_UNITS = [
ENC_PROJECTION_DIM * 2,
ENC_PROJECTION_DIM,
] # Size of the transformer layers.
DEC_TRANSFORMER_UNITS = [
DEC_PROJECTION_DIM * 2,
DEC_PROJECTION_DIM,
]
```
---
## Load and prepare the CIFAR-10 dataset
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
train_ds = tf.data.Dataset.from_tensor_slices(x_train)
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf.data.Dataset.from_tensor_slices(x_test)
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
```
<div class="k-default-codeblock">
```
Training samples: 40000
Validation samples: 10000
Testing samples: 10000
```
</div>
---
## Data augmentation
In previous self-supervised pretraining methodologies
(such as [SimCLR](https://arxiv.org/abs/2002.05709)), we have noticed that the data
augmentation pipeline plays an important role. On the other hand, the authors of this
paper point out that Masked Autoencoders **do not** rely on augmentations. They propose a
simple augmentation pipeline of:
- Resizing
- Random cropping (fixed-sized or random sized)
- Random horizontal flipping
```python
def get_train_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
return model
def get_test_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="test_data_augmentation",
)
return model
```
---
## A layer for extracting patches from images
This layer takes images as input and divides them into patches. The layer also includes
two utility methods:
- `show_patched_image` -- Takes a batch of images and their corresponding patches and plots a
random image together with its patches.
- `reconstruct_from_patch` -- Takes a single instance of patches and stitches them
together into the original image.
```python
class Patches(layers.Layer):
def __init__(self, patch_size=PATCH_SIZE, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
# Assuming the image has three channels each patch would be
# of size (patch_size, patch_size, 3).
self.resize = layers.Reshape((-1, patch_size * patch_size * 3))
def call(self, images):
# Create patches from the input images
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
# Reshape the patches to (batch, num_patches, patch_area) and return it.
patches = self.resize(patches)
return patches
def show_patched_image(self, images, patches):
# This is a utility function which accepts a batch of images and its
        # corresponding patches and helps visualize one image and its patches
# side by side.
idx = np.random.choice(patches.shape[0])
print(f"Index selected: {idx}.")
plt.figure(figsize=(4, 4))
plt.imshow(keras.utils.array_to_img(images[idx]))
plt.axis("off")
plt.show()
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[idx]):
ax = plt.subplot(n, n, i + 1)
patch_img = tf.reshape(patch, (self.patch_size, self.patch_size, 3))
plt.imshow(keras.utils.img_to_array(patch_img))
plt.axis("off")
plt.show()
# Return the index chosen to validate it outside the method.
return idx
# taken from https://stackoverflow.com/a/58082878/10319735
def reconstruct_from_patch(self, patch):
# This utility function takes patches from a *single* image and
        # reconstructs them back into the original image. This is useful for the train
# monitor callback.
num_patches = patch.shape[0]
n = int(np.sqrt(num_patches))
patch = tf.reshape(patch, (num_patches, self.patch_size, self.patch_size, 3))
rows = tf.split(patch, n, axis=0)
rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
reconstructed = tf.concat(rows, axis=0)
return reconstructed
```
Let's visualize the image patches.
```python
# Get a batch of images.
image_batch = next(iter(train_ds))
# Augment the images.
augmentation_model = get_train_augmentation_model()
augmented_images = augmentation_model(image_batch)
# Define the patch layer.
patch_layer = Patches()
# Get the patches from the batched images.
patches = patch_layer(images=augmented_images)
# Now pass the images and the corresponding patches
# to the `show_patched_image` method.
random_index = patch_layer.show_patched_image(images=augmented_images, patches=patches)
# Choose the same image and try reconstructing the patches
# into the original image.
image = patch_layer.reconstruct_from_patch(patches[random_index])
plt.imshow(image)
plt.axis("off")
plt.show()
```
<div class="k-default-codeblock">
```
Index selected: 102.
```
</div>



---
## Patch encoding with masking
Quoting the paper
> Following ViT, we divide an image into regular non-overlapping patches. Then we sample
a subset of patches and mask (i.e., remove) the remaining ones. Our sampling strategy is
straightforward: we sample random patches without replacement, following a uniform
distribution. We simply refer to this as “random sampling”.
This layer includes masking and encoding the patches.
The utility methods of the layer are:
- `get_random_indices` -- Provides the mask and unmask indices.
- `generate_masked_image` -- Takes patches and unmask indices and returns a randomly masked
image. This is an essential utility method for our training monitor callback (defined
later).
```python
class PatchEncoder(layers.Layer):
def __init__(
self,
patch_size=PATCH_SIZE,
projection_dim=ENC_PROJECTION_DIM,
mask_proportion=MASK_PROPORTION,
downstream=False,
**kwargs,
):
super().__init__(**kwargs)
self.patch_size = patch_size
self.projection_dim = projection_dim
self.mask_proportion = mask_proportion
self.downstream = downstream
# This is a trainable mask token initialized randomly from a normal
# distribution.
self.mask_token = tf.Variable(
tf.random.normal([1, patch_size * patch_size * 3]), trainable=True
)
def build(self, input_shape):
(_, self.num_patches, self.patch_area) = input_shape
# Create the projection layer for the patches.
self.projection = layers.Dense(units=self.projection_dim)
# Create the positional embedding layer.
self.position_embedding = layers.Embedding(
input_dim=self.num_patches, output_dim=self.projection_dim
)
# Number of patches that will be masked.
self.num_mask = int(self.mask_proportion * self.num_patches)
def call(self, patches):
# Get the positional embeddings.
batch_size = tf.shape(patches)[0]
positions = tf.range(start=0, limit=self.num_patches, delta=1)
pos_embeddings = self.position_embedding(positions[tf.newaxis, ...])
pos_embeddings = tf.tile(
pos_embeddings, [batch_size, 1, 1]
) # (B, num_patches, projection_dim)
# Embed the patches.
patch_embeddings = (
self.projection(patches) + pos_embeddings
) # (B, num_patches, projection_dim)
if self.downstream:
return patch_embeddings
else:
mask_indices, unmask_indices = self.get_random_indices(batch_size)
# The encoder input is the unmasked patch embeddings. Here we gather
# all the patches that should be unmasked.
unmasked_embeddings = tf.gather(
patch_embeddings, unmask_indices, axis=1, batch_dims=1
) # (B, unmask_numbers, projection_dim)
# Get the unmasked and masked position embeddings. We will need them
# for the decoder.
unmasked_positions = tf.gather(
pos_embeddings, unmask_indices, axis=1, batch_dims=1
) # (B, unmask_numbers, projection_dim)
masked_positions = tf.gather(
pos_embeddings, mask_indices, axis=1, batch_dims=1
) # (B, mask_numbers, projection_dim)
            # Repeat the mask token `num_mask` times.
            # The mask tokens stand in for the masked patches of the image.
mask_tokens = tf.repeat(self.mask_token, repeats=self.num_mask, axis=0)
mask_tokens = tf.repeat(
mask_tokens[tf.newaxis, ...], repeats=batch_size, axis=0
)
# Get the masked embeddings for the tokens.
masked_embeddings = self.projection(mask_tokens) + masked_positions
return (
unmasked_embeddings, # Input to the encoder.
masked_embeddings, # First part of input to the decoder.
unmasked_positions, # Added to the encoder outputs.
mask_indices, # The indices that were masked.
                unmask_indices,  # The indices that were unmasked.
)
def get_random_indices(self, batch_size):
# Create random indices from a uniform distribution and then split
# it into mask and unmask indices.
rand_indices = tf.argsort(
tf.random.uniform(shape=(batch_size, self.num_patches)), axis=-1
)
mask_indices = rand_indices[:, : self.num_mask]
unmask_indices = rand_indices[:, self.num_mask :]
return mask_indices, unmask_indices
def generate_masked_image(self, patches, unmask_indices):
        # Choose a random patch and its corresponding unmask index.
idx = np.random.choice(patches.shape[0])
patch = patches[idx]
unmask_index = unmask_indices[idx]
# Build a numpy array of same shape as patch.
new_patch = np.zeros_like(patch)
        # Iterate over the unmask indices and plug in the unmasked patches.
for i in range(unmask_index.shape[0]):
new_patch[unmask_index[i]] = patch[unmask_index[i]]
return new_patch, idx
```
Let's see the masking process in action on a sample image.
```python
# Create the patch encoder layer.
patch_encoder = PatchEncoder()
# Get the embeddings and positions.
(
unmasked_embeddings,
masked_embeddings,
unmasked_positions,
mask_indices,
unmask_indices,
) = patch_encoder(patches=patches)
# Show a masked patch image.
new_patch, random_index = patch_encoder.generate_masked_image(patches, unmask_indices)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
img = patch_layer.reconstruct_from_patch(new_patch)
plt.imshow(keras.utils.array_to_img(img))
plt.axis("off")
plt.title("Masked")
plt.subplot(1, 2, 2)
img = augmented_images[random_index]
plt.imshow(keras.utils.array_to_img(img))
plt.axis("off")
plt.title("Original")
plt.show()
```

---
## MLP
This serves as the fully connected feed forward network of the transformer architecture.
```python
def mlp(x, dropout_rate, hidden_units):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
```
---
## MAE encoder
The MAE encoder is a ViT. The only point to note here is that the final encoder output
is layer-normalized.
```python
def create_encoder(num_heads=ENC_NUM_HEADS, num_layers=ENC_LAYERS):
inputs = layers.Input((None, ENC_PROJECTION_DIM))
x = inputs
for _ in range(num_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=ENC_PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, x])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP.
x3 = mlp(x3, hidden_units=ENC_TRANSFORMER_UNITS, dropout_rate=0.1)
# Skip connection 2.
x = layers.Add()([x3, x2])
outputs = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
return keras.Model(inputs, outputs, name="mae_encoder")
```
---
## MAE decoder
The authors point out that they use an **asymmetric** autoencoder model. They use a
lightweight decoder that takes "<10% computation per token vs. the encoder". We do not
strictly match the "<10% computation" figure in our implementation, but we do use a smaller
decoder (both in terms of depth and projection dimensions).
```python
def create_decoder(
num_layers=DEC_LAYERS, num_heads=DEC_NUM_HEADS, image_size=IMAGE_SIZE
):
inputs = layers.Input((NUM_PATCHES, ENC_PROJECTION_DIM))
x = layers.Dense(DEC_PROJECTION_DIM)(inputs)
for _ in range(num_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=DEC_PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, x])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP.
x3 = mlp(x3, hidden_units=DEC_TRANSFORMER_UNITS, dropout_rate=0.1)
# Skip connection 2.
x = layers.Add()([x3, x2])
x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
x = layers.Flatten()(x)
pre_final = layers.Dense(units=image_size * image_size * 3, activation="sigmoid")(x)
outputs = layers.Reshape((image_size, image_size, 3))(pre_final)
return keras.Model(inputs, outputs, name="mae_decoder")
```
---
## MAE trainer
This is the trainer module. We wrap the encoder and decoder inside a `keras.Model`
subclass. This allows us to customize what happens in the `model.fit()` loop.
```python
class MaskedAutoencoder(keras.Model):
def __init__(
self,
train_augmentation_model,
test_augmentation_model,
patch_layer,
patch_encoder,
encoder,
decoder,
**kwargs,
):
super().__init__(**kwargs)
self.train_augmentation_model = train_augmentation_model
self.test_augmentation_model = test_augmentation_model
self.patch_layer = patch_layer
self.patch_encoder = patch_encoder
self.encoder = encoder
self.decoder = decoder
def calculate_loss(self, images, test=False):
# Augment the input images.
if test:
augmented_images = self.test_augmentation_model(images)
else:
augmented_images = self.train_augmentation_model(images)
# Patch the augmented images.
patches = self.patch_layer(augmented_images)
# Encode the patches.
(
unmasked_embeddings,
masked_embeddings,
unmasked_positions,
mask_indices,
unmask_indices,
) = self.patch_encoder(patches)
        # Pass the unmasked patch embeddings to the encoder.
encoder_outputs = self.encoder(unmasked_embeddings)
# Create the decoder inputs.
encoder_outputs = encoder_outputs + unmasked_positions
decoder_inputs = tf.concat([encoder_outputs, masked_embeddings], axis=1)
# Decode the inputs.
decoder_outputs = self.decoder(decoder_inputs)
decoder_patches = self.patch_layer(decoder_outputs)
loss_patch = tf.gather(patches, mask_indices, axis=1, batch_dims=1)
loss_output = tf.gather(decoder_patches, mask_indices, axis=1, batch_dims=1)
# Compute the total loss.
total_loss = self.compute_loss(y=loss_patch, y_pred=loss_output)
return total_loss, loss_patch, loss_output
def train_step(self, images):
with tf.GradientTape() as tape:
total_loss, loss_patch, loss_output = self.calculate_loss(images)
# Apply gradients.
train_vars = [
self.train_augmentation_model.trainable_variables,
self.patch_layer.trainable_variables,
self.patch_encoder.trainable_variables,
self.encoder.trainable_variables,
self.decoder.trainable_variables,
]
grads = tape.gradient(total_loss, train_vars)
tv_list = []
for grad, var in zip(grads, train_vars):
for g, v in zip(grad, var):
tv_list.append((g, v))
self.optimizer.apply_gradients(tv_list)
# Report progress.
results = {}
for metric in self.metrics:
metric.update_state(loss_patch, loss_output)
results[metric.name] = metric.result()
return results
def test_step(self, images):
total_loss, loss_patch, loss_output = self.calculate_loss(images, test=True)
# Update the trackers.
results = {}
for metric in self.metrics:
metric.update_state(loss_patch, loss_output)
results[metric.name] = metric.result()
return results
```
---
## Model initialization
```python
train_augmentation_model = get_train_augmentation_model()
test_augmentation_model = get_test_augmentation_model()
patch_layer = Patches()
patch_encoder = PatchEncoder()
encoder = create_encoder()
decoder = create_decoder()
mae_model = MaskedAutoencoder(
train_augmentation_model=train_augmentation_model,
test_augmentation_model=test_augmentation_model,
patch_layer=patch_layer,
patch_encoder=patch_encoder,
encoder=encoder,
decoder=decoder,
)
```
---
## Training callbacks
### Visualization callback
```python
# Taking a batch of test inputs to measure model's progress.
test_images = next(iter(test_ds))
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 0:
test_augmented_images = self.model.test_augmentation_model(test_images)
test_patches = self.model.patch_layer(test_augmented_images)
(
test_unmasked_embeddings,
test_masked_embeddings,
test_unmasked_positions,
test_mask_indices,
test_unmask_indices,
) = self.model.patch_encoder(test_patches)
test_encoder_outputs = self.model.encoder(test_unmasked_embeddings)
test_encoder_outputs = test_encoder_outputs + test_unmasked_positions
test_decoder_inputs = tf.concat(
[test_encoder_outputs, test_masked_embeddings], axis=1
)
test_decoder_outputs = self.model.decoder(test_decoder_inputs)
            # Show a masked patch image.
test_masked_patch, idx = self.model.patch_encoder.generate_masked_image(
test_patches, test_unmask_indices
)
print(f"\nIdx chosen: {idx}")
original_image = test_augmented_images[idx]
masked_image = self.model.patch_layer.reconstruct_from_patch(
test_masked_patch
)
reconstructed_image = test_decoder_outputs[idx]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
ax[0].imshow(original_image)
ax[0].set_title(f"Original: {epoch:03d}")
ax[1].imshow(masked_image)
ax[1].set_title(f"Masked: {epoch:03d}")
ax[2].imshow(reconstructed_image)
ax[2].set_title(f"Resonstructed: {epoch:03d}")
plt.show()
plt.close()
```
### Learning rate scheduler
```python
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
lrs = [scheduled_lrs(step) for step in range(total_steps)]
plt.plot(lrs)
plt.xlabel("Step", fontsize=14)
plt.ylabel("LR", fontsize=14)
plt.show()
# Assemble the callbacks.
train_callbacks = [TrainMonitor(epoch_interval=5)]
```

---
## Model compilation and training
```python
optimizer = keras.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY
)
# Compile and pretrain the model.
mae_model.compile(
optimizer=optimizer, loss=keras.losses.MeanSquaredError(), metrics=["mae"]
)
history = mae_model.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=train_callbacks,
)
# Measure its performance.
loss, mae = mae_model.evaluate(test_ds)
print(f"Loss: {loss:.2f}")
print(f"MAE: {mae:.2f}")
```
<div class="k-default-codeblock">
```
Epoch 1/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 80ms/step - mae: 0.2035 - loss: 0.4828
Idx chosen: 92
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 47s 95ms/step - mae: 0.2033 - loss: 0.4828 - val_loss: 0.5225 - val_mae: 0.1600
Epoch 2/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1592 - loss: 0.5128 - val_loss: 0.5290 - val_mae: 0.1511
Epoch 3/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1530 - loss: 0.5193 - val_loss: 0.5336 - val_mae: 0.1478
Epoch 4/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1502 - loss: 0.5220 - val_loss: 0.5298 - val_mae: 0.1436
Epoch 5/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1458 - loss: 0.5245 - val_loss: 0.5296 - val_mae: 0.1405
Epoch 6/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - mae: 0.1414 - loss: 0.5265
Idx chosen: 14
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 14s 88ms/step - mae: 0.1414 - loss: 0.5265 - val_loss: 0.5328 - val_mae: 0.1402
Epoch 7/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1399 - loss: 0.5278 - val_loss: 0.5361 - val_mae: 0.1360
Epoch 8/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1389 - loss: 0.5285 - val_loss: 0.5365 - val_mae: 0.1424
Epoch 9/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1379 - loss: 0.5295 - val_loss: 0.5312 - val_mae: 0.1345
Epoch 10/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1352 - loss: 0.5308 - val_loss: 0.5374 - val_mae: 0.1321
Epoch 11/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - mae: 0.1339 - loss: 0.5317
Idx chosen: 106
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 14s 87ms/step - mae: 0.1339 - loss: 0.5317 - val_loss: 0.5392 - val_mae: 0.1330
Epoch 12/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1321 - loss: 0.5331 - val_loss: 0.5383 - val_mae: 0.1301
Epoch 13/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1317 - loss: 0.5343 - val_loss: 0.5405 - val_mae: 0.1322
Epoch 14/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1326 - loss: 0.5338 - val_loss: 0.5404 - val_mae: 0.1280
Epoch 15/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 84ms/step - mae: 0.1297 - loss: 0.5343 - val_loss: 0.5444 - val_mae: 0.1261
Epoch 16/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 82ms/step - mae: 0.1276 - loss: 0.5361
Idx chosen: 71
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 14s 91ms/step - mae: 0.1276 - loss: 0.5362 - val_loss: 0.5456 - val_mae: 0.1243
Epoch 17/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1262 - loss: 0.5382 - val_loss: 0.5427 - val_mae: 0.1233
Epoch 18/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1221 - loss: 0.5407 - val_loss: 0.5473 - val_mae: 0.1196
Epoch 19/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1209 - loss: 0.5412 - val_loss: 0.5511 - val_mae: 0.1176
Epoch 20/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1202 - loss: 0.5422 - val_loss: 0.5515 - val_mae: 0.1167
Epoch 21/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1186 - loss: 0.5430
Idx chosen: 188
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 85ms/step - mae: 0.1186 - loss: 0.5430 - val_loss: 0.5546 - val_mae: 0.1168
Epoch 22/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1171 - loss: 0.5446 - val_loss: 0.5500 - val_mae: 0.1155
Epoch 23/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1161 - loss: 0.5457 - val_loss: 0.5559 - val_mae: 0.1135
Epoch 24/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1135 - loss: 0.5479 - val_loss: 0.5521 - val_mae: 0.1112
Epoch 25/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1128 - loss: 0.5480 - val_loss: 0.5505 - val_mae: 0.1122
Epoch 26/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1123 - loss: 0.5470
Idx chosen: 20
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.1123 - loss: 0.5470 - val_loss: 0.5572 - val_mae: 0.1127
Epoch 27/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1114 - loss: 0.5487 - val_loss: 0.5555 - val_mae: 0.1092
Epoch 28/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1108 - loss: 0.5492 - val_loss: 0.5569 - val_mae: 0.1110
Epoch 29/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1104 - loss: 0.5491 - val_loss: 0.5517 - val_mae: 0.1110
Epoch 30/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1099 - loss: 0.5490 - val_loss: 0.5543 - val_mae: 0.1104
Epoch 31/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1095 - loss: 0.5501
Idx chosen: 102
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.1095 - loss: 0.5501 - val_loss: 0.5578 - val_mae: 0.1108
Epoch 32/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1089 - loss: 0.5503 - val_loss: 0.5620 - val_mae: 0.1081
Epoch 33/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1079 - loss: 0.5509 - val_loss: 0.5618 - val_mae: 0.1067
Epoch 34/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1067 - loss: 0.5524 - val_loss: 0.5627 - val_mae: 0.1059
Epoch 35/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1068 - loss: 0.5515 - val_loss: 0.5576 - val_mae: 0.1050
Epoch 36/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1057 - loss: 0.5526
Idx chosen: 121
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.1057 - loss: 0.5526 - val_loss: 0.5627 - val_mae: 0.1050
Epoch 37/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1065 - loss: 0.5534 - val_loss: 0.5638 - val_mae: 0.1050
Epoch 38/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1055 - loss: 0.5528 - val_loss: 0.5527 - val_mae: 0.1083
Epoch 39/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 20s 82ms/step - mae: 0.1056 - loss: 0.5516 - val_loss: 0.5562 - val_mae: 0.1044
Epoch 40/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1053 - loss: 0.5528 - val_loss: 0.5567 - val_mae: 0.1051
Epoch 41/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - mae: 0.1049 - loss: 0.5533
Idx chosen: 210
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 85ms/step - mae: 0.1049 - loss: 0.5533 - val_loss: 0.5620 - val_mae: 0.1030
Epoch 42/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1041 - loss: 0.5534 - val_loss: 0.5650 - val_mae: 0.1052
Epoch 43/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1048 - loss: 0.5526 - val_loss: 0.5619 - val_mae: 0.1027
Epoch 44/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1037 - loss: 0.5543 - val_loss: 0.5615 - val_mae: 0.1031
Epoch 45/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1036 - loss: 0.5535 - val_loss: 0.5575 - val_mae: 0.1026
Epoch 46/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - mae: 0.1032 - loss: 0.5537
Idx chosen: 214
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 85ms/step - mae: 0.1032 - loss: 0.5537 - val_loss: 0.5549 - val_mae: 0.1037
Epoch 47/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 84ms/step - mae: 0.1035 - loss: 0.5539 - val_loss: 0.5597 - val_mae: 0.1031
Epoch 48/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1033 - loss: 0.5533 - val_loss: 0.5650 - val_mae: 0.1013
Epoch 49/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.1027 - loss: 0.5543 - val_loss: 0.5571 - val_mae: 0.1028
Epoch 50/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1024 - loss: 0.5548 - val_loss: 0.5592 - val_mae: 0.1018
Epoch 51/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - mae: 0.1025 - loss: 0.5543
Idx chosen: 74
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 85ms/step - mae: 0.1025 - loss: 0.5543 - val_loss: 0.5645 - val_mae: 0.1007
Epoch 52/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.1025 - loss: 0.5544 - val_loss: 0.5616 - val_mae: 0.1004
Epoch 53/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1014 - loss: 0.5547 - val_loss: 0.5594 - val_mae: 0.1007
Epoch 54/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1014 - loss: 0.5550 - val_loss: 0.5687 - val_mae: 0.1012
Epoch 55/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1022 - loss: 0.5551 - val_loss: 0.5572 - val_mae: 0.1018
Epoch 56/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1015 - loss: 0.5558
Idx chosen: 202
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.1015 - loss: 0.5558 - val_loss: 0.5619 - val_mae: 0.0996
Epoch 57/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1008 - loss: 0.5550 - val_loss: 0.5614 - val_mae: 0.0996
Epoch 58/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1004 - loss: 0.5557 - val_loss: 0.5620 - val_mae: 0.0995
Epoch 59/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.1002 - loss: 0.5558 - val_loss: 0.5612 - val_mae: 0.0997
Epoch 60/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.1005 - loss: 0.5563 - val_loss: 0.5598 - val_mae: 0.1000
Epoch 61/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.1001 - loss: 0.5564
Idx chosen: 87
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.1001 - loss: 0.5564 - val_loss: 0.5606 - val_mae: 0.0998
Epoch 62/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.0998 - loss: 0.5562 - val_loss: 0.5643 - val_mae: 0.0988
Epoch 63/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.1001 - loss: 0.5556 - val_loss: 0.5657 - val_mae: 0.0985
Epoch 64/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0998 - loss: 0.5566 - val_loss: 0.5624 - val_mae: 0.0989
Epoch 65/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0994 - loss: 0.5564 - val_loss: 0.5576 - val_mae: 0.0999
Epoch 66/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - mae: 0.0993 - loss: 0.5567
Idx chosen: 116
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 86ms/step - mae: 0.0993 - loss: 0.5567 - val_loss: 0.5572 - val_mae: 0.1000
Epoch 67/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0990 - loss: 0.5570 - val_loss: 0.5619 - val_mae: 0.0981
Epoch 68/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0987 - loss: 0.5578 - val_loss: 0.5644 - val_mae: 0.0973
Epoch 69/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0981 - loss: 0.5577 - val_loss: 0.5639 - val_mae: 0.0976
Epoch 70/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0986 - loss: 0.5563 - val_loss: 0.5601 - val_mae: 0.0989
Epoch 71/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0982 - loss: 0.5578
Idx chosen: 99
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 84ms/step - mae: 0.0982 - loss: 0.5577 - val_loss: 0.5628 - val_mae: 0.0970
Epoch 72/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0979 - loss: 0.5569 - val_loss: 0.5637 - val_mae: 0.0968
Epoch 73/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0979 - loss: 0.5575 - val_loss: 0.5606 - val_mae: 0.0975
Epoch 74/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0977 - loss: 0.5572 - val_loss: 0.5628 - val_mae: 0.0967
Epoch 75/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0975 - loss: 0.5572 - val_loss: 0.5631 - val_mae: 0.0964
Epoch 76/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0973 - loss: 0.5580
Idx chosen: 103
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.0973 - loss: 0.5579 - val_loss: 0.5628 - val_mae: 0.0967
Epoch 77/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0974 - loss: 0.5579 - val_loss: 0.5638 - val_mae: 0.0963
Epoch 78/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0968 - loss: 0.5585 - val_loss: 0.5615 - val_mae: 0.0967
Epoch 79/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0969 - loss: 0.5578 - val_loss: 0.5641 - val_mae: 0.0959
Epoch 80/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0967 - loss: 0.5584 - val_loss: 0.5619 - val_mae: 0.0962
Epoch 81/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0965 - loss: 0.5578
Idx chosen: 151
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.0965 - loss: 0.5578 - val_loss: 0.5651 - val_mae: 0.0957
Epoch 82/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0965 - loss: 0.5583 - val_loss: 0.5644 - val_mae: 0.0957
Epoch 83/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0962 - loss: 0.5584 - val_loss: 0.5649 - val_mae: 0.0954
Epoch 84/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0962 - loss: 0.5586 - val_loss: 0.5611 - val_mae: 0.0962
Epoch 85/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0961 - loss: 0.5582 - val_loss: 0.5638 - val_mae: 0.0956
Epoch 86/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0961 - loss: 0.5584
Idx chosen: 130
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 83ms/step - mae: 0.0961 - loss: 0.5584 - val_loss: 0.5641 - val_mae: 0.0954
Epoch 87/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0959 - loss: 0.5580 - val_loss: 0.5641 - val_mae: 0.0953
Epoch 88/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0960 - loss: 0.5583 - val_loss: 0.5642 - val_mae: 0.0953
Epoch 89/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0958 - loss: 0.5591 - val_loss: 0.5635 - val_mae: 0.0953
Epoch 90/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0957 - loss: 0.5587 - val_loss: 0.5648 - val_mae: 0.0948
Epoch 91/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0957 - loss: 0.5585
Idx chosen: 149
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 84ms/step - mae: 0.0957 - loss: 0.5585 - val_loss: 0.5636 - val_mae: 0.0952
Epoch 92/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0957 - loss: 0.5593 - val_loss: 0.5642 - val_mae: 0.0950
Epoch 93/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0957 - loss: 0.5598 - val_loss: 0.5635 - val_mae: 0.0950
Epoch 94/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0956 - loss: 0.5587 - val_loss: 0.5641 - val_mae: 0.0950
Epoch 95/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0955 - loss: 0.5587 - val_loss: 0.5637 - val_mae: 0.0950
Epoch 96/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - mae: 0.0956 - loss: 0.5585
Idx chosen: 52
```
</div>

<div class="k-default-codeblock">
```
157/157 ━━━━━━━━━━━━━━━━━━━━ 14s 87ms/step - mae: 0.0956 - loss: 0.5585 - val_loss: 0.5643 - val_mae: 0.0950
Epoch 97/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 81ms/step - mae: 0.0956 - loss: 0.5587 - val_loss: 0.5642 - val_mae: 0.0950
Epoch 98/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 82ms/step - mae: 0.0954 - loss: 0.5586 - val_loss: 0.5639 - val_mae: 0.0950
Epoch 99/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0954 - loss: 0.5580 - val_loss: 0.5641 - val_mae: 0.0950
Epoch 100/100
157/157 ━━━━━━━━━━━━━━━━━━━━ 13s 80ms/step - mae: 0.0955 - loss: 0.5587 - val_loss: 0.5639 - val_mae: 0.0951
40/40 ━━━━━━━━━━━━━━━━━━━━ 1s 13ms/step - mae: 0.0955 - loss: 0.5684
Loss: 0.57
MAE: 0.10
```
</div>
---
## Evaluation with linear probing
### Extract the encoder model along with other layers
```python
# Extract the augmentation layers.
train_augmentation_model = mae_model.train_augmentation_model
test_augmentation_model = mae_model.test_augmentation_model
# Extract the patchers.
patch_layer = mae_model.patch_layer
patch_encoder = mae_model.patch_encoder
patch_encoder.downstream = True  # Switch the downstream flag to True.
# Extract the encoder.
encoder = mae_model.encoder
# Pack as a model.
downstream_model = keras.Sequential(
[
layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
patch_layer,
patch_encoder,
encoder,
layers.BatchNormalization(), # Refer to A.1 (Linear probing).
layers.GlobalAveragePooling1D(),
layers.Dense(NUM_CLASSES, activation="softmax"),
],
name="linear_probe_model",
)
# Only the final classification layer of the `downstream_model` should be trainable.
for layer in downstream_model.layers[:-1]:
layer.trainable = False
downstream_model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "linear_probe_model"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ patches_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Patches</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">108</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ patch_encoder_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">PatchEncoder</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">22,144</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ mae_encoder (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,981,696</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ batch_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalization</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ global_average_pooling1d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePooling1D</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_20 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,290</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,005,642</span> (7.65 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">1,290</span> (5.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,004,352</span> (7.65 MB)
</pre>
We are using average pooling to extract learned representations from the MAE encoder.
Another approach would be to use a learnable dummy token inside the encoder during
pretraining (resembling the [CLS] token). Then we can extract representations from that
token during the downstream tasks.
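
Below is a minimal sketch of what that [CLS]-style alternative could look like. The
`ClassTokenPrepender` layer, its weight name, and the way the first token is read out
afterwards are illustrative assumptions for this example -- they are not part of the MAE
implementation above, and the token would have to be carried through pretraining for its
representation to be meaningful.

```python
# Hypothetical layer that prepends a learnable [CLS]-like token to the patch
# embeddings produced by the patch encoder.
class ClassTokenPrepender(layers.Layer):
    def __init__(self, projection_dim=ENC_PROJECTION_DIM, **kwargs):
        super().__init__(**kwargs)
        # One learnable token, broadcast to every sample in the batch.
        self.cls_token = self.add_weight(
            shape=(1, 1, projection_dim),
            initializer="random_normal",
            trainable=True,
            name="cls_token",
        )

    def call(self, patch_embeddings):
        batch_size = tf.shape(patch_embeddings)[0]
        cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1])
        # Output shape: (B, 1 + num_patches, projection_dim).
        return tf.concat([cls_tokens, patch_embeddings], axis=1)


# During linear probing, one would then read out the first token instead of
# average pooling, e.g. `features = encoder_outputs[:, 0]`.
```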
### Prepare datasets for linear probing
```python
def prepare_data(images, labels, is_train=True):
if is_train:
augmentation_model = train_augmentation_model
else:
augmentation_model = test_augmentation_model
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if is_train:
dataset = dataset.shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE).map(
lambda x, y: (augmentation_model(x), y), num_parallel_calls=AUTO
)
return dataset.prefetch(AUTO)
train_ds = prepare_data(x_train, y_train)
val_ds = prepare_data(x_train, y_train, is_train=False)
test_ds = prepare_data(x_test, y_test, is_train=False)
```
### Perform linear probing
```python
linear_probe_epochs = 50
linear_prob_lr = 0.1
warm_epoch_percentage = 0.1
steps = int((len(x_train) // BATCH_SIZE) * linear_probe_epochs)
warmup_steps = int(steps * warm_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=linear_prob_lr,
total_steps=steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
optimizer = keras.optimizers.SGD(learning_rate=scheduled_lrs, momentum=0.9)
downstream_model.compile(
optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
downstream_model.fit(train_ds, validation_data=val_ds, epochs=linear_probe_epochs)
loss, accuracy = downstream_model.evaluate(test_ds)
accuracy = round(accuracy * 100, 2)
print(f"Accuracy on the test set: {accuracy}%.")
```
<div class="k-default-codeblock">
```
Epoch 1/50
7/157 [37m━━━━━━━━━━━━━━━━━━━━ 3s 21ms/step - accuracy: 0.1183 - loss: 3.3939
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1700264823.481598 64012 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
157/157 ━━━━━━━━━━━━━━━━━━━━ 70s 242ms/step - accuracy: 0.1967 - loss: 2.6073 - val_accuracy: 0.3631 - val_loss: 1.7846
Epoch 2/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.3521 - loss: 1.8063 - val_accuracy: 0.3677 - val_loss: 1.7301
Epoch 3/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3580 - loss: 1.7580 - val_accuracy: 0.3649 - val_loss: 1.7326
Epoch 4/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3617 - loss: 1.7471 - val_accuracy: 0.3810 - val_loss: 1.7353
Epoch 5/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 35ms/step - accuracy: 0.3547 - loss: 1.7728 - val_accuracy: 0.3526 - val_loss: 1.8496
Epoch 6/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 35ms/step - accuracy: 0.3546 - loss: 1.7866 - val_accuracy: 0.3896 - val_loss: 1.7583
Epoch 7/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 37ms/step - accuracy: 0.3587 - loss: 1.7924 - val_accuracy: 0.3674 - val_loss: 1.7729
Epoch 8/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 38ms/step - accuracy: 0.3616 - loss: 1.7912 - val_accuracy: 0.3685 - val_loss: 1.7928
Epoch 9/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 36ms/step - accuracy: 0.3707 - loss: 1.7543 - val_accuracy: 0.3568 - val_loss: 1.7943
Epoch 10/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3719 - loss: 1.7451 - val_accuracy: 0.3859 - val_loss: 1.7230
Epoch 11/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3781 - loss: 1.7384 - val_accuracy: 0.3711 - val_loss: 1.7608
Epoch 12/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 35ms/step - accuracy: 0.3791 - loss: 1.7249 - val_accuracy: 0.4004 - val_loss: 1.6961
Epoch 13/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3818 - loss: 1.7303 - val_accuracy: 0.3501 - val_loss: 1.8506
Epoch 14/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3841 - loss: 1.7179 - val_accuracy: 0.3810 - val_loss: 1.8033
Epoch 15/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3818 - loss: 1.7172 - val_accuracy: 0.4168 - val_loss: 1.6507
Epoch 16/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 36ms/step - accuracy: 0.3851 - loss: 1.7059 - val_accuracy: 0.3806 - val_loss: 1.7581
Epoch 17/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3747 - loss: 1.7356 - val_accuracy: 0.4094 - val_loss: 1.6466
Epoch 18/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.3828 - loss: 1.7221 - val_accuracy: 0.4015 - val_loss: 1.6757
Epoch 19/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3889 - loss: 1.6939 - val_accuracy: 0.4102 - val_loss: 1.6392
Epoch 20/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3943 - loss: 1.6857 - val_accuracy: 0.4028 - val_loss: 1.6518
Epoch 21/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3870 - loss: 1.6970 - val_accuracy: 0.3949 - val_loss: 1.7283
Epoch 22/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3893 - loss: 1.6838 - val_accuracy: 0.4207 - val_loss: 1.6292
Epoch 23/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.4005 - loss: 1.6606 - val_accuracy: 0.4152 - val_loss: 1.6320
Epoch 24/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3978 - loss: 1.6556 - val_accuracy: 0.4042 - val_loss: 1.6657
Epoch 25/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4029 - loss: 1.6464 - val_accuracy: 0.4198 - val_loss: 1.6033
Epoch 26/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.3974 - loss: 1.6638 - val_accuracy: 0.4278 - val_loss: 1.5731
Epoch 27/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 37ms/step - accuracy: 0.4035 - loss: 1.6370 - val_accuracy: 0.4302 - val_loss: 1.5663
Epoch 28/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4027 - loss: 1.6349 - val_accuracy: 0.4458 - val_loss: 1.5349
Epoch 29/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4054 - loss: 1.6196 - val_accuracy: 0.4349 - val_loss: 1.5709
Epoch 30/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.4070 - loss: 1.6061 - val_accuracy: 0.4297 - val_loss: 1.5578
Epoch 31/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4105 - loss: 1.6172 - val_accuracy: 0.4250 - val_loss: 1.5735
Epoch 32/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4197 - loss: 1.5960 - val_accuracy: 0.4259 - val_loss: 1.5677
Epoch 33/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4156 - loss: 1.5989 - val_accuracy: 0.4400 - val_loss: 1.5395
Epoch 34/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.4214 - loss: 1.5862 - val_accuracy: 0.4486 - val_loss: 1.5237
Epoch 35/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4208 - loss: 1.5763 - val_accuracy: 0.4188 - val_loss: 1.5925
Epoch 36/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4227 - loss: 1.5803 - val_accuracy: 0.4525 - val_loss: 1.5174
Epoch 37/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4267 - loss: 1.5700 - val_accuracy: 0.4463 - val_loss: 1.5330
Epoch 38/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 37ms/step - accuracy: 0.4283 - loss: 1.5649 - val_accuracy: 0.4348 - val_loss: 1.5482
Epoch 39/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4332 - loss: 1.5581 - val_accuracy: 0.4486 - val_loss: 1.5251
Epoch 40/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4290 - loss: 1.5596 - val_accuracy: 0.4489 - val_loss: 1.5221
Epoch 41/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4318 - loss: 1.5589 - val_accuracy: 0.4494 - val_loss: 1.5202
Epoch 42/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4317 - loss: 1.5514 - val_accuracy: 0.4505 - val_loss: 1.5184
Epoch 43/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4353 - loss: 1.5504 - val_accuracy: 0.4561 - val_loss: 1.5081
Epoch 44/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4369 - loss: 1.5510 - val_accuracy: 0.4581 - val_loss: 1.5092
Epoch 45/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 35ms/step - accuracy: 0.4379 - loss: 1.5428 - val_accuracy: 0.4555 - val_loss: 1.5099
Epoch 46/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4421 - loss: 1.5475 - val_accuracy: 0.4579 - val_loss: 1.5073
Epoch 47/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4434 - loss: 1.5390 - val_accuracy: 0.4593 - val_loss: 1.5052
Epoch 48/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 34ms/step - accuracy: 0.4418 - loss: 1.5373 - val_accuracy: 0.4600 - val_loss: 1.5038
Epoch 49/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 6s 38ms/step - accuracy: 0.4400 - loss: 1.5367 - val_accuracy: 0.4596 - val_loss: 1.5045
Epoch 50/50
157/157 ━━━━━━━━━━━━━━━━━━━━ 5s 35ms/step - accuracy: 0.4448 - loss: 1.5321 - val_accuracy: 0.4595 - val_loss: 1.5048
40/40 ━━━━━━━━━━━━━━━━━━━━ 3s 71ms/step - accuracy: 0.4496 - loss: 1.5088
Accuracy on the test set: 44.66%.
```
</div>
We believe that with a more sophisticated hyperparameter tuning process and a longer
pretraining it is possible to improve this performance further. For comparison, we took
the encoder architecture and
[trained it from scratch](https://github.com/ariG23498/mae-scalable-vision-learners/blob/master/regular-classification.ipynb)
in a fully supervised manner. This gave us ~76% test top-1 accuracy. The authors of
MAE demonstrate strong performance on the ImageNet-1k dataset as well as on
other downstream tasks like object detection and semantic segmentation.
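
As a rough illustration of one possible next step, the sketch below fine-tunes the entire
downstream model (encoder included) instead of only the final classification layer. The
learning rate and epoch count here are arbitrary placeholders that we have not validated.

```python
# Hypothetical full fine-tuning setup: unfreeze every layer and train with a
# lower learning rate than the one used for linear probing.
for layer in downstream_model.layers:
    layer.trainable = True

downstream_model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=1e-2, momentum=0.9),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
downstream_model.fit(train_ds, validation_data=val_ds, epochs=10)
```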
---
## Final notes
We refer the interested reader to other examples on self-supervised learning available on
keras.io:
* [SimCLR](https://keras.io/examples/vision/semisupervised_simclr/)
* [NNCLR](https://keras.io/examples/vision/nnclr)
* [SimSiam](https://keras.io/examples/vision/simsiam)
This idea of using BERT-flavored pretraining in computer vision was also explored in
[Selfie](https://arxiv.org/abs/1906.02940), but it could not demonstrate strong results.
Another concurrent work that explores the idea of masked image modeling is
[SimMIM](https://arxiv.org/abs/2111.09886). Finally, as a fun fact, we, the authors of
this example, also explored the idea of ["reconstruction as a pretext task"](https://i.ibb.co/k5CpwDX/image.png)
in 2020, but we could not prevent the network from representation collapse, and
hence we did not get strong downstream performance.
We would like to thank [Xinlei Chen](http://xinleic.xyz/)
(one of the authors of MAE) for helpful discussions. We are grateful to
[JarvisLabs](https://jarvislabs.ai/) and
[Google Developers Experts](https://developers.google.com/programs/experts/)
program for helping with GPU credits.
| keras-io/examples/vision/md/masked_image_modeling.md/0 | {
"file_path": "keras-io/examples/vision/md/masked_image_modeling.md",
"repo_id": "keras-io",
"token_count": 28357
} | 94 |
# Image classification with Swin Transformers
**Author:** [Rishit Dagli](https://twitter.com/rishit_dagli)<br>
**Date created:** 2021/09/08<br>
**Last modified:** 2021/09/08<br>
**Description:** Image classification using Swin Transformers, a general-purpose backbone for computer vision.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/swin_transformers.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/swin_transformers.py)
This example implements
[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)
by Liu et al. for image classification, and demonstrates it on the
[CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
Swin Transformer (**S**hifted **Win**dow Transformer) can serve as a
general-purpose backbone for computer vision. Swin Transformer is a hierarchical
Transformer whose representations are computed with _shifted windows_. The
shifted window scheme brings greater efficiency by limiting self-attention
computation to non-overlapping local windows while also allowing for
cross-window connections. This architecture has the flexibility to model
information at various scales and has a linear computational complexity with
respect to image size.
This example requires TensorFlow 2.5 or higher.
---
## Setup
```python
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf # For tf.data and preprocessing only.
import keras
from keras import layers
from keras import ops
```
---
## Configure the hyperparameters
A key parameter to pick is the `patch_size`, the size of the input patches.
In order to use each pixel as an individual input, you can set `patch_size` to
`(1, 1)`. Below, we take inspiration from the original paper settings for
training on ImageNet-1K, keeping most of the original settings for this example.
```python
num_classes = 100
input_shape = (32, 32, 3)
patch_size = (2, 2) # 2-by-2 sized patches
dropout_rate = 0.03 # Dropout rate
num_heads = 8 # Attention heads
embed_dim = 64 # Embedding dimension
num_mlp = 256 # MLP layer size
# Convert embedded patches to query, key, and values with a learnable additive
# value
qkv_bias = True
window_size = 2 # Size of attention window
shift_size = 1 # Size of shifting window
image_dimension = 32 # Initial image size
num_patch_x = input_shape[0] // patch_size[0]
num_patch_y = input_shape[1] // patch_size[1]
learning_rate = 1e-3
batch_size = 128
num_epochs = 40
validation_split = 0.1
weight_decay = 0.0001
label_smoothing = 0.1
```
---
## Prepare the data
We load the CIFAR-100 dataset through `keras.datasets`,
normalize the images, and convert the integer labels to one-hot encoded vectors.
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
num_train_samples = int(len(x_train) * (1 - validation_split))
num_val_samples = len(x_train) - num_train_samples
x_train, x_val = np.split(x_train, [num_train_samples])
y_train, y_val = np.split(y_train, [num_train_samples])
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i])
plt.show()
```
<div class="k-default-codeblock">
```
x_train shape: (45000, 32, 32, 3) - y_train shape: (45000, 100)
x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 100)
```
</div>

---
## Helper functions
We create two helper functions: `window_partition` splits a feature map into a batch of
non-overlapping windows, and `window_reverse` merges the windows back into the original
feature map.
```python
def window_partition(x, window_size):
_, height, width, channels = x.shape
patch_num_y = height // window_size
patch_num_x = width // window_size
x = ops.reshape(
x,
(
-1,
patch_num_y,
window_size,
patch_num_x,
window_size,
channels,
),
)
x = ops.transpose(x, (0, 1, 3, 2, 4, 5))
windows = ops.reshape(x, (-1, window_size, window_size, channels))
return windows
def window_reverse(windows, window_size, height, width, channels):
patch_num_y = height // window_size
patch_num_x = width // window_size
x = ops.reshape(
windows,
(
-1,
patch_num_y,
patch_num_x,
window_size,
window_size,
channels,
),
)
x = ops.transpose(x, (0, 1, 3, 2, 4, 5))
x = ops.reshape(x, (-1, height, width, channels))
return x
```
---
## Window based multi-head self-attention
Usually Transformers perform global self-attention, where the relationships
between a token and all other tokens are computed. The global computation leads
to quadratic complexity with respect to the number of tokens. Here, as the
[original paper](https://arxiv.org/abs/2103.14030) suggests, we compute
self-attention within local windows, in a non-overlapping manner. Global
self-attention leads to quadratic computational complexity in the number of
patches, whereas window-based self-attention leads to linear complexity and is
easily scalable.
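
To make the complexity argument concrete, here is a back-of-the-envelope comparison of the
number of query-key dot products for global versus window-based attention. It is a rough
sketch that only counts attention score computations; the formulas and numbers are
illustrative, not a benchmark.

```python
def global_attention_cost(height, width):
    # Every token attends to every other token: quadratic in the token count.
    num_tokens = height * width
    return num_tokens**2


def window_attention_cost(height, width, window):
    # Attention is computed independently inside each window: linear in the
    # number of windows for a fixed window size.
    num_windows = (height // window) * (width // window)
    return num_windows * (window * window) ** 2


print(global_attention_cost(16, 16))  # 65536 dot products for a 16x16 token grid
print(window_attention_cost(16, 16, 2))  # 1024 dot products with 2x2 windows
```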
```python
class WindowAttention(layers.Layer):
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias)
self.dropout = layers.Dropout(dropout_rate)
self.proj = layers.Dense(dim)
num_window_elements = (2 * self.window_size[0] - 1) * (
2 * self.window_size[1] - 1
)
self.relative_position_bias_table = self.add_weight(
shape=(num_window_elements, self.num_heads),
initializer=keras.initializers.Zeros(),
trainable=True,
)
coords_h = np.arange(self.window_size[0])
coords_w = np.arange(self.window_size[1])
coords_matrix = np.meshgrid(coords_h, coords_w, indexing="ij")
coords = np.stack(coords_matrix)
coords_flatten = coords.reshape(2, -1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.relative_position_index = keras.Variable(
initializer=relative_position_index,
shape=relative_position_index.shape,
dtype="int",
trainable=False,
)
def call(self, x, mask=None):
_, size, channels = x.shape
head_dim = channels // self.num_heads
x_qkv = self.qkv(x)
x_qkv = ops.reshape(x_qkv, (-1, size, 3, self.num_heads, head_dim))
x_qkv = ops.transpose(x_qkv, (2, 0, 3, 1, 4))
q, k, v = x_qkv[0], x_qkv[1], x_qkv[2]
q = q * self.scale
k = ops.transpose(k, (0, 1, 3, 2))
attn = q @ k
num_window_elements = self.window_size[0] * self.window_size[1]
relative_position_index_flat = ops.reshape(self.relative_position_index, (-1,))
relative_position_bias = ops.take(
self.relative_position_bias_table,
relative_position_index_flat,
axis=0,
)
relative_position_bias = ops.reshape(
relative_position_bias,
(num_window_elements, num_window_elements, -1),
)
relative_position_bias = ops.transpose(relative_position_bias, (2, 0, 1))
attn = attn + ops.expand_dims(relative_position_bias, axis=0)
if mask is not None:
nW = mask.shape[0]
mask_float = ops.cast(
ops.expand_dims(ops.expand_dims(mask, axis=1), axis=0),
"float32",
)
attn = ops.reshape(attn, (-1, nW, self.num_heads, size, size)) + mask_float
attn = ops.reshape(attn, (-1, self.num_heads, size, size))
attn = keras.activations.softmax(attn, axis=-1)
else:
attn = keras.activations.softmax(attn, axis=-1)
attn = self.dropout(attn)
x_qkv = attn @ v
x_qkv = ops.transpose(x_qkv, (0, 2, 1, 3))
x_qkv = ops.reshape(x_qkv, (-1, size, channels))
x_qkv = self.proj(x_qkv)
x_qkv = self.dropout(x_qkv)
return x_qkv
```
---
## The complete Swin Transformer model
Finally, we put together the complete Swin Transformer by replacing the standard
multi-head attention (MHA) with shifted-window attention. As suggested in the
original paper, we create a model comprising a shifted window-based MHA
layer, followed by a 2-layer MLP with GELU nonlinearity in between, applying
`LayerNormalization` before each MHA layer and each MLP, and a residual
connection after each of these layers.
Notice that we only create a simple MLP with 2 Dense and
2 Dropout layers. Often you will see models using ResNet-50 as the MLP which is
quite standard in the literature. However in this paper the authors use a
2-layer MLP with GELU nonlinearity in between.
```python
class SwinTransformer(layers.Layer):
def __init__(
self,
dim,
num_patch,
num_heads,
window_size=7,
shift_size=0,
num_mlp=1024,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim # number of input dimensions
self.num_patch = num_patch # number of embedded patches
self.num_heads = num_heads # number of attention heads
self.window_size = window_size # size of window
self.shift_size = shift_size # size of window shift
self.num_mlp = num_mlp # number of MLP nodes
self.norm1 = layers.LayerNormalization(epsilon=1e-5)
self.attn = WindowAttention(
dim,
window_size=(self.window_size, self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)
self.drop_path = layers.Dropout(dropout_rate)
self.norm2 = layers.LayerNormalization(epsilon=1e-5)
self.mlp = keras.Sequential(
[
layers.Dense(num_mlp),
layers.Activation(keras.activations.gelu),
layers.Dropout(dropout_rate),
layers.Dense(dim),
layers.Dropout(dropout_rate),
]
)
if min(self.num_patch) < self.window_size:
self.shift_size = 0
self.window_size = min(self.num_patch)
def build(self, input_shape):
if self.shift_size == 0:
self.attn_mask = None
else:
height, width = self.num_patch
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
mask_array = np.zeros((1, height, width, 1))
count = 0
for h in h_slices:
for w in w_slices:
mask_array[:, h, w, :] = count
count += 1
mask_array = ops.convert_to_tensor(mask_array)
# mask array to windows
mask_windows = window_partition(mask_array, self.window_size)
mask_windows = ops.reshape(
mask_windows, [-1, self.window_size * self.window_size]
)
attn_mask = ops.expand_dims(mask_windows, axis=1) - ops.expand_dims(
mask_windows, axis=2
)
attn_mask = ops.where(attn_mask != 0, -100.0, attn_mask)
attn_mask = ops.where(attn_mask == 0, 0.0, attn_mask)
self.attn_mask = keras.Variable(
initializer=attn_mask,
shape=attn_mask.shape,
dtype=attn_mask.dtype,
trainable=False,
)
def call(self, x, training=False):
height, width = self.num_patch
_, num_patches_before, channels = x.shape
x_skip = x
x = self.norm1(x)
x = ops.reshape(x, (-1, height, width, channels))
if self.shift_size > 0:
shifted_x = ops.roll(
x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2]
)
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = ops.reshape(
x_windows, (-1, self.window_size * self.window_size, channels)
)
attn_windows = self.attn(x_windows, mask=self.attn_mask)
attn_windows = ops.reshape(
attn_windows,
(-1, self.window_size, self.window_size, channels),
)
shifted_x = window_reverse(
attn_windows, self.window_size, height, width, channels
)
if self.shift_size > 0:
x = ops.roll(
shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2]
)
else:
x = shifted_x
x = ops.reshape(x, (-1, height * width, channels))
x = self.drop_path(x, training=training)
x = x_skip + x
x_skip = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x_skip + x
return x
```
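Before moving on, here is a minimal smoke test for the block. This is a sketch only: the hyperparameters are illustrative and smaller than the ones used for training below, and it assumes the `window_partition`/`window_reverse` helpers and imports defined earlier.
```python
dummy_tokens = ops.ones((1, 64, 64))  # (batch, num_patches, channels)
swin_block = SwinTransformer(
    dim=64, num_patch=(8, 8), num_heads=4, window_size=2, shift_size=0, num_mlp=128
)
print(swin_block(dummy_tokens).shape)  # (1, 64, 64)
```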
---
## Model training and evaluation
### Extract and embed patches
We first create 3 layers that extract, embed, and merge patches from the
images; the Swin Transformer class we built will later operate on top of them.
```python
# Using tf ops since it is only used in tf.data.
def patch_extract(images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=(1, patch_size[0], patch_size[1], 1),
strides=(1, patch_size[0], patch_size[1], 1),
rates=(1, 1, 1, 1),
padding="VALID",
)
patch_dim = patches.shape[-1]
patch_num = patches.shape[1]
return tf.reshape(patches, (batch_size, patch_num * patch_num, patch_dim))
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super().__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim)
def call(self, patch):
pos = ops.arange(start=0, stop=self.num_patch)
return self.proj(patch) + self.pos_embed(pos)
class PatchMerging(keras.layers.Layer):
def __init__(self, num_patch, embed_dim):
super().__init__()
self.num_patch = num_patch
self.embed_dim = embed_dim
self.linear_trans = layers.Dense(2 * embed_dim, use_bias=False)
def call(self, x):
height, width = self.num_patch
_, _, C = x.shape
x = ops.reshape(x, (-1, height, width, C))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = ops.concatenate((x0, x1, x2, x3), axis=-1)
x = ops.reshape(x, (-1, (height // 2) * (width // 2), 4 * C))
return self.linear_trans(x)
```
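To see how these three layers fit together, here is a shape walk-through on dummy data. It is a sketch only; it assumes the configuration used in this example (32x32 images, a 2x2 `patch_size`, `embed_dim=64`) and the TensorFlow backend, since `patch_extract` relies on `tf` ops.
```python
dummy_images = tf.ones((1, 32, 32, 3))
dummy_patches = patch_extract(dummy_images)
print(dummy_patches.shape)  # (1, 256, 12)
dummy_tokens = PatchEmbedding(num_patch=256, embed_dim=64)(dummy_patches)
print(dummy_tokens.shape)  # (1, 256, 64)
dummy_merged = PatchMerging((16, 16), embed_dim=64)(dummy_tokens)
print(dummy_merged.shape)  # (1, 64, 128)
```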
### Prepare the tf.data.Dataset
We perform all the preprocessing steps that do not involve trainable weights with
tf.data, and prepare the training, validation, and testing sets.
```python
def augment(x):
x = tf.image.random_crop(x, size=(image_dimension, image_dimension, 3))
x = tf.image.random_flip_left_right(x)
return x
dataset = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.map(lambda x, y: (augment(x), y))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
dataset_val = (
tf.data.Dataset.from_tensor_slices((x_val, y_val))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
dataset_test = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
```
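As a quick check, we can peek at one batch to confirm that the pipeline produces patch sequences of the expected shape (a sketch, not required for training):
```python
for sample_patches, _ in dataset.take(1):
    print(sample_patches.shape)  # (batch_size, num_patch_x * num_patch_y, patch_dim)
```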
### Build the model
We put together the Swin Transformer model.
```python
input = layers.Input(shape=(256, 12))
x = PatchEmbedding(num_patch_x * num_patch_y, embed_dim)(input)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=0,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=shift_size,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = PatchMerging((num_patch_x, num_patch_y), embed_dim=embed_dim)(x)
x = layers.GlobalAveragePooling1D()(x)
output = layers.Dense(num_classes, activation="softmax")(x)
```
### Train on CIFAR-100
We train the model on CIFAR-100. Here, we only train for 40 epochs
to keep the training time short in this example.
In practice, you should train for 150 epochs to reach convergence.
```python
model = keras.Model(input, output)
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing),
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
dataset,
batch_size=batch_size,
epochs=num_epochs,
validation_data=dataset_val,
)
```
<div class="k-default-codeblock">
```
Epoch 1/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 644s 2s/step - accuracy: 0.0517 - loss: 4.3948 - top-5-accuracy: 0.1816 - val_accuracy: 0.1396 - val_loss: 3.7930 - val_top-5-accuracy: 0.3922
Epoch 2/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 626s 2s/step - accuracy: 0.1606 - loss: 3.7267 - top-5-accuracy: 0.4209 - val_accuracy: 0.1946 - val_loss: 3.5560 - val_top-5-accuracy: 0.4862
Epoch 3/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.2160 - loss: 3.4910 - top-5-accuracy: 0.5076 - val_accuracy: 0.2440 - val_loss: 3.3946 - val_top-5-accuracy: 0.5384
Epoch 4/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 620s 2s/step - accuracy: 0.2599 - loss: 3.3266 - top-5-accuracy: 0.5628 - val_accuracy: 0.2730 - val_loss: 3.2732 - val_top-5-accuracy: 0.5812
Epoch 5/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.2841 - loss: 3.2082 - top-5-accuracy: 0.5988 - val_accuracy: 0.2878 - val_loss: 3.1837 - val_top-5-accuracy: 0.6050
Epoch 6/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 617s 2s/step - accuracy: 0.3049 - loss: 3.1199 - top-5-accuracy: 0.6262 - val_accuracy: 0.3110 - val_loss: 3.0970 - val_top-5-accuracy: 0.6292
Epoch 7/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 620s 2s/step - accuracy: 0.3271 - loss: 3.0387 - top-5-accuracy: 0.6501 - val_accuracy: 0.3292 - val_loss: 3.0374 - val_top-5-accuracy: 0.6488
Epoch 8/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 615s 2s/step - accuracy: 0.3454 - loss: 2.9764 - top-5-accuracy: 0.6679 - val_accuracy: 0.3480 - val_loss: 2.9921 - val_top-5-accuracy: 0.6598
Epoch 9/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 617s 2s/step - accuracy: 0.3571 - loss: 2.9272 - top-5-accuracy: 0.6801 - val_accuracy: 0.3522 - val_loss: 2.9585 - val_top-5-accuracy: 0.6746
Epoch 10/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 624s 2s/step - accuracy: 0.3658 - loss: 2.8809 - top-5-accuracy: 0.6924 - val_accuracy: 0.3562 - val_loss: 2.9364 - val_top-5-accuracy: 0.6784
Epoch 11/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.3796 - loss: 2.8425 - top-5-accuracy: 0.7021 - val_accuracy: 0.3654 - val_loss: 2.9100 - val_top-5-accuracy: 0.6832
Epoch 12/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 622s 2s/step - accuracy: 0.3884 - loss: 2.8113 - top-5-accuracy: 0.7103 - val_accuracy: 0.3740 - val_loss: 2.8808 - val_top-5-accuracy: 0.6948
Epoch 13/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 621s 2s/step - accuracy: 0.3994 - loss: 2.7718 - top-5-accuracy: 0.7239 - val_accuracy: 0.3778 - val_loss: 2.8637 - val_top-5-accuracy: 0.6994
Epoch 14/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.4072 - loss: 2.7491 - top-5-accuracy: 0.7271 - val_accuracy: 0.3848 - val_loss: 2.8533 - val_top-5-accuracy: 0.7002
Epoch 15/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 614s 2s/step - accuracy: 0.4142 - loss: 2.7180 - top-5-accuracy: 0.7344 - val_accuracy: 0.3880 - val_loss: 2.8383 - val_top-5-accuracy: 0.7080
Epoch 16/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 614s 2s/step - accuracy: 0.4231 - loss: 2.6918 - top-5-accuracy: 0.7392 - val_accuracy: 0.3934 - val_loss: 2.8323 - val_top-5-accuracy: 0.7072
Epoch 17/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 617s 2s/step - accuracy: 0.4339 - loss: 2.6633 - top-5-accuracy: 0.7484 - val_accuracy: 0.3972 - val_loss: 2.8237 - val_top-5-accuracy: 0.7138
Epoch 18/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 617s 2s/step - accuracy: 0.4388 - loss: 2.6436 - top-5-accuracy: 0.7506 - val_accuracy: 0.3984 - val_loss: 2.8119 - val_top-5-accuracy: 0.7144
Epoch 19/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 610s 2s/step - accuracy: 0.4439 - loss: 2.6251 - top-5-accuracy: 0.7552 - val_accuracy: 0.4020 - val_loss: 2.8044 - val_top-5-accuracy: 0.7178
Epoch 20/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 611s 2s/step - accuracy: 0.4540 - loss: 2.5989 - top-5-accuracy: 0.7652 - val_accuracy: 0.4012 - val_loss: 2.7969 - val_top-5-accuracy: 0.7246
Epoch 21/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 618s 2s/step - accuracy: 0.4586 - loss: 2.5760 - top-5-accuracy: 0.7684 - val_accuracy: 0.4092 - val_loss: 2.7807 - val_top-5-accuracy: 0.7254
Epoch 22/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 618s 2s/step - accuracy: 0.4607 - loss: 2.5624 - top-5-accuracy: 0.7724 - val_accuracy: 0.4158 - val_loss: 2.7721 - val_top-5-accuracy: 0.7232
Epoch 23/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.4658 - loss: 2.5407 - top-5-accuracy: 0.7786 - val_accuracy: 0.4180 - val_loss: 2.7767 - val_top-5-accuracy: 0.7280
Epoch 24/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 617s 2s/step - accuracy: 0.4744 - loss: 2.5233 - top-5-accuracy: 0.7840 - val_accuracy: 0.4164 - val_loss: 2.7707 - val_top-5-accuracy: 0.7300
Epoch 25/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 615s 2s/step - accuracy: 0.4758 - loss: 2.5129 - top-5-accuracy: 0.7847 - val_accuracy: 0.4196 - val_loss: 2.7677 - val_top-5-accuracy: 0.7294
Epoch 26/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 610s 2s/step - accuracy: 0.4853 - loss: 2.4954 - top-5-accuracy: 0.7863 - val_accuracy: 0.4188 - val_loss: 2.7571 - val_top-5-accuracy: 0.7362
Epoch 27/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 610s 2s/step - accuracy: 0.4858 - loss: 2.4785 - top-5-accuracy: 0.7928 - val_accuracy: 0.4186 - val_loss: 2.7615 - val_top-5-accuracy: 0.7348
Epoch 28/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 613s 2s/step - accuracy: 0.4889 - loss: 2.4691 - top-5-accuracy: 0.7945 - val_accuracy: 0.4208 - val_loss: 2.7561 - val_top-5-accuracy: 0.7350
Epoch 29/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.4940 - loss: 2.4592 - top-5-accuracy: 0.7992 - val_accuracy: 0.4244 - val_loss: 2.7546 - val_top-5-accuracy: 0.7398
Epoch 30/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.4989 - loss: 2.4391 - top-5-accuracy: 0.8025 - val_accuracy: 0.4180 - val_loss: 2.7861 - val_top-5-accuracy: 0.7302
Epoch 31/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 610s 2s/step - accuracy: 0.4994 - loss: 2.4354 - top-5-accuracy: 0.8032 - val_accuracy: 0.4264 - val_loss: 2.7608 - val_top-5-accuracy: 0.7394
Epoch 32/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 607s 2s/step - accuracy: 0.5011 - loss: 2.4238 - top-5-accuracy: 0.8090 - val_accuracy: 0.4292 - val_loss: 2.7625 - val_top-5-accuracy: 0.7384
Epoch 33/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.5065 - loss: 2.4144 - top-5-accuracy: 0.8085 - val_accuracy: 0.4288 - val_loss: 2.7517 - val_top-5-accuracy: 0.7328
Epoch 34/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 612s 2s/step - accuracy: 0.5094 - loss: 2.4099 - top-5-accuracy: 0.8093 - val_accuracy: 0.4260 - val_loss: 2.7550 - val_top-5-accuracy: 0.7390
Epoch 35/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 612s 2s/step - accuracy: 0.5109 - loss: 2.3980 - top-5-accuracy: 0.8115 - val_accuracy: 0.4278 - val_loss: 2.7496 - val_top-5-accuracy: 0.7396
Epoch 36/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 615s 2s/step - accuracy: 0.5178 - loss: 2.3868 - top-5-accuracy: 0.8139 - val_accuracy: 0.4296 - val_loss: 2.7519 - val_top-5-accuracy: 0.7404
Epoch 37/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 618s 2s/step - accuracy: 0.5151 - loss: 2.3842 - top-5-accuracy: 0.8150 - val_accuracy: 0.4308 - val_loss: 2.7504 - val_top-5-accuracy: 0.7424
Epoch 38/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 613s 2s/step - accuracy: 0.5169 - loss: 2.3798 - top-5-accuracy: 0.8159 - val_accuracy: 0.4360 - val_loss: 2.7522 - val_top-5-accuracy: 0.7464
Epoch 39/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 618s 2s/step - accuracy: 0.5228 - loss: 2.3641 - top-5-accuracy: 0.8201 - val_accuracy: 0.4374 - val_loss: 2.7386 - val_top-5-accuracy: 0.7452
Epoch 40/40
352/352 ━━━━━━━━━━━━━━━━━━━━ 634s 2s/step - accuracy: 0.5232 - loss: 2.3633 - top-5-accuracy: 0.8212 - val_accuracy: 0.4266 - val_loss: 2.7614 - val_top-5-accuracy: 0.7410
```
</div>
Let's visualize the training progress of the model.
```python
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
```

Let's display the final results of the training on CIFAR-100.
```python
loss, accuracy, top_5_accuracy = model.evaluate(dataset_test)
print(f"Test loss: {round(loss, 2)}")
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
79/79 ━━━━━━━━━━━━━━━━━━━━ 26s 325ms/step - accuracy: 0.4474 - loss: 2.7119 - top-5-accuracy: 0.7556
Test loss: 2.7
Test accuracy: 44.8%
Test top 5 accuracy: 75.23%
```
</div>
The Swin Transformer model we just trained has just 152K parameters, and it reaches
~75% test top-5 accuracy within just 40 epochs without any signs of
overfitting, as seen in the graph above. This means we can train this network
for longer (perhaps with a bit more regularization) and obtain even better
performance. Performance can be improved further with additional techniques such as a
cosine decay learning rate schedule or other data augmentation strategies; a sketch of
such a schedule is shown after the screenshot below.
While experimenting, I trained the model for 150 epochs with a slightly
higher dropout rate and a larger embedding dimension, which pushed the performance to
~72% test accuracy on CIFAR-100, as you can see in the screenshot.

The authors report a top-1 accuracy of 87.3% on ImageNet. They also
present a number of experiments studying how input sizes, optimizers, etc. affect
the final performance of this model, and they further apply the model to object
detection, semantic segmentation, and instance segmentation, reporting competitive
results. You are strongly advised to also
check out the [original paper](https://arxiv.org/abs/2103.14030).
This example takes inspiration from the official
[PyTorch](https://github.com/microsoft/Swin-Transformer) and
[TensorFlow](https://github.com/VcampSoldiers/Swin-Transformer-Tensorflow)
implementations.
| keras-io/examples/vision/md/swin_transformers.md/0 | {
"file_path": "keras-io/examples/vision/md/swin_transformers.md",
"repo_id": "keras-io",
"token_count": 13166
} | 95 |
"""
Title: Train a Vision Transformer on small datasets
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)
Date created: 2022/01/07
Last modified: 2022/01/10
Description: Training a ViT from scratch on smaller datasets with shifted patch tokenization and locality self-attention.
Accelerator: GPU
"""
"""
## Introduction
In the academic paper
[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929),
the authors mention that Vision Transformers (ViT) are data-hungry. Therefore,
pretraining a ViT on a large-sized dataset like JFT300M and fine-tuning
it on medium-sized datasets (like ImageNet) is the only way to beat
state-of-the-art Convolutional Neural Network models.
The self-attention layer of ViT lacks **locality inductive bias** (the notion that
image pixels are locally correlated and that their correlation maps are translation-invariant).
This is the reason why ViTs need more data. On the other hand, CNNs look at images through
spatial sliding windows, which helps them get better results with smaller datasets.
In the academic paper
[Vision Transformer for Small-Size Datasets](https://arxiv.org/abs/2112.13492v1),
the authors set out to tackle the problem of locality inductive bias in ViTs.
The main ideas are:
- **Shifted Patch Tokenization**
- **Locality Self Attention**
This example implements the ideas of the paper. A large part of this
example is inspired from
[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
_Note_: This example requires TensorFlow 2.6 or higher, as well as
[TensorFlow Addons](https://www.tensorflow.org/addons), which can be
installed using the following command:
```python
pip install -qq -U tensorflow-addons
```
"""
"""
## Setup
"""
import math
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
from tensorflow.keras import layers
# Setting seed for reproducibility
SEED = 42
keras.utils.set_random_seed(SEED)
"""
## Prepare the data
"""
NUM_CLASSES = 100
INPUT_SHAPE = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## Configure the hyperparameters
The hyperparameters are different from the paper. Feel free to tune
the hyperparameters yourself.
"""
# DATA
BUFFER_SIZE = 512
BATCH_SIZE = 256
# AUGMENTATION
IMAGE_SIZE = 72
PATCH_SIZE = 6
NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2
# OPTIMIZER
LEARNING_RATE = 0.001
WEIGHT_DECAY = 0.0001
# TRAINING
EPOCHS = 50
# ARCHITECTURE
LAYER_NORM_EPS = 1e-6
TRANSFORMER_LAYERS = 8
PROJECTION_DIM = 64
NUM_HEADS = 4
TRANSFORMER_UNITS = [
PROJECTION_DIM * 2,
PROJECTION_DIM,
]
MLP_HEAD_UNITS = [2048, 1024]
"""
## Use data augmentation
A snippet from the paper:
*"According to DeiT, various techniques are required to effectively
train ViTs. Thus, we applied data augmentations such as CutMix, Mixup,
Auto Augment, Repeated Augment to all models."*
In this example, we will focus solely on the novelty of the approach
and not on reproducing the paper results. For this reason, we
don't use the mentioned data augmentation schemes. Please feel
free to add to or remove from the augmentation pipeline.
"""
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.02),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
"""
## Implement Shifted Patch Tokenization
In a ViT pipeline, the input images are divided into patches that are
then linearly projected into tokens. Shifted Patch Tokenization (SPT)
is introduced to combat the low receptive field of ViTs. The steps
for Shifted Patch Tokenization are as follows:
- Start with an image.
- Shift the image in diagonal directions.
- Concat the diagonally shifted images with the original image.
- Extract patches of the concatenated images.
- Flatten the spatial dimension of all patches.
- Layer normalize the flattened patches and then project them.
|  |
| :--: |
| Shifted Patch Tokenization [Source](https://arxiv.org/abs/2112.13492v1) |
"""
class ShiftedPatchTokenization(layers.Layer):
def __init__(
self,
image_size=IMAGE_SIZE,
patch_size=PATCH_SIZE,
num_patches=NUM_PATCHES,
projection_dim=PROJECTION_DIM,
vanilla=False,
**kwargs,
):
super().__init__(**kwargs)
        self.vanilla = vanilla  # Flag to switch to vanilla patch extractor
self.image_size = image_size
self.patch_size = patch_size
self.half_patch = patch_size // 2
self.flatten_patches = layers.Reshape((num_patches, -1))
self.projection = layers.Dense(units=projection_dim)
self.layer_norm = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)
def crop_shift_pad(self, images, mode):
# Build the diagonally shifted images
if mode == "left-up":
crop_height = self.half_patch
crop_width = self.half_patch
shift_height = 0
shift_width = 0
elif mode == "left-down":
crop_height = 0
crop_width = self.half_patch
shift_height = self.half_patch
shift_width = 0
elif mode == "right-up":
crop_height = self.half_patch
crop_width = 0
shift_height = 0
shift_width = self.half_patch
else:
crop_height = 0
crop_width = 0
shift_height = self.half_patch
shift_width = self.half_patch
# Crop the shifted images and pad them
crop = tf.image.crop_to_bounding_box(
images,
offset_height=crop_height,
offset_width=crop_width,
target_height=self.image_size - self.half_patch,
target_width=self.image_size - self.half_patch,
)
shift_pad = tf.image.pad_to_bounding_box(
crop,
offset_height=shift_height,
offset_width=shift_width,
target_height=self.image_size,
target_width=self.image_size,
)
return shift_pad
def call(self, images):
if not self.vanilla:
# Concat the shifted images with the original image
images = tf.concat(
[
images,
self.crop_shift_pad(images, mode="left-up"),
self.crop_shift_pad(images, mode="left-down"),
self.crop_shift_pad(images, mode="right-up"),
self.crop_shift_pad(images, mode="right-down"),
],
axis=-1,
)
        # Patchify the images and flatten them
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
flat_patches = self.flatten_patches(patches)
if not self.vanilla:
# Layer normalize the flat patches and linearly project it
tokens = self.layer_norm(flat_patches)
tokens = self.projection(tokens)
else:
# Linearly project the flat patches
tokens = self.projection(flat_patches)
return (tokens, patches)
"""
### Visualize the patches
"""
# Get a random image from the training dataset
# and resize the image
image = x_train[np.random.choice(range(x_train.shape[0]))]
resized_image = tf.image.resize(
tf.convert_to_tensor([image]), size=(IMAGE_SIZE, IMAGE_SIZE)
)
# Vanilla patch maker: This takes an image and divides it into
# patches as in the original ViT paper
(token, patch) = ShiftedPatchTokenization(vanilla=True)(resized_image / 255.0)
(token, patch) = (token[0], patch[0])
n = patch.shape[0]
count = 1
plt.figure(figsize=(4, 4))
for row in range(n):
for col in range(n):
plt.subplot(n, n, count)
count = count + 1
image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 3))
plt.imshow(image)
plt.axis("off")
plt.show()
# Shifted Patch Tokenization: This layer takes the image, shifts it
# diagonally and then extracts patches from the concatenated images
(token, patch) = ShiftedPatchTokenization(vanilla=False)(resized_image / 255.0)
(token, patch) = (token[0], patch[0])
n = patch.shape[0]
shifted_images = ["ORIGINAL", "LEFT-UP", "LEFT-DOWN", "RIGHT-UP", "RIGHT-DOWN"]
for index, name in enumerate(shifted_images):
print(name)
count = 1
plt.figure(figsize=(4, 4))
for row in range(n):
for col in range(n):
plt.subplot(n, n, count)
count = count + 1
image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 5 * 3))
plt.imshow(image[..., 3 * index : 3 * index + 3])
plt.axis("off")
plt.show()
"""
## Implement the patch encoding layer
This layer accepts projected patches and then adds positional
information to them.
"""
class PatchEncoder(layers.Layer):
def __init__(
self, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM, **kwargs
):
super().__init__(**kwargs)
self.num_patches = num_patches
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
self.positions = tf.range(start=0, limit=self.num_patches, delta=1)
def call(self, encoded_patches):
encoded_positions = self.position_embedding(self.positions)
encoded_patches = encoded_patches + encoded_positions
return encoded_patches
"""
## Implement Locality Self Attention
The regular attention equation is stated below.
|  |
| :--: |
| [Source](https://towardsdatascience.com/attention-is-all-you-need-discovering-the-transformer-paper-73e5ff5e0634) |
The attention module takes a query, key, and value. First, we compute the
similarity between the query and key via a dot product. Then, the result
is scaled by the square root of the key dimension. The scaling prevents
the softmax function from having an overly small gradient. Softmax is then
applied to the scaled dot product to produce the attention weights.
The value is then modulated via the attention weights.
In self-attention, the query, key, and value come from the same input.
The dot product therefore yields large values for self-token relations rather than
inter-token relations, which means the softmax assigns higher
probabilities to self-token relations than to inter-token relations.
To combat this, the authors propose masking the diagonal of the dot product.
This way, we force the attention module to pay more attention to the
inter-token relations.
The scaling factor is a constant in the regular attention module.
This acts like a temperature term that can modulate the softmax function.
The authors suggest a learnable temperature term instead of a constant.
|  |
| :--: |
| Locality Self Attention [Source](https://arxiv.org/abs/2112.13492v1) |
These two changes together make up Locality Self Attention. We have subclassed the
[`layers.MultiHeadAttention`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention)
and implemented the trainable temperature. The attention mask is built
at a later stage.
"""
class MultiHeadAttentionLSA(tf.keras.layers.MultiHeadAttention):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# The trainable temperature term. The initial value is
# the square root of the key dimension.
self.tau = tf.Variable(math.sqrt(float(self._key_dim)), trainable=True)
def _compute_attention(self, query, key, value, attention_mask=None, training=None):
query = tf.multiply(query, 1.0 / self.tau)
attention_scores = tf.einsum(self._dot_product_equation, key, query)
attention_scores = self._masked_softmax(attention_scores, attention_mask)
attention_scores_dropout = self._dropout_layer(
attention_scores, training=training
)
attention_output = tf.einsum(
self._combine_equation, attention_scores_dropout, value
)
return attention_output, attention_scores
"""
## Implement the MLP
"""
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
# Build the diagonal attention mask
diag_attn_mask = 1 - tf.eye(NUM_PATCHES)
diag_attn_mask = tf.cast([diag_attn_mask], dtype=tf.int8)
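# Quick sanity check of the LSA layer together with the diagonal mask.
# This is an illustrative sketch only (the shapes simply mirror the
# configuration above) and is not required for training.
lsa_layer = MultiHeadAttentionLSA(num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1)
dummy_tokens = tf.random.normal((1, NUM_PATCHES, PROJECTION_DIM))
dummy_out = lsa_layer(dummy_tokens, dummy_tokens, attention_mask=diag_attn_mask)
print(dummy_out.shape)  # (1, NUM_PATCHES, PROJECTION_DIM)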
"""
## Build the ViT
"""
def create_vit_classifier(vanilla=False):
inputs = layers.Input(shape=INPUT_SHAPE)
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
(tokens, _) = ShiftedPatchTokenization(vanilla=vanilla)(augmented)
# Encode patches.
encoded_patches = PatchEncoder()(tokens)
# Create multiple layers of the Transformer block.
for _ in range(TRANSFORMER_LAYERS):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
if not vanilla:
attention_output = MultiHeadAttentionLSA(
num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1
)(x1, x1, attention_mask=diag_attn_mask)
else:
attention_output = layers.MultiHeadAttention(
num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP.
x3 = mlp(x3, hidden_units=TRANSFORMER_UNITS, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
# Add MLP.
features = mlp(representation, hidden_units=MLP_HEAD_UNITS, dropout_rate=0.5)
# Classify outputs.
logits = layers.Dense(NUM_CLASSES)(features)
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=logits)
return model
"""
## Compile, train, and evaluate the model
"""
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
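# Illustrative sketch: inspect a few values of the warmup-cosine schedule.
# The step counts below are hypothetical; `run_experiment` computes the real
# ones from the dataset size and the number of epochs.
_demo_schedule = WarmUpCosine(
    learning_rate_base=LEARNING_RATE,
    total_steps=1000,
    warmup_learning_rate=0.0,
    warmup_steps=100,
)
for _demo_step in [0, 50, 100, 500, 1000]:
    print(_demo_step, float(_demo_schedule(_demo_step)))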
def run_experiment(model):
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.10
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
optimizer = tfa.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_split=0.1,
)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
return history
# Run experiments with the vanilla ViT
vit = create_vit_classifier(vanilla=True)
history = run_experiment(vit)
# Run experiments with the Shifted Patch Tokenization and
# Locality Self Attention modified ViT
vit_sl = create_vit_classifier(vanilla=False)
history = run_experiment(vit_sl)
"""
## Final Notes
With the help of Shifted Patch Tokenization and Locality Self Attention,
we were able to get ~**3-4%** top-1 accuracy gains on CIFAR100.
The ideas behind Shifted Patch Tokenization and Locality Self Attention
are very intuitive and easy to implement. The authors also ablate
different shifting strategies for Shifted Patch Tokenization in the
supplementary material of the paper.
I would like to thank [Jarvislabs.ai](https://jarvislabs.ai/) for
generously helping with GPU credits.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/vit_small_ds_v2)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/vit-small-ds).
"""
| keras-io/examples/vision/vit_small_ds.py/0 | {
"file_path": "keras-io/examples/vision/vit_small_ds.py",
"repo_id": "keras-io",
"token_count": 7554
} | 96 |
<jupyter_start><jupyter_text>Introduction to Keras for Researchers**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/04/01**Last modified:** 2020/10/02**Description:** Everything you need to know to use Keras & TensorFlow for deep learning research. Setup<jupyter_code>import tensorflow as tf
import keras<jupyter_output><empty_output><jupyter_text>IntroductionAre you a machine learning researcher? Do you publish at NeurIPS and push thestate-of-the-art in CV and NLP? This guide will serve as your first introduction to coreKeras & TensorFlow API concepts.In this guide, you will learn about:- Tensors, variables, and gradients in TensorFlow- Creating layers by subclassing the `Layer` class- Writing low-level training loops- Tracking losses created by layers via the `add_loss()` method- Tracking metrics in a low-level training loop- Speeding up execution with a compiled `tf.function`- Executing layers in training or inference mode- The Keras Functional APIYou will also see the Keras API in action in two end-to-end research examples:a Variational Autoencoder, and a Hypernetwork. TensorsTensorFlow is an infrastructure layer for differentiable programming.At its heart, it's a framework for manipulating N-dimensional arrays (tensors),much like NumPy.However, there are three key differences between NumPy and TensorFlow:- TensorFlow can leverage hardware accelerators such as GPUs and TPUs.- TensorFlow can automatically compute the gradient of arbitrary differentiable tensor expressions.- TensorFlow computation can be distributed to large numbers of devices on a single machine, and large number ofmachines (potentially with multiple devices each).Let's take a look at the object that is at the core of TensorFlow: the Tensor.Here's a constant tensor:<jupyter_code>x = tf.constant([[5, 2], [1, 3]])
print(x)<jupyter_output><empty_output><jupyter_text>You can get its value as a NumPy array by calling `.numpy()`:<jupyter_code>x.numpy()<jupyter_output><empty_output><jupyter_text>Much like a NumPy array, it features the attributes `dtype` and `shape`:<jupyter_code>print("dtype:", x.dtype)
print("shape:", x.shape)<jupyter_output><empty_output><jupyter_text>A common way to create constant tensors is via `tf.ones` and `tf.zeros` (just like `np.ones` and `np.zeros`):<jupyter_code>print(tf.ones(shape=(2, 1)))
print(tf.zeros(shape=(2, 1)))<jupyter_output><empty_output><jupyter_text>You can also create random constant tensors:<jupyter_code>x = tf.random.normal(shape=(2, 2), mean=0.0, stddev=1.0)
x = tf.random.uniform(shape=(2, 2), minval=0, maxval=10, dtype="int32")<jupyter_output><empty_output><jupyter_text>VariablesVariables are special tensors used to store mutable state (such as the weights of a neural network).You create a `Variable` using some initial value:<jupyter_code>initial_value = tf.random.normal(shape=(2, 2))
a = tf.Variable(initial_value)
print(a)<jupyter_output><empty_output><jupyter_text>You update the value of a `Variable` by using the methods `.assign(value)`, `.assign_add(increment)`, or `.assign_sub(decrement)`:<jupyter_code>new_value = tf.random.normal(shape=(2, 2))
a.assign(new_value)
for i in range(2):
for j in range(2):
assert a[i, j] == new_value[i, j]
added_value = tf.random.normal(shape=(2, 2))
a.assign_add(added_value)
for i in range(2):
for j in range(2):
assert a[i, j] == new_value[i, j] + added_value[i, j]<jupyter_output><empty_output><jupyter_text>Doing math in TensorFlowIf you've used NumPy, doing math in TensorFlow will look very familiar.The main difference is that your TensorFlow code can run on GPU and TPU.<jupyter_code>a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
c = a + b
d = tf.square(c)
e = tf.exp(d)<jupyter_output><empty_output><jupyter_text>GradientsHere's another big difference with NumPy: you can automatically retrieve the gradient of any differentiable expression.Just open a `GradientTape`, start "watching" a tensor via `tape.watch()`,and compose a differentiable expression using this tensor as input:<jupyter_code>a = tf.random.normal(shape=(2, 2))
b = tf.random.normal(shape=(2, 2))
with tf.GradientTape() as tape:
tape.watch(a) # Start recording the history of operations applied to `a`
c = tf.sqrt(tf.square(a) + tf.square(b)) # Do some math using `a`
# What's the gradient of `c` with respect to `a`?
dc_da = tape.gradient(c, a)
print(dc_da)<jupyter_output><empty_output><jupyter_text>By default, variables are watched automatically, so you don't need to manually `watch` them:<jupyter_code>a = tf.Variable(a)
with tf.GradientTape() as tape:
c = tf.sqrt(tf.square(a) + tf.square(b))
dc_da = tape.gradient(c, a)
print(dc_da)<jupyter_output><empty_output><jupyter_text>Note that you can compute higher-order derivatives by nesting tapes:<jupyter_code>with tf.GradientTape() as outer_tape:
with tf.GradientTape() as tape:
c = tf.sqrt(tf.square(a) + tf.square(b))
dc_da = tape.gradient(c, a)
d2c_da2 = outer_tape.gradient(dc_da, a)
print(d2c_da2)<jupyter_output><empty_output><jupyter_text>Keras layersWhile TensorFlow is an **infrastructure layer for differentiable programming**,dealing with tensors, variables, and gradients,Keras is a **user interface for deep learning**, dealing withlayers, models, optimizers, loss functions, metrics, and more.Keras serves as the high-level API for TensorFlow:Keras is what makes TensorFlow simple and productive.The `Layer` class is the fundamental abstraction in Keras.A `Layer` encapsulates a state (weights) and some computation(defined in the call method).A simple layer looks like this.The `self.add_weight()` method gives you a shortcut for creating weights:<jupyter_code>class Linear(keras.layers.Layer):
"""y = w.x + b"""
def __init__(self, units=32, input_dim=32):
super().__init__()
self.w = self.add_weight(
shape=(input_dim, units), initializer="random_normal", trainable=True
)
self.b = self.add_weight(shape=(units,), initializer="zeros", trainable=True)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b<jupyter_output><empty_output><jupyter_text>You would use a `Layer` instance much like a Python function:<jupyter_code># Instantiate our layer.
linear_layer = Linear(units=4, input_dim=2)
# The layer can be treated as a function.
# Here we call it on some data.
y = linear_layer(tf.ones((2, 2)))
assert y.shape == (2, 4)<jupyter_output><empty_output><jupyter_text>The weight variables (created in `__init__`) are automaticallytracked under the `weights` property:<jupyter_code>assert linear_layer.weights == [linear_layer.w, linear_layer.b]<jupyter_output><empty_output><jupyter_text>You have many built-in layers available, from `Dense` to `Conv2D` to `LSTM` tofancier ones like `Conv3DTranspose` or `ConvLSTM2D`. Be smart about reusingbuilt-in functionality. Layer weight creation in `build(input_shape)`It's often a good idea to defer weight creation to the `build()` method, sothat you don't need to specify the input dim/shape at layer construction time:<jupyter_code>class Linear(keras.layers.Layer):
"""y = w.x + b"""
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return tf.matmul(inputs, self.w) + self.b
# Instantiate our layer.
linear_layer = Linear(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(tf.ones((2, 2)))<jupyter_output><empty_output><jupyter_text>Layer gradientsYou can automatically retrieve the gradients of the weights of a layer bycalling it inside a `GradientTape`. Using these gradients, you can update theweights of the layer, either manually, or using an optimizer object. Of course,you can modify the gradients before using them, if you need to.<jupyter_code># Prepare a dataset.
(x_train, y_train), _ = keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
# Instantiate our linear layer (defined above) with 10 units.
linear_layer = Linear(10)
# Instantiate a logistic loss function that expects integer targets.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Iterate over the batches of the dataset.
for step, (x, y) in enumerate(dataset):
# Open a GradientTape.
with tf.GradientTape() as tape:
# Forward pass.
logits = linear_layer(x)
# Loss value for this batch.
loss = loss_fn(y, logits)
# Get gradients of the loss wrt the weights.
gradients = tape.gradient(loss, linear_layer.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, linear_layer.trainable_weights))
# Logging.
if step % 100 == 0:
print("Step:", step, "Loss:", float(loss))<jupyter_output><empty_output><jupyter_text>Trainable and non-trainable weightsWeights created by layers can be either trainable or non-trainable. They'reexposed in `trainable_weights` and `non_trainable_weights` respectively.Here's a layer with a non-trainable weight:<jupyter_code>class ComputeSum(keras.layers.Layer):
"""Returns the sum of the inputs."""
def __init__(self, input_dim):
super().__init__()
# Create a non-trainable weight.
self.total = self.add_weight(
initializer="zeros", shape=(input_dim,), trainable=False
)
def call(self, inputs):
self.total.assign_add(tf.reduce_sum(inputs, axis=0))
return self.total
my_sum = ComputeSum(2)
x = tf.ones((2, 2))
y = my_sum(x)
print(y.numpy()) # [2. 2.]
y = my_sum(x)
print(y.numpy()) # [4. 4.]
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []<jupyter_output><empty_output><jupyter_text>Layers that own layersLayers can be recursively nested to create bigger computation blocks.Each layer will track the weights of its sublayers(both trainable and non-trainable).<jupyter_code># Let's reuse the Linear class
# with a `build` method that we defined above.
class MLP(keras.layers.Layer):
"""Simple stack of Linear layers."""
def __init__(self):
super().__init__()
self.linear_1 = Linear(32)
self.linear_2 = Linear(32)
self.linear_3 = Linear(10)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.linear_2(x)
x = tf.nn.relu(x)
return self.linear_3(x)
mlp = MLP()
# The first call to the `mlp` object will create the weights.
y = mlp(tf.ones(shape=(3, 64)))
# Weights are recursively tracked.
assert len(mlp.weights) == 6<jupyter_output><empty_output><jupyter_text>Note that our manually-created MLP above is equivalent to the followingbuilt-in option:<jupyter_code>mlp = keras.Sequential(
[
keras.layers.Dense(32, activation=tf.nn.relu),
keras.layers.Dense(32, activation=tf.nn.relu),
keras.layers.Dense(10),
]
)<jupyter_output><empty_output><jupyter_text>Tracking losses created by layersLayers can create losses during the forward pass via the `add_loss()` method.This is especially useful for regularization losses.The losses created by sublayers are recursively tracked by the parent layers.Here's a layer that creates an activity regularization loss:<jupyter_code>class ActivityRegularization(keras.layers.Layer):
"""Layer that creates an activity sparsity regularization loss."""
def __init__(self, rate=1e-2):
super().__init__()
self.rate = rate
def call(self, inputs):
# We use `add_loss` to create a regularization loss
# that depends on the inputs.
self.add_loss(self.rate * tf.reduce_sum(inputs))
return inputs<jupyter_output><empty_output><jupyter_text>Any model incorporating this layer will track this regularization loss:<jupyter_code># Let's use the loss layer in a MLP block.
class SparseMLP(keras.layers.Layer):
"""Stack of Linear layers with a sparsity regularization loss."""
def __init__(self):
super().__init__()
self.linear_1 = Linear(32)
self.regularization = ActivityRegularization(1e-2)
self.linear_3 = Linear(10)
def call(self, inputs):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.regularization(x)
return self.linear_3(x)
mlp = SparseMLP()
y = mlp(tf.ones((10, 10)))
print(mlp.losses) # List containing one float32 scalar<jupyter_output><empty_output><jupyter_text>These losses are cleared by the top-level layer at the start of each forwardpass -- they don't accumulate. `layer.losses` always contains only the lossescreated during the last forward pass. You would typically use these losses bysumming them before computing your gradients when writing a training loop.<jupyter_code># Losses correspond to the *last* forward pass.
mlp = SparseMLP()
mlp(tf.ones((10, 10)))
assert len(mlp.losses) == 1
mlp(tf.ones((10, 10)))
assert len(mlp.losses) == 1 # No accumulation.
# Let's demonstrate how to use these losses in a training loop.
# Prepare a dataset.
(x_train, y_train), _ = keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
# A new MLP.
mlp = SparseMLP()
# Loss and optimizer.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
for step, (x, y) in enumerate(dataset):
with tf.GradientTape() as tape:
# Forward pass.
logits = mlp(x)
# External loss value for this batch.
loss = loss_fn(y, logits)
# Add the losses created during the forward pass.
loss += sum(mlp.losses)
# Get gradients of the loss wrt the weights.
gradients = tape.gradient(loss, mlp.trainable_weights)
# Update the weights of our linear layer.
optimizer.apply_gradients(zip(gradients, mlp.trainable_weights))
# Logging.
if step % 100 == 0:
print("Step:", step, "Loss:", float(loss))<jupyter_output><empty_output><jupyter_text>Keeping track of training metricsKeras offers a broad range of built-in metrics, like `keras.metrics.AUC`or `keras.metrics.PrecisionAtRecall`. It's also easy to create yourown metrics in a few lines of code.To use a metric in a custom training loop, you would:- Instantiate the metric object, e.g. `metric = keras.metrics.AUC()`- Call its `metric.udpate_state(targets, predictions)` method for each batch of data- Query its result via `metric.result()`- Reset the metric's state at the end of an epoch or at the start of an evaluation via`metric.reset_state()`Here's a simple example:<jupyter_code># Instantiate a metric object
accuracy = keras.metrics.SparseCategoricalAccuracy()
# Prepare our layer, loss, and optimizer.
model = keras.Sequential(
[
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(10),
]
)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
for epoch in range(2):
# Iterate over the batches of a dataset.
for step, (x, y) in enumerate(dataset):
with tf.GradientTape() as tape:
logits = model(x)
# Compute the loss value for this batch.
loss_value = loss_fn(y, logits)
# Update the state of the `accuracy` metric.
accuracy.update_state(y, logits)
# Update the weights of the model to minimize the loss value.
gradients = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
# Logging the current accuracy value so far.
if step % 200 == 0:
print("Epoch:", epoch, "Step:", step)
print("Total running accuracy so far: %.3f" % accuracy.result())
# Reset the metric's state at the end of an epoch
accuracy.reset_state()<jupyter_output><empty_output><jupyter_text>You can also define your own metrics by subclassing `keras.metrics.Metric`.You need to override the three functions called above:- Override `update_state()` to update the statistic values.- Override `result()` to return the metric value.- Override `reset_state()` to reset the metric to its initial state.Here is an example where we implement the F1-score metric(with support for sample weighting).<jupyter_code>class F1Score(keras.metrics.Metric):
def __init__(self, name="f1_score", dtype="float32", threshold=0.5, **kwargs):
super().__init__(name=name, dtype=dtype, **kwargs)
        self.threshold = threshold
self.true_positives = self.add_weight(
name="tp", dtype=dtype, initializer="zeros"
)
self.false_positives = self.add_weight(
name="fp", dtype=dtype, initializer="zeros"
)
self.false_negatives = self.add_weight(
name="fn", dtype=dtype, initializer="zeros"
)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.math.greater_equal(y_pred, self.threshold)
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
true_positives = tf.cast(y_true & y_pred, self.dtype)
false_positives = tf.cast(~y_true & y_pred, self.dtype)
false_negatives = tf.cast(y_true & ~y_pred, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
true_positives *= sample_weight
false_positives *= sample_weight
false_negatives *= sample_weight
self.true_positives.assign_add(tf.reduce_sum(true_positives))
self.false_positives.assign_add(tf.reduce_sum(false_positives))
self.false_negatives.assign_add(tf.reduce_sum(false_negatives))
def result(self):
precision = self.true_positives / (self.true_positives + self.false_positives)
recall = self.true_positives / (self.true_positives + self.false_negatives)
return precision * recall * 2.0 / (precision + recall)
def reset_state(self):
self.true_positives.assign(0)
self.false_positives.assign(0)
self.false_negatives.assign(0)<jupyter_output><empty_output><jupyter_text>Let's test-drive it:<jupyter_code>m = F1Score()
m.update_state([0, 1, 0, 0], [0.3, 0.5, 0.8, 0.9])
print("Intermediate result:", float(m.result()))
m.update_state([1, 1, 1, 1], [0.1, 0.7, 0.6, 0.0])
print("Final result:", float(m.result()))<jupyter_output><empty_output><jupyter_text>Compiled functionsRunning eagerly is great for debugging, but you will get better performance bycompiling your computation into static graphs. Static graphs are a researcher'sbest friends. You can compile any function by wrapping it in a `tf.function`decorator.<jupyter_code># Prepare our layer, loss, and optimizer.
model = keras.Sequential(
[
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(32, activation="relu"),
keras.layers.Dense(10),
]
)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Create a training step function.
@tf.function # Make it fast.
def train_on_batch(x, y):
with tf.GradientTape() as tape:
logits = model(x)
loss = loss_fn(y, logits)
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
return loss
# Prepare a dataset.
(x_train, y_train), _ = keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
dataset = dataset.shuffle(buffer_size=1024).batch(64)
for step, (x, y) in enumerate(dataset):
loss = train_on_batch(x, y)
if step % 100 == 0:
print("Step:", step, "Loss:", float(loss))<jupyter_output><empty_output><jupyter_text>Training mode & inference modeSome layers, in particular the `BatchNormalization` layer and the `Dropout`layer, have different behaviors during training and inference. For such layers,it is standard practice to expose a `training` (boolean) argument in the `call`method.By exposing this argument in `call`, you enable the built-in training andevaluation loops (e.g. fit) to correctly use the layer in training andinference modes.<jupyter_code>class Dropout(keras.layers.Layer):
def __init__(self, rate):
super().__init__()
self.rate = rate
def call(self, inputs, training=None):
if training:
return tf.nn.dropout(inputs, rate=self.rate)
return inputs
class MLPWithDropout(keras.layers.Layer):
def __init__(self):
super().__init__()
self.linear_1 = Linear(32)
self.dropout = Dropout(0.5)
self.linear_3 = Linear(10)
def call(self, inputs, training=None):
x = self.linear_1(inputs)
x = tf.nn.relu(x)
x = self.dropout(x, training=training)
return self.linear_3(x)
mlp = MLPWithDropout()
y_train = mlp(tf.ones((2, 2)), training=True)
y_test = mlp(tf.ones((2, 2)), training=False)<jupyter_output><empty_output><jupyter_text>The Functional API for model-buildingTo build deep learning models, you don't have to use object-oriented programming all thetime. All layers we've seen so far can also be composed functionally, like this (we callit the "Functional API"):<jupyter_code># We use an `Input` object to describe the shape and dtype of the inputs.
# This is the deep learning equivalent of *declaring a type*.
# The shape argument is per-sample; it does not include the batch size.
# The functional API focused on defining per-sample transformations.
# The model we create will automatically batch the per-sample transformations,
# so that it can be called on batches of data.
inputs = keras.Input(shape=(16,), dtype="float32")
# We call layers on these "type" objects
# and they return updated types (new shapes/dtypes).
x = Linear(32)(inputs) # We are reusing the Linear layer we defined earlier.
x = Dropout(0.5)(x) # We are reusing the Dropout layer we defined earlier.
outputs = Linear(10)(x)
# A functional `Model` can be defined by specifying inputs and outputs.
# A model is itself a layer like any other.
model = keras.Model(inputs, outputs)
# A functional model already has weights, before being called on any data.
# That's because we defined its input shape in advance (in `Input`).
assert len(model.weights) == 4
# Let's call our model on some data, for fun.
y = model(tf.ones((2, 16)))
assert y.shape == (2, 10)
# You can pass a `training` argument in `__call__`
# (it will get passed down to the Dropout layer).
y = model(tf.ones((2, 16)), training=True)<jupyter_output><empty_output><jupyter_text>The Functional API tends to be more concise than subclassing, and provides a few otheradvantages (generally the same advantages that functional, typed languages provide overuntyped OO development). However, it can only be used to define DAGs of layers --recursive networks should be defined as Layer subclasses instead.Learn more about the Functional API [here](/guides/functional_api/).In your research workflows, you may often find yourself mix-and-matching OO models andFunctional models.Note that the `Model` class also features built-in training & evaluation loops:`fit()`, `predict()` and `evaluate()` (configured via the `compile()` method).These built-in functions give you access to thefollowing built-in training infrastructure features:* [Callbacks](/api/callbacks/). You can leverage built-incallbacks for early-stopping, model checkpointing,and monitoring training with TensorBoard. You can also[implement custom callbacks](/guides/writing_your_own_callbacks/) if needed.* [Distributed training](https://keras.io/guides/distributed_training/). Youcan easily scale up your training to multiple GPUs, TPU, or even multiple machineswith the `tf.distribute` API -- with no changes to your code.* [Step fusing](https://keras.io/api/models/model_training_apis/compile-method).With the `steps_per_execution` argument in `Model.compile()`, you can processmultiple batches in a single `tf.function` call, which greatly improvesdevice utilization on TPUs.We won't go into the details, but we provide a simple code examplebelow. It leverages the built-in training infrastructure to implement the MNISTexample above.<jupyter_code>inputs = keras.Input(shape=(784,), dtype="float32")
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.Dense(32, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
model = keras.Model(inputs, outputs)
# Specify the loss, optimizer, and metrics with `compile()`.
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model with the dataset for 2 epochs.
model.fit(dataset, epochs=2)
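# The built-in infrastructure described above plugs into these same calls. As an
# illustrative sketch (not run here; the callback choice and `steps_per_execution=8`
# are arbitrary):
#   model.compile(..., steps_per_execution=8)  # fuse several batches per tf.function call
#   model.fit(dataset, epochs=2, callbacks=[keras.callbacks.EarlyStopping(monitor="loss")])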
model.predict(dataset)
model.evaluate(dataset)<jupyter_output><empty_output><jupyter_text>You can always subclass the `Model` class (it works exactly like subclassing`Layer`) if you want to leverage built-in training loops for your OO models.Just override the `Model.train_step()` tocustomize what happens in `fit()` while retaining supportfor the built-in infrastructure features outlined above -- callbacks,zero-code distribution support, and step fusing support.You may also override `test_step()` to customize what happens in `evaluate()`,and override `predict_step()` to customize what happens in `predict()`. For moreinformation, please refer to[this guide](https://keras.io/guides/customizing_what_happens_in_fit/).<jupyter_code>class CustomModel(keras.Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.accuracy = keras.metrics.SparseCategoricalAccuracy()
self.loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.optimizer = keras.optimizers.Adam(learning_rate=1e-3)
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
loss = self.loss_fn(y, y_pred)
gradients = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.trainable_weights))
# Update metrics (includes the metric that tracks the loss)
self.loss_tracker.update_state(loss)
self.accuracy.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {"loss": self.loss_tracker.result(), "accuracy": self.accuracy.result()}
@property
def metrics(self):
# We list our `Metric` objects here so that `reset_states()` can be
# called automatically at the start of each epoch.
return [self.loss_tracker, self.accuracy]
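# `evaluate()` and `predict()` can be customized in the same way. A minimal sketch of a
# `test_step()` override, reusing the attributes defined above (not part of the original
# example):
#
# def test_step(self, data):
#     x, y = data
#     y_pred = self(x, training=False)
#     self.loss_tracker.update_state(self.loss_fn(y, y_pred))
#     self.accuracy.update_state(y, y_pred)
#     return {m.name: m.result() for m in self.metrics}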
inputs = keras.Input(shape=(784,), dtype="float32")
x = keras.layers.Dense(32, activation="relu")(inputs)
x = keras.layers.Dense(32, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
model = CustomModel(inputs, outputs)
model.compile()
model.fit(dataset, epochs=2)<jupyter_output><empty_output><jupyter_text>End-to-end experiment example 1: variational autoencoders.Here are some of the things you've learned so far:- A `Layer` encapsulates a state (created in `__init__` or `build`) and some computation(defined in `call`).- Layers can be recursively nested to create new, bigger computation blocks.- You can easily write highly hackable training loops by opening a`GradientTape`, calling your model inside the tape's scope, then retrievinggradients and applying them via an optimizer.- You can speed up your training loops using the `@tf.function` decorator.- Layers can create and track losses (typically regularization losses) via`self.add_loss()`.Let's put all of these things together into an end-to-end example: we're going toimplement a Variational AutoEncoder (VAE). We'll train it on MNIST digits.Our VAE will be a subclass of `Layer`, built as a nested composition of layers thatsubclass `Layer`. It will feature a regularization loss (KL divergence). Below is our model definition.First, we have an `Encoder` class, which uses a `Sampling` layer to map a MNIST digit toa latent-space triplet `(z_mean, z_log_var, z)`.<jupyter_code>from tensorflow.keras import layers
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = keras.backend.random_normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
class Encoder(layers.Layer):
"""Maps MNIST digits to a triplet (z_mean, z_log_var, z)."""
def __init__(self, latent_dim=32, intermediate_dim=64, **kwargs):
super().__init__(**kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation=tf.nn.relu)
self.dense_mean = layers.Dense(latent_dim)
self.dense_log_var = layers.Dense(latent_dim)
self.sampling = Sampling()
def call(self, inputs):
x = self.dense_proj(inputs)
z_mean = self.dense_mean(x)
z_log_var = self.dense_log_var(x)
z = self.sampling((z_mean, z_log_var))
return z_mean, z_log_var, z<jupyter_output><empty_output><jupyter_text>Next, we have a `Decoder` class, which maps the probabilistic latent space coordinatesback to a MNIST digit.<jupyter_code>class Decoder(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(self, original_dim, intermediate_dim=64, **kwargs):
super().__init__(**kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation=tf.nn.relu)
self.dense_output = layers.Dense(original_dim, activation=tf.nn.sigmoid)
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)<jupyter_output><empty_output><jupyter_text>Finally, our `VariationalAutoEncoder` composes together an encoder and a decoder, andcreates a KL divergence regularization loss via `add_loss()`.<jupyter_code>class VariationalAutoEncoder(layers.Layer):
"""Combines the encoder and decoder into an end-to-end model for training."""
def __init__(self, original_dim, intermediate_dim=64, latent_dim=32, **kwargs):
super().__init__(**kwargs)
self.original_dim = original_dim
self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim)
self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim)
def call(self, inputs):
z_mean, z_log_var, z = self.encoder(inputs)
reconstructed = self.decoder(z)
# Add KL divergence regularization loss.
kl_loss = -0.5 * tf.reduce_mean(
z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1
)
self.add_loss(kl_loss)
return reconstructed<jupyter_output><empty_output><jupyter_text>Now, let's write a training loop. Our training step is decorated with a `@tf.function` tocompile into a super fast graph function.<jupyter_code># Our model.
vae = VariationalAutoEncoder(original_dim=784, intermediate_dim=64, latent_dim=32)
# Loss and optimizer.
loss_fn = keras.losses.MeanSquaredError()
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Prepare a dataset.
(x_train, _), _ = keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
x_train.reshape(60000, 784).astype("float32") / 255
)
dataset = dataset.shuffle(buffer_size=1024).batch(32)
@tf.function
def training_step(x):
with tf.GradientTape() as tape:
reconstructed = vae(x) # Compute input reconstruction.
# Compute loss.
loss = loss_fn(x, reconstructed)
loss += sum(vae.losses) # Add KLD term.
# Update the weights of the VAE.
grads = tape.gradient(loss, vae.trainable_weights)
optimizer.apply_gradients(zip(grads, vae.trainable_weights))
return loss
losses = [] # Keep track of the losses over time.
for step, x in enumerate(dataset):
loss = training_step(x)
# Logging.
losses.append(float(loss))
if step % 100 == 0:
print("Step:", step, "Loss:", sum(losses) / len(losses))
# Stop after 1000 steps.
# Training the model to convergence is left
# as an exercise to the reader.
if step >= 1000:
break<jupyter_output><empty_output><jupyter_text>As you can see, building and training this type of model in Keras is quick and painless. End-to-end experiment example 2: hypernetworks.Let's take a look at another kind of research experiment: hypernetworks.The idea is to use a small deep neural network (the hypernetwork) to generate the weights for a larger network (the main network).Let's implement a really trivial hypernetwork: we'll use a small 2-layer network to generate the weights of a larger 2-layer main network.<jupyter_code>import numpy as np
input_dim = 784
classes = 10
# This is the main network we'll actually use to predict labels.
main_network = keras.Sequential(
[
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dense(classes),
]
)
# It doesn't need to create its own weights, so let's mark its layers
# as already built. That way, calling `main_network` won't create new variables.
for layer in main_network.layers:
layer.built = True
# This is the number of weight coefficients to generate. Each layer in the
# main network requires output_dim * input_dim + output_dim coefficients.
num_weights_to_generate = (classes * 64 + classes) + (64 * input_dim + 64)
# This is the hypernetwork that generates the weights of the `main_network` above.
hypernetwork = keras.Sequential(
[
keras.layers.Dense(16, activation=tf.nn.relu),
keras.layers.Dense(num_weights_to_generate, activation=tf.nn.sigmoid),
]
)<jupyter_output><empty_output><jupyter_text>This is our training loop. For each batch of data:- We use `hypernetwork` to generate an array of weight coefficients, `weights_pred`- We reshape these coefficients into kernel & bias tensors for the `main_network`- We run the forward pass of the `main_network` to compute the actual MNIST predictions- We run backprop through the weights of the `hypernetwork` to minimize thefinal classification loss<jupyter_code># Loss and optimizer.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam(learning_rate=1e-4)
# Prepare a dataset.
(x_train, y_train), _ = keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(x_train.reshape(60000, 784).astype("float32") / 255, y_train)
)
# We'll use a batch size of 1 for this experiment.
dataset = dataset.shuffle(buffer_size=1024).batch(1)
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
# Predict weights for the outer model.
weights_pred = hypernetwork(x)
# Reshape them to the expected shapes for w and b for the outer model.
# Layer 0 kernel.
start_index = 0
w0_shape = (input_dim, 64)
w0_coeffs = weights_pred[:, start_index : start_index + np.prod(w0_shape)]
w0 = tf.reshape(w0_coeffs, w0_shape)
start_index += np.prod(w0_shape)
# Layer 0 bias.
b0_shape = (64,)
b0_coeffs = weights_pred[:, start_index : start_index + np.prod(b0_shape)]
b0 = tf.reshape(b0_coeffs, b0_shape)
start_index += np.prod(b0_shape)
# Layer 1 kernel.
w1_shape = (64, classes)
w1_coeffs = weights_pred[:, start_index : start_index + np.prod(w1_shape)]
w1 = tf.reshape(w1_coeffs, w1_shape)
start_index += np.prod(w1_shape)
# Layer 1 bias.
b1_shape = (classes,)
b1_coeffs = weights_pred[:, start_index : start_index + np.prod(b1_shape)]
b1 = tf.reshape(b1_coeffs, b1_shape)
start_index += np.prod(b1_shape)
# Set the weight predictions as the weight variables on the outer model.
main_network.layers[0].kernel = w0
main_network.layers[0].bias = b0
main_network.layers[1].kernel = w1
main_network.layers[1].bias = b1
# Inference on the outer model.
preds = main_network(x)
loss = loss_fn(y, preds)
# Train only inner model.
grads = tape.gradient(loss, hypernetwork.trainable_weights)
optimizer.apply_gradients(zip(grads, hypernetwork.trainable_weights))
return loss
losses = [] # Keep track of the losses over time.
for step, (x, y) in enumerate(dataset):
loss = train_step(x, y)
# Logging.
losses.append(float(loss))
if step % 100 == 0:
print("Step:", step, "Loss:", sum(losses) / len(losses))
# Stop after 1000 steps.
# Training the model to convergence is left
# as an exercise to the reader.
if step >= 1000:
break<jupyter_output><empty_output> | keras-io/guides/ipynb/intro_to_keras_for_researchers.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/intro_to_keras_for_researchers.ipynb",
"repo_id": "keras-io",
"token_count": 13867
} | 97 |
<jupyter_start><jupyter_text>Handling failed trials in KerasTuner**Authors:** Haifeng Jin**Date created:** 2023/02/28**Last modified:** 2023/02/28**Description:** The basics of fault tolerance configurations in KerasTuner. IntroductionA KerasTuner program may take a long time to run since each model may take along time to train. We do not want the program to fail just because some trialsfailed randomly.In this guide, we will show how to handle the failed trials in KerasTuner,including:* How to tolerate the failed trials during the search* How to mark a trial as failed during building and evaluating the model* How to terminate the search by raising a `FatalError` Setup<jupyter_code>!pip install keras-tuner -q
import keras
from keras import layers
import keras_tuner
import numpy as np<jupyter_output><empty_output><jupyter_text>Tolerate failed trialsWe will use the `max_retries_per_trial` and `max_consecutive_failed_trials`arguments when initializing the tuners.`max_retries_per_trial` controls the maximum number of retries to run if a trialkeeps failing. For example, if it is set to 3, the trial may run 4 times (1failed run + 3 failed retries) before it is finally marked as failed. Thedefault value of `max_retries_per_trial` is 0.`max_consecutive_failed_trials` controls how many consecutive failed trials(failed trial here refers to a trial that failed all of its retries) occurbefore terminating the search. For example, if it is set to 3 and Trial 2, Trial3, and Trial 4 all failed, the search would be terminated. However, if it is setto 3 and only Trial 2, Trial 3, Trial 5, and Trial 6 fail, the search would notbe terminated since the failed trials are not consecutive. The default value of`max_consecutive_failed_trials` is 3.The following code shows how these two arguments work in action.* We define a search space with 2 hyperparameters for the number of units in the 2 dense layers.* When their product is larger than 800, we raise a `ValueError` for the model too large.<jupyter_code>def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
raise ValueError(f"Model too large! It contains {num_params} params.")
return model<jupyter_output><empty_output><jupyter_text>We set up the tuner as follows.* We set `max_retries_per_trial=3`.* We set `max_consecutive_failed_trials=8`.* We use `GridSearch` to enumerate all hyperparameter value combinations.<jupyter_code>tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()<jupyter_output><empty_output><jupyter_text>Mark a trial as failedWhen the model is too large, we do not need to retry it. No matter how manytimes we try with the same hyperparameters, it is always too large.We can set `max_retries_per_trial=0` to do it. However, it will not retry nomatter what errors are raised while we may still want to retry for otherunexpected errors. Is there a way to better handle this situation?We can raise the `FailedTrialError` to skip the retries. Whenever, this error israised, the trial would not be retried. The retries will still run when othererrors occur. An example is shown as follows.<jupyter_code>def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, it skips the retries.
raise keras_tuner.errors.FailedTrialError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()<jupyter_output><empty_output><jupyter_text>Terminate the search programmaticallyWhen there is a bug in the code we should terminate the search immediately andfix the bug. You can terminate the search programmatically when your definedconditions are met. Raising a `FatalError` (or its subclasses `FatalValueError`,`FatalTypeError`, or `FatalRuntimeError`) will terminate the search regardlessof the `max_consecutive_failed_trials` argument.Following is an example to terminate the search when the model is too large.<jupyter_code>def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, the search is terminated.
raise keras_tuner.errors.FatalError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
try:
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
except keras_tuner.errors.FatalError:
print("The search is terminated.")<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_tuner/failed_trials.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_tuner/failed_trials.ipynb",
"repo_id": "keras-io",
"token_count": 2530
} | 98 |
<jupyter_start><jupyter_text>Writing a training loop from scratch in PyTorch**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2023/06/25**Last modified:** 2023/06/25**Description:** Writing low-level training & evaluation loops in PyTorch.<jupyter_code>!pip install keras==3.0.0 --upgrade --quiet<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import os
# This guide can only be run with the torch backend.
os.environ["KERAS_BACKEND"] = "torch"
import torch
import keras
import numpy as np<jupyter_output><empty_output><jupyter_text>IntroductionKeras provides default training and evaluation loops, `fit()` and `evaluate()`.Their usage is covered in the guide[Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/).If you want to customize the learning algorithm of your model while still leveragingthe convenience of `fit()`(for instance, to train a GAN using `fit()`), you can subclass the `Model` class andimplement your own `train_step()` method, whichis called repeatedly during `fit()`.Now, if you want very low-level control over training & evaluation, you should writeyour own training & evaluation loops from scratch. This is what this guide is about. A first end-to-end exampleTo write a custom training loop, we need the following ingredients:- A model to train, of course.- An optimizer. You could either use a `keras.optimizers` optimizer,or a native PyTorch optimizer from `torch.optim`.- A loss function. You could either use a `keras.losses` loss,or a native PyTorch loss from `torch.nn`.- A dataset. You could use any format: a `tf.data.Dataset`,a PyTorch `DataLoader`, a Python generator, etc.Let's line them up. We'll use torch-native objects in each case --except, of course, for the Keras model.First, let's get the model and the MNIST dataset:<jupyter_code># Let's consider a simple MNIST model
def get_model():
inputs = keras.Input(shape=(784,), name="digits")
x1 = keras.layers.Dense(64, activation="relu")(inputs)
x2 = keras.layers.Dense(64, activation="relu")(x1)
outputs = keras.layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
# Load the MNIST dataset and put it in a torch DataLoader
# Prepare the training dataset.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)).astype("float32")
x_test = np.reshape(x_test, (-1, 784)).astype("float32")
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Create torch Datasets
train_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_val), torch.from_numpy(y_val)
)
# Create DataLoaders for the Datasets
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False
)<jupyter_output><empty_output><jupyter_text>Next, here's our PyTorch optimizer and our PyTorch loss function:<jupyter_code># Instantiate a torch optimizer
model = get_model()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Instantiate a torch loss function
loss_fn = torch.nn.CrossEntropyLoss()<jupyter_output><empty_output><jupyter_text>Let's train our model using mini-batch gradient descent with a custom training loop.Calling `loss.backward()` on a loss tensor triggers backpropagation.Once that's done, your optimizer is magically aware of the gradients for each variable and can update its variables, which is done via `optimizer.step()`.Tensors, variables, and optimizers are all interconnected to one another via hidden global state.Also, don't forget to call `model.zero_grad()` before `loss.backward()`, or you won't get the right gradients for your variables.Here's our training loop, step by step:- We open a `for` loop that iterates over epochs- For each epoch, we open a `for` loop that iterates over the dataset, in batches- For each batch, we call the model on the input data to retrieve the predictions, then we use them to compute a loss value- We call `loss.backward()` to compute the gradients of the loss with respect to the model's weights- Finally, we use the optimizer to update the weights of the model based on these gradients<jupyter_code>epochs = 3
for epoch in range(epochs):
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(logits, targets)
# Backward pass
model.zero_grad()
loss.backward()
# Optimizer variable updates
optimizer.step()
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")<jupyter_output><empty_output><jupyter_text>As an alternative, let's look at what the loop looks like when using a Keras optimizerand a Keras loss function.Important differences:- You retrieve the gradients for the variables via `v.value.grad`,called on each trainable variable.- You update your variables via `optimizer.apply()`, which must becalled in a `torch.no_grad()` scope.**Also, a big gotcha:** while all NumPy/TensorFlow/JAX/Keras APIsas well as Python `unittest` APIs use the argument order convention`fn(y_true, y_pred)` (reference values first, predicted values second),PyTorch actually uses `fn(y_pred, y_true)` for its losses.So make sure to invert the order of `logits` and `targets`.<jupyter_code>model = get_model()
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")<jupyter_output><empty_output><jupyter_text>Low-level handling of metricsLet's add metrics monitoring to this basic training loop.You can readily reuse built-in Keras metrics (or custom ones you wrote) in such trainingloops written from scratch. Here's the flow:- Instantiate the metric at the start of the loop- Call `metric.update_state()` after each batch- Call `metric.result()` when you need to display the current value of the metric- Call `metric.reset_state()` when you need to clear the state of the metric(typically at the end of an epoch)Let's use this knowledge to compute `CategoricalAccuracy` on training andvalidation data at the end of each epoch:<jupyter_code># Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()<jupyter_output><empty_output><jupyter_text>Here's our training & evaluation loop:<jupyter_code>for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")<jupyter_output><empty_output><jupyter_text>Low-level handling of losses tracked by the modelLayers & models recursively track any losses created during the forward passby layers that call `self.add_loss(value)`. The resulting list of scalar lossvalues are available via the property `model.losses`at the end of the forward pass.If you want to be using these loss components, you should sum themand add them to the main loss in your training step.Consider this layer, that creates an activity regularization loss:<jupyter_code>class ActivityRegularizationLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * torch.sum(inputs))
return inputs<jupyter_output><empty_output><jupyter_text>Let's build a really simple model that uses it:<jupyter_code>inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)<jupyter_output><empty_output><jupyter_text>Here's what our training loop should look like now:<jupyter_code># Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
if model.losses:
loss = loss + sum(model.losses)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")<jupyter_output><empty_output> | keras-io/guides/ipynb/writing_a_custom_training_loop_in_torch.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/writing_a_custom_training_loop_in_torch.ipynb",
"repo_id": "keras-io",
"token_count": 4736
} | 99 |
"""
Title: Handling failed trials in KerasTuner
Authors: Haifeng Jin
Date created: 2023/02/28
Last modified: 2023/02/28
Description: The basics of fault tolerance configurations in KerasTuner.
Accelerator: GPU
"""
"""
## Introduction
A KerasTuner program may take a long time to run since each model may take a
long time to train. We do not want the program to fail just because some trials
failed randomly.
In this guide, we will show how to handle the failed trials in KerasTuner,
including:
* How to tolerate the failed trials during the search
* How to mark a trial as failed during building and evaluating the model
* How to terminate the search by raising a `FatalError`
"""
"""
## Setup
"""
"""shell
pip install keras-tuner -q
"""
import keras
from keras import layers
import keras_tuner
import numpy as np
"""
## Tolerate failed trials
We will use the `max_retries_per_trial` and `max_consecutive_failed_trials`
arguments when initializing the tuners.
`max_retries_per_trial` controls the maximum number of retries to run if a trial
keeps failing. For example, if it is set to 3, the trial may run 4 times (1
failed run + 3 failed retries) before it is finally marked as failed. The
default value of `max_retries_per_trial` is 0.
`max_consecutive_failed_trials` controls how many consecutive failed trials
(failed trial here refers to a trial that failed all of its retries) occur
before terminating the search. For example, if it is set to 3 and Trial 2, Trial
3, and Trial 4 all failed, the search would be terminated. However, if it is set
to 3 and only Trial 2, Trial 3, Trial 5, and Trial 6 fail, the search would not
be terminated since the failed trials are not consecutive. The default value of
`max_consecutive_failed_trials` is 3.
The following code shows how these two arguments work in action.
* We define a search space with 2 hyperparameters for the number of units in the
2 dense layers.
* When the model has more than 1200 parameters, we raise a `ValueError` to signal
that the model is too large.
"""
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
raise ValueError(f"Model too large! It contains {num_params} params.")
return model
"""
We set up the tuner as follows.
* We set `max_retries_per_trial=3`.
* We set `max_consecutive_failed_trials=8`.
* We use `GridSearch` to enumerate all hyperparameter value combinations.
"""
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()
"""
## Mark a trial as failed
When the model is too large, we do not need to retry it. No matter how many
times we try with the same hyperparameters, it is always too large.
We can set `max_retries_per_trial=0` to do it. However, it will not retry no
matter what errors are raised while we may still want to retry for other
unexpected errors. Is there a way to better handle this situation?
We can raise the `FailedTrialError` to skip the retries. Whenever this error is
raised, the trial is not retried. Retries will still run when other
errors occur. An example is shown below.
"""
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, it skips the retries.
raise keras_tuner.errors.FailedTrialError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()
"""
## Terminate the search programmatically
When there is a bug in the code, we should terminate the search immediately and
fix the bug. You can terminate the search programmatically when your defined
conditions are met. Raising a `FatalError` (or its subclasses `FatalValueError`,
`FatalTypeError`, or `FatalRuntimeError`) will terminate the search regardless
of the `max_consecutive_failed_trials` argument.
Following is an example to terminate the search when the model is too large.
"""
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, the search is terminated.
raise keras_tuner.errors.FatalError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
try:
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
except keras_tuner.errors.FatalError:
print("The search is terminated.")
"""
## Takeaways
In this guide, you learn how to handle failed trials in KerasTuner:
* Use `max_retries_per_trial` to specify the number of retries for a failed
trial.
* Use `max_consecutive_failed_trials` to specify the maximum consecutive failed
trials to tolerate.
* Raise `FailedTrialError` to directly mark a trial as failed and skip the
retries.
* Raise `FatalError`, `FatalValueError`, `FatalTypeError`, `FatalRuntimeError`
to terminate the search immediately.
"""
| keras-io/guides/keras_tuner/failed_trials.py/0 | {
"file_path": "keras-io/guides/keras_tuner/failed_trials.py",
"repo_id": "keras-io",
"token_count": 2656
} | 100 |
# Getting started with KerasTuner
**Authors:** Luca Invernizzi, James Long, Francois Chollet, Tom O'Malley, Haifeng Jin<br>
**Date created:** 2019/05/31<br>
**Last modified:** 2021/10/27<br>
**Description:** The basics of using KerasTuner to tune model hyperparameters.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_tuner/getting_started.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_tuner/getting_started.py)
```python
!pip install keras-tuner -q
```
---
## Introduction
KerasTuner is a general-purpose hyperparameter tuning library. It has strong
integration with Keras workflows, but it isn't limited to them: you could use
it to tune scikit-learn models, or anything else. In this tutorial, you will
see how to tune model architecture, training process, and data preprocessing
steps with KerasTuner. Let's start from a simple example.
---
## Tune the model architecture
The first thing we need to do is write a function that returns a compiled
Keras model. It takes an argument `hp`, used to define the hyperparameters while
building the model.
### Define the search space
In the following code example, we define a Keras model with two `Dense` layers.
We want to tune the number of units in the first `Dense` layer. We just define
an integer hyperparameter with `hp.Int('units', min_value=32, max_value=512, step=32)`,
whose range is from 32 to 512 inclusive. When sampling from it, the minimum
step for walking through the interval is 32.
```python
import keras
from keras import layers
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
# Define the hyperparameter.
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
```
You can quickly test if the model builds successfully.
```python
import keras_tuner
build_model(keras_tuner.HyperParameters())
```
<div class="k-default-codeblock">
```
<Sequential name=sequential, built=False>
```
</div>
There are many other types of hyperparameters as well. We can define multiple
hyperparameters in the function. In the following code, we tune whether to
use a `Dropout` layer with `hp.Boolean()`, tune which activation function to
use with `hp.Choice()`, tune the learning rate of the optimizer with
`hp.Float()`.
```python
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
# Tune number of units.
units=hp.Int("units", min_value=32, max_value=512, step=32),
# Tune the activation function to use.
activation=hp.Choice("activation", ["relu", "tanh"]),
)
)
# Tune whether to use dropout.
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
# Define the optimizer learning rate as a hyperparameter.
learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
build_model(keras_tuner.HyperParameters())
```
<div class="k-default-codeblock">
```
<Sequential name=sequential_1, built=False>
```
</div>
As shown below, the hyperparameters are actual values. In fact, they are just
functions returning actual values. For example, `hp.Int()` returns an `int`
value. Therefore, you can put them into variables, for loops, or if
conditions.
```python
hp = keras_tuner.HyperParameters()
print(hp.Int("units", min_value=32, max_value=512, step=32))
```
<div class="k-default-codeblock">
```
32
```
</div>
You can also define the hyperparameters in advance and keep your Keras code in
a separate function.
```python
def call_existing_code(units, activation, dropout, lr):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(layers.Dense(units=units, activation=activation))
if dropout:
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def build_model(hp):
units = hp.Int("units", min_value=32, max_value=512, step=32)
activation = hp.Choice("activation", ["relu", "tanh"])
dropout = hp.Boolean("dropout")
lr = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
# call existing model-building code with the hyperparameter values.
model = call_existing_code(
units=units, activation=activation, dropout=dropout, lr=lr
)
return model
build_model(keras_tuner.HyperParameters())
```
<div class="k-default-codeblock">
```
<Sequential name=sequential_2, built=False>
```
</div>
Each of the hyperparameters is uniquely identified by its name (the first
argument). To tune the number of units in different `Dense` layers separately
as different hyperparameters, we give them different names, such as `f"units_{i}"`.
Notably, this is also an example of creating conditional hyperparameters.
There are many hyperparameters specifying the number of units in the `Dense`
layers. The number of such hyperparameters is decided by the number of layers,
which is also a hyperparameter. Therefore, the total number of hyperparameters
used may be different from trial to trial. Some hyperparameters are only used
when a certain condition is satisfied. For example, `units_2` is only used
when `num_layers` is larger than 2. With KerasTuner, you can easily define
such hyperparameters dynamically while creating the model.
```python
def build_model(hp):
model = keras.Sequential()
model.add(layers.Flatten())
# Tune the number of layers.
for i in range(hp.Int("num_layers", 1, 3)):
model.add(
layers.Dense(
# Tune number of units separately.
units=hp.Int(f"units_{i}", min_value=32, max_value=512, step=32),
activation=hp.Choice("activation", ["relu", "tanh"]),
)
)
if hp.Boolean("dropout"):
model.add(layers.Dropout(rate=0.25))
model.add(layers.Dense(10, activation="softmax"))
learning_rate = hp.Float("lr", min_value=1e-4, max_value=1e-2, sampling="log")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
build_model(keras_tuner.HyperParameters())
```
<div class="k-default-codeblock">
```
<Sequential name=sequential_3, built=False>
```
</div>
### Start the search
After defining the search space, we need to select a tuner class to run the
search. You may choose from `RandomSearch`, `BayesianOptimization` and
`Hyperband`, which correspond to different tuning algorithms. Here we use
`RandomSearch` as an example.
To initialize the tuner, we need to specify several arguments in the initializer.
* `hypermodel`. The model-building function, which is `build_model` in our case.
* `objective`. The name of the objective to optimize (whether to minimize or
maximize is automatically inferred for built-in metrics). We will introduce how
to use custom metrics later in this tutorial.
* `max_trials`. The total number of trials to run during the search.
* `executions_per_trial`. The number of models that should be built and fit for
each trial. Different trials have different hyperparameter values. The
executions within the same trial have the same hyperparameter values. The
purpose of having multiple executions per trial is to reduce results variance
and therefore be able to more accurately assess the performance of a model. If
you want to get results faster, you could set `executions_per_trial=1` (single
round of training for each model configuration).
* `overwrite`. Control whether to overwrite the previous results in the same
directory or resume the previous search instead. Here we set `overwrite=True`
to start a new search and ignore any previous results.
* `directory`. A path to a directory for storing the search results.
* `project_name`. The name of the sub-directory in the `directory`.
```python
tuner = keras_tuner.RandomSearch(
hypermodel=build_model,
objective="val_accuracy",
max_trials=3,
executions_per_trial=2,
overwrite=True,
directory="my_dir",
project_name="helloworld",
)
```
You can print a summary of the search space:
```python
tuner.search_space_summary()
```
<div class="k-default-codeblock">
```
Search space summary
Default search space size: 5
num_layers (Int)
{'default': None, 'conditions': [], 'min_value': 1, 'max_value': 3, 'step': 1, 'sampling': 'linear'}
units_0 (Int)
{'default': None, 'conditions': [], 'min_value': 32, 'max_value': 512, 'step': 32, 'sampling': 'linear'}
activation (Choice)
{'default': 'relu', 'conditions': [], 'values': ['relu', 'tanh'], 'ordered': False}
dropout (Boolean)
{'default': False, 'conditions': []}
lr (Float)
{'default': 0.0001, 'conditions': [], 'min_value': 0.0001, 'max_value': 0.01, 'step': None, 'sampling': 'log'}
```
</div>
Before starting the search, let's prepare the MNIST dataset.
```python
import keras
import numpy as np
(x, y), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x[:-10000]
x_val = x[-10000:]
y_train = y[:-10000]
y_val = y[-10000:]
x_train = np.expand_dims(x_train, -1).astype("float32") / 255.0
x_val = np.expand_dims(x_val, -1).astype("float32") / 255.0
x_test = np.expand_dims(x_test, -1).astype("float32") / 255.0
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
```
Then, start the search for the best hyperparameter configuration.
All the arguments passed to `search` are passed to `model.fit()` in each
execution. Remember to pass `validation_data` to evaluate the model.
```python
tuner.search(x_train, y_train, epochs=2, validation_data=(x_val, y_val))
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 19s]
val_accuracy: 0.9665500223636627
```
</div>
<div class="k-default-codeblock">
```
Best val_accuracy So Far: 0.9665500223636627
Total elapsed time: 00h 00m 40s
```
</div>
During the `search`, the model-building function is called with different
hyperparameter values in different trials. In each trial, the tuner will
generate a new set of hyperparameter values to build the model. The model is
then fit and evaluated. The metrics are recorded. The tuner progressively
explores the space and finally finds a good set of hyperparameter values.
### Query the results
When the search is over, you can retrieve the best model(s). The model is saved at
its best performing epoch evaluated on the `validation_data`.
```python
# Get the top 2 models.
models = tuner.get_best_models(num_models=2)
best_model = models[0]
best_model.summary()
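# The retrieved model can be used right away; for example, this optional check
# (not run here) would report test-set performance:
# best_model.evaluate(x_test, y_test)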
```
<div class="k-default-codeblock">
```
/usr/local/python/3.10.13/lib/python3.10/site-packages/keras/src/saving/saving_lib.py:388: UserWarning: Skipping variable loading for optimizer 'adam', because it has 2 variables whereas the saved optimizer has 18 variables.
trackable.load_own_variables(weights_store.get(inner_path))
/usr/local/python/3.10.13/lib/python3.10/site-packages/keras/src/saving/saving_lib.py:388: UserWarning: Skipping variable loading for optimizer 'adam', because it has 2 variables whereas the saved optimizer has 10 variables.
trackable.load_own_variables(weights_store.get(inner_path))
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">784</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">416</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">326,560</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">213,504</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,416</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">330</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">556,810</span> (2.12 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">556,810</span> (2.12 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
You can also print a summary of the search results.
```python
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Results summary
Results in my_dir/helloworld
Showing 10 best trials
Objective(name="val_accuracy", direction="max")
```
</div>
<div class="k-default-codeblock">
```
Trial 2 summary
Hyperparameters:
num_layers: 3
units_0: 416
activation: relu
dropout: True
lr: 0.0001324166048504802
units_1: 512
units_2: 32
Score: 0.9665500223636627
```
</div>
<div class="k-default-codeblock">
```
Trial 0 summary
Hyperparameters:
num_layers: 1
units_0: 128
activation: tanh
dropout: False
lr: 0.001425162921397599
Score: 0.9623999893665314
```
</div>
<div class="k-default-codeblock">
```
Trial 1 summary
Hyperparameters:
num_layers: 2
units_0: 512
activation: tanh
dropout: True
lr: 0.0010584293918512798
units_1: 32
Score: 0.9606499969959259
```
</div>
You will find detailed logs, checkpoints, etc, in the folder
`my_dir/helloworld`, i.e. `directory/project_name`.
You can also visualize the tuning results using TensorBoard and HParams plugin.
For more information, please follow
[this link](https://keras.io/guides/keras_tuner/visualize_tuning/).
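As a quick sketch of how that hooks in (the log directory below is just a placeholder
path), you would pass a TensorBoard callback to `search()` and point TensorBoard at
that directory:
```python
# Logs written during the search, including the hyperparameters of each trial,
# will show up in TensorBoard and its HParams plugin.
tuner.search(
    x_train,
    y_train,
    epochs=2,
    validation_data=(x_val, y_val),
    callbacks=[keras.callbacks.TensorBoard("/tmp/tb_logs")],
)
```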
### Retrain the model
If you want to train the model with the entire dataset, you may retrieve the
best hyperparameters and retrain the model by yourself.
```python
# Get the top 5 hyperparameters.
best_hps = tuner.get_best_hyperparameters(5)
# Build the model with the best hp.
model = build_model(best_hps[0])
# Fit with the entire dataset.
x_all = np.concatenate((x_train, x_val))
y_all = np.concatenate((y_train, y_val))
model.fit(x=x_all, y=y_all, epochs=1)
```
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - accuracy: 0.7629 - loss: 0.7858
<div class="k-default-codeblock">
```
<keras.src.callbacks.history.History at 0x7f31883d9e10>
```
</div>
---
## Tune model training
To tune the model building process, we need to subclass the `HyperModel` class,
which also makes it easy to share and reuse hypermodels.
We need to override `HyperModel.build()` and `HyperModel.fit()` to tune the
model building and training process respectively. A `HyperModel.build()`
method is the same as the model-building function, which creates a Keras model
using the hyperparameters and returns it.
In `HyperModel.fit()`, you can access the model returned by
`HyperModel.build()`, the `hp` object, and all the arguments passed to `search()`.
You need to train the model and return the training history.
In the following code, we will tune the `shuffle` argument in `model.fit()`.
It is generally not needed to tune the number of epochs because a built-in
callback is passed to `model.fit()` to save the model at its best epoch,
as evaluated on the `validation_data`.
> **Note**: The `**kwargs` should always be passed to `model.fit()` because it
contains the callbacks for model saving and the TensorBoard plugin.
```python
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def fit(self, hp, model, *args, **kwargs):
return model.fit(
*args,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
**kwargs,
)
```
Again, we can do a quick check to see if the code works correctly.
```python
hp = keras_tuner.HyperParameters()
hypermodel = MyHyperModel()
model = hypermodel.build(hp)
hypermodel.fit(hp, model, np.random.rand(100, 28, 28), np.random.rand(100, 10))
```
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 109ms/step - accuracy: 0.0763 - loss: 11.8941
<div class="k-default-codeblock">
```
<keras.src.callbacks.history.History at 0x7f318865c100>
```
</div>
---
## Tune data preprocessing
To tune data preprocessing, we just add an additional step in
`HyperModel.fit()`, where we can access the dataset from the arguments. In the
following code, we tune whether to normalize the data before training the
model. This time we explicitly put `x` and `y` in the function signature
because we need to use them.
```python
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential()
model.add(layers.Flatten())
model.add(
layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)
)
model.add(layers.Dense(10, activation="softmax"))
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def fit(self, hp, model, x, y, **kwargs):
if hp.Boolean("normalize"):
x = layers.Normalization()(x)
return model.fit(
x,
y,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
**kwargs,
)
hp = keras_tuner.HyperParameters()
hypermodel = MyHyperModel()
model = hypermodel.build(hp)
hypermodel.fit(hp, model, np.random.rand(100, 28, 28), np.random.rand(100, 10))
```
4/4 ━━━━━━━━━━━━━━━━━━━━ 1s 95ms/step - accuracy: 0.0955 - loss: 12.1594
<div class="k-default-codeblock">
```
<keras.src.callbacks.history.History at 0x7f31ba836200>
```
</div>
If a hyperparameter is used both in `build()` and `fit()`, you can define it in
`build()` and use `hp.get(hp_name)` to retrieve it in `fit()`. We use the
image size as an example. It is used both as the input shape in `build()`, and
by the data preprocessing step to crop the images in `fit()`.
```python
class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
image_size = hp.Int("image_size", 10, 28)
inputs = keras.Input(shape=(image_size, image_size))
outputs = layers.Flatten()(inputs)
outputs = layers.Dense(
units=hp.Int("units", min_value=32, max_value=512, step=32),
activation="relu",
)(outputs)
outputs = layers.Dense(10, activation="softmax")(outputs)
model = keras.Model(inputs, outputs)
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def fit(self, hp, model, x, y, validation_data=None, **kwargs):
if hp.Boolean("normalize"):
x = layers.Normalization()(x)
image_size = hp.get("image_size")
cropped_x = x[:, :image_size, :image_size, :]
if validation_data:
x_val, y_val = validation_data
cropped_x_val = x_val[:, :image_size, :image_size, :]
validation_data = (cropped_x_val, y_val)
return model.fit(
cropped_x,
y,
# Tune whether to shuffle the data in each epoch.
shuffle=hp.Boolean("shuffle"),
validation_data=validation_data,
**kwargs,
)
tuner = keras_tuner.RandomSearch(
MyHyperModel(),
objective="val_accuracy",
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="tune_hypermodel",
)
tuner.search(x_train, y_train, epochs=2, validation_data=(x_val, y_val))
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 04s]
val_accuracy: 0.9567000269889832
```
</div>
<div class="k-default-codeblock">
```
Best val_accuracy So Far: 0.9685999751091003
Total elapsed time: 00h 00m 13s
```
</div>
### Retrain the model
Using `HyperModel` also allows you to retrain the best model by yourself.
```python
hypermodel = MyHyperModel()
best_hp = tuner.get_best_hyperparameters()[0]
model = hypermodel.build(best_hp)
hypermodel.fit(best_hp, model, x_all, y_all, epochs=1)
```
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 2s 808us/step - accuracy: 0.8987 - loss: 0.3456
<div class="k-default-codeblock">
```
<keras.src.callbacks.history.History at 0x7f31884b3070>
```
</div>
---
## Specify the tuning objective
In all previous examples, we just used validation accuracy
(`"val_accuracy"`) as the tuning objective to select the best model. In fact,
you can use any metric as the objective. The most commonly used metric is
`"val_loss"`, which is the validation loss.
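For instance, here is a brief sketch (reusing the `build_model` function defined
earlier in this guide; the project name below is an arbitrary choice) of a tuner
that selects the best model by validation loss; KerasTuner already knows the
direction for common metrics such as `"val_loss"`:

```python
# A minimal sketch: select the best model by validation loss.
# `build_model` is the model-building function defined earlier in this guide.
tuner = keras_tuner.RandomSearch(
    hypermodel=build_model,
    objective="val_loss",  # "val_loss" is automatically minimized.
    max_trials=3,
    overwrite=True,
    directory="my_dir",
    project_name="val_loss_objective",
)
```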
### Built-in metric as the objective
There are many other built-in metrics in Keras you can use as the objective.
Here is [a list of the built-in metrics](https://keras.io/api/metrics/).
To use a built-in metric as the objective, you need to follow these steps:
* Compile the model with the built-in metric. For example, if you want to use
`MeanAbsoluteError()`, you need to compile the model with
`metrics=[MeanAbsoluteError()]`. You may also use its name string instead:
`metrics=["mean_absolute_error"]`. The name string of the metric is always
the snake case of the class name.
* Identify the objective name string. The name string of the objective is
always in the format of `f"val_{metric_name_string}"`. For example, the
objective name string of mean absolute error evaluated on the validation data
is `"val_mean_absolute_error"`.
* Wrap it into `keras_tuner.Objective`. We usually need to wrap the objective
into a `keras_tuner.Objective` object to specify the direction to optimize the
objective. For example, to minimize the mean absolute error, we can use
`keras_tuner.Objective("val_mean_absolute_error", "min")`. The direction should
be either `"min"` or `"max"`.
* Pass the wrapped objective to the tuner.
You can see the following bare-bones code example.
```python
def build_regressor(hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
# Objective is one of the metrics.
metrics=[keras.metrics.MeanAbsoluteError()],
)
return model
tuner = keras_tuner.RandomSearch(
hypermodel=build_regressor,
# The objective name and direction.
# Name is the f"val_{snake_case_metric_class_name}".
objective=keras_tuner.Objective("val_mean_absolute_error", direction="min"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="built_in_metrics",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 01s]
val_mean_absolute_error: 0.39589792490005493
```
</div>
<div class="k-default-codeblock">
```
Best val_mean_absolute_error So Far: 0.34321871399879456
Total elapsed time: 00h 00m 03s
Results summary
Results in my_dir/built_in_metrics
Showing 10 best trials
Objective(name="val_mean_absolute_error", direction="min")
```
</div>
<div class="k-default-codeblock">
```
Trial 1 summary
Hyperparameters:
units: 32
Score: 0.34321871399879456
```
</div>
<div class="k-default-codeblock">
```
Trial 2 summary
Hyperparameters:
units: 128
Score: 0.39589792490005493
```
</div>
<div class="k-default-codeblock">
```
Trial 0 summary
Hyperparameters:
units: 96
Score: 0.5005304217338562
```
</div>
### Custom metric as the objective
You may implement your own metric and use it as the hyperparameter search
objective. Here, we use mean squared error (MSE) as an example. First, we
implement the MSE metric by subclassing `keras.metrics.Metric`. Remember to
give a name to your metric using the `name` argument of `super().__init__()`,
which will be used later. Note: MSE is actually a built-in metric, which can be
imported with `keras.metrics.MeanSquaredError`. This is just an example to show
how to use a custom metric as the hyperparameter search objective.
For more information about implementing custom metrics, please see [this
tutorial](https://keras.io/api/metrics/#creating-custom-metrics). If you would
like a metric with a different function signature than `update_state(y_true,
y_pred, sample_weight)`, you can override the `train_step()` method of your
model following [this
tutorial](https://keras.io/guides/customizing_what_happens_in_fit/#going-lowerlevel).
```python
from keras import ops
class CustomMetric(keras.metrics.Metric):
def __init__(self, **kwargs):
# Specify the name of the metric as "custom_metric".
super().__init__(name="custom_metric", **kwargs)
self.sum = self.add_weight(name="sum", initializer="zeros")
self.count = self.add_weight(name="count", dtype="int32", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
values = ops.square(y_true - y_pred)
count = ops.shape(y_true)[0]
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, self.dtype)
values *= sample_weight
count *= sample_weight
self.sum.assign_add(ops.sum(values))
self.count.assign_add(count)
def result(self):
return self.sum / ops.cast(self.count, "float32")
def reset_states(self):
self.sum.assign(0)
self.count.assign(0)
```
Run the search with the custom objective.
```python
def build_regressor(hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
# Put custom metric into the metrics.
metrics=[CustomMetric()],
)
return model
tuner = keras_tuner.RandomSearch(
hypermodel=build_regressor,
# Specify the name and direction of the objective.
objective=keras_tuner.Objective("val_custom_metric", direction="min"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_metrics",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 01s]
val_custom_metric: 0.2830956280231476
```
</div>
<div class="k-default-codeblock">
```
Best val_custom_metric So Far: 0.2529197633266449
Total elapsed time: 00h 00m 02s
Results summary
Results in my_dir/custom_metrics
Showing 10 best trials
Objective(name="val_custom_metric", direction="min")
```
</div>
<div class="k-default-codeblock">
```
Trial 0 summary
Hyperparameters:
units: 32
Score: 0.2529197633266449
```
</div>
<div class="k-default-codeblock">
```
Trial 2 summary
Hyperparameters:
units: 128
Score: 0.2830956280231476
```
</div>
<div class="k-default-codeblock">
```
Trial 1 summary
Hyperparameters:
units: 96
Score: 0.4656866192817688
```
</div>
If your custom objective is hard to put into a custom metric, you can also
evaluate the model yourself in `HyperModel.fit()` and return the objective
value. The objective value is minimized by default. In this case, you
don't need to specify the `objective` when initializing the tuner. However,
the metric value will then only be tracked in the KerasTuner logs, not in the
Keras logs. Therefore, these values would not be displayed in any
TensorBoard view that uses the Keras metrics.
```python
class HyperRegressor(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
)
return model
def fit(self, hp, model, x, y, validation_data, **kwargs):
model.fit(x, y, **kwargs)
x_val, y_val = validation_data
y_pred = model.predict(x_val)
# Return a single float to minimize.
return np.mean(np.abs(y_pred - y_val))
tuner = keras_tuner.RandomSearch(
hypermodel=HyperRegressor(),
# No objective to specify.
# Objective is the return value of `HyperModel.fit()`.
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_eval",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 01s]
default_objective: 0.6571611521766413
```
</div>
<div class="k-default-codeblock">
```
Best default_objective So Far: 0.40719249752993525
Total elapsed time: 00h 00m 02s
Results summary
Results in my_dir/custom_eval
Showing 10 best trials
Objective(name="default_objective", direction="min")
```
</div>
<div class="k-default-codeblock">
```
Trial 1 summary
Hyperparameters:
units: 128
Score: 0.40719249752993525
```
</div>
<div class="k-default-codeblock">
```
Trial 0 summary
Hyperparameters:
units: 96
Score: 0.4992297225533352
```
</div>
<div class="k-default-codeblock">
```
Trial 2 summary
Hyperparameters:
units: 32
Score: 0.6571611521766413
```
</div>
If you have multiple metrics to track in KerasTuner, but only use one of them
as the objective, you can return a dictionary whose keys are the metric names
and whose values are the metric values, for example, return `{"metric_a": 1.0,
"metric_b": 2.0}`. Use one of the keys as the objective name, for example,
`keras_tuner.Objective("metric_a", "min")`.
```python
class HyperRegressor(keras_tuner.HyperModel):
def build(self, hp):
model = keras.Sequential(
[
layers.Dense(units=hp.Int("units", 32, 128, 32), activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer="adam",
loss="mean_squared_error",
)
return model
def fit(self, hp, model, x, y, validation_data, **kwargs):
model.fit(x, y, **kwargs)
x_val, y_val = validation_data
y_pred = model.predict(x_val)
# Return a dictionary of metrics for KerasTuner to track.
return {
"metric_a": -np.mean(np.abs(y_pred - y_val)),
"metric_b": np.mean(np.square(y_pred - y_val)),
}
tuner = keras_tuner.RandomSearch(
hypermodel=HyperRegressor(),
# Objective is one of the keys.
# Maximize the negative MAE, equivalent to minimize MAE.
objective=keras_tuner.Objective("metric_a", "max"),
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="custom_eval_dict",
)
tuner.search(
x=np.random.rand(100, 10),
y=np.random.rand(100, 1),
validation_data=(np.random.rand(20, 10), np.random.rand(20, 1)),
)
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 01s]
metric_a: -0.39470441501524833
```
</div>
<div class="k-default-codeblock">
```
Best metric_a So Far: -0.3836997988261662
Total elapsed time: 00h 00m 02s
Results summary
Results in my_dir/custom_eval_dict
Showing 10 best trials
Objective(name="metric_a", direction="max")
```
</div>
<div class="k-default-codeblock">
```
Trial 1 summary
Hyperparameters:
units: 64
Score: -0.3836997988261662
```
</div>
<div class="k-default-codeblock">
```
Trial 2 summary
Hyperparameters:
units: 32
Score: -0.39470441501524833
```
</div>
<div class="k-default-codeblock">
```
Trial 0 summary
Hyperparameters:
units: 96
Score: -0.46081380465766364
```
</div>
---
## Tune end-to-end workflows
In some cases, it is hard to split your code into build and fit functions. You
can also keep your end-to-end workflow in one place by overriding
`Tuner.run_trial()`, which gives you full control of a trial. You can see it
as a black-box optimizer for anything.
### Tune any function
For example, you can find the value of `x` that minimizes `f(x)=x*x+1`. In the
following code, we just define `x` as a hyperparameter, and return `f(x)` as
the objective value. The `hypermodel` and `objective` arguments for initializing
the tuner can be omitted.
```python
class MyTuner(keras_tuner.RandomSearch):
def run_trial(self, trial, *args, **kwargs):
# Get the hp from trial.
hp = trial.hyperparameters
# Define "x" as a hyperparameter.
x = hp.Float("x", min_value=-1.0, max_value=1.0)
# Return the objective value to minimize.
return x * x + 1
tuner = MyTuner(
# No hypermodel or objective specified.
max_trials=20,
overwrite=True,
directory="my_dir",
project_name="tune_anything",
)
# No need to pass anything to search()
# unless you use them in run_trial().
tuner.search()
print(tuner.get_best_hyperparameters()[0].get("x"))
```
<div class="k-default-codeblock">
```
Trial 20 Complete [00h 00m 00s]
default_objective: 1.6547719581194267
```
</div>
<div class="k-default-codeblock">
```
Best default_objective So Far: 1.0013236767905302
Total elapsed time: 00h 00m 00s
0.03638236922645777
```
</div>
### Keep Keras code separate
You can keep all your Keras code unchanged and use KerasTuner to tune it. This
is useful if you cannot modify the Keras code for some reason.
It also gives you more flexibility: you don't have to separate the model
building and training code. However, this workflow would not help you
save the model or connect with the TensorBoard plugins.
To save the model, you can use `trial.trial_id`, which is a string to uniquely
identify a trial, to construct different paths to save the models from
different trials.
```python
import os
def keras_code(units, optimizer, saving_path):
# Build model
model = keras.Sequential(
[
layers.Dense(units=units, activation="relu"),
layers.Dense(units=1),
]
)
model.compile(
optimizer=optimizer,
loss="mean_squared_error",
)
# Prepare data
x_train = np.random.rand(100, 10)
y_train = np.random.rand(100, 1)
x_val = np.random.rand(20, 10)
y_val = np.random.rand(20, 1)
# Train & eval model
model.fit(x_train, y_train)
# Save model
model.save(saving_path)
# Return a single float as the objective value.
# You may also return a dictionary
# of {metric_name: metric_value}.
y_pred = model.predict(x_val)
return np.mean(np.abs(y_pred - y_val))
class MyTuner(keras_tuner.RandomSearch):
def run_trial(self, trial, **kwargs):
hp = trial.hyperparameters
return keras_code(
units=hp.Int("units", 32, 128, 32),
optimizer=hp.Choice("optimizer", ["adam", "adadelta"]),
saving_path=os.path.join("/tmp", f"{trial.trial_id}.keras"),
)
tuner = MyTuner(
max_trials=3,
overwrite=True,
directory="my_dir",
project_name="keep_code_separate",
)
tuner.search()
# Retraining the model
best_hp = tuner.get_best_hyperparameters()[0]
keras_code(**best_hp.values, saving_path="/tmp/best_model.keras")
```
<div class="k-default-codeblock">
```
Trial 3 Complete [00h 00m 00s]
default_objective: 0.18014027375230962
```
</div>
<div class="k-default-codeblock">
```
Best default_objective So Far: 0.18014027375230962
Total elapsed time: 00h 00m 03s
```
</div>
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 61ms/step - loss: 0.5367
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
<div class="k-default-codeblock">
```
0.5918120126201316
```
</div>
---
## KerasTuner includes pre-made tunable applications: HyperResNet and HyperXception
These are ready-to-use hypermodels for computer vision.
They come pre-compiled with `loss="categorical_crossentropy"` and
`metrics=["accuracy"]`.
```python
from keras_tuner.applications import HyperResNet
hypermodel = HyperResNet(input_shape=(28, 28, 1), classes=10)
tuner = keras_tuner.RandomSearch(
hypermodel,
objective="val_accuracy",
max_trials=2,
overwrite=True,
directory="my_dir",
project_name="built_in_hypermodel",
)
```
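As a hedged sketch (not part of the original guide), you could run this tuner on
the MNIST arrays prepared earlier in this guide, reshaped to `(28, 28, 1)` to
match the hypermodel input. Since, as stated above, the hypermodel is compiled
with `categorical_crossentropy`, the labels are one-hot encoded here; the tiny
slices and variable names are only illustrative choices to keep the run short.

```python
# A minimal sketch: run the search on a small slice of MNIST.
# `x_train`, `y_train`, `x_val`, `y_val` are the arrays prepared earlier in this guide.
x_train_img = np.reshape(x_train[:100], (-1, 28, 28, 1))
x_val_img = np.reshape(x_val[:20], (-1, 28, 28, 1))
y_train_cat = keras.utils.to_categorical(y_train[:100], 10)  # one-hot labels
y_val_cat = keras.utils.to_categorical(y_val[:20], 10)

tuner.search(
    x_train_img,
    y_train_cat,
    epochs=1,
    validation_data=(x_val_img, y_val_cat),
)
```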
| keras-io/guides/md/keras_tuner/getting_started.md/0 | {
"file_path": "keras-io/guides/md/keras_tuner/getting_started.md",
"repo_id": "keras-io",
"token_count": 29735
} | 101 |
# Writing your own callbacks
**Authors:** Rick Chao, Francois Chollet<br>
**Date created:** 2019/03/20<br>
**Last modified:** 2023/06/25<br>
**Description:** Complete guide to writing new Keras callbacks.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/writing_your_own_callbacks.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/writing_your_own_callbacks.py)
---
## Introduction
A callback is a powerful tool to customize the behavior of a Keras model during
training, evaluation, or inference. Examples include `keras.callbacks.TensorBoard`
to visualize training progress and results with TensorBoard, or
`keras.callbacks.ModelCheckpoint` to periodically save your model during training.
In this guide, you will learn what a Keras callback is, what it can do, and how you can
build your own. We provide a few demos of simple callback applications to get you
started.
---
## Setup
```python
import numpy as np
import keras
```
---
## Keras callbacks overview
All callbacks subclass the `keras.callbacks.Callback` class, and
override a set of methods called at various stages of training, testing, and
predicting. Callbacks are useful to get a view on internal states and statistics of
the model during training.
You can pass a list of callbacks (as the keyword argument `callbacks`) to the following
model methods (a short example follows this list):
- `keras.Model.fit()`
- `keras.Model.evaluate()`
- `keras.Model.predict()`
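For example, here is a minimal, self-contained sketch (the model and data below
are placeholders invented for this illustration, not part of the guide's later
example) of attaching a built-in callback:

```python
# A minimal sketch: pass built-in callbacks via the `callbacks` argument.
import numpy as np
import keras

demo_model = keras.Sequential([keras.layers.Dense(1)])
demo_model.compile(optimizer="rmsprop", loss="mean_squared_error")

demo_x = np.random.rand(64, 4)
demo_y = np.random.rand(64, 1)

demo_model.fit(
    demo_x,
    demo_y,
    epochs=2,
    verbose=0,
    # The same list could be passed to `evaluate()` or `predict()`.
    callbacks=[keras.callbacks.EarlyStopping(monitor="loss", patience=1)],
)
```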
---
## An overview of callback methods
### Global methods
#### `on_(train|test|predict)_begin(self, logs=None)`
Called at the beginning of `fit`/`evaluate`/`predict`.
#### `on_(train|test|predict)_end(self, logs=None)`
Called at the end of `fit`/`evaluate`/`predict`.
### Batch-level methods for training/testing/predicting
#### `on_(train|test|predict)_batch_begin(self, batch, logs=None)`
Called right before processing a batch during training/testing/predicting.
#### `on_(train|test|predict)_batch_end(self, batch, logs=None)`
Called at the end of training/testing/predicting a batch. Within this method, `logs` is
a dict containing the metric results.
### Epoch-level methods (training only)
#### `on_epoch_begin(self, epoch, logs=None)`
Called at the beginning of an epoch during training.
#### `on_epoch_end(self, epoch, logs=None)`
Called at the end of an epoch during training.
---
## A basic example
Let's take a look at a concrete example. To get started, let's
define a simple Sequential Keras model:
```python
# Define the Keras model to add callbacks to
def get_model():
model = keras.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
loss="mean_squared_error",
metrics=["mean_absolute_error"],
)
return model
```
Then, load the MNIST data for training and testing from the Keras datasets API:
```python
# Load example MNIST data and pre-process it
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# Limit the data to 1000 samples
x_train = x_train[:1000]
y_train = y_train[:1000]
x_test = x_test[:1000]
y_test = y_test[:1000]
```
Now, define a simple custom callback that logs:
- When `fit`/`evaluate`/`predict` starts & ends
- When each epoch starts & ends
- When each training batch starts & ends
- When each evaluation (test) batch starts & ends
- When each inference (prediction) batch starts & ends
```python
class CustomCallback(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))
```
Let's try it out:
```python
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=1,
verbose=0,
validation_split=0.5,
callbacks=[CustomCallback()],
)
res = model.evaluate(
x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
)
res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])
```
<div class="k-default-codeblock">
```
Starting training; got log keys: []
Start epoch 0 of training; got log keys: []
...Training: start of batch 0; got log keys: []
...Training: end of batch 0; got log keys: ['loss', 'mean_absolute_error']
...Training: start of batch 1; got log keys: []
...Training: end of batch 1; got log keys: ['loss', 'mean_absolute_error']
...Training: start of batch 2; got log keys: []
...Training: end of batch 2; got log keys: ['loss', 'mean_absolute_error']
...Training: start of batch 3; got log keys: []
...Training: end of batch 3; got log keys: ['loss', 'mean_absolute_error']
Start testing; got log keys: []
...Evaluating: start of batch 0; got log keys: []
...Evaluating: end of batch 0; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 1; got log keys: []
...Evaluating: end of batch 1; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 2; got log keys: []
...Evaluating: end of batch 2; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 3; got log keys: []
...Evaluating: end of batch 3; got log keys: ['loss', 'mean_absolute_error']
Stop testing; got log keys: ['loss', 'mean_absolute_error']
End epoch 0 of training; got log keys: ['loss', 'mean_absolute_error', 'val_loss', 'val_mean_absolute_error']
Stop training; got log keys: ['loss', 'mean_absolute_error', 'val_loss', 'val_mean_absolute_error']
Start testing; got log keys: []
...Evaluating: start of batch 0; got log keys: []
...Evaluating: end of batch 0; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 1; got log keys: []
...Evaluating: end of batch 1; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 2; got log keys: []
...Evaluating: end of batch 2; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 3; got log keys: []
...Evaluating: end of batch 3; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 4; got log keys: []
...Evaluating: end of batch 4; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 5; got log keys: []
...Evaluating: end of batch 5; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 6; got log keys: []
...Evaluating: end of batch 6; got log keys: ['loss', 'mean_absolute_error']
...Evaluating: start of batch 7; got log keys: []
...Evaluating: end of batch 7; got log keys: ['loss', 'mean_absolute_error']
Stop testing; got log keys: ['loss', 'mean_absolute_error']
Start predicting; got log keys: []
...Predicting: start of batch 0; got log keys: []
...Predicting: end of batch 0; got log keys: ['outputs']
 1/8 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step...Predicting: start of batch 1; got log keys: []
...Predicting: end of batch 1; got log keys: ['outputs']
...Predicting: start of batch 2; got log keys: []
...Predicting: end of batch 2; got log keys: ['outputs']
...Predicting: start of batch 3; got log keys: []
...Predicting: end of batch 3; got log keys: ['outputs']
...Predicting: start of batch 4; got log keys: []
...Predicting: end of batch 4; got log keys: ['outputs']
...Predicting: start of batch 5; got log keys: []
...Predicting: end of batch 5; got log keys: ['outputs']
...Predicting: start of batch 6; got log keys: []
...Predicting: end of batch 6; got log keys: ['outputs']
...Predicting: start of batch 7; got log keys: []
...Predicting: end of batch 7; got log keys: ['outputs']
Stop predicting; got log keys: []
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step
```
</div>
### Usage of `logs` dict
The `logs` dict contains the loss value, and all the metrics at the end of a batch or
epoch. Examples include the loss and mean absolute error.
```python
class LossAndErrorPrintingCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_test_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_epoch_end(self, epoch, logs=None):
print(
"The average loss for epoch {} is {:7.2f} "
"and mean absolute error is {:7.2f}.".format(
epoch, logs["loss"], logs["mean_absolute_error"]
)
)
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=2,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
res = model.evaluate(
x_test,
y_test,
batch_size=128,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
```
<div class="k-default-codeblock">
```
Up to batch 0, the average loss is 29.25.
Up to batch 1, the average loss is 485.36.
Up to batch 2, the average loss is 330.94.
Up to batch 3, the average loss is 250.62.
Up to batch 4, the average loss is 202.20.
Up to batch 5, the average loss is 169.51.
Up to batch 6, the average loss is 145.98.
Up to batch 7, the average loss is 128.48.
The average loss for epoch 0 is 128.48 and mean absolute error is 6.01.
Up to batch 0, the average loss is 5.10.
Up to batch 1, the average loss is 4.80.
Up to batch 2, the average loss is 4.96.
Up to batch 3, the average loss is 4.96.
Up to batch 4, the average loss is 4.82.
Up to batch 5, the average loss is 4.69.
Up to batch 6, the average loss is 4.51.
Up to batch 7, the average loss is 4.53.
The average loss for epoch 1 is 4.53 and mean absolute error is 1.72.
Up to batch 0, the average loss is 5.08.
Up to batch 1, the average loss is 4.66.
Up to batch 2, the average loss is 4.64.
Up to batch 3, the average loss is 4.72.
Up to batch 4, the average loss is 4.82.
Up to batch 5, the average loss is 4.83.
Up to batch 6, the average loss is 4.77.
Up to batch 7, the average loss is 4.72.
```
</div>
---
## Usage of `self.model` attribute
In addition to receiving log information when one of their methods is called,
callbacks have access to the model associated with the current round of
training/evaluation/inference: `self.model`.
Here are a few of the things you can do with `self.model` in a callback (a short sketch follows this list):
- Set `self.model.stop_training = True` to immediately interrupt training.
- Mutate hyperparameters of the optimizer (available as `self.model.optimizer`),
such as `self.model.optimizer.learning_rate`.
- Save the model at periodic intervals.
- Record the output of `model.predict()` on a few test samples at the end of each
epoch, to use as a sanity check during training.
- Extract visualizations of intermediate features at the end of each epoch, to monitor
what the model is learning over time.
- etc.
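For instance, here is a hedged sketch (invented for illustration, not one of the
guide's own examples; the path template is an arbitrary choice) of a callback
that saves the model every couple of epochs and records predictions on a few
fixed samples:

```python
class PeriodicSnapshot(keras.callbacks.Callback):
    """Illustrative sketch: periodically save the model and record predictions
    on a small, fixed batch of samples as a sanity check."""

    def __init__(self, sample_batch, every=2, path_template="/tmp/snapshot_{epoch}.keras"):
        super().__init__()
        self.sample_batch = sample_batch
        self.every = every
        self.path_template = path_template
        self.sample_predictions = []

    def on_epoch_end(self, epoch, logs=None):
        # Save the model at periodic intervals.
        if (epoch + 1) % self.every == 0:
            self.model.save(self.path_template.format(epoch=epoch))
        # Record predictions on a few samples to monitor training over time.
        self.sample_predictions.append(
            self.model.predict(self.sample_batch, verbose=0)
        )


# Usage (with the model and data defined earlier in this guide):
# model.fit(x_train, y_train, epochs=4, verbose=0,
#           callbacks=[PeriodicSnapshot(x_test[:5])])
```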
Let's see this in action in a couple of examples.
---
## Examples of Keras callback applications
### Early stopping at minimum loss
This first example shows the creation of a `Callback` that stops training when the
minimum of the loss has been reached, by setting the attribute `self.model.stop_training`
(a boolean). Optionally, you can provide an argument `patience` to specify how many
epochs we should wait before stopping after reaching a local minimum.
`keras.callbacks.EarlyStopping` provides a more complete and general implementation.
```python
class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
"""Stop training when the loss is at its min, i.e. the loss stops decreasing.
Arguments:
        patience: Number of epochs to wait after the minimum has been hit. After this
        many epochs without improvement, training stops.
"""
def __init__(self, patience=0):
super().__init__()
self.patience = patience
# best_weights to store the weights at which the minimum loss occurs.
self.best_weights = None
def on_train_begin(self, logs=None):
        # The number of epochs it has waited while the loss is no longer at a minimum.
self.wait = 0
# The epoch the training stops at.
self.stopped_epoch = 0
# Initialize the best as infinity.
        self.best = np.inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get("loss")
if np.less(current, self.best):
self.best = current
self.wait = 0
            # Record the best weights if the current result is better (smaller).
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
print("Restoring model weights from the end of the best epoch.")
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print(f"Epoch {self.stopped_epoch + 1}: early stopping")
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=30,
verbose=0,
callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()],
)
```
<div class="k-default-codeblock">
```
Up to batch 0, the average loss is 25.57.
Up to batch 1, the average loss is 471.66.
Up to batch 2, the average loss is 322.55.
Up to batch 3, the average loss is 243.88.
Up to batch 4, the average loss is 196.53.
Up to batch 5, the average loss is 165.02.
Up to batch 6, the average loss is 142.34.
Up to batch 7, the average loss is 125.17.
Up to batch 8, the average loss is 111.83.
Up to batch 9, the average loss is 101.35.
Up to batch 10, the average loss is 92.60.
Up to batch 11, the average loss is 85.16.
Up to batch 12, the average loss is 79.02.
Up to batch 13, the average loss is 73.71.
Up to batch 14, the average loss is 69.23.
Up to batch 15, the average loss is 65.26.
The average loss for epoch 0 is 65.26 and mean absolute error is 3.89.
Up to batch 0, the average loss is 3.92.
Up to batch 1, the average loss is 4.34.
Up to batch 2, the average loss is 5.39.
Up to batch 3, the average loss is 6.58.
Up to batch 4, the average loss is 10.55.
Up to batch 5, the average loss is 19.29.
Up to batch 6, the average loss is 31.58.
Up to batch 7, the average loss is 38.20.
Up to batch 8, the average loss is 41.96.
Up to batch 9, the average loss is 41.30.
Up to batch 10, the average loss is 39.31.
Up to batch 11, the average loss is 37.09.
Up to batch 12, the average loss is 35.08.
Up to batch 13, the average loss is 33.27.
Up to batch 14, the average loss is 31.54.
Up to batch 15, the average loss is 30.00.
The average loss for epoch 1 is 30.00 and mean absolute error is 4.23.
Up to batch 0, the average loss is 5.70.
Up to batch 1, the average loss is 6.90.
Up to batch 2, the average loss is 7.74.
Up to batch 3, the average loss is 8.85.
Up to batch 4, the average loss is 12.53.
Up to batch 5, the average loss is 21.55.
Up to batch 6, the average loss is 35.70.
Up to batch 7, the average loss is 44.16.
Up to batch 8, the average loss is 44.82.
Up to batch 9, the average loss is 43.07.
Up to batch 10, the average loss is 40.51.
Up to batch 11, the average loss is 38.44.
Up to batch 12, the average loss is 36.69.
Up to batch 13, the average loss is 34.77.
Up to batch 14, the average loss is 32.97.
Up to batch 15, the average loss is 31.32.
The average loss for epoch 2 is 31.32 and mean absolute error is 4.39.
Restoring model weights from the end of the best epoch.
Epoch 3: early stopping
<keras.src.callbacks.history.History at 0x1187b7430>
```
</div>
### Learning rate scheduling
In this example, we show how a custom Callback can be used to dynamically change the
learning rate of the optimizer during the course of training.
See `callbacks.LearningRateScheduler` for a more general implementation.
```python
class CustomLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler which sets the learning rate according to schedule.
Arguments:
schedule: a function that takes an epoch index
(integer, indexed from 0) and current learning rate
as inputs and returns a new learning rate as output (float).
"""
def __init__(self, schedule):
super().__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, "learning_rate"):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
# Get the current learning rate from model's optimizer.
lr = self.model.optimizer.learning_rate
# Call schedule function to get the scheduled learning rate.
scheduled_lr = self.schedule(epoch, lr)
# Set the value back to the optimizer before this epoch starts
self.model.optimizer.learning_rate = scheduled_lr
print(f"\nEpoch {epoch}: Learning rate is {float(np.array(scheduled_lr))}.")
LR_SCHEDULE = [
# (epoch to start, learning rate) tuples
(3, 0.05),
(6, 0.01),
(9, 0.005),
(12, 0.001),
]
def lr_schedule(epoch, lr):
"""Helper function to retrieve the scheduled learning rate based on epoch."""
if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
return lr
for i in range(len(LR_SCHEDULE)):
if epoch == LR_SCHEDULE[i][0]:
return LR_SCHEDULE[i][1]
return lr
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=15,
verbose=0,
callbacks=[
LossAndErrorPrintingCallback(),
CustomLearningRateScheduler(lr_schedule),
],
)
```
<div class="k-default-codeblock">
```
Epoch 0: Learning rate is 0.10000000149011612.
Up to batch 0, the average loss is 27.90.
Up to batch 1, the average loss is 439.49.
Up to batch 2, the average loss is 302.08.
Up to batch 3, the average loss is 228.83.
Up to batch 4, the average loss is 184.97.
Up to batch 5, the average loss is 155.25.
Up to batch 6, the average loss is 134.03.
Up to batch 7, the average loss is 118.29.
Up to batch 8, the average loss is 105.65.
Up to batch 9, the average loss is 95.53.
Up to batch 10, the average loss is 87.25.
Up to batch 11, the average loss is 80.33.
Up to batch 12, the average loss is 74.48.
Up to batch 13, the average loss is 69.46.
Up to batch 14, the average loss is 65.05.
Up to batch 15, the average loss is 61.31.
The average loss for epoch 0 is 61.31 and mean absolute error is 3.85.
```
</div>
<div class="k-default-codeblock">
```
Epoch 1: Learning rate is 0.10000000149011612.
Up to batch 0, the average loss is 57.96.
Up to batch 1, the average loss is 55.11.
Up to batch 2, the average loss is 52.81.
Up to batch 3, the average loss is 51.06.
Up to batch 4, the average loss is 50.58.
Up to batch 5, the average loss is 51.49.
Up to batch 6, the average loss is 53.24.
Up to batch 7, the average loss is 54.20.
Up to batch 8, the average loss is 54.39.
Up to batch 9, the average loss is 54.31.
Up to batch 10, the average loss is 53.83.
Up to batch 11, the average loss is 52.93.
Up to batch 12, the average loss is 51.73.
Up to batch 13, the average loss is 50.34.
Up to batch 14, the average loss is 48.94.
Up to batch 15, the average loss is 47.65.
The average loss for epoch 1 is 47.65 and mean absolute error is 4.30.
```
</div>
<div class="k-default-codeblock">
```
Epoch 2: Learning rate is 0.10000000149011612.
Up to batch 0, the average loss is 46.38.
Up to batch 1, the average loss is 45.16.
Up to batch 2, the average loss is 44.03.
Up to batch 3, the average loss is 43.11.
Up to batch 4, the average loss is 42.52.
Up to batch 5, the average loss is 42.32.
Up to batch 6, the average loss is 43.06.
Up to batch 7, the average loss is 44.58.
Up to batch 8, the average loss is 45.33.
Up to batch 9, the average loss is 45.15.
Up to batch 10, the average loss is 44.59.
Up to batch 11, the average loss is 43.88.
Up to batch 12, the average loss is 43.17.
Up to batch 13, the average loss is 42.40.
Up to batch 14, the average loss is 41.74.
Up to batch 15, the average loss is 41.19.
The average loss for epoch 2 is 41.19 and mean absolute error is 4.27.
```
</div>
<div class="k-default-codeblock">
```
Epoch 3: Learning rate is 0.05.
Up to batch 0, the average loss is 40.85.
Up to batch 1, the average loss is 40.11.
Up to batch 2, the average loss is 39.38.
Up to batch 3, the average loss is 38.69.
Up to batch 4, the average loss is 38.01.
Up to batch 5, the average loss is 37.38.
Up to batch 6, the average loss is 36.77.
Up to batch 7, the average loss is 36.18.
Up to batch 8, the average loss is 35.61.
Up to batch 9, the average loss is 35.08.
Up to batch 10, the average loss is 34.54.
Up to batch 11, the average loss is 34.04.
Up to batch 12, the average loss is 33.56.
Up to batch 13, the average loss is 33.08.
Up to batch 14, the average loss is 32.64.
Up to batch 15, the average loss is 32.25.
The average loss for epoch 3 is 32.25 and mean absolute error is 3.64.
```
</div>
<div class="k-default-codeblock">
```
Epoch 4: Learning rate is 0.05000000074505806.
Up to batch 0, the average loss is 31.83.
Up to batch 1, the average loss is 31.42.
Up to batch 2, the average loss is 31.05.
Up to batch 3, the average loss is 30.72.
Up to batch 4, the average loss is 30.49.
Up to batch 5, the average loss is 30.37.
Up to batch 6, the average loss is 30.15.
Up to batch 7, the average loss is 29.94.
Up to batch 8, the average loss is 29.75.
Up to batch 9, the average loss is 29.56.
Up to batch 10, the average loss is 29.27.
Up to batch 11, the average loss is 28.96.
Up to batch 12, the average loss is 28.67.
Up to batch 13, the average loss is 28.39.
Up to batch 14, the average loss is 28.11.
Up to batch 15, the average loss is 27.80.
The average loss for epoch 4 is 27.80 and mean absolute error is 3.43.
```
</div>
<div class="k-default-codeblock">
```
Epoch 5: Learning rate is 0.05000000074505806.
Up to batch 0, the average loss is 27.51.
Up to batch 1, the average loss is 27.25.
Up to batch 2, the average loss is 27.05.
Up to batch 3, the average loss is 26.88.
Up to batch 4, the average loss is 26.76.
Up to batch 5, the average loss is 26.60.
Up to batch 6, the average loss is 26.44.
Up to batch 7, the average loss is 26.25.
Up to batch 8, the average loss is 26.08.
Up to batch 9, the average loss is 25.89.
Up to batch 10, the average loss is 25.71.
Up to batch 11, the average loss is 25.48.
Up to batch 12, the average loss is 25.26.
Up to batch 13, the average loss is 25.03.
Up to batch 14, the average loss is 24.81.
Up to batch 15, the average loss is 24.58.
The average loss for epoch 5 is 24.58 and mean absolute error is 3.25.
```
</div>
<div class="k-default-codeblock">
```
Epoch 6: Learning rate is 0.01.
Up to batch 0, the average loss is 24.36.
Up to batch 1, the average loss is 24.14.
Up to batch 2, the average loss is 23.93.
Up to batch 3, the average loss is 23.71.
Up to batch 4, the average loss is 23.52.
Up to batch 5, the average loss is 23.32.
Up to batch 6, the average loss is 23.12.
Up to batch 7, the average loss is 22.93.
Up to batch 8, the average loss is 22.74.
Up to batch 9, the average loss is 22.55.
Up to batch 10, the average loss is 22.37.
Up to batch 11, the average loss is 22.19.
Up to batch 12, the average loss is 22.01.
Up to batch 13, the average loss is 21.83.
Up to batch 14, the average loss is 21.67.
Up to batch 15, the average loss is 21.50.
The average loss for epoch 6 is 21.50 and mean absolute error is 2.98.
```
</div>
<div class="k-default-codeblock">
```
Epoch 7: Learning rate is 0.009999999776482582.
Up to batch 0, the average loss is 21.33.
Up to batch 1, the average loss is 21.17.
Up to batch 2, the average loss is 21.01.
Up to batch 3, the average loss is 20.85.
Up to batch 4, the average loss is 20.71.
Up to batch 5, the average loss is 20.57.
Up to batch 6, the average loss is 20.41.
Up to batch 7, the average loss is 20.27.
Up to batch 8, the average loss is 20.13.
Up to batch 9, the average loss is 19.98.
Up to batch 10, the average loss is 19.83.
Up to batch 11, the average loss is 19.69.
Up to batch 12, the average loss is 19.57.
Up to batch 13, the average loss is 19.44.
Up to batch 14, the average loss is 19.32.
Up to batch 15, the average loss is 19.19.
The average loss for epoch 7 is 19.19 and mean absolute error is 2.77.
```
</div>
<div class="k-default-codeblock">
```
Epoch 8: Learning rate is 0.009999999776482582.
Up to batch 0, the average loss is 19.07.
Up to batch 1, the average loss is 18.95.
Up to batch 2, the average loss is 18.83.
Up to batch 3, the average loss is 18.70.
Up to batch 4, the average loss is 18.58.
Up to batch 5, the average loss is 18.46.
Up to batch 6, the average loss is 18.35.
Up to batch 7, the average loss is 18.24.
Up to batch 8, the average loss is 18.12.
Up to batch 9, the average loss is 18.01.
Up to batch 10, the average loss is 17.90.
Up to batch 11, the average loss is 17.79.
Up to batch 12, the average loss is 17.68.
Up to batch 13, the average loss is 17.58.
Up to batch 14, the average loss is 17.48.
Up to batch 15, the average loss is 17.38.
The average loss for epoch 8 is 17.38 and mean absolute error is 2.61.
```
</div>
<div class="k-default-codeblock">
```
Epoch 9: Learning rate is 0.005.
Up to batch 0, the average loss is 17.28.
Up to batch 1, the average loss is 17.18.
Up to batch 2, the average loss is 17.08.
Up to batch 3, the average loss is 16.99.
Up to batch 4, the average loss is 16.90.
Up to batch 5, the average loss is 16.80.
Up to batch 6, the average loss is 16.71.
Up to batch 7, the average loss is 16.62.
Up to batch 8, the average loss is 16.53.
Up to batch 9, the average loss is 16.44.
Up to batch 10, the average loss is 16.35.
Up to batch 11, the average loss is 16.26.
Up to batch 12, the average loss is 16.17.
Up to batch 13, the average loss is 16.09.
Up to batch 14, the average loss is 16.00.
Up to batch 15, the average loss is 15.92.
The average loss for epoch 9 is 15.92 and mean absolute error is 2.48.
```
</div>
<div class="k-default-codeblock">
```
Epoch 10: Learning rate is 0.004999999888241291.
Up to batch 0, the average loss is 15.84.
Up to batch 1, the average loss is 15.76.
Up to batch 2, the average loss is 15.68.
Up to batch 3, the average loss is 15.61.
Up to batch 4, the average loss is 15.53.
Up to batch 5, the average loss is 15.45.
Up to batch 6, the average loss is 15.37.
Up to batch 7, the average loss is 15.29.
Up to batch 8, the average loss is 15.23.
Up to batch 9, the average loss is 15.15.
Up to batch 10, the average loss is 15.08.
Up to batch 11, the average loss is 15.00.
Up to batch 12, the average loss is 14.93.
Up to batch 13, the average loss is 14.86.
Up to batch 14, the average loss is 14.79.
Up to batch 15, the average loss is 14.72.
The average loss for epoch 10 is 14.72 and mean absolute error is 2.37.
```
</div>
<div class="k-default-codeblock">
```
Epoch 11: Learning rate is 0.004999999888241291.
Up to batch 0, the average loss is 14.65.
Up to batch 1, the average loss is 14.58.
Up to batch 2, the average loss is 14.52.
Up to batch 3, the average loss is 14.45.
Up to batch 4, the average loss is 14.39.
Up to batch 5, the average loss is 14.33.
Up to batch 6, the average loss is 14.26.
Up to batch 7, the average loss is 14.20.
Up to batch 8, the average loss is 14.14.
Up to batch 9, the average loss is 14.08.
Up to batch 10, the average loss is 14.02.
Up to batch 11, the average loss is 13.96.
Up to batch 12, the average loss is 13.90.
Up to batch 13, the average loss is 13.84.
Up to batch 14, the average loss is 13.78.
Up to batch 15, the average loss is 13.72.
The average loss for epoch 11 is 13.72 and mean absolute error is 2.27.
```
</div>
<div class="k-default-codeblock">
```
Epoch 12: Learning rate is 0.001.
Up to batch 0, the average loss is 13.67.
Up to batch 1, the average loss is 13.60.
Up to batch 2, the average loss is 13.55.
Up to batch 3, the average loss is 13.49.
Up to batch 4, the average loss is 13.44.
Up to batch 5, the average loss is 13.38.
Up to batch 6, the average loss is 13.33.
Up to batch 7, the average loss is 13.28.
Up to batch 8, the average loss is 13.22.
Up to batch 9, the average loss is 13.17.
Up to batch 10, the average loss is 13.12.
Up to batch 11, the average loss is 13.07.
Up to batch 12, the average loss is 13.02.
Up to batch 13, the average loss is 12.97.
Up to batch 14, the average loss is 12.92.
Up to batch 15, the average loss is 12.87.
The average loss for epoch 12 is 12.87 and mean absolute error is 2.19.
```
</div>
<div class="k-default-codeblock">
```
Epoch 13: Learning rate is 0.0010000000474974513.
Up to batch 0, the average loss is 12.82.
Up to batch 1, the average loss is 12.77.
Up to batch 2, the average loss is 12.72.
Up to batch 3, the average loss is 12.68.
Up to batch 4, the average loss is 12.63.
Up to batch 5, the average loss is 12.58.
Up to batch 6, the average loss is 12.53.
Up to batch 7, the average loss is 12.49.
Up to batch 8, the average loss is 12.45.
Up to batch 9, the average loss is 12.40.
Up to batch 10, the average loss is 12.35.
Up to batch 11, the average loss is 12.30.
Up to batch 12, the average loss is 12.26.
Up to batch 13, the average loss is 12.22.
Up to batch 14, the average loss is 12.17.
Up to batch 15, the average loss is 12.13.
The average loss for epoch 13 is 12.13 and mean absolute error is 2.12.
```
</div>
<div class="k-default-codeblock">
```
Epoch 14: Learning rate is 0.0010000000474974513.
Up to batch 0, the average loss is 12.09.
Up to batch 1, the average loss is 12.05.
Up to batch 2, the average loss is 12.01.
Up to batch 3, the average loss is 11.97.
Up to batch 4, the average loss is 11.92.
Up to batch 5, the average loss is 11.88.
Up to batch 6, the average loss is 11.84.
Up to batch 7, the average loss is 11.80.
Up to batch 8, the average loss is 11.76.
Up to batch 9, the average loss is 11.72.
Up to batch 10, the average loss is 11.68.
Up to batch 11, the average loss is 11.64.
Up to batch 12, the average loss is 11.60.
Up to batch 13, the average loss is 11.57.
Up to batch 14, the average loss is 11.54.
Up to batch 15, the average loss is 11.50.
The average loss for epoch 14 is 11.50 and mean absolute error is 2.06.
<keras.src.callbacks.history.History at 0x168619c60>
```
</div>
### Built-in Keras callbacks
Be sure to check out the existing Keras callbacks by
reading the [API docs](https://keras.io/api/callbacks/).
Applications include logging to CSV, saving
the model, visualizing metrics in TensorBoard, and a lot more!
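For instance, a minimal sketch of wiring a few of them together, re-using the `get_model()` helper and MNIST arrays from earlier in this guide (the file paths below are placeholders, not recommendations), might look like this:
```python
callbacks = [
    # Stream per-epoch metrics to a CSV file.
    keras.callbacks.CSVLogger("training_log.csv"),
    # Keep only the best model seen so far, judged by validation loss.
    keras.callbacks.ModelCheckpoint("best_model.keras", save_best_only=True),
    # Write logs viewable with `tensorboard --logdir ./logs`.
    keras.callbacks.TensorBoard(log_dir="./logs"),
]

model = get_model()
model.fit(
    x_train,
    y_train,
    batch_size=128,
    epochs=2,
    validation_split=0.5,
    callbacks=callbacks,
)
```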
| keras-io/guides/md/writing_your_own_callbacks.md/0 | {
"file_path": "keras-io/guides/md/writing_your_own_callbacks.md",
"repo_id": "keras-io",
"token_count": 12218
} | 102 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/modeling_layers/position_embedding/'" />
| keras-io/redirects/api/keras_nlp/layers/position_embedding/index.html/0 | {
"file_path": "keras-io/redirects/api/keras_nlp/layers/position_embedding/index.html",
"repo_id": "keras-io",
"token_count": 46
} | 103 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/examples/generative/gpt2_text_generation_with_kerasnlp/'" />
| keras-io/redirects/examples/nlp/gpt2_text_generation_with_kerasnlp/index.html/0 | {
"file_path": "keras-io/redirects/examples/nlp/gpt2_text_generation_with_kerasnlp/index.html",
"repo_id": "keras-io",
"token_count": 48
} | 104 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/merging_layers/'" />
| keras-io/redirects/layers/merge/index.html/0 | {
"file_path": "keras-io/redirects/layers/merge/index.html",
"repo_id": "keras-io",
"token_count": 38
} | 105 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/preprocessing/text/'" />
| keras-io/redirects/preprocessing/text/index.html/0 | {
"file_path": "keras-io/redirects/preprocessing/text/index.html",
"repo_id": "keras-io",
"token_count": 34
} | 106 |
ORACLE_MASTER = {
"path": "oracles/",
"title": "Oracles",
"toc": True,
"children": [
{
"path": "base_oracle",
"title": "The base Oracle class",
"generate": [
"keras_tuner.Oracle",
"keras_tuner.Oracle.create_trial",
"keras_tuner.Oracle.end_trial",
"keras_tuner.Oracle.get_best_trials",
"keras_tuner.Oracle.get_state",
"keras_tuner.Oracle.set_state",
"keras_tuner.Oracle.score_trial",
"keras_tuner.Oracle.populate_space",
"keras_tuner.Oracle.update_trial",
],
},
{
"path": "synchronized",
"title": "@synchronized decorator",
"generate": [
"keras_tuner.synchronized",
],
},
{
"path": "random",
"title": "RandomSearch Oracle",
"generate": [
"keras_tuner.oracles.RandomSearchOracle",
],
},
{
"path": "grid",
"title": "GridSearch Oracle",
"generate": [
"keras_tuner.oracles.GridSearchOracle",
],
},
{
"path": "bayesian",
"title": "BayesianOptimization Oracle",
"generate": [
"keras_tuner.oracles.BayesianOptimizationOracle",
],
},
{
"path": "hyperband",
"title": "Hyperband Oracle",
"generate": [
"keras_tuner.oracles.HyperbandOracle",
],
},
],
}
HYPERMODEL_MASTER = {
"path": "hypermodels/",
"title": "HyperModels",
"toc": True,
"children": [
{
"path": "base_hypermodel",
"title": "The base HyperModel class",
"generate": [
"keras_tuner.HyperModel",
"keras_tuner.HyperModel.build",
],
},
{
"path": "hyper_efficientnet",
"title": "HyperEfficientNet",
"generate": [
"keras_tuner.applications.HyperEfficientNet",
],
},
{
"path": "hyper_image_augment",
"title": "HyperImageAugment",
"generate": [
"keras_tuner.applications.HyperImageAugment",
],
},
{
"path": "hyper_resnet",
"title": "HyperResNet",
"generate": [
"keras_tuner.applications.HyperResNet",
],
},
{
"path": "hyper_xception",
"title": "HyperXception",
"generate": [
"keras_tuner.applications.HyperXception",
],
},
],
}
TUNER_MASTER = {
"path": "tuners/",
"title": "Tuners",
"toc": True,
"children": [
{
"path": "base_tuner",
"title": "The base Tuner class",
"generate": [
"keras_tuner.Tuner",
"keras_tuner.Tuner.get_best_hyperparameters",
"keras_tuner.Tuner.get_best_models",
"keras_tuner.Tuner.get_state",
"keras_tuner.Tuner.load_model",
"keras_tuner.Tuner.on_epoch_begin",
"keras_tuner.Tuner.on_batch_begin",
"keras_tuner.Tuner.on_batch_end",
"keras_tuner.Tuner.on_epoch_end",
"keras_tuner.Tuner.run_trial",
"keras_tuner.Tuner.results_summary",
"keras_tuner.Tuner.save_model",
"keras_tuner.Tuner.search",
"keras_tuner.Tuner.search_space_summary",
"keras_tuner.Tuner.set_state",
],
},
{
"path": "objective",
"title": "Objective class",
"generate": [
"keras_tuner.Objective",
],
},
{
"path": "random",
"title": "RandomSearch Tuner",
"generate": [
"keras_tuner.RandomSearch",
],
},
{
"path": "grid",
"title": "GridSearch Tuner",
"generate": [
"keras_tuner.GridSearch",
],
},
{
"path": "bayesian",
"title": "BayesianOptimization Tuner",
"generate": [
"keras_tuner.BayesianOptimization",
],
},
{
"path": "hyperband",
"title": "Hyperband Tuner",
"generate": [
"keras_tuner.Hyperband",
],
},
{
"path": "sklearn",
"title": "Sklearn Tuner",
"generate": [
"keras_tuner.SklearnTuner",
],
},
],
}
KT_API_MASTER = {
"path": "keras_tuner/",
"title": "KerasTuner",
"toc": True,
"children": [
{
"path": "hyperparameters",
"title": "HyperParameters",
"generate": [
"keras_tuner.HyperParameters",
"keras_tuner.HyperParameters.Boolean",
"keras_tuner.HyperParameters.Choice",
"keras_tuner.HyperParameters.Fixed",
"keras_tuner.HyperParameters.Float",
"keras_tuner.HyperParameters.Int",
"keras_tuner.HyperParameters.conditional_scope",
"keras_tuner.HyperParameters.get",
],
},
TUNER_MASTER,
ORACLE_MASTER,
HYPERMODEL_MASTER,
{
"path": "errors",
"title": "Errors",
"generate": [
"keras_tuner.errors.FailedTrialError",
"keras_tuner.errors.FatalError",
"keras_tuner.errors.FatalValueError",
"keras_tuner.errors.FatalTypeError",
"keras_tuner.errors.FatalRuntimeError",
],
},
],
}
| keras-io/scripts/kt_api_master.py/0 | {
"file_path": "keras-io/scripts/kt_api_master.py",
"repo_id": "keras-io",
"token_count": 3643
} | 107 |
# KerasCV Regularization Layers
KerasCV regularization layers implement computer vision specific model regularization
techniques.
{{toc}}
| keras-io/templates/api/keras_cv/layers/regularization/index.md/0 | {
"file_path": "keras-io/templates/api/keras_cv/layers/regularization/index.md",
"repo_id": "keras-io",
"token_count": 34
} | 108 |
# The Tuner classes in KerasTuner
The base `Tuner` class is the class that manages the hyperparameter search process,
including model creation, training, and evaluation. For each trial, a `Tuner` receives new
hyperparameter values from an `Oracle` instance. After calling `model.fit(...)`, it
sends the evaluation results back to the `Oracle` instance and it retrieves the next set
of hyperparameters to try.
There are a few built-in `Tuner` subclasses available for widely-used tuning
algorithms: `RandomSearch`, `BayesianOptimization` and `Hyperband`.
You can also subclass the `Tuner` class to customize your tuning process.
In particular, you can [override the `run_trial` function](/guides/keras_tuner/custom_tuner/#overriding-runtrial)
to customize model building and training.
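As a rough illustration (a minimal sketch rather than the full workflow from the guide linked above; the directory and project names are placeholders), overriding `run_trial()` can be as simple as returning the value of the objective to minimize:
```python
import keras_tuner


class MyTuner(keras_tuner.RandomSearch):
    def run_trial(self, trial, **kwargs):
        hp = trial.hyperparameters
        x = hp.Float("x", min_value=-1.0, max_value=1.0)
        # Returning a single float tells KerasTuner this is the
        # objective value to minimize for this trial.
        return x * x + 1


tuner = MyTuner(
    max_trials=5,
    overwrite=True,
    directory="tmp_dir",
    project_name="minimize_x_squared",
)
tuner.search()
best_hp = tuner.get_best_hyperparameters()[0]
```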
{{toc}}
| keras-io/templates/api/keras_tuner/tuners/index.md/0 | {
"file_path": "keras-io/templates/api/keras_tuner/tuners/index.md",
"repo_id": "keras-io",
"token_count": 218
} | 109 |
# KerasCV
<a class="github-button" href="https://github.com/keras-team/keras-cv" data-size="large" data-show-count="true" aria-label="Star keras-team/keras-cv on GitHub">Star</a>
KerasCV is a library of modular computer vision components that work natively
with TensorFlow, JAX, or PyTorch. Built on Keras 3, these models, layers,
metrics, callbacks, etc., can be trained and serialized in any framework and
re-used in another without costly migrations.
KerasCV can be understood as a horizontal extension of the Keras API: the
components are new first-party Keras objects that are too specialized to be
added to core Keras. They receive the same level of polish and backwards
compatibility guarantees as the core Keras API, and they are maintained by the
Keras team.
Our APIs assist in common computer vision tasks such as data augmentation,
classification, object detection, segmentation, image generation, and more.
Applied computer vision engineers can leverage KerasCV to quickly assemble
production-grade, state-of-the-art training and inference pipelines for all of
these common tasks.
<img style="width: 440px; max-width: 90%;" src="/img/keras-cv-augmentations.gif">
## Quick Links
- [List of available models and presets](https://keras.io/api/keras_cv/models/)
- [Developer Guides](https://keras.io/guides/keras_cv/)
- [Contributing Guide](https://github.com/keras-team/keras-cv/blob/master/.github/CONTRIBUTING.md)
- [API Design Guidelines](https://github.com/keras-team/keras-cv/blob/master/.github/API_DESIGN.md)
## Installation
KerasCV supports both Keras 2 and Keras 3. We recommend Keras 3 for all new
users, as it enables using KerasCV models and layers with JAX, TensorFlow and
PyTorch.
### Keras 2 Installation
To install the latest KerasCV release with Keras 2, simply run:
```
pip install --upgrade keras-cv tensorflow
```
### Keras 3 Installation
There are currently two ways to install Keras 3 with KerasCV. To install the
stable versions of KerasCV and Keras 3, you should install Keras 3 **after**
installing KerasCV. This is a temporary step while TensorFlow is pinned to
Keras 2, and will no longer be necessary after TensorFlow 2.16.
```
pip install --upgrade keras-cv tensorflow
pip install --upgrade keras
```
To install the latest changes nightly for KerasCV and Keras, you can use our
nightly package.
```
pip install --upgrade keras-cv-nightly tf-nightly
```
**Note:** Keras 3 will not function with TensorFlow 2.14 or earlier.
See [Getting started with Keras](/getting_started/) for more information on
installing Keras generally and compatibility with different frameworks.
## Quickstart
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow" # Or "jax" or "torch"!
import tensorflow as tf
import keras_cv
import tensorflow_datasets as tfds
import keras
# Create a preprocessing pipeline with augmentations
BATCH_SIZE = 16
NUM_CLASSES = 3
augmenter = keras_cv.layers.Augmenter(
[
keras_cv.layers.RandomFlip(),
keras_cv.layers.RandAugment(value_range=(0, 255)),
keras_cv.layers.CutMix(),
],
)
def preprocess_data(images, labels, augment=False):
labels = tf.one_hot(labels, NUM_CLASSES)
inputs = {"images": images, "labels": labels}
outputs = inputs
if augment:
outputs = augmenter(outputs)
return outputs['images'], outputs['labels']
train_dataset, test_dataset = tfds.load(
'rock_paper_scissors',
as_supervised=True,
split=['train', 'test'],
)
train_dataset = train_dataset.batch(BATCH_SIZE).map(
lambda x, y: preprocess_data(x, y, augment=True),
num_parallel_calls=tf.data.AUTOTUNE).prefetch(
tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(BATCH_SIZE).map(
preprocess_data, num_parallel_calls=tf.data.AUTOTUNE).prefetch(
tf.data.AUTOTUNE)
# Create a model using a pretrained backbone
backbone = keras_cv.models.EfficientNetV2Backbone.from_preset(
"efficientnetv2_b0_imagenet"
)
model = keras_cv.models.ImageClassifier(
backbone=backbone,
num_classes=NUM_CLASSES,
activation="softmax",
)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.Adam(learning_rate=1e-5),
metrics=['accuracy']
)
# Train your model
model.fit(
train_dataset,
validation_data=test_dataset,
epochs=8,
)
```
## Disclaimer
KerasCV provides access to pre-trained models via the `keras_cv.models` API.
These pre-trained models are provided on an "as is" basis, without warranties or
conditions of any kind. The following underlying models are provided by third
parties, and are subject to separate licenses: StableDiffusion, Vision
Transformer
## Citing KerasCV
If KerasCV helps your research, we appreciate your citations.
Here is the BibTeX entry:
```bibtex
@misc{wood2022kerascv,
title={KerasCV},
author={Wood, Luke and Tan, Zhenyu and Stenbit, Ian and Bischof, Jonathan and Zhu, Scott and Chollet, Fran\c{c}ois and Sreepathihalli, Divyashree and Sampath, Ramesh and others},
year={2022},
howpublished={\url{https://github.com/keras-team/keras-cv}},
}
```
| keras-io/templates/keras_cv/index.md/0 | {
"file_path": "keras-io/templates/keras_cv/index.md",
"repo_id": "keras-io",
"token_count": 1734
} | 110 |
# Style Guide
## Use `black`
For the most part, following our code style is very simple, we just use
[black](https://github.com/psf/black) to format code. See our
[Contributing Guide](CONTRIBUTING.md) for how to run our formatting scripts.
## Naming of Layers and Models
Capitalize all acronyms, e.g. LSTM not Lstm, KLDivergence not KlDivergence,
GPT2, XLMRoberta, etc.
Files should be named with snake case, and an acronym should be considered a
single "segment". For example, XLMRoberta would map to the xlm_roberta.py filename.
When a specific abbreviation is very common and pronounceable (an acronym),
treat it as a standalone word, e.g. Bert, Deberta, etc. In this case, "Bert"
is treated as a common noun rather than an abbreviation.
## Naming of Models and Presets
Naming of models and presets is a difficult and important element of our
library usability. In general we try to follow the branding of "upstream"
model naming, subject to the consistency constraints laid out here.
- The model and preset names should be recognizable to users familiar with the
original release. E.g. the model that goes with the "DeBERTaV3" paper should
be called `DebertaV3`. A release of a [toxic-bert](https://huggingface.co/unitary/toxic-bert)
checkpoint for `keras_nlp.models.Bert`, should include the string
`"toxic_bert"`.
- All preset names should include the language of the pretraining data. If three
or more languages are supported, the preset name should include `"multi"` (not
the single letter "m").
- If a preset lowercases input for cased languages, the preset name should
be marked with `"uncased"`.
- Don't abbreviate size names. E.g. "xsmall" or "XL" in original checkpoint
releases should map to `"extra_small"` or `"extra_large"` in preset names.
- No configuration in names. E.g. use "bert_base" instead of
"bert_L-12_H-768_A-12".
When in doubt, readability should win out!
## File names
When possible, keep publicly documented classes in their own files, and make
the name of the class match the filename. E.g. the `BertClassifier` model should
be in `bert_classifier.py`, and the `TransformerEncoder` layer
should be in `transformer_encoder.py`
Small and/or unexported utility classes may live together along with code that
uses them if convenient, e.g., our `BytePairTokenizerCache` is collocated in the
same file as our `BytePairTokenizer`.
## Import keras and keras_nlp as top-level objects
Prefer importing `tf`, `keras` and `keras_nlp` as top-level objects. We want
it to be clear to a reader which symbols are from `keras_nlp` and which are
from core `keras`.
For guides and examples using KerasNLP, the import block should look as follows:
```python
import keras_nlp
import tensorflow as tf
from tensorflow import keras
```
❌ `tf.keras.activations.X`<br/>
✅ `keras.activations.X`
❌ `layers.X`<br/>
✅ `keras.layers.X` or `keras_nlp.layers.X`
❌ `Dense(1, activation='softmax')`<br/>
✅ `keras.layers.Dense(1, activation='softmax')`
For KerasNLP library code, `keras_nlp` will not be directly imported, but
`keras` should still be used as a top-level object used to access library
symbols.
## Ideal layer style
When writing a new KerasNLP layer (or tokenizer or metric), please make sure to
do the following:
- Accept `**kwargs` in `__init__` and forward this to the super class.
- Keep a python attribute on the layer for each `__init__` argument to the
layer. The name and value should match the passed value.
- Write a `get_config()` which chains to super.
- Document the layer behavior thoroughly, including call behavior, through a
class-level docstring. Generally methods like `build()` and `call()` should
not have their own docstring.
- Docstring text should start on the same line as the opening quotes and
otherwise follow [PEP 257](https://peps.python.org/pep-0257/).
- Document the
[masking](https://keras.io/guides/understanding_masking_and_padding/) behavior
of the layer in the class level docstring as well.
- Always include usage examples using the full symbol location in `keras_nlp`.
- Include a reference citation if applicable.
````python
class PositionEmbedding(keras.layers.Layer):
"""A layer which learns a position embedding for input sequences.
This class accepts a single dense tensor as input, and will output a
learned position embedding of the same shape.
This class assumes that in the input tensor, the last dimension corresponds
to the features, and the dimension before the last corresponds to the
sequence.
    This layer does not support masking, but can be combined with a
`keras.layers.Embedding` for padding mask support.
Args:
sequence_length: The maximum length of the dynamic sequence.
Examples:
Direct call.
>>> layer = keras_nlp.layers.PositionEmbedding(sequence_length=10)
>>> layer(tf.zeros((8, 10, 16))).shape
TensorShape([8, 10, 16])
Combining with a token embedding.
```python
seq_length = 50
vocab_size = 5000
embed_dim = 128
inputs = keras.Input(shape=(seq_length,))
token_embeddings = keras.layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)(inputs)
position_embeddings = keras_nlp.layers.PositionEmbedding(
sequence_length=seq_length
)(token_embeddings)
outputs = token_embeddings + position_embeddings
```
Reference:
- [Devlin et al., 2019](https://arxiv.org/abs/1810.04805)
"""
def __init__(
self,
sequence_length,
**kwargs,
):
super().__init__(**kwargs)
self.sequence_length = int(sequence_length)
def build(self, input_shape):
super().build(input_shape)
feature_size = input_shape[-1]
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.sequence_length, feature_size],
)
def call(self, inputs):
shape = tf.shape(inputs)
input_length = shape[-2]
position_embeddings = self.position_embeddings[:input_length, :]
return tf.broadcast_to(position_embeddings, shape)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
}
)
return config
````
| keras-nlp/STYLE_GUIDE.md/0 | {
"file_path": "keras-nlp/STYLE_GUIDE.md",
"repo_id": "keras-nlp",
"token_count": 2136
} | 111 |
# English-Spanish machine translation with keras-nlp
This example will show how to train a Transformer-based machine translation
model using APIs provided by KerasNLP. This guide shows how to train the
model and evaluate it on custom English sentences.
## Installing dependencies
Pip dependencies for all keras-nlp examples are listed in `setup.py`. To install
both the keras-nlp library from source and all other dependencies required to
run the example, run the command below. You may want to install into a
self-contained environment (e.g. a container or a virtualenv).
```shell
pip install -e ".[examples]"
```
## Train the machine translation model and save to disk
At the root directory of keras-nlp, run the following command:
```shell
python ./examples/machine_translation/train.py \
--num_epochs=3 \
--saved_model_path="saved_models/machine_translation"
```
If it finishes successfully, you should see your console print out the
following information:
```
Successfully saved model to saved_models/machine_translation.
```
## Running machine translation on customized inputs
Once you have a model saved successfully, you can play around with it via the
`inference.py` script. To run inference on customized inputs, run the
following command:
```shell
python ./examples/machine_translation/inference.py \
    --inputs="Have a nice day" \
    --saved_model_path="saved_models/machine_translation"
```
You can set `--inputs` to any English sentence, or leave it unset, in which
case the script will run against some predefined English sentences.
| keras-nlp/examples/machine_translation/README.md/0 | {
"file_path": "keras-nlp/examples/machine_translation/README.md",
"repo_id": "keras-nlp",
"token_count": 428
} | 112 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
@keras_nlp_export("keras_nlp.layers.MaskedLMHead")
class MaskedLMHead(keras.layers.Layer):
"""Masked Language Model (MaskedLM) head.
This layer takes two inputs:
- `inputs`: which should be a tensor of encoded tokens with shape
`(batch_size, sequence_length, hidden_dim)`.
- `mask_positions`: which should be a tensor of integer positions to
predict with shape `(batch_size, masks_per_sequence)`.
The token encodings should usually be the last output of an encoder model,
and mask positions should be the integer positions you would like to
predict for the MaskedLM task.
The layer will first gather the token encodings at the mask positions. These
gathered tokens will be passed through a dense layer the same size as
    the encoding dimension, then transformed to predictions the same size as the
input vocabulary. This layer will produce a single output with shape
`(batch_size, masks_per_sequence, vocabulary_size)`, which can be used to
    compute a MaskedLM loss function.
    This layer is often paired with `keras_nlp.layers.MaskedLMMaskGenerator`,
which will help prepare inputs for the MaskedLM task.
Args:
vocabulary_size: The total size of the vocabulary for predictions.
token_embedding: Optional. A `keras_nlp.layers.ReversibleEmbedding`
instance. If passed, the layer will be used to project from the
`hidden_dim` of the model to the output `vocabulary_size`.
        intermediate_activation: The activation function of the intermediate dense layer.
activation: The activation function for the outputs of the layer.
Usually either `None` (return logits), or `"softmax"`
(return probabilities).
layer_norm_epsilon: float. The epsilon value in layer
normalization components. Defaults to `1e-5`.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense and multiheaded
attention layers. Defaults to `"glorot_uniform"`.
bias_initializer: string or `keras.initializers` initializer.
The bias initializer for the dense and multiheaded
attention layers. Defaults to `"zeros"`.
Examples:
```python
batch_size = 16
vocab_size = 100
hidden_dim = 32
seq_length = 50
# Generate random inputs.
token_ids = np.random.randint(vocab_size, size=(batch_size, seq_length))
# Choose random positions as the masked inputs.
mask_positions = np.random.randint(seq_length, size=(batch_size, 5))
# Embed tokens in a `hidden_dim` feature space.
token_embedding = keras_nlp.layers.ReversibleEmbedding(
vocab_size,
hidden_dim,
)
hidden_states = token_embedding(token_ids)
preds = keras_nlp.layers.MaskedLMHead(
vocabulary_size=vocab_size,
token_embedding=token_embedding,
activation="softmax",
)(hidden_states, mask_positions)
```
References:
- [Press and Wolf, 2016](https://arxiv.org/abs/1608.05859)
"""
def __init__(
self,
vocabulary_size=None,
token_embedding=None,
intermediate_activation="relu",
activation=None,
layer_norm_epsilon=1e-05,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs,
):
super().__init__(**kwargs)
self.vocabulary_size = vocabulary_size
self.token_embedding = token_embedding
self.intermediate_activation = keras.activations.get(
intermediate_activation
)
self.activation = keras.activations.get(activation)
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
if vocabulary_size is None and token_embedding is None:
raise ValueError(
"One of `vocabulary_size` or `token_embedding` must be set. "
"Received: `vocabulary_size=None`, `token_embedding=None`"
)
if token_embedding:
if vocabulary_size and vocabulary_size != token_embedding.input_dim:
                raise ValueError(
                    "`vocabulary_size` should match the input dimension "
                    "of `token_embedding`. Received: "
f"`vocabulary_size={vocabulary_size}`, "
f"`token_embedding.input_dim={token_embedding.input_dim}`"
)
self.vocabulary_size = token_embedding.input_dim
def build(self, inputs_shape, mask_positions_shape=None):
if self.token_embedding is not None:
feature_size = self.token_embedding.output_dim
else:
feature_size = inputs_shape[-1]
self._intermediate_dense = keras.layers.Dense(
feature_size,
activation=self.intermediate_activation,
kernel_initializer=self.kernel_initializer,
bias_initializer=self.bias_initializer,
dtype=self.dtype_policy,
name="intermediate_dense",
)
self._intermediate_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="intermediate_layer_norm",
)
# The gather length does not affect any of our built variables, so
# we can pass any value here.
gather_length = None
shape = (inputs_shape[0], gather_length, inputs_shape[-1])
self._intermediate_dense.build(shape)
shape = (inputs_shape[0], gather_length, feature_size)
self._intermediate_layer_norm.build(shape)
if self.token_embedding is None:
self._kernel = self.add_weight(
name="output_kernel",
shape=[feature_size, self.vocabulary_size],
initializer=self.kernel_initializer,
dtype=self.dtype,
)
self._bias = self.add_weight(
name="output_bias",
shape=[self.vocabulary_size],
initializer=self.bias_initializer,
dtype=self.dtype,
)
self.built = True
def call(self, inputs, mask_positions):
# Avoid auto-converting numpy int arrays to float tensors.
mask_positions = ops.convert_to_tensor(mask_positions, dtype="int")
# Gather the encoded tokens at the masked indices.
mask_positions = ops.expand_dims(mask_positions, axis=-1)
x = ops.take_along_axis(inputs, mask_positions, axis=1)
# Apply a trainable linear transformation and a layer norm.
x = self._intermediate_dense(x)
x = self._intermediate_layer_norm(x)
# Transform encodings to vocabulary_size predictions.
if self.token_embedding:
outputs = self.token_embedding(x, reverse=True)
else:
outputs = ops.matmul(x, self._kernel)
outputs = ops.cast(outputs, self.compute_dtype)
outputs = outputs + self._bias
# Apply a final activation.
if self.activation is not None:
outputs = self.activation(outputs)
return outputs
@classmethod
def from_config(cls, config):
embedding = config.get("token_embedding")
if embedding:
config["token_embedding"] = keras.layers.deserialize(embedding)
return super().from_config(config)
def get_config(self):
config = super().get_config()
embedding_config = None
if self.token_embedding:
embedding_config = keras.layers.serialize(self.token_embedding)
config.update(
{
"vocabulary_size": self.vocabulary_size,
"token_embedding": embedding_config,
"intermediate_activation": keras.activations.serialize(
self.intermediate_activation
),
"activation": keras.activations.serialize(self.activation),
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
}
)
return config
def compute_output_shape(self, inputs_shape, mask_positions_shape):
return mask_positions_shape + (self.vocabulary_size,)
| keras-nlp/keras_nlp/layers/modeling/masked_lm_head.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/masked_lm_head.py",
"repo_id": "keras-nlp",
"token_count": 3884
} | 113 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
from keras_nlp.backend import ops
def _check_masks_shapes(inputs, padding_mask, attention_mask):
mask = padding_mask
if hasattr(inputs, "_keras_mask") and mask is None:
mask = inputs._keras_mask
if mask is not None:
if len(mask.shape) != 2:
raise ValueError(
"`padding_mask` should have shape "
"(batch_size, target_length). "
f"Received shape `{mask.shape}`."
)
if attention_mask is not None:
if len(attention_mask.shape) != 3:
            raise ValueError(
                "`attention_mask` should have shape "
                "(batch_size, target_length, source_length). "
                f"Received shape `{attention_mask.shape}`."
)
def compute_causal_mask(batch_size, input_length, output_length, cache_index=0):
"""Compute a causal attention mask for a transformer decoder.
Args:
batch_size: batch size for the mask.
input_length: the length of key/value tensors in the attention layer.
output_length: the length of query tensors in the attention layer.
cache_index: the current index for cached generation. If passed, the
query sequence will be considered to start at `cache_index` rather
than zero. For example, a causal mask with `output_length=1` and
`cache_index=5` would allow the query tensor to attend to the first
five positions of the key/value tensors.
Return:
A causal attention mask with shape
`(batch_size, output_length, input_length)` that can be passed to a
attention layer.
"""
i = ops.arange(output_length, dtype="float32")
i = i + ops.cast(cache_index, "float32")
i = ops.expand_dims(i, axis=1)
j = ops.arange(input_length, dtype="float32")
mask = ops.expand_dims(i >= j, axis=0)
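    # At this point `mask[0, q, k]` is True when key position `k` is at or
    # before the query's absolute position `cache_index + q`. For example,
    # with `output_length=1`, `cache_index=2` and `input_length=4`, the single
    # query row is `[True, True, True, False]`.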
return ops.broadcast_to(mask, (batch_size, output_length, input_length))
def merge_padding_and_attention_mask(
inputs,
padding_mask,
attention_mask,
):
"""Merge the padding mask with a customized attention mask.
Args:
inputs: the input sequence.
padding_mask: the 1D padding mask, of shape
[batch_size, sequence_length].
attention_mask: the 2D customized mask, of shape
[batch_size, sequence1_length, sequence2_length].
Return:
A merged 2D mask or None. If only `padding_mask` is provided, the
returned mask is padding_mask with one additional axis.
"""
_check_masks_shapes(inputs, padding_mask, attention_mask)
mask = padding_mask
if hasattr(inputs, "_keras_mask"):
if mask is None:
# If no padding mask is explicitly provided, we look for padding
# mask from the input data.
mask = inputs._keras_mask
else:
logging.warning(
"You are explicitly setting `padding_mask` while the `inputs` "
"have built-in mask, so the built-in mask is ignored."
)
if mask is not None:
# Add an axis for broadcasting, the attention mask should be 2D
# (not including the batch axis).
mask = ops.cast(ops.expand_dims(mask, axis=1), "int32")
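        # For example, a `(batch_size, sequence_length)` padding mask becomes
        # `(batch_size, 1, sequence_length)` here, so it broadcasts against a
        # `(batch_size, target_length, source_length)` attention mask below.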
if attention_mask is not None:
attention_mask = ops.cast(attention_mask, "int32")
if mask is None:
return attention_mask
else:
return ops.minimum(mask, attention_mask)
return mask
| keras-nlp/keras_nlp/layers/modeling/transformer_layer_utils.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/transformer_layer_utils.py",
"repo_id": "keras-nlp",
"token_count": 1611
} | 114 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.metrics.bleu import Bleu
from keras_nlp.tests.test_case import TestCase
from keras_nlp.tokenizers.byte_tokenizer import ByteTokenizer
class BleuTest(TestCase):
def test_initialization(self):
bleu = Bleu()
result = bleu.result()
self.assertEqual(result, 0.0)
def test_scalar_input(self):
bleu = Bleu(smooth=True)
y_true = [
"He eats a sweet apple.",
"He is eating a tasty apple, isn't he?",
]
y_pred = "He He He eats sweet apple which is a fruit."
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.212, delta=1e-3)
def test_1d_list_input(self):
bleu = Bleu()
y_true = [
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
y_pred = [
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.243, delta=1e-3)
def test_2d_list_input(self):
bleu = Bleu()
y_true = [
[["He eats a sweet apple."]],
[["Silicon Valley is one of my favourite shows!"]],
]
y_pred = [
["He He He eats sweet apple which is a fruit."],
["I love Silicon Valley, it's one of my favourite shows."],
]
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.243, delta=1e-3)
def test_custom_tokenizer(self):
byte_tokenizer = ByteTokenizer()
bleu = Bleu(tokenizer=byte_tokenizer)
y_true = [
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
y_pred = [
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.609, delta=1e-3)
def test_different_order(self):
bleu = Bleu(max_order=5)
y_true = [
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
y_pred = [
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.188, delta=1e-3)
def test_tensor_input(self):
bleu = Bleu()
y_true = tf.constant(
[
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
)
y_pred = tf.constant(
[
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
)
bleu_val = bleu(y_true, y_pred)
self.assertAlmostEqual(bleu_val, 0.243, delta=1e-3)
@pytest.mark.tf_only # string model output only applies to tf.
def test_model_compile(self):
inputs = keras.Input(shape=(), dtype="string")
outputs = keras.layers.Identity()(inputs)
model = keras.Model(inputs, outputs)
model.compile(metrics=[Bleu()])
y_pred = x = tf.constant(
[
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
)
y = tf.constant(
[
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
)
output = model.compute_metrics(x, y, y_pred, sample_weight=None)
self.assertAlmostEqual(output["bleu"], 0.243, delta=1e-3)
def test_reset_state(self):
bleu = Bleu()
y_true = [
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
y_pred = [
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
bleu.update_state(y_true, y_pred)
bleu_val = bleu.result()
self.assertNotEqual(bleu_val, 0.0)
bleu.reset_state()
bleu_val = bleu.result()
self.assertEqual(bleu_val, 0.0)
def test_update_state(self):
bleu = Bleu()
y_true_1 = [
["He eats a sweet apple."],
["Silicon Valley is one of my favourite shows!"],
]
y_pred_1 = [
"He He He eats sweet apple which is a fruit.",
"I love Silicon Valley, it's one of my favourite shows.",
]
bleu.update_state(y_true_1, y_pred_1)
bleu_val = bleu.result()
self.assertAlmostEqual(bleu_val, 0.243, delta=1e-3)
y_true_2 = ["Virat Kohli is the GOAT."]
y_pred_2 = "Virat Kohli is the greatest of all time!"
bleu.update_state(y_true_2, y_pred_2)
bleu_val = bleu.result()
self.assertAlmostEqual(bleu_val, 0.26, delta=1e-3)
def test_get_config(self):
byte_tokenizer = ByteTokenizer()
bleu = Bleu(
tokenizer=byte_tokenizer,
max_order=8,
smooth=True,
dtype=tf.float64,
name="bleu_test",
)
config = bleu.get_config()
expected_config_subset = {
"tokenizer": byte_tokenizer,
"max_order": 8,
"smooth": True,
}
self.assertEqual(config, {**config, **expected_config_subset})
| keras-nlp/keras_nlp/metrics/bleu_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/metrics/bleu_test.py",
"repo_id": "keras-nlp",
"token_count": 3034
} | 115 |
# Copyright 2022 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.models.bart.bart_backbone import BartBackbone
from keras_nlp.models.bart.bart_presets import backbone_presets
from keras_nlp.models.bart.bart_seq_2_seq_lm_preprocessor import (
BartSeq2SeqLMPreprocessor,
)
from keras_nlp.models.generative_task import GenerativeTask
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.BartSeq2SeqLM")
class BartSeq2SeqLM(GenerativeTask):
"""An end-to-end BART model for seq2seq language modeling.
A seq2seq language model (LM) is an encoder-decoder model which is used for
    conditional text generation. The model is given a "context" text (fed to
the encoder), and the decoder predicts the next token based on both the
encoder inputs and the previous tokens. You can finetune `BartSeq2SeqLM` to
generate text for any seq2seq task (e.g., translation or summarization).
This model has a `generate()` method, which generates text based on
encoder inputs and an optional prompt for the decoder. The generation
strategy used is controlled by an additional `sampler` argument passed to
`compile()`. You can recompile the model with different `keras_nlp.samplers`
objects to control the generation. By default, `"top_k"` sampling will be
used.
This model can optionally be configured with a `preprocessor` layer, in
which case it will automatically apply preprocessing to string inputs during
`fit()`, `predict()`, `evaluate()` and `generate()`. This is done by default
when creating the model with `from_preset()`.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind. The underlying model is provided by a
third party and subject to a separate license, available
[here](https://github.com/facebookresearch/fairseq/).
Args:
backbone: A `keras_nlp.models.BartBackbone` instance.
preprocessor: A `keras_nlp.models.BartSeq2SeqLMPreprocessor` or `None`.
If `None`, this model will not apply preprocessing, and inputs
should be preprocessed before calling the model.
Examples:
Use `generate()` to do text generation, given an input context.
```python
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset("bart_base_en")
bart_lm.generate("The quick brown fox", max_length=30)
# Generate with batched inputs.
bart_lm.generate(["The quick brown fox", "The whale"], max_length=30)
```
Compile the `generate()` function with a custom sampler.
```python
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset("bart_base_en")
bart_lm.compile(sampler="greedy")
bart_lm.generate("The quick brown fox", max_length=30)
```
Use `generate()` with encoder inputs and an incomplete decoder input (prompt).
```python
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset("bart_base_en")
bart_lm.generate(
{
"encoder_text": "The quick brown fox",
"decoder_text": "The fast"
}
)
```
Use `generate()` without preprocessing.
```python
# Preprocessed inputs, with encoder inputs corresponding to
# "The quick brown fox", and the decoder inputs to "The fast". Use
# `"padding_mask"` to indicate values that should not be overridden.
    prompt = {
        "encoder_token_ids": np.array([[0, 133, 2119, 6219, 23602, 2, 1, 1]]),
        "encoder_padding_mask": np.array(
            [[True, True, True, True, True, True, False, False]]
        ),
        "decoder_token_ids": np.array([[2, 0, 133, 1769, 2, 1, 1]]),
        "decoder_padding_mask": np.array([[True, True, True, True, False, False, False]])
}
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
"bart_base_en",
preprocessor=None,
)
bart_lm.generate(prompt)
```
Call `fit()` on a single batch.
```python
features = {
"encoder_text": ["The quick brown fox jumped.", "I forgot my homework."],
"decoder_text": ["The fast hazel fox leapt.", "I forgot my assignment."]
}
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset("bart_base_en")
bart_lm.fit(x=features, batch_size=2)
```
Call `fit()` without preprocessing.
```python
x = {
"encoder_token_ids": np.array([[0, 133, 2119, 2, 1]] * 2),
"encoder_padding_mask": np.array([[1, 1, 1, 1, 0]] * 2),
"decoder_token_ids": np.array([[2, 0, 133, 1769, 2]] * 2),
"decoder_padding_mask": np.array([[1, 1, 1, 1, 1]] * 2),
}
y = np.array([[0, 133, 1769, 2, 1]] * 2)
sw = np.array([[1, 1, 1, 1, 0]] * 2)
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
"bart_base_en",
preprocessor=None,
)
bart_lm.fit(x=x, y=y, sample_weight=sw, batch_size=2)
```
Custom backbone and vocabulary.
```python
features = {
"encoder_text": [" afternoon sun"],
"decoder_text": ["noon sun"],
}
vocab = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"Ġafter": 5,
"noon": 6,
"Ġsun": 7,
}
merges = ["Ġ a", "Ġ s", "Ġ n", "e r", "n o", "o n", "Ġs u", "Ġa f", "no on"]
merges += ["Ġsu n", "Ġaf t", "Ġaft er"]
tokenizer = keras_nlp.models.BartTokenizer(
vocabulary=vocab,
merges=merges,
)
preprocessor = keras_nlp.models.BartSeq2SeqLMPreprocessor(
tokenizer=tokenizer,
encoder_sequence_length=128,
decoder_sequence_length=128,
)
backbone = keras_nlp.models.BartBackbone(
vocabulary_size=50265,
num_layers=6,
num_heads=12,
hidden_dim=768,
intermediate_dim=3072,
max_sequence_length=128,
)
bart_lm = keras_nlp.models.BartSeq2SeqLM(
backbone=backbone,
preprocessor=preprocessor,
)
bart_lm.fit(x=features, batch_size=2)
```
"""
def __init__(
self,
backbone,
preprocessor=None,
**kwargs,
):
# === Layers ===
self.backbone = backbone
self.preprocessor = preprocessor
# === Functional Model ===
inputs = backbone.input
hidden_states = backbone(inputs)["decoder_sequence_output"]
outputs = backbone.token_embedding(hidden_states, reverse=True)
super().__init__(
inputs=inputs,
outputs=outputs,
**kwargs,
)
# === Default compilation ===
self.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(2e-5),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
jit_compile=True,
)
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
@classproperty
def backbone_cls(cls):
return BartBackbone
@classproperty
def preprocessor_cls(cls):
return BartSeq2SeqLMPreprocessor
def call_decoder_with_cache(
self,
encoder_hidden_states,
encoder_padding_mask,
decoder_token_ids,
self_attention_cache=None,
self_attention_cache_update_index=None,
cross_attention_cache=None,
cross_attention_cache_update_index=None,
    ):
        """Forward pass with key/value caches for generative decoding.
`call_decoder_with_cache` adds an additional inference-time forward pass
for the model for seq2seq text generation. Unlike calling the model
directly, this method does two things to optimize text generation:
- Allows caching previous key/value tensors in the decoder's
self-attention layer to avoid recomputing the outputs of seen tokens.
- Allows caching key/value tensors in the decoder's cross-attention
layer to avoid recomputing the encoder outputs.
Args:
encoder_hidden_states: a dense float Tensor of shape
`(batch_size, encoder_sequence_length, hidden_dim)`. The
sequence of hidden states at the output of the encoder's last
layer.
encoder_padding_mask: a dense float Tensor of shape
`(batch_size, encoder_sequence_length)`. The padding mask for
the encoder input.
decoder_token_ids: a dense int Tensor of shape
`(batch_size, max_length)`. Input token ids to be fed to
the decoder.
self_attention_cache: a dense float Tensor of shape
`(batch_size, num_layers, 2, max_length, num_heads, key_dims)`.
The cached key/value tensors of previously seen tokens in the
decoder's self-attention layer.
self_attention_cache_update_index: an int or int Tensor, the index
at which to update the `self_attention_cache`. Usually, this is
the index of the current token being processed during decoding.
cross_attention_cache: a dense float Tensor of shape
`(batch_size, num_layers, 2, encoder_sequence_length, num_heads, key_dims)`.
The cached key/value tensors of the encoder outputs in the
decoder's cross-attention layer.
cross_attention_cache_update_index: an int or int Tensor, the index
at which to update the `cross_attention_cache`. Usually, this is
either `0` (compute the entire `cross_attention_cache`), or
`None` (reuse a previously computed `cross_attention_cache`).
Returns:
A `(logits, hidden_states, self_attention_cache, cross_attention_cache)`
tuple, where `logits` is the language model logits for the input
`decoder_token_ids`, `hidden_states` is the final hidden
representation of the input tokens, `self_attention_cache` is the
key/value cache in the decoder's self-attention layer and
`cross_attention_cache` is the key/value cache in the decoder's
cross-attention layer.
"""
# Embedding layers.
tokens = self.backbone.token_embedding(decoder_token_ids)
positions = self.backbone.decoder_position_embedding(
tokens,
start_index=self_attention_cache_update_index,
)
# Sum, normalize and apply dropout to embeddings.
x = self.backbone.decoder_embeddings_add((tokens, positions))
x = self.backbone.decoder_embeddings_layer_norm(x)
x = self.backbone.decoder_embeddings_dropout(x)
# Every decoder layer has a separate cache for the self-attention layer
# and the cross-attention layer. We update all of them separately.
self_attention_caches = []
cross_attention_caches = []
for i, layer in enumerate(self.backbone.decoder_transformer_layers):
current_self_attention_cache = self_attention_cache[:, i, ...]
current_cross_attention_cache = cross_attention_cache[:, i, ...]
(
x,
next_self_attention_cache,
next_cross_attention_cache,
) = layer(
decoder_sequence=x,
encoder_sequence=encoder_hidden_states,
encoder_padding_mask=encoder_padding_mask,
self_attention_cache=current_self_attention_cache,
self_attention_cache_update_index=self_attention_cache_update_index,
cross_attention_cache=current_cross_attention_cache,
cross_attention_cache_update_index=cross_attention_cache_update_index,
)
if self_attention_cache_update_index is not None:
self_attention_caches.append(next_self_attention_cache)
if cross_attention_cache_update_index is not None:
cross_attention_caches.append(next_cross_attention_cache)
if self_attention_cache_update_index is not None:
self_attention_cache = ops.stack(self_attention_caches, axis=1)
if cross_attention_cache_update_index is not None:
cross_attention_cache = ops.stack(cross_attention_caches, axis=1)
hidden_states = x
logits = self.backbone.token_embedding(hidden_states, reverse=True)
return (
logits,
hidden_states,
self_attention_cache,
cross_attention_cache,
)
def call_encoder(self, token_ids, padding_mask):
"""Does a forward pass on the encoder and returns the encoder output."""
tokens = self.backbone.token_embedding(token_ids)
positions = self.backbone.encoder_position_embedding(tokens)
x = self.backbone.decoder_embeddings_add((tokens, positions))
x = self.backbone.encoder_embeddings_layer_norm(x)
x = self.backbone.encoder_embeddings_dropout(x)
for transformer_layer in self.backbone.encoder_transformer_layers:
x = transformer_layer(x, padding_mask=padding_mask)
return x
def _initialize_cache(self, encoder_token_ids, decoder_token_ids):
"""Initializes empty self-attention cache and cross-attention cache."""
batch_size = ops.shape(encoder_token_ids)[0]
encoder_max_length = ops.shape(encoder_token_ids)[1]
decoder_max_length = ops.shape(decoder_token_ids)[1]
num_layers = self.backbone.num_layers
num_heads = self.backbone.num_heads
head_dim = self.backbone.hidden_dim // self.backbone.num_heads
shape = [
batch_size,
num_layers,
2,
decoder_max_length,
num_heads,
head_dim,
]
self_attention_cache = ops.zeros(shape, dtype=self.compute_dtype)
shape[3] = encoder_max_length
cross_attention_cache = ops.zeros(shape, dtype=self.compute_dtype)
return (self_attention_cache, cross_attention_cache)
def _build_cache(
self, encoder_token_ids, encoder_padding_mask, decoder_token_ids
):
"""Builds the self-attention cache and the cross-attention cache (key/value pairs)."""
encoder_hidden_states = self.call_encoder(
token_ids=encoder_token_ids, padding_mask=encoder_padding_mask
)
self_attention_cache, cross_attention_cache = self._initialize_cache(
encoder_token_ids, decoder_token_ids
)
# Seed the self-attention cache and the cross-attention cache.
(
_,
hidden_states,
self_attention_cache,
cross_attention_cache,
) = self.call_decoder_with_cache(
encoder_hidden_states=encoder_hidden_states,
encoder_padding_mask=encoder_padding_mask,
decoder_token_ids=decoder_token_ids,
self_attention_cache=self_attention_cache,
self_attention_cache_update_index=0,
cross_attention_cache=cross_attention_cache,
cross_attention_cache_update_index=0,
)
return (
hidden_states,
encoder_hidden_states,
self_attention_cache,
cross_attention_cache,
)
def generate_step(
self,
inputs,
end_token_id=None,
):
"""A compilable generation function for a batch of inputs.
This function represents the inner, XLA-compilable, generation function
for a single batch of inputs. Inputs should have the same structure as
model inputs, a dictionary with keys `"encoder_token_ids"`,
`"encoder_padding_mask"`, `"decoder_token_ids"` and
`"decoder_padding_mask"`.
Args:
inputs: A dictionary with four keys - `"encoder_token_ids"`,
`"encoder_padding_mask"`, `"decoder_token_ids"` and
`"decoder_padding_mask"`, with batched tensor values.
end_token_id: The id of the end token to stop on. If all
sequences have produced a new `end_token_id`, generation
will stop.
"""
(
encoder_token_ids,
encoder_padding_mask,
decoder_token_ids,
decoder_padding_mask,
) = (
inputs["encoder_token_ids"],
inputs["encoder_padding_mask"],
inputs["decoder_token_ids"],
inputs["decoder_padding_mask"],
)
batch_size = ops.shape(encoder_token_ids)[0]
# Create and seed cache with a single forward pass.
(
hidden_states,
encoder_hidden_states,
self_attention_cache,
cross_attention_cache,
) = self._build_cache(
encoder_token_ids, encoder_padding_mask, decoder_token_ids
)
# Compute the lengths of all user inputted tokens ids.
row_lengths = ops.sum(ops.cast(decoder_padding_mask, "int32"), axis=-1)
# Start at the first index that has no user inputted id.
index = ops.min(row_lengths)
def next(prompt, cache, index):
# The cache index is the index of our previous token.
cache_index = index - 1
num_samples = ops.shape(prompt)[0]
prompt = ops.slice(prompt, [0, cache_index], [num_samples, 1])
def repeat_tensor(x):
"""Repeats tensors along batch axis to match dim for beam search."""
if ops.shape(x)[0] == num_samples:
return x
return ops.repeat(x, repeats=num_samples // batch_size, axis=0)
logits, hidden_states, cache, _ = self.call_decoder_with_cache(
encoder_hidden_states=repeat_tensor(encoder_hidden_states),
encoder_padding_mask=repeat_tensor(encoder_padding_mask),
decoder_token_ids=prompt,
self_attention_cache=cache,
self_attention_cache_update_index=cache_index,
cross_attention_cache=repeat_tensor(cross_attention_cache),
cross_attention_cache_update_index=None,
)
return (
ops.squeeze(logits, axis=1),
ops.squeeze(hidden_states, axis=1),
cache,
)
decoder_token_ids = self._sampler(
next=next,
prompt=decoder_token_ids,
cache=self_attention_cache,
index=index,
mask=decoder_padding_mask,
end_token_id=end_token_id,
hidden_states=hidden_states,
model=self,
)
# Compute an output padding mask with the token ids we updated.
if end_token_id is not None:
# Build a mask of `end_token_id` locations not in the original
# prompt (not in locations where `decoder_padding_mask` is True).
end_locations = ops.logical_and(
ops.equal(decoder_token_ids, end_token_id),
ops.logical_not(decoder_padding_mask),
)
end_locations = ops.cast(end_locations, "int32")
# Use cumsum to get ones in all locations after `end_locations`.
cumsum = ops.cast(ops.cumsum(end_locations, axis=-1), "int32")
overflow = cumsum - end_locations
# Our padding mask is the inverse of these overflow locations.
decoder_padding_mask = ops.logical_not(ops.cast(overflow, "bool"))
else:
# Without early stopping, all locations will have been updated.
decoder_padding_mask = ops.ones_like(
decoder_token_ids, dtype="bool"
)
return {
"decoder_token_ids": decoder_token_ids,
"decoder_padding_mask": decoder_padding_mask,
}
| keras-nlp/keras_nlp/models/bart/bart_seq_2_seq_lm.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bart/bart_seq_2_seq_lm.py",
"repo_id": "keras-nlp",
"token_count": 9105
} | 116 |
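The end-token masking at the bottom of `generate_step` above is easiest to see on a concrete example. The following standalone sketch (not part of the source file; it uses NumPy and made-up token ids) reproduces the cumsum trick that keeps everything up to and including the first generated end token:

```python
import numpy as np

# Hypothetical batch of one sequence: positions 0-1 are the user prompt,
# the rest were generated. Assume the end token id is 2.
end_token_id = 2
decoder_token_ids = np.array([[5, 6, 8, 2, 7, 2]])
decoder_padding_mask = np.array([[True, True, False, False, False, False]])

# `end_token_id` hits that were generated (i.e. not part of the prompt).
end_locations = np.logical_and(
    decoder_token_ids == end_token_id,
    np.logical_not(decoder_padding_mask),
).astype("int32")
# Ones at every position strictly after the first end token.
overflow = np.cumsum(end_locations, axis=-1) - end_locations
# The new padding mask keeps everything up to and including that end token.
new_decoder_padding_mask = np.logical_not(overflow.astype(bool))
print(new_decoder_padding_mask)  # [[ True  True  True  True False False]]
```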
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
class RelativeEmbedding(keras.layers.Layer):
"""Relative embedding layer.
This is an implementation of relative embedding as described in the
paper ["DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing"](https://arxiv.org/abs/2111.09543).
    This layer initializes an embedding matrix (of shape
    `(2 * bucket_size, hidden_dim)`) for relative position encoding. It then
    applies layer normalization to the embedding matrix and returns the
    result, repeated along the batch dimension of the input.
Args:
hidden_dim: int. The size of the dense embedding.
bucket_size: int. The size of the relative position buckets.
layer_norm_epsilon: float. Epsilon value to initialize the layer
normalization layer.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense embedding.
Defaults to `"glorot_uniform"`.
"""
def __init__(
self,
hidden_dim,
bucket_size,
layer_norm_epsilon=1e-05,
kernel_initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.bucket_size = bucket_size
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.rel_embeddings = self.add_weight(
shape=(self.bucket_size * 2, self.hidden_dim),
initializer=self.kernel_initializer,
name="rel_embedding",
)
self.layer_norm = keras.layers.LayerNormalization(
epsilon=layer_norm_epsilon,
dtype=self.dtype_policy,
name="rel_embeddings_layer_norm",
)
def call(self, inputs):
batch_size = ops.shape(inputs)[0]
rel_embeddings = ops.expand_dims(
ops.convert_to_tensor(self.rel_embeddings), axis=0
)
rel_embeddings = self.layer_norm(rel_embeddings)
# Repeat `rel_embeddings` along axis = 0 `batch_size` times. The
# resultant shape is `(batch_size, bucket_size * 2, hidden_dim)`.
rel_embeddings = ops.repeat(rel_embeddings, repeats=batch_size, axis=0)
return rel_embeddings
def get_config(self):
config = super().get_config()
config.update(
{
"hidden_dim": self.hidden_dim,
"bucket_size": self.bucket_size,
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
}
)
return config
def compute_output_shape(self, input_shape):
return (input_shape[0],) + (self.bucket_size * 2, self.hidden_dim)
| keras-nlp/keras_nlp/models/deberta_v3/relative_embedding.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/deberta_v3/relative_embedding.py",
"repo_id": "keras-nlp",
"token_count": 1450
} | 117 |
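As a quick illustration (not part of the source file), the layer can be exercised on dummy data. This sketch assumes `keras_nlp` is installed and imports the layer from its internal module path, which is not a stable public API:

```python
import numpy as np

from keras_nlp.models.deberta_v3.relative_embedding import RelativeEmbedding

layer = RelativeEmbedding(hidden_dim=8, bucket_size=4)
# Only the batch dimension of the input is read; the values are ignored.
dummy_inputs = np.zeros((2, 16, 8), dtype="float32")
rel_embeddings = layer(dummy_inputs)
# Shape is (batch_size, 2 * bucket_size, hidden_dim).
print(rel_embeddings.shape)  # (2, 8, 8)
```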
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.models.backbone import Backbone
from keras_nlp.utils.keras_utils import gelu_approximate
def electra_kernel_initializer(stddev=0.02):
return keras.initializers.TruncatedNormal(stddev=stddev)
@keras_nlp_export("keras_nlp.models.ElectraBackbone")
class ElectraBackbone(Backbone):
"""A Electra encoder network.
This network implements a bidirectional Transformer-based encoder as
described in ["Electra: Pre-training Text Encoders as Discriminators Rather
Than Generators"](https://arxiv.org/abs/2003.10555). It includes the
embedding lookups and transformer layers, but not the masked language model
or classification task networks.
The default constructor gives a fully customizable, randomly initialized
Electra encoder with any number of layers, heads, and embedding
dimensions.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind. The underlying model is provided by a
third party and subject to a separate license, available
[here](https://huggingface.co/docs/transformers/model_doc/electra#overview).
Args:
        vocab_size: int. The size of the token vocabulary.
num_layers: int. The number of transformer layers.
num_heads: int. The number of attention heads for each transformer.
The hidden size must be divisible by the number of attention heads.
hidden_dim: int. The size of the transformer encoding and pooler layers.
embedding_dim: int. The size of the token embeddings.
intermediate_dim: int. The output dimension of the first Dense layer in
a two-layer feedforward network for each transformer.
dropout: float. Dropout probability for the Transformer encoder.
        max_sequence_length: int. The maximum sequence length that this
            encoder can consume. If `None`, `max_sequence_length` defaults to
            the input sequence length. This determines the variable shape for
            positional embeddings.
dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
for model computations and weights. Note that some computations,
such as softmax and layer normalization, will always be done at
float32 precision regardless of dtype.
Examples:
```python
input_data = {
"token_ids": np.ones(shape=(1, 12), dtype="int32"),
"segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
"padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
}
# Randomly initialized Electra encoder
backbone = keras_nlp.models.ElectraBackbone(
        vocab_size=1000,
        num_layers=2,
        num_heads=2,
        hidden_dim=32,
        embedding_dim=32,
        intermediate_dim=64,
dropout=0.1,
max_sequence_length=512,
)
# Returns sequence and pooled outputs.
sequence_output, pooled_output = backbone(input_data)
```
"""
def __init__(
self,
vocab_size,
num_layers,
num_heads,
hidden_dim,
embedding_dim,
intermediate_dim,
dropout=0.1,
max_sequence_length=512,
num_segments=2,
dtype=None,
**kwargs,
):
# === Layers ===
self.token_embedding = ReversibleEmbedding(
input_dim=vocab_size,
output_dim=embedding_dim,
embeddings_initializer=electra_kernel_initializer(),
dtype=dtype,
name="token_embedding",
)
self.position_embedding = PositionEmbedding(
initializer=electra_kernel_initializer(),
sequence_length=max_sequence_length,
dtype=dtype,
name="position_embedding",
)
self.segment_embedding = keras.layers.Embedding(
input_dim=num_segments,
output_dim=embedding_dim,
embeddings_initializer=electra_kernel_initializer(),
dtype=dtype,
name="segment_embedding",
)
self.embeddings_add = keras.layers.Add(
dtype=dtype,
name="embeddings_add",
)
self.embeddings_layer_norm = keras.layers.LayerNormalization(
axis=-1,
epsilon=1e-12,
dtype=dtype,
name="embeddings_layer_norm",
)
self.embeddings_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="embeddings_dropout",
)
if hidden_dim != embedding_dim:
self.embeddings_projection = keras.layers.Dense(
hidden_dim,
kernel_initializer=electra_kernel_initializer(),
dtype=dtype,
name="embeddings_projection",
)
self.transformer_layers = []
for i in range(num_layers):
layer = TransformerEncoder(
num_heads=num_heads,
intermediate_dim=intermediate_dim,
activation=gelu_approximate,
dropout=dropout,
layer_norm_epsilon=1e-12,
kernel_initializer=electra_kernel_initializer(),
dtype=dtype,
name=f"transformer_layer_{i}",
)
self.transformer_layers.append(layer)
self.pooled_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=electra_kernel_initializer(),
activation="tanh",
dtype=dtype,
name="pooled_dense",
)
# === Functional Model ===
token_id_input = keras.Input(
shape=(None,), dtype="int32", name="token_ids"
)
segment_id_input = keras.Input(
shape=(None,), dtype="int32", name="segment_ids"
)
padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="padding_mask"
)
# Embed tokens, positions, and segment ids.
tokens = self.token_embedding(token_id_input)
positions = self.position_embedding(tokens)
segments = self.segment_embedding(segment_id_input)
# Add all embeddings together.
x = self.embeddings_add((tokens, positions, segments))
x = self.embeddings_layer_norm(x)
x = self.embeddings_dropout(x)
if hidden_dim != embedding_dim:
x = self.embeddings_projection(x)
# Apply successive transformer encoder blocks.
for transformer_layer in self.transformer_layers:
x = transformer_layer(x, padding_mask=padding_mask_input)
        # Index of the classification token in the sequence.
cls_token_index = 0
sequence_output = x
# Construct the two ELECTRA outputs. The pooled output is a dense layer on
# top of the [CLS] token.
pooled_output = self.pooled_dense(x[:, cls_token_index, :])
super().__init__(
inputs={
"token_ids": token_id_input,
"segment_ids": segment_id_input,
"padding_mask": padding_mask_input,
},
outputs={
"sequence_output": sequence_output,
"pooled_output": pooled_output,
},
**kwargs,
)
# === Config ===
self.vocab_size = vocab_size
self.num_layers = num_layers
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.max_sequence_length = max_sequence_length
self.num_segments = num_segments
self.cls_token_index = cls_token_index
def get_config(self):
config = super().get_config()
config.update(
{
"vocab_size": self.vocab_size,
"num_layers": self.num_layers,
"num_heads": self.num_heads,
"hidden_dim": self.hidden_dim,
"embedding_dim": self.embedding_dim,
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"max_sequence_length": self.max_sequence_length,
"num_segments": self.num_segments,
}
)
return config
| keras-nlp/keras_nlp/models/electra/electra_backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/electra/electra_backbone.py",
"repo_id": "keras-nlp",
"token_count": 4181
} | 118 |
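A small follow-on sketch (not part of the source file) showing how the backbone's pooled output can feed a downstream head. The two-class dense layer is an arbitrary example head:

```python
import numpy as np

from keras_nlp.backend import keras
from keras_nlp.models import ElectraBackbone

backbone = ElectraBackbone(
    vocab_size=1000,
    num_layers=2,
    num_heads=2,
    hidden_dim=32,
    embedding_dim=32,
    intermediate_dim=64,
)
outputs = backbone(
    {
        "token_ids": np.ones((1, 12), dtype="int32"),
        "segment_ids": np.zeros((1, 12), dtype="int32"),
        "padding_mask": np.ones((1, 12), dtype="int32"),
    }
)
# The pooled output is `(batch_size, hidden_dim)`; attach any head on top.
logits = keras.layers.Dense(2)(outputs["pooled_output"])
print(logits.shape)  # (1, 2)
```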
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.gemma.gemma_tokenizer import GemmaTokenizer
from keras_nlp.tests.test_case import TestCase
@pytest.mark.keras_3_only
class GemmaTokenizerTest(TestCase):
def setUp(self):
self.init_kwargs = {
# Generated using create_gemma_test_proto.py
"proto": os.path.join(
self.get_test_data_dir(), "gemma_test_vocab.spm"
)
}
self.input_data = ["the quick brown fox", "the earth is round"]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=GemmaTokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=[[4, 9, 5, 7], [4, 6, 8, 10]],
)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
GemmaTokenizer(
# Generated using create_no_special_token_proto.py
proto=os.path.join(
self.get_test_data_dir(), "no_special_token_vocab.spm"
)
)
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=GemmaTokenizer,
preset="gemma_2b_en",
input_data=["The quick brown fox."],
expected_output=[[651, 4320, 8426, 25341, 235265]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in GemmaTokenizer.presets:
self.run_preset_test(
cls=GemmaTokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/gemma/gemma_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1017
} | 119 |
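For context (not part of the test file), a typical round trip with the tokenizer outside of tests looks like the sketch below. It assumes Keras 3 and that the `gemma_2b_en` preset can be downloaded (e.g. with Kaggle credentials configured):

```python
from keras_nlp.models import GemmaTokenizer

tokenizer = GemmaTokenizer.from_preset("gemma_2b_en")
token_ids = tokenizer("The quick brown fox.")
print(token_ids)                        # e.g. [651, 4320, 8426, 25341, 235265]
print(tokenizer.detokenize(token_ids))  # "The quick brown fox."
```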
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding
from keras_nlp.utils.keras_utils import clone_initializer
class GPTNeoXAttention(keras.layers.Layer):
"""GPTNeoXAttention layer.
    This is an implementation of the attention layer described in the paper
    ["GPT-NeoX-20B: An Open-Source Autoregressive Language Model"](https://arxiv.org/abs/2204.06745).
Effectively, this layer implements Multi-Head Self Attention with a rotary
embedding for encoding position information.
Args:
num_heads: int. Number of attention heads.
hidden_dim: int. Hidden dimension of the input, i.e., `hidden_states`.
dropout: float. Dropout probability.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense layers.
bias_initializer: string or `keras.initializers` initializer.
The bias initializer for the dense layers.
        rotary_percentage: float. The fraction of each attention head's
            dimensions to which rotary embeddings are applied.
rotary_max_wavelength: int. The maximum angular wavelength of the
sine/cosine curves, for rotary embeddings.
max_sequence_length: int. The maximum input sequence length.
"""
def __init__(
self,
num_heads,
hidden_dim,
dropout=0.0,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
rotary_percentage=0.25,
rotary_max_wavelength=10000,
max_sequence_length=512,
**kwargs,
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.rotary_percentage = rotary_percentage
self.dropout = dropout
self.attn_head_size = hidden_dim // num_heads
self.rotary_max_wavelength = rotary_max_wavelength
self.rotary_dim = int(self.attn_head_size * rotary_percentage)
self.rotary_embedding_layer = RotaryEmbedding(
max_wavelength=rotary_max_wavelength,
dtype=self.dtype_policy,
)
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.max_sequence_length = max_sequence_length
def build(self, input_shape):
self._qkv_dense = keras.layers.EinsumDense(
equation="abc,cde->abde",
output_shape=(None, self.num_heads, 3 * self.attn_head_size),
bias_axes="de",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="query_key_value",
)
self._qkv_dense.build(input_shape)
self._attn_dropout_layer = keras.layers.Dropout(
self.dropout,
dtype=self.dtype_policy,
name="attention_dropout",
)
self._softmax = keras.layers.Softmax(
axis=-1,
dtype="float32",
name="attention_softmax",
)
# Output.
self._output_dense = keras.layers.EinsumDense(
equation="abc,cd->abd",
output_shape=(None, self.hidden_dim),
bias_axes="d",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="attention_output",
)
self._output_dense.build(input_shape)
self.built = True
def _get_common_kwargs_for_sublayer(self, use_bias=True):
common_kwargs = {}
kernel_initializer = clone_initializer(self.kernel_initializer)
bias_initializer = clone_initializer(self.bias_initializer)
common_kwargs["kernel_initializer"] = kernel_initializer
if use_bias:
common_kwargs["bias_initializer"] = bias_initializer
return common_kwargs
def _masked_softmax(self, attention_scores, attention_mask=None):
if attention_mask is not None:
mask_expansion_axis = -3
for _ in range(
len(attention_scores.shape) - len(attention_mask.shape)
):
attention_mask = ops.expand_dims(
attention_mask, axis=mask_expansion_axis
)
return self._softmax(attention_scores, attention_mask)
def _compute_attention(
self, query, key, value, attention_mask=None, training=None
):
attention_scores = ops.einsum("aecd,abcd->acbe", key, query)
norm_factor = ops.sqrt(
ops.convert_to_tensor(self.attn_head_size, self.compute_dtype)
)
attention_scores /= norm_factor
attention_scores = self._masked_softmax(
attention_scores, attention_mask
)
attention_scores = self._attn_dropout_layer(
attention_scores, training=training
)
attention_output = ops.einsum(
"acbe,aecd->abcd", attention_scores, value
)
return attention_output
def call(
self,
hidden_states,
attention_mask=None,
cache=None,
cache_update_index=None,
training=None,
):
query_key_value = self._qkv_dense(hidden_states)
query = query_key_value[..., : self.attn_head_size]
if cache is not None:
key_cache = cache[:, 0, ...]
value_cache = cache[:, 1, ...]
if cache_update_index is None:
key = key_cache
value = value_cache
else:
key_update = query_key_value[
..., self.attn_head_size : 2 * self.attn_head_size
]
value_update = query_key_value[..., 2 * self.attn_head_size :]
start = [0, cache_update_index, 0, 0]
key = ops.slice_update(key_cache, start, key_update)
value = ops.slice_update(value_cache, start, value_update)
cache = ops.stack((key, value), axis=1)
else:
if cache_update_index is not None:
raise ValueError(
"`cache_update_index` should not be set if `cache` is "
f"`None`. Received: cache={cache}, "
f"cache_update_index={cache_update_index}"
)
key = query_key_value[
..., self.attn_head_size : 2 * self.attn_head_size
]
value = query_key_value[..., 2 * self.attn_head_size :]
query_rot, query_pass = (
query[..., : self.rotary_dim],
query[..., self.rotary_dim :],
)
key_rot, key_pass = (
key[..., : self.rotary_dim],
key[..., self.rotary_dim :],
)
query_rot = self.rotary_embedding_layer(query_rot)
key_rot = self.rotary_embedding_layer(key_rot)
query = ops.concatenate((query_rot, query_pass), axis=-1)
key = ops.concatenate((key_rot, key_pass), axis=-1)
attention_output = self._compute_attention(
query=query,
key=key,
value=value,
attention_mask=attention_mask,
training=training,
)
# Reshape `attention_output` to `(batch_size, sequence_length, hidden_dim)`.
attention_output = ops.reshape(
attention_output,
[
ops.shape(attention_output)[0],
ops.shape(attention_output)[1],
self.hidden_dim,
],
)
attention_output = self._output_dense(attention_output)
return attention_output, cache
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"hidden_dim": self.hidden_dim,
"dropout": self.dropout,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
"rotary_percentage": self.rotary_percentage,
"rotary_max_wavelength": self.rotary_max_wavelength,
"max_sequence_length": self.max_sequence_length,
}
)
return config
| keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_attention.py",
"repo_id": "keras-nlp",
"token_count": 4325
} | 120 |
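As an illustration (not part of the source file), the attention layer can be called on random features. The sketch below imports from the internal module path and uses toy sizes chosen so that the rotary slice has an even width:

```python
import numpy as np

from keras_nlp.models.gpt_neo_x.gpt_neo_x_attention import GPTNeoXAttention

layer = GPTNeoXAttention(num_heads=4, hidden_dim=32, rotary_percentage=0.25)
hidden_states = np.random.uniform(size=(2, 10, 32)).astype("float32")
# Without a cache, the second return value is simply None.
attention_output, cache = layer(hidden_states)
print(attention_output.shape)  # (2, 10, 32)
# Only the first `rotary_dim` features of each head get rotary embeddings.
print(layer.rotary_dim)  # int((32 // 4) * 0.25) == 2
```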
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.transformer_layer_utils import (
compute_causal_mask,
)
from keras_nlp.layers.modeling.transformer_layer_utils import (
merge_padding_and_attention_mask,
)
from keras_nlp.models.llama.llama_attention import LlamaAttention
from keras_nlp.models.llama.llama_layernorm import LlamaLayerNorm
from keras_nlp.utils.keras_utils import clone_initializer
class LlamaDecoder(keras.layers.Layer):
"""Llama decoder block."""
def __init__(
self,
intermediate_dim,
num_query_heads,
num_key_value_heads,
rope_scaling_factor=1.0,
activation="relu",
layer_norm_epsilon=1e-5,
kernel_initializer="glorot_uniform",
rope_max_wavelength=10000,
max_sequence_length=512,
**kwargs,
):
super().__init__(**kwargs)
self.intermediate_dim = intermediate_dim
self.num_query_heads = num_query_heads
self.num_key_value_heads = num_key_value_heads
self.rope_max_wavelength = rope_max_wavelength
self.rope_scaling_factor = rope_scaling_factor
self.max_sequence_length = max_sequence_length
self.activation = keras.activations.get(activation)
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
def build(self, decoder_sequence_shape):
self.hidden_dim = decoder_sequence_shape[-1]
# Self attention layers.
self._self_attention_layer = LlamaAttention(
num_query_heads=self.num_query_heads,
num_key_value_heads=self.num_key_value_heads,
rope_max_wavelength=self.rope_max_wavelength,
max_sequence_length=self.max_sequence_length,
rope_scaling_factor=self.rope_scaling_factor,
kernel_initializer=clone_initializer(self.kernel_initializer),
dtype=self.dtype_policy,
)
self._self_attention_layer.build(decoder_sequence_shape)
self._self_attention_layernorm = LlamaLayerNorm(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
)
self._self_attention_layernorm.build(decoder_sequence_shape)
# Feedforward layers.
self._feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
dtype=self.dtype_policy,
)
self._feedforward_intermediate_dense.build(decoder_sequence_shape)
self._feedforward_gate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
dtype=self.dtype_policy,
)
self._feedforward_gate_dense.build(decoder_sequence_shape)
self._feedforward_output_dense = keras.layers.Dense(
self.hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
dtype=self.dtype_policy,
)
intermediate_shape = list(decoder_sequence_shape)
intermediate_shape[-1] = self.intermediate_dim
self._feedforward_output_dense.build(tuple(intermediate_shape))
self._feedforward_layernorm = LlamaLayerNorm(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
)
self._feedforward_layernorm.build(decoder_sequence_shape)
self.built = True
def call(
self,
decoder_sequence,
decoder_padding_mask=None,
decoder_attention_mask=None,
self_attention_cache=None,
self_attention_cache_update_index=None,
):
self_attention_mask = self._compute_self_attention_mask(
decoder_sequence=decoder_sequence,
decoder_padding_mask=decoder_padding_mask,
decoder_attention_mask=decoder_attention_mask,
self_attention_cache=self_attention_cache,
self_attention_cache_update_index=self_attention_cache_update_index,
)
residual = decoder_sequence
x = self._self_attention_layernorm(
decoder_sequence,
)
x = self._self_attention_layer(
hidden_states=x,
attention_mask=self_attention_mask,
cache=self_attention_cache,
cache_update_index=self_attention_cache_update_index,
)
if self_attention_cache is not None:
x, self_attention_cache = x
x = x + residual
residual = x
x = self._feedforward_layernorm(x)
gate_output = self._feedforward_gate_dense(x)
x = self._feedforward_intermediate_dense(x)
x = self._feedforward_output_dense(ops.multiply(x, gate_output))
decoder_output = x + residual
if self_attention_cache is not None:
return (decoder_output, self_attention_cache)
return decoder_output
def _compute_self_attention_mask(
self,
decoder_sequence,
decoder_padding_mask,
decoder_attention_mask,
self_attention_cache=None,
self_attention_cache_update_index=None,
):
decoder_mask = merge_padding_and_attention_mask(
decoder_sequence, decoder_padding_mask, decoder_attention_mask
)
batch_size = ops.shape(decoder_sequence)[0]
input_length = output_length = ops.shape(decoder_sequence)[1]
# We need to handle a rectangular causal mask when doing cached
# decoding. For generative inference, `decoder_sequence` will
# generally be length 1, and `cache` will be the full generation length.
if self_attention_cache is not None:
input_length = ops.shape(self_attention_cache)[2]
causal_mask = compute_causal_mask(
batch_size,
input_length,
output_length,
(
0
if self_attention_cache_update_index is None
else self_attention_cache_update_index
),
)
return (
ops.minimum(decoder_mask, causal_mask)
if decoder_mask is not None
else causal_mask
)
def compute_output_shape(self, decoder_sequence_shape):
return decoder_sequence_shape
def get_config(self):
config = super().get_config()
config.update(
{
"intermediate_dim": self.intermediate_dim,
"hidden_dim": self.hidden_dim,
"num_query_heads": self.num_query_heads,
"rope_max_wavelength": self.rope_max_wavelength,
"rope_scaling_factor": self.rope_scaling_factor,
"num_key_value_heads": self.num_key_value_heads,
"max_sequence_length": self.max_sequence_length,
"activation": keras.activations.serialize(self.activation),
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
}
)
return config
| keras-nlp/keras_nlp/models/llama/llama_decoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/llama/llama_decoder.py",
"repo_id": "keras-nlp",
"token_count": 3540
} | 121 |
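A minimal sketch (not part of the source file) of running the decoder block on random features. It imports from the internal module path, uses toy sizes, and picks `silu` as the gate activation:

```python
import numpy as np

from keras_nlp.models.llama.llama_decoder import LlamaDecoder

decoder = LlamaDecoder(
    intermediate_dim=32,
    num_query_heads=4,
    num_key_value_heads=2,
    activation="silu",
)
decoder_sequence = np.random.uniform(size=(2, 6, 16)).astype("float32")
outputs = decoder(decoder_sequence)
# Residual connections keep the hidden size unchanged.
print(outputs.shape)  # (2, 6, 16)
```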
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.backend import config as backend_config
from keras_nlp.backend import ops
from keras_nlp.models.t5.t5_backbone import T5Backbone
from keras_nlp.tests.test_case import TestCase
class T5BackboneTest(TestCase):
def setUp(self):
self.init_kwargs = {
"vocabulary_size": 10,
"num_layers": 2,
"num_heads": 2,
"hidden_dim": 2,
"intermediate_dim": 4,
}
self.input_data = {
"encoder_token_ids": ops.ones((2, 3), dtype="int32"),
"encoder_padding_mask": ops.zeros((2, 3), dtype="int32"),
"decoder_token_ids": ops.ones((2, 3), dtype="int32"),
"decoder_padding_mask": ops.zeros((2, 3), dtype="int32"),
}
def test_backbone_basics(self):
self.run_backbone_test(
cls=T5Backbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output_shape={
"encoder_sequence_output": (2, 3, 2),
"decoder_sequence_output": (2, 3, 2),
},
)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=T5Backbone,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.large
@pytest.mark.skipif(
not backend_config.keras_3(),
reason="TODO: Fails in Keras2",
)
def test_smallest_preset(self):
self.run_preset_test(
cls=T5Backbone,
preset="t5_small_multi",
input_data=self.input_data,
expected_output_shape={
"encoder_sequence_output": (2, 3, 512),
"decoder_sequence_output": (2, 3, 512),
},
expected_partial_output={
"encoder_sequence_output": ops.array(
[-0.0034, 0.0293, -0.0827, -0.1076]
),
"decoder_sequence_output": ops.array(
[0.0097, 0.3576, -0.1508, 0.0150]
),
},
)
@pytest.mark.extra_large
@pytest.mark.skipif(
not backend_config.keras_3(),
reason="TODO: Fails in Keras2",
)
def test_all_presets(self):
for preset in T5Backbone.presets:
self.run_preset_test(
cls=T5Backbone,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/t5/t5_backbone_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/t5/t5_backbone_test.py",
"repo_id": "keras-nlp",
"token_count": 1530
} | 122 |
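Outside of the test harness (and not part of the test file), the same tiny configuration can be run directly; a sketch:

```python
import numpy as np

from keras_nlp.models import T5Backbone

backbone = T5Backbone(
    vocabulary_size=10,
    num_layers=2,
    num_heads=2,
    hidden_dim=2,
    intermediate_dim=4,
)
outputs = backbone(
    {
        "encoder_token_ids": np.ones((2, 3), dtype="int32"),
        "encoder_padding_mask": np.ones((2, 3), dtype="int32"),
        "decoder_token_ids": np.ones((2, 3), dtype="int32"),
        "decoder_padding_mask": np.ones((2, 3), dtype="int32"),
    }
)
print(outputs["encoder_sequence_output"].shape)  # (2, 3, 2)
print(outputs["decoder_sequence_output"].shape)  # (2, 3, 2)
```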
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Whisper encoder block."""
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.models.whisper.whisper_cached_multi_head_attention import (
WhisperCachedMultiHeadAttention,
)
from keras_nlp.utils.keras_utils import clone_initializer
@keras.saving.register_keras_serializable(package="keras_nlp")
class WhisperEncoder(TransformerEncoder):
"""Whisper encoder.
    Inherits from `keras_nlp.layers.TransformerEncoder`, and overrides the
    `build` method to use the
    `keras_nlp.models.whisper.whisper_cached_multi_head_attention.WhisperCachedMultiHeadAttention`
    layer instead of `keras.layers.MultiHeadAttention`.
"""
def build(self, inputs_shape):
# Infer the dimension of our hidden feature size from the build shape.
hidden_dim = inputs_shape[-1]
# Attention head size is `hidden_dim` over the number of heads.
key_dim = int(hidden_dim // self.num_heads)
if key_dim == 0:
raise ValueError(
"Attention `key_dim` computed cannot be zero. "
f"The `hidden_dim` value of {hidden_dim} has to be equal to "
f"or greater than `num_heads` value of {self.num_heads}."
)
# Self attention layers.
self._self_attention_layer = WhisperCachedMultiHeadAttention(
num_heads=self.num_heads,
key_dim=key_dim,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="self_attention_layer",
)
self._self_attention_layer.build(
query_shape=inputs_shape,
value_shape=inputs_shape,
)
self._self_attention_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="self_attention_layer_norm",
)
self._self_attention_layer_norm.build(inputs_shape)
self._self_attention_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="self_attention_dropout",
)
# Feedforward layers.
self._feedforward_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="feedforward_layer_norm",
)
self._feedforward_layer_norm.build(inputs_shape)
self._feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_intermediate_dense",
)
self._feedforward_intermediate_dense.build(inputs_shape)
self._feedforward_output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_output_dense",
)
intermediate_shape = list(inputs_shape)
intermediate_shape[-1] = self.intermediate_dim
self._feedforward_output_dense.build(tuple(intermediate_shape))
self._feedforward_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="feedforward_dropout",
)
self.built = True
| keras-nlp/keras_nlp/models/whisper/whisper_encoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_encoder.py",
"repo_id": "keras-nlp",
"token_count": 1855
} | 123 |
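A short sketch (not part of the source file) of the head-size arithmetic the overridden `build` enforces; the numbers are illustrative:

```python
# Each attention head operates on `key_dim = hidden_dim // num_heads` features.
hidden_dim = 384
num_heads = 6
key_dim = hidden_dim // num_heads
print(key_dim)  # 64
# If `hidden_dim` were smaller than `num_heads` (say 4 with 6 heads),
# `key_dim` would be 0 and the ValueError above would be raised.
```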
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor import (
XLMRobertaPreprocessor,
)
from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import (
XLMRobertaTokenizer,
)
from keras_nlp.tests.test_case import TestCase
class XLMRobertaPreprocessorTest(TestCase):
def setUp(self):
self.tokenizer = XLMRobertaTokenizer(
# Generated using create_xlm_roberta_test_proto.py
proto=os.path.join(
self.get_test_data_dir(), "xlm_roberta_test_vocab.spm"
)
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"sequence_length": 8,
}
self.input_data = (
["the quick brown fox"],
[1], # Pass through labels.
[1.0], # Pass through sample_weights.
)
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=XLMRobertaPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"token_ids": [[0, 6, 11, 7, 9, 2, 1, 1]],
"padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]],
},
[1], # Pass through labels.
[1.0], # Pass through sample_weights.
),
)
def test_errors_for_2d_list_input(self):
preprocessor = XLMRobertaPreprocessor(**self.init_kwargs)
ambiguous_input = [["one", "two"], ["three", "four"]]
with self.assertRaises(ValueError):
preprocessor(ambiguous_input)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in XLMRobertaPreprocessor.presets:
self.run_preset_test(
cls=XLMRobertaPreprocessor,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1156
} | 124 |
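For readers wondering why the 2D list in `test_errors_for_2d_list_input` is rejected: a nested Python list could mean either a batch of single segments or a pair of segments, so the preprocessor refuses to guess. A hedged sketch (not part of the test file) of the unambiguous alternatives:

```python
# A batch of single segments: one string per example.
single_segment_batch = ["first sentence", "second sentence"]

# A pair of segments: a tuple whose elements are aligned batches.
segment_pairs = (
    ["first premise", "second premise"],
    ["first hypothesis", "second hypothesis"],
)

# preprocessor(single_segment_batch)  # packs one segment per example
# preprocessor(segment_pairs)         # packs premise/hypothesis pairs
```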
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from keras_nlp.tests.test_case import TestCase
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
class SentencePieceTokenizerTest(TestCase):
def setUp(self):
super().setUp()
self.proto = os.path.join(
self.get_test_data_dir(), "tokenizer_test_vocab.spm"
)
def test_tokenize(self):
input_data = ["the quick brown fox."]
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
call_output = tokenizer(input_data)
tokenize_output = tokenizer.tokenize(input_data)
self.assertAllEqual(call_output, [[6, 5, 3, 4]])
self.assertAllEqual(tokenize_output, [[6, 5, 3, 4]])
def test_scalar_tokenize(self):
input_data = "the quick brown fox."
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
call_output = tokenizer(input_data)
tokenize_output = tokenizer.tokenize(input_data)
self.assertAllEqual(call_output, [6, 5, 3, 4])
self.assertAllEqual(tokenize_output, [6, 5, 3, 4])
def test_dense_output(self):
input_data = ["the quick brown fox."]
tokenizer = SentencePieceTokenizer(
proto=self.proto,
sequence_length=10,
)
output_data = tokenizer(input_data)
self.assertAllEqual(output_data, [[6, 5, 3, 4, 0, 0, 0, 0, 0, 0]])
def test_string_tokenize(self):
input_data = ["the quick brown fox."]
tokenizer = SentencePieceTokenizer(
proto=self.proto,
dtype="string",
)
output_data = tokenizer(input_data)
self.assertAllEqual(
output_data,
[["▁the", "▁quick", "▁brown", "▁fox."]],
)
def test_detokenize(self):
tokenizer = SentencePieceTokenizer(proto=self.proto)
outputs = tokenizer.detokenize([6, 5, 3, 4])
self.assertAllEqual(outputs, "the quick brown fox.")
outputs = tokenizer.detokenize([[6, 5, 3, 4], [6, 4]])
self.assertAllEqual(outputs, ["the quick brown fox.", "the fox."])
def test_accessors(self):
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
self.assertEqual(
tokenizer.get_vocabulary(),
["<unk>", "<s>", "</s>", "▁brown", "▁fox.", "▁quick", "▁the"],
)
self.assertEqual(type(tokenizer.get_vocabulary()), list)
self.assertEqual(tokenizer.vocabulary_size(), 7)
self.assertEqual(type(tokenizer.vocabulary_size()), int)
self.assertEqual(tokenizer.id_to_token(0), "<unk>")
self.assertEqual(tokenizer.id_to_token(5), "▁quick")
self.assertEqual(type(tokenizer.id_to_token(0)), str)
self.assertEqual(tokenizer.token_to_id("<unk>"), 0)
self.assertEqual(tokenizer.token_to_id("▁quick"), 5)
self.assertEqual(type(tokenizer.token_to_id("<unk>")), int)
def test_error_id_out_of_vocabulary(self):
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
with self.assertRaises(ValueError):
tokenizer.id_to_token(tokenizer.vocabulary_size())
with self.assertRaises(ValueError):
tokenizer.id_to_token(-1)
def test_from_bytes(self):
with tf.io.gfile.GFile(self.proto, "rb") as file:
proto = file.read()
tokenizer = SentencePieceTokenizer(
proto=proto,
)
output_data = tokenizer(["the quick brown fox."])
self.assertAllEqual(output_data, [[6, 5, 3, 4]])
def test_tokenize_then_batch(self):
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
ds = tf.data.Dataset.from_tensor_slices(
["the quick brown fox.", "the quick", "the", "quick brown fox."]
)
ds = ds.map(tokenizer).apply(
tf.data.experimental.dense_to_ragged_batch(4)
)
output_data = ds.take(1).get_single_element()
expected = [
[6, 5, 3, 4],
[6, 5],
[6],
[5, 3, 4],
]
for i in range(4):
self.assertAllEqual(output_data[i], expected[i])
def test_batch_then_tokenize(self):
tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
ds = tf.data.Dataset.from_tensor_slices(
["the quick brown fox.", "the quick", "the", "quick brown fox."]
)
ds = ds.batch(4).map(tokenizer)
output_data = ds.take(1).get_single_element()
expected = [
[6, 5, 3, 4],
[6, 5],
[6],
[5, 3, 4],
]
for i in range(4):
self.assertAllEqual(output_data[i], expected[i])
def test_config(self):
input_data = ["the quick brown whale."]
original_tokenizer = SentencePieceTokenizer(
proto=self.proto,
)
cloned_tokenizer = SentencePieceTokenizer.from_config(
original_tokenizer.get_config()
)
cloned_tokenizer.set_proto(original_tokenizer.proto)
self.assertAllEqual(
original_tokenizer(input_data),
cloned_tokenizer(input_data),
)
| keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 2765
} | 125 |
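For completeness (not part of the test file), a throwaway proto like `tokenizer_test_vocab.spm` can be trained in memory with the `sentencepiece` package. A hedged sketch; the exact trainer flags (model type, special-token ids) may need adjusting:

```python
import io

import sentencepiece as spm

from keras_nlp.tokenizers import SentencePieceTokenizer

bytes_io = io.BytesIO()
spm.SentencePieceTrainer.train(
    sentence_iterator=iter(["the quick brown fox."]),
    model_writer=bytes_io,
    vocab_size=7,
    model_type="WORD",
)
tokenizer = SentencePieceTokenizer(proto=bytes_io.getvalue())
print(tokenizer.get_vocabulary())
```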
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
from keras_nlp.backend import config as backend_config
from keras_nlp.backend import keras
try:
import kagglehub
except ImportError:
kagglehub = None
KAGGLE_PREFIX = "kaggle://"
GS_PREFIX = "gs://"
TOKENIZER_ASSET_DIR = "assets/tokenizer"
def get_file(preset, path):
"""Download a preset file in necessary and return the local path."""
if not isinstance(preset, str):
raise ValueError(
f"A preset identifier must be a string. Received: preset={preset}"
)
if preset.startswith(KAGGLE_PREFIX):
if kagglehub is None:
raise ImportError(
"`from_preset()` requires the `kagglehub` package. "
"Please install with `pip install kagglehub`."
)
kaggle_handle = preset.removeprefix(KAGGLE_PREFIX)
num_segments = len(kaggle_handle.split("/"))
if num_segments not in (4, 5):
raise ValueError(
"Unexpected Kaggle preset. Kaggle model handles should have "
"the form kaggle://{org}/{model}/keras/{variant}[/{version}]. "
"For example, 'kaggle://username/bert/keras/bert_base_en' or "
"'kaggle://username/bert/keras/bert_base_en/1' (to specify a "
f"version). Received: preset={preset}"
)
return kagglehub.model_download(kaggle_handle, path)
elif preset.startswith(GS_PREFIX):
url = os.path.join(preset, path)
url = url.replace(GS_PREFIX, "https://storage.googleapis.com/")
subdir = preset.replace(GS_PREFIX, "gs_")
subdir = subdir.replace("/", "_").replace("-", "_")
filename = os.path.basename(path)
subdir = os.path.join(subdir, os.path.dirname(path))
return keras.utils.get_file(
filename,
url,
cache_subdir=os.path.join("models", subdir),
)
elif os.path.exists(preset):
# Assume a local filepath.
return os.path.join(preset, path)
else:
raise ValueError(
"Unknown preset identifier. A preset must be a one of:\n"
"1) a built in preset identifier like `'bert_base_en'`\n"
"2) a Kaggle Models handle like `'kaggle://keras/bert/keras/bert_base_en'`\n"
"3) a path to a local preset directory like `'./bert_base_en`\n"
"Use `print(cls.presets.keys())` to view all built-in presets for "
"API symbol `cls`.\n"
f"Received: preset='{preset}'"
)
def get_tokenizer(layer):
"""Get the tokenizer from any KerasNLP model or layer."""
# Avoid circular import.
from keras_nlp.tokenizers.tokenizer import Tokenizer
if isinstance(layer, Tokenizer):
return layer
if hasattr(layer, "tokenizer"):
return layer.tokenizer
if hasattr(layer, "preprocessor"):
return getattr(layer.preprocessor, "tokenizer", None)
return None
def recursive_pop(config, key):
"""Remove a key from a nested config object"""
config.pop(key, None)
for value in config.values():
if isinstance(value, dict):
recursive_pop(value, key)
def save_to_preset(
layer,
preset,
save_weights=True,
config_filename="config.json",
weights_filename="model.weights.h5",
):
"""Save a KerasNLP layer to a preset directory."""
os.makedirs(preset, exist_ok=True)
# Save tokenizers assets.
tokenizer = get_tokenizer(layer)
assets = []
if tokenizer:
asset_dir = os.path.join(preset, TOKENIZER_ASSET_DIR)
os.makedirs(asset_dir, exist_ok=True)
tokenizer.save_assets(asset_dir)
for asset_path in os.listdir(asset_dir):
assets.append(os.path.join(TOKENIZER_ASSET_DIR, asset_path))
# Optionally save weights.
save_weights = save_weights and hasattr(layer, "save_weights")
if save_weights:
weights_path = os.path.join(preset, weights_filename)
layer.save_weights(weights_path)
# Save a serialized Keras object.
config_path = os.path.join(preset, config_filename)
config = keras.saving.serialize_keras_object(layer)
# Include references to weights and assets.
config["assets"] = assets
config["weights"] = weights_filename if save_weights else None
recursive_pop(config, "compile_config")
recursive_pop(config, "build_config")
with open(config_path, "w") as config_file:
config_file.write(json.dumps(config, indent=4))
from keras_nlp import __version__ as keras_nlp_version
keras_version = keras.version() if hasattr(keras, "version") else None
# Save any associated metadata.
if config_filename == "config.json":
metadata = {
"keras_version": keras_version,
"keras_nlp_version": keras_nlp_version,
"parameter_count": layer.count_params(),
"date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
}
metadata_path = os.path.join(preset, "metadata.json")
with open(metadata_path, "w") as metadata_file:
metadata_file.write(json.dumps(metadata, indent=4))
def load_from_preset(
preset,
load_weights=True,
config_file="config.json",
config_overrides={},
):
"""Load a KerasNLP layer to a preset directory."""
# Load a serialized Keras object.
config_path = get_file(preset, config_file)
with open(config_path) as config_file:
config = json.load(config_file)
config["config"] = {**config["config"], **config_overrides}
layer = keras.saving.deserialize_keras_object(config)
# Load any assets for our tokenizers.
tokenizer = get_tokenizer(layer)
if tokenizer and config["assets"]:
for asset in config["assets"]:
get_file(preset, asset)
config_dir = os.path.dirname(config_path)
asset_dir = os.path.join(config_dir, TOKENIZER_ASSET_DIR)
tokenizer.load_assets(asset_dir)
# Optionally load weights.
load_weights = load_weights and config["weights"]
if load_weights:
# For jax, delete all previous allocated memory to avoid temporarily
# duplicating variable allocations. torch and tensorflow have stateful
# variable types and do not need this fix.
if backend_config.backend() == "jax":
for weight in layer.weights:
if getattr(weight, "_value", None) is not None:
weight._value.delete()
weights_path = get_file(preset, config["weights"])
layer.load_weights(weights_path)
return layer
def check_preset_class(
preset,
classes,
config_file="config.json",
):
"""Validate a preset is being loaded on the correct class."""
config_path = get_file(preset, config_file)
with open(config_path) as config_file:
config = json.load(config_file)
cls = keras.saving.get_registered_object(config["registered_name"])
if not isinstance(classes, (tuple, list)):
classes = (classes,)
# Allow subclasses for testing a base class, e.g.
# `check_preset_class(preset, Backbone)`
if not any(issubclass(cls, x) for x in classes):
raise ValueError(
f"Unexpected class in preset `'{preset}'`. "
"When calling `from_preset()` on a class object, the preset class "
f"much match allowed classes. Allowed classes are `{classes}`. "
f"Received: `{cls}`."
)
return cls
| keras-nlp/keras_nlp/utils/preset_utils.py/0 | {
"file_path": "keras-nlp/keras_nlp/utils/preset_utils.py",
"repo_id": "keras-nlp",
"token_count": 3348
} | 126 |
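To tie the helpers together (not part of the source file), a local save/load round trip looks roughly like the sketch below. The directory name is arbitrary and a small randomly initialized backbone is used so that no downloads are required:

```python
from keras_nlp.models import BertBackbone
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.preset_utils import save_to_preset

backbone = BertBackbone(
    vocabulary_size=100,
    num_layers=2,
    num_heads=2,
    hidden_dim=32,
    intermediate_dim=64,
)
save_to_preset(backbone, "./tiny_bert_preset")
# Reload the serialized config and weights from the local directory.
restored = load_from_preset("./tiny_bert_preset")
print(type(restored).__name__)  # BertBackbone
```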
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import numpy as np
import tensorflow as tf
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
import keras_nlp
PRESET_MAP = {
"bart_base_en": "facebook/bart-base",
"bart_large_en": "facebook/bart-large",
"bart_large_en_cnn": "facebook/bart-large-cnn",
}
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def convert_checkpoints(hf_model):
print("\n-> Convert original weights to KerasNLP format.")
print("\n-> Load KerasNLP model.")
keras_nlp_model = keras_nlp.models.BartBackbone.from_preset(
FLAGS.preset, load_weights=False
)
hf_wts = hf_model.state_dict()
print("Original weights:")
print(list(hf_wts.keys()))
hidden_dim = keras_nlp_model.hidden_dim
num_heads = keras_nlp_model.num_heads
# Token embedding weights shared by encoder and decoder.
keras_nlp_model.get_layer("token_embedding").embeddings.assign(
hf_wts["shared.weight"]
)
# Encoder weights.
keras_nlp_model.get_layer(
"encoder_position_embedding"
).position_embeddings.assign(hf_wts["encoder.embed_positions.weight"][2:])
keras_nlp_model.get_layer("encoder_embeddings_layer_norm").gamma.assign(
hf_wts["encoder.layer_norm_embedding.weight"]
)
keras_nlp_model.get_layer("encoder_embeddings_layer_norm").beta.assign(
hf_wts["encoder.layer_norm_embedding.bias"]
)
for i in range(keras_nlp_model.num_layers):
# Self-attention.
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.self_attn.q_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.self_attn.q_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.self_attn.k_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.self_attn.k_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.self_attn.v_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.self_attn.v_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.self_attn.out_proj.weight"]
.transpose(1, 0)
.reshape((num_heads, -1, hidden_dim))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.self_attn.out_proj.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
hf_wts[f"encoder.layers.{i}.self_attn_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._self_attention_layer_norm.beta.assign(
hf_wts[f"encoder.layers.{i}.self_attn_layer_norm.bias"].numpy()
)
# Post self-attention layers.
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.fc1.weight"].transpose(1, 0).numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.fc1.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_output_dense.kernel.assign(
hf_wts[f"encoder.layers.{i}.fc2.weight"].transpose(1, 0).numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_output_dense.bias.assign(
hf_wts[f"encoder.layers.{i}.fc2.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
hf_wts[f"encoder.layers.{i}.final_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_encoder_layer_{i}"
)._feedforward_layer_norm.beta.assign(
hf_wts[f"encoder.layers.{i}.final_layer_norm.bias"].numpy()
)
# Decoder weights.
keras_nlp_model.get_layer(
"decoder_position_embedding"
).position_embeddings.assign(hf_wts["decoder.embed_positions.weight"][2:])
keras_nlp_model.get_layer("decoder_embeddings_layer_norm").gamma.assign(
hf_wts["decoder.layer_norm_embedding.weight"]
)
keras_nlp_model.get_layer("decoder_embeddings_layer_norm").beta.assign(
hf_wts["decoder.layer_norm_embedding.bias"]
)
for i in range(keras_nlp_model.num_layers):
# Self-attention.
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.self_attn.q_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.self_attn.q_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.self_attn.k_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.self_attn.k_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.self_attn.v_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.self_attn.v_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.self_attn.out_proj.weight"]
.transpose(1, 0)
.reshape((num_heads, -1, hidden_dim))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.self_attn.out_proj.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
hf_wts[f"decoder.layers.{i}.self_attn_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._self_attention_layer_norm.beta.assign(
hf_wts[f"decoder.layers.{i}.self_attn_layer_norm.bias"].numpy()
)
# Cross-attention.
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._query_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.q_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._query_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.q_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._key_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.k_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._key_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.k_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._value_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.v_proj.weight"]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._value_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.v_proj.bias"]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._output_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.out_proj.weight"]
.transpose(1, 0)
.reshape((num_heads, -1, hidden_dim))
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer._output_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn.out_proj.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer_norm.gamma.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._cross_attention_layer_norm.beta.assign(
hf_wts[f"decoder.layers.{i}.encoder_attn_layer_norm.bias"].numpy()
)
# Post self-attention and cross-attention layers.
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.fc1.weight"].transpose(1, 0).numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.fc1.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_output_dense.kernel.assign(
hf_wts[f"decoder.layers.{i}.fc2.weight"].transpose(1, 0).numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_output_dense.bias.assign(
hf_wts[f"decoder.layers.{i}.fc2.bias"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
hf_wts[f"decoder.layers.{i}.final_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer(
f"transformer_decoder_layer_{i}"
)._feedforward_layer_norm.beta.assign(
hf_wts[f"decoder.layers.{i}.final_layer_norm.bias"].numpy()
)
# Save the model.
print("\n-> Save KerasNLP model weights.")
keras_nlp_model.save_weights(os.path.join(FLAGS.preset, "model.h5"))
return keras_nlp_model
def extract_vocab(hf_tokenizer):
vocabulary_path = os.path.join(FLAGS.preset, "vocab.json")
merges_path = os.path.join(FLAGS.preset, "merges.txt")
print(f"\n-> Save KerasNLP vocab to `{vocabulary_path}`.")
print(f"-> Save KerasNLP merges to `{merges_path}`.")
# Huggingface has a save_vocabulary function but it's not byte-for-byte
# with the source. Instead copy the original downloaded file directly.
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "vocab.json"
),
vocabulary_path,
)
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "merges.txt"
),
merges_path,
)
keras_nlp_tokenizer = keras_nlp.models.BartTokenizer(
vocabulary=vocabulary_path, merges=merges_path
)
print("-> Print MD5 checksum of the vocab files.")
print(f"`{vocabulary_path}` md5sum: ", get_md5_checksum(vocabulary_path))
print(f"`{merges_path}` md5sum: ", get_md5_checksum(merges_path))
return keras_nlp_tokenizer
def check_output(
keras_nlp_tokenizer,
keras_nlp_model,
hf_tokenizer,
hf_model,
):
print("\n-> Check the outputs.")
enc_sample_text = [
"cricket is awesome, easily the best sport in the world!"
]
dec_sample_text = [
"football is good too, but nowhere near as good as cricket."
]
# KerasNLP
keras_nlp_enc_token_ids = keras_nlp_tokenizer(
tf.constant(enc_sample_text)
).to_tensor()
keras_nlp_enc_token_ids = tf.concat(
[
tf.constant([[keras_nlp_tokenizer.start_token_id]]),
keras_nlp_enc_token_ids,
tf.constant([[keras_nlp_tokenizer.end_token_id]]),
],
axis=-1,
)
keras_nlp_dec_token_ids = keras_nlp_tokenizer(
tf.constant(dec_sample_text)
).to_tensor()
keras_nlp_dec_token_ids = tf.concat(
[
tf.constant([[keras_nlp_tokenizer.start_token_id]]),
keras_nlp_dec_token_ids,
tf.constant([[keras_nlp_tokenizer.end_token_id]]),
],
axis=-1,
)
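    # Build the KerasNLP input dict; the padding masks mark every non-pad
    # token position as True.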
keras_nlp_inputs = {
"encoder_token_ids": keras_nlp_enc_token_ids,
"encoder_padding_mask": keras_nlp_enc_token_ids
!= keras_nlp_tokenizer.pad_token_id,
"decoder_token_ids": keras_nlp_dec_token_ids,
"decoder_padding_mask": keras_nlp_dec_token_ids
!= keras_nlp_tokenizer.pad_token_id,
}
keras_nlp_output = keras_nlp_model.predict(keras_nlp_inputs)
# HF
hf_enc_inputs = hf_tokenizer(enc_sample_text, return_tensors="pt")
hf_dec_inputs = hf_tokenizer(dec_sample_text, return_tensors="pt")
hf_output = hf_model(
**hf_enc_inputs,
decoder_input_ids=hf_dec_inputs["input_ids"],
decoder_attention_mask=hf_dec_inputs["attention_mask"],
)
print("Encoder Outputs:")
print(
"KerasNLP output:",
keras_nlp_output["encoder_sequence_output"][0, 0, :10],
)
print("HF output:", hf_output.encoder_last_hidden_state[0, 0, :10])
print(
"Difference:",
np.mean(
keras_nlp_output["encoder_sequence_output"]
- hf_output.encoder_last_hidden_state.detach().numpy()
),
)
print("Decoder Outputs:")
print(
"KerasNLP output:",
keras_nlp_output["decoder_sequence_output"][0, 0, :10],
)
print("HF output:", hf_output.last_hidden_state[0, 0, :10])
print(
"Difference:",
np.mean(
keras_nlp_output["decoder_sequence_output"]
- hf_output.last_hidden_state.detach().numpy()
),
)
# Show the MD5 checksum of the model weights.
print(
"Model md5sum: ",
get_md5_checksum(os.path.join(FLAGS.preset, "model.h5")),
)
def main(_):
os.makedirs(FLAGS.preset)
hf_model_name = PRESET_MAP[FLAGS.preset]
print("\n-> Load HF model and HF tokenizer.")
hf_model = transformers.AutoModel.from_pretrained(hf_model_name)
hf_model.eval()
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_model_name)
keras_nlp_model = convert_checkpoints(hf_model)
print("\n -> Load KerasNLP tokenizer.")
keras_nlp_tokenizer = extract_vocab(hf_tokenizer)
check_output(
keras_nlp_tokenizer,
keras_nlp_model,
hf_tokenizer,
hf_model,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_bart_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_bart_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 9609
} | 127 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script was used to convert our legacy presets into the directory format
used by Kaggle.
This script is for reference only.
"""
import os
import re
import shutil
os.environ["KERAS_HOME"] = os.getcwd()
from keras_nlp import models # noqa: E402
from keras_nlp.src.utils.preset_utils import save_to_preset # noqa: E402
BUCKET = "keras-nlp-kaggle"
def to_snake_case(name):
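    # e.g. "BertBackbone" -> "bert_backbone".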
name = re.sub(r"\W+", "", name)
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
return name
if __name__ == "__main__":
backbone_models = [
(models.AlbertBackbone, models.AlbertTokenizer),
(models.BartBackbone, models.BartTokenizer),
(models.BertBackbone, models.BertTokenizer),
(models.DebertaV3Backbone, models.DebertaV3Tokenizer),
(models.DistilBertBackbone, models.DistilBertTokenizer),
(models.FNetBackbone, models.FNetTokenizer),
(models.GPT2Backbone, models.GPT2Tokenizer),
(models.OPTBackbone, models.OPTTokenizer),
(models.RobertaBackbone, models.RobertaTokenizer),
(models.T5Backbone, models.T5Tokenizer),
(models.WhisperBackbone, models.WhisperTokenizer),
(models.XLMRobertaBackbone, models.XLMRobertaTokenizer),
]
for backbone_cls, tokenizer_cls in backbone_models:
for preset in backbone_cls.presets:
backbone = backbone_cls.from_preset(
preset, name=to_snake_case(backbone_cls.__name__)
)
tokenizer = tokenizer_cls.from_preset(
preset, name=to_snake_case(tokenizer_cls.__name__)
)
save_to_preset(
backbone,
preset,
config_filename="config.json",
)
save_to_preset(
tokenizer,
preset,
config_filename="tokenizer.json",
)
            # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
# Clean up local disk usage.
shutil.rmtree("models")
shutil.rmtree(preset)
# Handle our single task model.
preset = "bert_tiny_en_uncased_sst2"
task = models.BertClassifier.from_preset(
preset, name=to_snake_case(models.BertClassifier.__name__)
)
tokenizer = models.BertTokenizer.from_preset(
preset, name=to_snake_case(models.BertTokenizer.__name__)
)
save_to_preset(
task,
preset,
config_filename="config.json",
)
save_to_preset(
tokenizer,
preset,
config_filename="tokenizer.json",
)
    # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
# Clean up local disk usage.
shutil.rmtree("models")
shutil.rmtree(preset)
| keras-nlp/tools/convert_legacy_presets.py/0 | {
"file_path": "keras-nlp/tools/convert_legacy_presets.py",
"repo_id": "keras-nlp",
"token_count": 1963
} | 128 |
"""Utilities for real-time data augmentation on image data.
"""
import io
import os
import warnings
from pathlib import Path
import numpy as np
try:
from PIL import Image as pil_image
from PIL import ImageEnhance
except ImportError:
pil_image = None
ImageEnhance = None
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def validate_filename(filename, white_list_formats):
"""Check if a filename refers to a valid file.
# Arguments
filename: String, absolute path to a file
white_list_formats: Set, allowed file extensions
# Returns
A boolean value indicating if the filename is valid or not
"""
return (filename.lower().endswith(white_list_formats) and
os.path.isfile(filename))
def save_img(path,
x,
data_format='channels_last',
file_format=None,
scale=True,
**kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
img = array_to_img(x, data_format=data_format, scale=scale)
if img.mode == 'RGBA' and (file_format == 'jpg' or file_format == 'jpeg'):
warnings.warn('The JPG format does not support '
'RGBA images, converting to RGB.')
img = img.convert('RGB')
img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest', keep_aspect_ratio=False):
"""Loads an image into PIL format.
# Arguments
path: Path (string), pathlib.Path object, or io.BytesIO stream to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: The desired image format. One of "grayscale", "rgb", "rgba".
"grayscale" supports 8-bit images and 32-bit signed integer images.
Default: "rgb".
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported.
Default: "nearest".
keep_aspect_ratio: Boolean, whether to resize images to a target
size without aspect ratio distortion. The image is cropped in
the center with target aspect ratio before resizing.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
        TypeError: if `path` is not a path-like object or io.BytesIO.
"""
if grayscale is True:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
if isinstance(path, io.BytesIO):
img = pil_image.open(path)
elif isinstance(path, (Path, bytes, str)):
if isinstance(path, Path):
path = str(path.resolve())
with open(path, 'rb') as f:
img = pil_image.open(io.BytesIO(f.read()))
else:
raise TypeError('path should be path-like or io.BytesIO'
', not {}'.format(type(path)))
if color_mode == 'grayscale':
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ('L', 'I;16', 'I'):
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
if keep_aspect_ratio:
width, height = img.size
target_width, target_height = width_height_tuple
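                # Compute the largest central crop with the target aspect
                # ratio; it is passed to `resize()` via the `box` argument.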
crop_height = (width * target_height) // target_width
crop_width = (height * target_width) // target_height
# Set back to input height / width
# if crop_height / crop_width is not smaller.
crop_height = min(height, crop_height)
crop_width = min(width, crop_width)
crop_box_hstart = (height - crop_height) // 2
crop_box_wstart = (width - crop_width) // 2
crop_box_wend = crop_box_wstart + crop_width
crop_box_hend = crop_box_hstart + crop_height
crop_box = [
crop_box_wstart, crop_box_hstart, crop_box_wend,
crop_box_hend
]
img = img.resize(width_height_tuple, resample, box=crop_box)
else:
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext=('jpg', 'jpeg', 'bmp', 'png', 'ppm', 'tif',
'tiff')):
"""Lists all pictures in a directory, including all subdirectories.
# Arguments
directory: string, absolute path to the directory
ext: tuple of strings or single string, extensions of the pictures
# Returns
a list of paths
"""
ext = tuple('.%s' % e for e in ((ext,) if isinstance(ext, str) else ext))
return [os.path.join(root, f)
for root, _, files in os.walk(directory) for f in files
if f.lower().endswith(ext)]
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean, follow symbolic links to subdirectories.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(os.walk(subpath, followlinks=follow_links),
key=lambda x: x[0])
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
if fname.lower().endswith('.tiff'):
warnings.warn('Using ".tiff" files with multiple bands '
'will cause distortion. Please verify your output.')
if fname.lower().endswith(white_list_formats):
yield root, fname
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
class_indices, follow_links):
"""Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last 40
            percent of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean, follow symbolic links to subdirectories.
# Returns
classes: a list of class indices
        filenames: the paths of valid files in `directory`, relative to
            `directory`'s parent (e.g., if `directory` is "dataset/class1",
            the filenames will be
            `["class1/file1.jpg", "class1/file2.jpg", ...]`).
"""
dirname = os.path.basename(directory)
if split:
all_files = list(_iter_valid_files(directory, white_list_formats,
follow_links))
num_files = len(all_files)
start, stop = int(split[0] * num_files), int(split[1] * num_files)
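        # Keep only the fraction of files selected by `split`; the underlying
        # file list is sorted, so the slice is deterministic.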
valid_files = all_files[start: stop]
else:
valid_files = _iter_valid_files(
directory, white_list_formats, follow_links)
classes = []
filenames = []
for root, fname in valid_files:
classes.append(class_indices[dirname])
absolute_path = os.path.join(root, fname)
relative_path = os.path.join(
dirname, os.path.relpath(absolute_path, directory))
filenames.append(relative_path)
return classes, filenames
def array_to_img(x, data_format='channels_last', scale=True, dtype='float32'):
"""Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format, either "channels_first" or "channels_last".
Default: "channels_last".
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively.
Default: True.
dtype: Dtype to use.
Default: "float32".
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=dtype)
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape: %s' % (x.shape,))
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format: %s' % data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
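    # Rescale so that the minimum value maps to 0 and the maximum to 255.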
if scale:
x = x - np.min(x)
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 4:
# RGBA
return pil_image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
if np.max(x) > 255:
# 32-bit signed integer grayscale image. PIL mode "I"
return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: %s' % (x.shape[2],))
def img_to_array(img, data_format='channels_last', dtype='float32'):
"""Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
dtype: Dtype to use for the returned array.
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: %s' % data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=dtype)
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: %s' % (x.shape,))
return x
| keras-preprocessing/keras_preprocessing/image/utils.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/utils.py",
"repo_id": "keras-preprocessing",
"token_count": 6122
} | 129 |
# -*- coding: utf-8 -*-
from collections import OrderedDict
import numpy as np
import pytest
from tensorflow import keras
from keras_preprocessing import text
def test_one_hot():
sample_text = 'The cat sat on the mat.'
encoded = text.one_hot(sample_text, 5)
assert len(encoded) == 6
assert np.max(encoded) <= 4
assert np.min(encoded) >= 0
sample_text = 'The-cat-sat-on-the-mat'
encoded2 = text.one_hot(sample_text, 5, analyzer=lambda t: t.lower().split('-'))
assert encoded == encoded2
assert len(encoded) == 6
assert np.max(encoded) <= 4
assert np.min(encoded) >= 0
def test_hashing_trick_hash():
sample_text = 'The cat sat on the mat.'
encoded = text.hashing_trick(sample_text, 5)
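    # hashing_trick reserves index 0, so encoded indices fall in [1, n - 1].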
assert len(encoded) == 6
assert np.max(encoded) <= 4
assert np.min(encoded) >= 1
def test_hashing_trick_md5():
sample_text = 'The cat sat on the mat.'
encoded = text.hashing_trick(sample_text, 5, hash_function='md5')
assert len(encoded) == 6
assert np.max(encoded) <= 4
assert np.min(encoded) >= 1
def test_tokenizer():
sample_texts = ['The cat sat on the mat.',
'The dog sat on the log.',
'Dogs and cats living together.']
tokenizer = text.Tokenizer(num_words=10)
tokenizer.fit_on_texts(sample_texts)
sequences = []
for seq in tokenizer.texts_to_sequences_generator(sample_texts):
sequences.append(seq)
assert np.max(np.max(sequences)) < 10
assert np.min(np.min(sequences)) == 1
tokenizer.fit_on_sequences(sequences)
for mode in ['binary', 'count', 'tfidf', 'freq']:
tokenizer.texts_to_matrix(sample_texts, mode)
def test_tokenizer_serde_no_fitting():
tokenizer = text.Tokenizer(num_words=100)
tokenizer_json = tokenizer.to_json()
recovered = text.tokenizer_from_json(tokenizer_json)
assert tokenizer.get_config() == recovered.get_config()
assert tokenizer.word_docs == recovered.word_docs
assert tokenizer.word_counts == recovered.word_counts
assert tokenizer.word_index == recovered.word_index
assert tokenizer.index_word == recovered.index_word
assert tokenizer.index_docs == recovered.index_docs
def test_tokenizer_serde_fitting():
sample_texts = [
'There was a time that the pieces fit, but I watched them fall away',
'Mildewed and smoldering, strangled by our coveting',
'I\'ve done the math enough to know the dangers of our second guessing']
tokenizer = text.Tokenizer(num_words=100)
tokenizer.fit_on_texts(sample_texts)
seq_generator = tokenizer.texts_to_sequences_generator(sample_texts)
sequences = [seq for seq in seq_generator]
tokenizer.fit_on_sequences(sequences)
tokenizer_json = tokenizer.to_json()
recovered = text.tokenizer_from_json(tokenizer_json)
assert tokenizer.char_level == recovered.char_level
assert tokenizer.document_count == recovered.document_count
assert tokenizer.filters == recovered.filters
assert tokenizer.lower == recovered.lower
assert tokenizer.num_words == recovered.num_words
assert tokenizer.oov_token == recovered.oov_token
assert tokenizer.word_docs == recovered.word_docs
assert tokenizer.word_counts == recovered.word_counts
assert tokenizer.word_index == recovered.word_index
assert tokenizer.index_word == recovered.index_word
assert tokenizer.index_docs == recovered.index_docs
def test_sequential_fit():
texts = ['The cat sat on the mat.',
'The dog sat on the log.',
'Dogs and cats living together.']
word_sequences = [
['The', 'cat', 'is', 'sitting'],
['The', 'dog', 'is', 'standing']
]
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(texts)
tokenizer.fit_on_texts(word_sequences)
assert tokenizer.document_count == 5
tokenizer.texts_to_matrix(texts)
tokenizer.texts_to_matrix(word_sequences)
def test_text_to_word_sequence():
sample_text = 'hello! ? world!'
assert text.text_to_word_sequence(sample_text) == ['hello', 'world']
def test_text_to_word_sequence_multichar_split():
sample_text = 'hello!stop?world!'
assert text.text_to_word_sequence(
sample_text, split='stop') == ['hello', 'world']
def test_text_to_word_sequence_unicode():
sample_text = u'ali! veli? kırk dokuz elli'
assert text.text_to_word_sequence(
sample_text) == [u'ali', u'veli', u'kırk', u'dokuz', u'elli']
def test_text_to_word_sequence_unicode_multichar_split():
sample_text = u'ali!stopveli?stopkırkstopdokuzstopelli'
assert text.text_to_word_sequence(
sample_text, split='stop') == [u'ali', u'veli', u'kırk', u'dokuz', u'elli']
def test_tokenizer_unicode():
sample_texts = [u'ali veli kırk dokuz elli',
u'ali veli kırk dokuz elli veli kırk dokuz']
tokenizer = text.Tokenizer(num_words=5)
tokenizer.fit_on_texts(sample_texts)
assert len(tokenizer.word_counts) == 5
def test_tokenizer_oov_flag():
"""Test of Out of Vocabulary (OOV) flag in text.Tokenizer
"""
x_train = ['This text has only known words']
x_test = ['This text has some unknown words'] # 2 OOVs: some, unknown
# Default, without OOV flag
tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
assert len(x_test_seq[0]) == 4 # discards 2 OOVs
# With OOV feature
tokenizer = text.Tokenizer(oov_token='<unk>')
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
assert len(x_test_seq[0]) == 6 # OOVs marked in place
def test_tokenizer_oov_flag_and_num_words():
x_train = ['This text has only known words this text']
x_test = ['This text has some unknown words']
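    # With num_words=3 and an OOV token, only index 1 (the OOV token) and
    # index 2 ("this", the most frequent training word) are kept, so every
    # other test word is mapped to "<unk>".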
tokenizer = keras.preprocessing.text.Tokenizer(num_words=3,
oov_token='<unk>')
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
trans_text = ' '.join(tokenizer.index_word[t] for t in x_test_seq[0])
assert len(x_test_seq[0]) == 6
assert trans_text == 'this <unk> <unk> <unk> <unk> <unk>'
def test_sequences_to_texts_with_num_words_and_oov_token():
x_train = ['This text has only known words this text']
x_test = ['This text has some unknown words']
tokenizer = keras.preprocessing.text.Tokenizer(num_words=3,
oov_token='<unk>')
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
trans_text = tokenizer.sequences_to_texts(x_test_seq)
assert trans_text == ['this <unk> <unk> <unk> <unk> <unk>']
def test_sequences_to_texts_no_num_words():
x_train = ['This text has only known words this text']
x_test = ['This text has some unknown words']
tokenizer = keras.preprocessing.text.Tokenizer(oov_token='<unk>')
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
trans_text = tokenizer.sequences_to_texts(x_test_seq)
assert trans_text == ['this text has <unk> <unk> words']
def test_sequences_to_texts_no_oov_token():
x_train = ['This text has only known words this text']
x_test = ['This text has some unknown words']
tokenizer = keras.preprocessing.text.Tokenizer(num_words=3)
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
trans_text = tokenizer.sequences_to_texts(x_test_seq)
assert trans_text == ['this text']
def test_sequences_to_texts_no_num_words_no_oov_token():
x_train = ['This text has only known words this text']
x_test = ['This text has some unknown words']
tokenizer = keras.preprocessing.text.Tokenizer()
tokenizer.fit_on_texts(x_train)
x_test_seq = tokenizer.texts_to_sequences(x_test)
trans_text = tokenizer.sequences_to_texts(x_test_seq)
assert trans_text == ['this text has words']
def test_sequences_to_texts():
texts = [
'The cat sat on the mat.',
'The dog sat on the log.',
'Dogs and cats living together.'
]
tokenizer = keras.preprocessing.text.Tokenizer(num_words=10,
oov_token='<unk>')
tokenizer.fit_on_texts(texts)
tokenized_text = tokenizer.texts_to_sequences(texts)
trans_text = tokenizer.sequences_to_texts(tokenized_text)
assert trans_text == ['the cat sat on the mat',
'the dog sat on the log',
'dogs <unk> <unk> <unk> <unk>']
def test_tokenizer_lower_flag():
"""Tests for `lower` flag in text.Tokenizer
"""
# word level tokenizer with sentences as texts
word_tokenizer = text.Tokenizer(lower=True)
texts = ['The cat sat on the mat.',
'The dog sat on the log.',
'Dog and Cat living Together.']
word_tokenizer.fit_on_texts(texts)
expected_word_counts = OrderedDict([('the', 4), ('cat', 2), ('sat', 2),
('on', 2), ('mat', 1), ('dog', 2),
('log', 1), ('and', 1), ('living', 1),
('together', 1)])
assert word_tokenizer.word_counts == expected_word_counts
# word level tokenizer with word_sequences as texts
word_tokenizer = text.Tokenizer(lower=True)
word_sequences = [
['The', 'cat', 'is', 'sitting'],
['The', 'dog', 'is', 'standing']
]
word_tokenizer.fit_on_texts(word_sequences)
expected_word_counts = OrderedDict([('the', 2), ('cat', 1), ('is', 2),
('sitting', 1), ('dog', 1),
('standing', 1)])
assert word_tokenizer.word_counts == expected_word_counts
# char level tokenizer with sentences as texts
char_tokenizer = text.Tokenizer(lower=True, char_level=True)
texts = ['The cat sat on the mat.',
'The dog sat on the log.',
'Dog and Cat living Together.']
char_tokenizer.fit_on_texts(texts)
expected_word_counts = OrderedDict([('t', 11), ('h', 5), ('e', 6), (' ', 14),
('c', 2), ('a', 6), ('s', 2), ('o', 6),
('n', 4), ('m', 1), ('.', 3), ('d', 3),
('g', 5), ('l', 2), ('i', 2), ('v', 1),
('r', 1)])
assert char_tokenizer.word_counts == expected_word_counts
if __name__ == '__main__':
pytest.main([__file__])
| keras-preprocessing/tests/text_test.py/0 | {
"file_path": "keras-preprocessing/tests/text_test.py",
"repo_id": "keras-preprocessing",
"token_count": 4571
} | 130 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/guides/keras_tuner/getting_started/'" />
| keras-tuner/docs/site/examples/helloworld/index.html/0 | {
"file_path": "keras-tuner/docs/site/examples/helloworld/index.html",
"repo_id": "keras-tuner",
"token_count": 39
} | 131 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import time
import numpy as np
import pytest
from sklearn import linear_model
import keras_tuner
from keras_tuner import errors
from keras_tuner.distribute import oracle_client
from keras_tuner.engine import base_tuner
from keras_tuner.test_utils import mock_distribute
from keras_tuner.tuners import gridsearch
from keras_tuner.tuners import randomsearch
INPUT_DIM = 2
NUM_CLASSES = 3
NUM_SAMPLES = 64
TRAIN_INPUTS = np.random.random(size=(NUM_SAMPLES, INPUT_DIM))
TRAIN_TARGETS = np.random.randint(0, NUM_CLASSES, size=(NUM_SAMPLES,))
VAL_INPUTS = np.random.random(size=(NUM_SAMPLES, INPUT_DIM))
VAL_TARGETS = np.random.randint(0, NUM_CLASSES, size=(NUM_SAMPLES,))
def test_base_tuner(tmp_path):
class MyTuner(base_tuner.BaseTuner):
def run_trial(self, trial, x):
model = self.hypermodel.build(trial.hyperparameters)
self.oracle.update_space(trial.hyperparameters)
score = model(x)
return {"score": score}
def get_best_models(self, num_models=1):
best_trials = self.oracle.get_best_trials(num_models)
models = [
self.hypermodel.build(t.hyperparameters) for t in best_trials
]
return models
def build_model(hp):
class MyModel:
def __init__(self):
self.factor = hp.Float("a", 0, 10)
def __call__(self, x):
return self.factor * x
return MyModel()
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"), max_trials=5
)
tuner = MyTuner(oracle=oracle, hypermodel=build_model, directory=tmp_path)
tuner.search(1.0)
models = tuner.get_best_models(5)
# Check that scoring of the model was done correctly.
models_by_factor = sorted(models, key=lambda m: m.factor, reverse=True)
assert models[0] == models_by_factor[0]
def test_oracle_idle(tmp_path):
class MyOracle(randomsearch.RandomSearchOracle):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._first_trial = True
def create_trial(self, *args, **kwargs):
trial = super().create_trial(*args, **kwargs)
if self._first_trial:
self._first_trial = False
trial.status = "IDLE"
else:
trial.status = "STOPPED"
return trial
class MyTuner(base_tuner.BaseTuner):
def run_trial(self, trial, *args, **kwargs):
trial.hyperparameters.Boolean("foo")
trial.hyperparameters.Boolean("bar")
return np.random.rand()
tuner = MyTuner(oracle=MyOracle(max_trials=3), directory=tmp_path)
tuner.search()
def test_simple_sklearn_tuner(tmp_path):
class SimpleSklearnTuner(base_tuner.BaseTuner):
def run_trial(self, trial, x, y, validation_data):
model = self.hypermodel.build(trial.hyperparameters)
model.fit(x, y)
x_val, y_val = validation_data
score = model.score(x_val, y_val)
self.save_model(trial.trial_id, model)
return {"score": score}
def save_model(self, trial_id, model, step=0):
fname = os.path.join(self.get_trial_dir(trial_id), "model.pickle")
with open(fname, "wb") as f:
pickle.dump(model, f)
def load_model(self, trial):
fname = os.path.join(
self.get_trial_dir(trial.trial_id), "model.pickle"
)
with open(fname, "rb") as f:
return pickle.load(f)
def sklearn_build_fn(hp):
c = hp.Float("c", 1e-4, 10)
return linear_model.LogisticRegression(C=c)
tuner = SimpleSklearnTuner(
oracle=randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"), max_trials=2
),
hypermodel=sklearn_build_fn,
directory=tmp_path,
)
tuner.search(
TRAIN_INPUTS, TRAIN_TARGETS, validation_data=(VAL_INPUTS, VAL_TARGETS)
)
models = tuner.get_best_models(2)
score0 = models[0].score(VAL_INPUTS, VAL_TARGETS)
score1 = models[1].score(VAL_INPUTS, VAL_TARGETS)
assert score0 >= score1
def test_retry_common_errors(tmp_path):
class MyTuner(gridsearch.GridSearch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.count = 0
def run_trial(self, trial, *fit_args, **fit_kwargs):
self.count += 1
hp = trial.hyperparameters
value = hp.Int("value", min_value=1, max_value=5)
if value in [2, 3]:
raise ValueError(f"wrong value {value}.")
return value
tuner = MyTuner(directory=tmp_path, max_retries_per_trial=2)
tuner.search()
assert tuner.count == 5 + 2 * 2 # 5 values, 2 values * 2 retries
def test_fatal_error_is_raise_again(tmp_path):
class MyTuner(gridsearch.GridSearch):
def run_trial(self, trial, *fit_args, **fit_kwargs):
raise errors.FatalError("FatalError raised.")
tuner = MyTuner(directory=tmp_path, max_retries_per_trial=2)
with pytest.raises(errors.FatalError, match="FatalError raised."):
tuner.search()
def test_failed_trial_error_no_retry(tmp_path):
class MyTuner(gridsearch.GridSearch):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.count = 0
def run_trial(self, trial, *fit_args, **fit_kwargs):
self.count += 1
hp = trial.hyperparameters
value = hp.Int("value", min_value=1, max_value=5)
if value in [2, 3]:
raise errors.FailedTrialError(f"wrong value {value}.")
return value
tuner = MyTuner(directory=tmp_path, max_retries_per_trial=2)
tuner.search()
assert tuner.count == 5 # 5 values, no retry
def test_remaining_trials(tmp_path):
class MyTuner(gridsearch.GridSearch):
def run_trial(self, trial, *fit_args, **fit_kwargs):
raise errors.FatalError("FatalError raised.")
tuner = MyTuner(directory=tmp_path, max_retries_per_trial=2, max_trials=200)
assert tuner.remaining_trials == 200
def test_logger_deprecated(tmp_path):
def build_model(hp):
hp.Boolean("a")
with pytest.deprecated_call(match="logger"):
gridsearch.GridSearch(
directory=tmp_path,
hypermodel=build_model,
max_trials=200,
logger=1,
)
def test_unrecognized_arguments_raise_value_error(tmp_path):
def build_model(hp):
hp.Boolean("a")
with pytest.raises(ValueError, match="Unrecognized arguments"):
gridsearch.GridSearch(
directory=tmp_path,
hypermodel=build_model,
max_trials=200,
unrecognized=3,
)
def test_chief_should_wait_for_clients(tmp_path):
timeout = oracle_client.TIMEOUT
oracle_client.TIMEOUT = 10
class MyOracle(randomsearch.RandomSearchOracle):
@keras_tuner.synchronized
def end_trial(self, trial):
super().end_trial(trial)
time.sleep(5)
class MyTuner(base_tuner.BaseTuner):
def run_trial(self, trial, *args, **kwargs):
trial.hyperparameters.Boolean("foo")
trial.hyperparameters.Boolean("bar")
return np.random.rand()
def _the_func():
tuner = MyTuner(oracle=MyOracle(max_trials=1), directory=tmp_path)
tuner.search(verbose=0)
mock_distribute.mock_distribute(
_the_func, num_workers=2, wait_for_chief=True
)
oracle_client.TIMEOUT = timeout
| keras-tuner/keras_tuner/engine/base_tuner_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/base_tuner_test.py",
"repo_id": "keras-tuner",
"token_count": 3741
} | 132 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_tuner.engine.hyperparameters import hp_utils
from keras_tuner.engine.hyperparameters import hyperparameter
class Numerical(hyperparameter.HyperParameter):
"""Super class for all numerical type hyperparameters."""
def __init__(
self,
name,
min_value,
max_value,
step=None,
sampling="linear",
default=None,
**kwargs,
):
super().__init__(name=name, default=default, **kwargs)
self.max_value = max_value
self.min_value = min_value
self.step = step
self.sampling = sampling
self._check_sampling_arg()
def _check_sampling_arg(self):
if self.min_value > self.max_value:
raise ValueError(
f"For HyperParameters.{self.__class__.__name__}"
f"(name='{self.name}'), "
f"min_value {str(self.min_value)} is greater than "
f"the max_value {str(self.max_value)}."
)
sampling_values = {"linear", "log", "reverse_log"}
# This is for backward compatibility.
# sampling=None was allowed and was the default value.
if self.sampling is None:
self.sampling = "linear"
self.sampling = self.sampling.lower()
if self.sampling not in sampling_values:
raise ValueError(
f"For HyperParameters.{self.__class__.__name__}"
f"(name='{self.name}'), sampling must be one "
f"of {sampling_values}"
)
if self.sampling in {"log", "reverse_log"} and self.min_value <= 0:
raise ValueError(
f"For HyperParameters.{self.__class__.__name__}"
f"(name='{self.name}'), "
f"sampling='{str(self.sampling)}' does not support "
f"negative values, found min_value: {str(self.min_value)}."
)
if (
self.sampling in {"log", "reverse_log"}
and self.step is not None
and self.step <= 1
):
raise ValueError(
f"For HyperParameters.{self.__class__.__name__}"
f"(name='{self.name}'), "
f"expected step > 1 with sampling='{str(self.sampling)}'. "
f"Received: step={str(self.step)}."
)
def _sample_numerical_value(self, prob, max_value=None):
"""Sample a value with the cumulative prob in the given range."""
if max_value is None:
max_value = self.max_value
if self.sampling == "linear":
return prob * (max_value - self.min_value) + self.min_value
elif self.sampling == "log":
return self.min_value * math.pow(max_value / self.min_value, prob)
elif self.sampling == "reverse_log":
return (
max_value
+ self.min_value
- self.min_value
* math.pow(max_value / self.min_value, 1 - prob)
)
def _numerical_to_prob(self, value, max_value=None):
"""Convert a numerical value to range [0.0, 1.0)."""
if max_value is None:
max_value = self.max_value
if max_value == self.min_value:
# Center the prob
return 0.5
if self.sampling == "linear":
return (value - self.min_value) / (max_value - self.min_value)
if self.sampling == "log":
return math.log(value / self.min_value) / math.log(
max_value / self.min_value
)
if self.sampling == "reverse_log":
return 1.0 - math.log(
(max_value + self.min_value - value) / self.min_value
) / math.log(max_value / self.min_value)
def _get_n_values(self):
"""Get the total number of possible values using step."""
if self.sampling == "linear":
# +1 so that max_value may be sampled.
return int((self.max_value - self.min_value) // self.step + 1)
# For log and reverse_log
# +1 so that max_value may be sampled.
return (
int(math.log(self.max_value / self.min_value, self.step) + 1e-8) + 1
)
def _get_value_by_index(self, index):
"""Get the index-th value in the range given step."""
if self.sampling == "linear":
return self.min_value + index * self.step
if self.sampling == "log":
return self.min_value * math.pow(self.step, index)
if self.sampling == "reverse_log":
return (
self.max_value
+ self.min_value
- self.min_value * math.pow(self.step, index)
)
def _sample_with_step(self, prob):
"""Sample a value with the cumulative prob in the given range.
        The range is divided evenly by `step`, so sampling draws from a
        finite set of values. Callers do not need to pass (max_value + 1);
        the function already takes care of including max_value.
"""
n_values = self._get_n_values()
index = hp_utils.prob_to_index(prob, n_values)
return self._get_value_by_index(index)
@property
def values(self):
if self.step is None:
# Evenly select 10 samples as the values.
return tuple(
{self.prob_to_value(i * 0.1 + 0.05) for i in range(10)}
)
n_values = self._get_n_values()
return (self._get_value_by_index(i) for i in range(n_values))
def _to_prob_with_step(self, value):
"""Convert to cumulative prob with step specified.
        Callers do not need to pass (max_value + 1); the function already
        takes care of including max_value.
"""
if self.sampling == "linear":
index = (value - self.min_value) // self.step
if self.sampling == "log":
index = math.log(value / self.min_value, self.step)
if self.sampling == "reverse_log":
index = math.log(
(self.max_value - value + self.min_value) / self.min_value,
self.step,
)
n_values = self._get_n_values()
return hp_utils.index_to_prob(index, n_values)
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/numerical.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/numerical.py",
"repo_id": "keras-tuner",
"token_count": 3159
} | 133 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"The Tuner class."
import contextlib
import copy
import gc
import math
import os
import numpy as np
from keras_tuner import backend
from keras_tuner import errors
from keras_tuner import utils
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.backend import config
from keras_tuner.backend import keras
from keras_tuner.engine import base_tuner
from keras_tuner.engine import tuner_utils
@keras_tuner_export(
[
"keras_tuner.Tuner",
"keras_tuner.tuners.Tuner",
"keras_tuner.engine.tuner.Tuner",
]
)
class Tuner(base_tuner.BaseTuner):
"""Tuner class for Keras models.
This is the base `Tuner` class for all tuners for Keras models. It manages
the building, training, evaluation and saving of the Keras models. New
tuners can be created by subclassing the class.
    All Keras-related logic lives in `Tuner.run_trial()` and its subroutines.
    When subclassing `Tuner`, if `super().run_trial()` is not called, it can
    tune anything, not just Keras models.
Args:
oracle: Instance of `Oracle` class.
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a `Model` instance). It is optional
when `Tuner.run_trial()` is overriden and does not use
`self.hypermodel`.
max_model_size: Integer, maximum number of scalars in the parameters of
a model. Models larger than this are rejected.
optimizer: Optional optimizer. It is used to override the `optimizer`
argument in the `compile` step for the models. If the hypermodel
does not compile the models it generates, then this argument must be
specified.
loss: Optional loss. May be used to override the `loss` argument in the
`compile` step for the models. If the hypermodel does not compile
the models it generates, then this argument must be specified.
metrics: Optional metrics. May be used to override the `metrics`
argument in the `compile` step for the models. If the hypermodel
does not compile the models it generates, then this argument must
be specified.
distribution_strategy: Optional instance of `tf.distribute.Strategy`.
If specified, each trial will run under this scope. For example,
`tf.distribute.MirroredStrategy(['/gpu:0', '/gpu:1'])` will run
each trial on two GPUs. Currently only single-worker strategies are
supported.
directory: A string, the relative path to the working directory.
project_name: A string, the name to use as prefix for files saved by
this `Tuner`.
tuner_id: Optional string, used as the ID of this `Tuner`.
overwrite: Boolean, defaults to `False`. If `False`, reloads an
existing project of the same name if one is found. Otherwise,
overwrites the project.
executions_per_trial: Integer, the number of executions (training a
model from scratch, starting from a new initialization) to run per
trial (model configuration). Model metrics may vary greatly
depending on random initialization, hence it is often a good idea
to run several executions per trial in order to evaluate the
performance of a given set of hyperparameter values.
**kwargs: Arguments for `BaseTuner`.
Attributes:
remaining_trials: Number of trials remaining, `None` if `max_trials` is
not set. This is useful when resuming a previously stopped search.
"""
def __init__(
self,
oracle,
hypermodel=None,
max_model_size=None,
optimizer=None,
loss=None,
metrics=None,
distribution_strategy=None,
directory=None,
project_name=None,
logger=None,
tuner_id=None,
overwrite=False,
executions_per_trial=1,
**kwargs,
):
if hypermodel is None and self.__class__.run_trial is Tuner.run_trial:
raise ValueError(
"Received `hypermodel=None`. We only allow not specifying "
"`hypermodel` if the user defines the search space in "
"`Tuner.run_trial()` by subclassing a `Tuner` class without "
"using a `HyperModel` instance."
)
super().__init__(
oracle=oracle,
hypermodel=hypermodel,
directory=directory,
project_name=project_name,
logger=logger,
overwrite=overwrite,
**kwargs,
)
self.max_model_size = max_model_size
self.optimizer = optimizer
self.loss = loss
self.metrics = metrics
self.distribution_strategy = distribution_strategy
self.executions_per_trial = executions_per_trial
# Support multi-worker distribution strategies w/ distributed tuning.
# Only the chief worker in each cluster should report results.
if self.distribution_strategy is not None and hasattr(
self.distribution_strategy.extended, "_in_multi_worker_mode"
):
self.oracle.multi_worker = (
self.distribution_strategy.extended._in_multi_worker_mode()
)
self.oracle.should_report = (
self.distribution_strategy.extended.should_checkpoint
)
self.tuner_id = tuner_id or self.tuner_id
def _build_hypermodel(self, hp):
with maybe_distribute(self.distribution_strategy):
model = self.hypermodel.build(hp)
self._override_compile_args(model)
return model
def _try_build(self, hp):
        # Clean up the TF graph from the previously built (now defunct) model.
keras.backend.clear_session()
gc.collect()
model = self._build_hypermodel(hp)
# Stop if `build()` does not return a valid model.
if not isinstance(model, keras.models.Model):
raise errors.FatalTypeError(
"Expected the model-building function, or HyperModel.build() "
"to return a valid Keras Model instance. "
f"Received: {model} of type {type(model)}."
)
# Check model size.
size = maybe_compute_model_size(model)
if self.max_model_size and size > self.max_model_size:
raise errors.FailedTrialError(
f"Oversized model: {size} parameters. Skip model."
)
return model
def _filter_metrics(self, metrics):
if metrics is None:
return None
return list(
filter(
lambda metric: metric.name not in ("loss", "compile_metric"),
metrics,
)
)
def _override_compile_args(self, model):
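        # Re-compile the model with any optimizer/loss/metrics overrides
        # passed to the Tuner constructor.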
with maybe_distribute(self.distribution_strategy):
if self.optimizer or self.loss or self.metrics:
compile_kwargs = {
"optimizer": model.optimizer,
"loss": model.loss,
"metrics": self._filter_metrics(model.metrics),
}
if self.loss:
compile_kwargs["loss"] = self.loss
if self.optimizer:
optimizer = (
self.optimizer
if isinstance(self.optimizer, str)
else keras.optimizers.deserialize(
keras.optimizers.serialize(self.optimizer)
)
)
compile_kwargs["optimizer"] = optimizer
if self.metrics:
compile_kwargs["metrics"] = self.metrics
model.compile(**compile_kwargs)
def _build_and_fit_model(self, trial, *args, **kwargs):
"""For AutoKeras to override.
DO NOT REMOVE this function. AutoKeras overrides the function to tune
tf.data preprocessing pipelines, preprocess the dataset to obtain
the input shape before building the model, adapt preprocessing layers,
and tune other fit_args and fit_kwargs.
Args:
trial: A `Trial` instance that contains the information needed to
run this trial. `Hyperparameters` can be accessed via
`trial.hyperparameters`.
*args: Positional arguments passed by `search`.
**kwargs: Keyword arguments passed by `search`.
Returns:
The fit history.
"""
hp = trial.hyperparameters
model = self._try_build(hp)
results = self.hypermodel.fit(hp, model, *args, **kwargs)
# Save the build config for model loading later.
if backend.config.multi_backend():
utils.save_json(
self._get_build_config_fname(trial.trial_id),
model.get_build_config(),
)
tuner_utils.validate_trial_results(
results, self.oracle.objective, "HyperModel.fit()"
)
return results
def run_trial(self, trial, *args, **kwargs):
"""Evaluates a set of hyperparameter values.
This method is called multiple times during `search` to build and
evaluate the models with different hyperparameters and return the
objective value.
Example:
You can use it with `self.hypermodel` to build and fit the model.
```python
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
model = self.hypermodel.build(hp)
return self.hypermodel.fit(hp, model, *args, **kwargs)
```
You can also use it as a black-box optimizer for anything.
```python
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
x = hp.Float("x", -2.0, 2.0)
y = x * x + 2 * x + 1
return y
```
Args:
trial: A `Trial` instance that contains the information needed to
run this trial. Hyperparameters can be accessed via
`trial.hyperparameters`.
*args: Positional arguments passed by `search`.
**kwargs: Keyword arguments passed by `search`.
Returns:
A `History` object, which is the return value of `model.fit()`, a
dictionary, a float, or a list of one of these types.
            If a dictionary is returned, it should be a dictionary of the
            metrics to track. The keys are the metric names, which include
            the `objective` name. The values should be the metric values.
            If a float is returned, it should be the `objective` value.
            If the model is evaluated multiple times, a list of results of
            any of the types above may be returned. The final objective value
            is the average of the results in the list.
"""
# Not using `ModelCheckpoint` to support MultiObjective.
# It can only track one of the metrics to save the best model.
model_checkpoint = tuner_utils.SaveBestEpoch(
objective=self.oracle.objective,
filepath=self._get_checkpoint_fname(trial.trial_id),
)
original_callbacks = kwargs.pop("callbacks", [])
# Run the training process multiple times.
histories = []
for execution in range(self.executions_per_trial):
copied_kwargs = copy.copy(kwargs)
callbacks = self._deepcopy_callbacks(original_callbacks)
self._configure_tensorboard_dir(callbacks, trial, execution)
callbacks.append(tuner_utils.TunerCallback(self, trial))
# Only checkpoint the best epoch across all executions.
callbacks.append(model_checkpoint)
copied_kwargs["callbacks"] = callbacks
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
histories.append(obj_value)
return histories
def load_model(self, trial):
model = self._try_build(trial.hyperparameters)
# Build model to create the weights.
if backend.config.multi_backend() and not model.built:
model.build_from_config(
utils.load_json(self._get_build_config_fname(trial.trial_id))
)
# Reload best checkpoint.
# Only load weights to avoid loading `custom_objects`.
with maybe_distribute(self.distribution_strategy):
model.load_weights(self._get_checkpoint_fname(trial.trial_id))
return model
def on_batch_begin(self, trial, model, batch, logs):
"""Called at the beginning of a batch.
Args:
trial: A `Trial` instance.
model: A Keras `Model`.
batch: The current batch number within the current epoch.
logs: Additional metrics.
"""
pass
def on_batch_end(self, trial, model, batch, logs=None):
"""Called at the end of a batch.
Args:
trial: A `Trial` instance.
model: A Keras `Model`.
batch: The current batch number within the current epoch.
logs: Additional metrics.
"""
pass
def on_epoch_begin(self, trial, model, epoch, logs=None):
"""Called at the beginning of an epoch.
Args:
trial: A `Trial` instance.
model: A Keras `Model`.
epoch: The current epoch number.
logs: Additional metrics.
"""
pass
def on_epoch_end(self, trial, model, epoch, logs=None):
"""Called at the end of an epoch.
Args:
trial: A `Trial` instance.
model: A Keras `Model`.
epoch: The current epoch number.
logs: Dict. Metrics for this epoch. This should include
the value of the objective for this epoch.
"""
# Intermediate results are not passed to the Oracle, and
# checkpointing is handled via a `SaveBestEpoch` callback.
pass
def get_best_models(self, num_models=1):
"""Returns the best model(s), as determined by the tuner's objective.
The models are loaded with the weights corresponding to
their best checkpoint (at the end of the best epoch of best trial).
This method is for querying the models trained during the search.
For best performance, it is recommended to retrain your Model on the
full dataset using the best hyperparameters found during `search`,
which can be obtained using `tuner.get_best_hyperparameters()`.
Args:
num_models: Optional number of best models to return.
Defaults to 1.
Returns:
List of trained model instances sorted from the best to the worst.
"""
# Method only exists in this class for the docstring override.
return super().get_best_models(num_models)
def _deepcopy_callbacks(self, callbacks):
try:
callbacks = copy.deepcopy(callbacks)
except:
raise errors.FatalValueError(
"All callbacks used during a search "
"should be deep-copyable (since they are "
"reused across trials). "
"It is not possible to do `copy.deepcopy(%s)`" % (callbacks,)
)
return callbacks
def _configure_tensorboard_dir(self, callbacks, trial, execution=0):
# Only import tensorboard when using tensorflow backend to avoid
# importing tensorflow with other backend (tensorboard would import
# tensorflow).
if backend.config.backend() != "tensorflow":
return
from tensorboard.plugins.hparams import api as hparams_api
for callback in callbacks:
if callback.__class__.__name__ == "TensorBoard":
# Patch TensorBoard log_dir and add HParams KerasCallback
logdir = self._get_tensorboard_dir(
callback.log_dir, trial.trial_id, execution
)
callback.log_dir = logdir
hparams = tuner_utils.convert_hyperparams_to_hparams(
trial.hyperparameters,
hparams_api,
)
callbacks.append(
hparams_api.KerasCallback(
writer=logdir, hparams=hparams, trial_id=trial.trial_id
)
)
def _get_tensorboard_dir(self, logdir, trial_id, execution):
return os.path.join(
str(logdir), str(trial_id), f"execution{str(execution)}"
)
def _get_checkpoint_fname(self, trial_id):
return os.path.join(
# Each checkpoint is saved in its own directory.
self.get_trial_dir(trial_id),
"checkpoint.weights.h5" if config.multi_backend() else "checkpoint",
)
def _get_build_config_fname(self, trial_id):
return os.path.join(
# Each checkpoint is saved in its own directory.
self.get_trial_dir(trial_id),
"build_config.json",
)
def maybe_compute_model_size(model):
"""Compute the size of a given model, if it has been built."""
if model.built:
params = [math.prod(p.shape) for p in model.trainable_weights]
return int(np.sum(params))
return 0
@keras_tuner_export("keras_tuner.engine.tuner.maybe_distribute")
@contextlib.contextmanager
def maybe_distribute(distribution_strategy):
    """Wraps the block in `distribution_strategy.scope()` if one is set."""
if distribution_strategy is None:
yield
else:
with distribution_strategy.scope():
yield
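# Minimal usage sketch (illustrative; `tf.distribute.MirroredStrategy()` is
# only an example -- any strategy exposing a `.scope()` context manager works,
# and passing `None` simply runs the body without a scope):
#
#     with maybe_distribute(tf.distribute.MirroredStrategy()):
#         model = build_model(hp)  # variables created under the strategy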
| keras-tuner/keras_tuner/engine/tuner.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/tuner.py",
"repo_id": "keras-tuner",
"token_count": 7915
} | 134 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import keras_tuner
from keras_tuner import backend
from keras_tuner import utils
from keras_tuner.backend import keras
from keras_tuner.engine import tuner_utils
from keras_tuner.tuners import hyperband as hyperband_module
INPUT_DIM = 5
def build_model(hp):
model = keras.Sequential()
for i in range(hp.Int("layers", 1, 3)):
model.add(
keras.layers.Dense(
hp.Int(f"units{str(i)}", 1, 5), activation="relu"
)
)
model.add(
keras.layers.Lambda(lambda x: x + hp.Float(f"bias{str(i)}", -1, 1))
)
model.add(keras.layers.Dense(1, activation="sigmoid"))
model.compile("sgd", "mse")
return model
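# `build_model` is a plain KerasTuner hypermodel function: each call samples
# `layers`, `units{i}` and `bias{i}` from the `hp` container it receives, so
# every trial builds a differently sized network compiled with SGD on MSE.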
def test_hyperband_oracle_bracket_configs(tmp_path):
oracle = hyperband_module.HyperbandOracle(
objective=keras_tuner.Objective("score", "max"),
hyperband_iterations=1,
max_epochs=8,
factor=2,
)
oracle._set_project_dir(tmp_path, "untitled")
    # Four brackets, whose first rounds train for 1, 2, 4, and 8 epochs.
assert oracle._get_num_brackets() == 4
assert oracle._get_num_rounds(bracket_num=3) == 4
assert oracle._get_size(bracket_num=3, round_num=0) == 8
assert oracle._get_epochs(bracket_num=3, round_num=0) == 1
assert oracle._get_size(bracket_num=3, round_num=3) == 1
assert oracle._get_epochs(bracket_num=3, round_num=3) == 8
assert oracle._get_num_rounds(bracket_num=0) == 1
assert oracle._get_size(bracket_num=0, round_num=0) == 4
assert oracle._get_epochs(bracket_num=0, round_num=0) == 8
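    # Successive halving in the largest bracket (bracket_num=3) runs 8 models
    # for 1 epoch, keeps 8 / 2 = 4 of them for 2 epochs, then 2 for 4 epochs,
    # and finally 1 for the full 8 epochs; bracket 0 goes straight to 4
    # models trained for all 8 epochs.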
def test_hyperband_oracle_one_sweep_single_thread(tmp_path):
hp = keras_tuner.HyperParameters()
hp.Float("a", -100, 100)
hp.Float("b", -100, 100)
oracle = hyperband_module.HyperbandOracle(
hyperparameters=hp,
objective=keras_tuner.Objective("score", "max"),
hyperband_iterations=1,
max_epochs=9,
factor=3,
)
oracle._set_project_dir(tmp_path, "untitled")
score = 0
for bracket_num in reversed(range(oracle._get_num_brackets())):
for round_num in range(oracle._get_num_rounds(bracket_num)):
for _ in range(oracle._get_size(bracket_num, round_num)):
trial = oracle.create_trial("tuner0")
assert trial.status == "RUNNING"
score += 1
oracle.update_trial(trial.trial_id, {"score": score})
trial.status = "COMPLETED"
oracle.end_trial(trial)
assert len(
oracle._brackets[0]["rounds"][round_num]
) == oracle._get_size(bracket_num, round_num)
assert len(oracle._brackets) == 1
# Iteration should now be complete.
trial = oracle.create_trial("tuner0")
assert trial.status == "STOPPED", oracle.hyperband_iterations
assert len(oracle.ongoing_trials) == 0
# Brackets should all be finished and removed.
assert len(oracle._brackets) == 0
best_trial = oracle.get_best_trials()[0]
assert best_trial.score == score
def test_hyperband_oracle_one_sweep_parallel(tmp_path):
hp = keras_tuner.HyperParameters()
hp.Float("a", -100, 100)
hp.Float("b", -100, 100)
oracle = hyperband_module.HyperbandOracle(
hyperparameters=hp,
objective=keras_tuner.Objective("score", "max"),
hyperband_iterations=1,
max_epochs=4,
factor=2,
)
oracle._set_project_dir(tmp_path, "untitled")
# All round 0 trials from different brackets can be run
# in parallel.
round0_trials = []
for i in range(10):
t = oracle.create_trial(f"tuner{str(i)}")
assert t.status == "RUNNING"
round0_trials.append(t)
assert len(oracle._brackets) == 3
# Round 1 can't be run until enough models from round 0
# have completed.
t = oracle.create_trial("tuner10")
assert t.status == "IDLE"
for t in round0_trials:
oracle.update_trial(t.trial_id, {"score": 1})
t.status = "COMPLETED"
oracle.end_trial(t)
round1_trials = []
for i in range(4):
t = oracle.create_trial(f"tuner{str(i)}")
assert t.status == "RUNNING"
round1_trials.append(t)
# Bracket 0 is complete as it only has round 0.
assert len(oracle._brackets) == 2
# Round 2 can't be run until enough models from round 1
# have completed.
t = oracle.create_trial("tuner10")
assert t.status == "IDLE"
for t in round1_trials:
oracle.update_trial(t.trial_id, {"score": 1})
t.status = "COMPLETED"
oracle.end_trial(t)
# Only one trial runs in round 2.
round2_trial = oracle.create_trial("tuner0")
assert len(oracle._brackets) == 1
# No more trials to run, but wait for existing brackets to end.
t = oracle.create_trial("tuner10")
assert t.status == "IDLE"
oracle.update_trial(round2_trial.trial_id, {"score": 1})
round2_trial.status = "COMPLETED"
oracle.end_trial(round2_trial)
t = oracle.create_trial("tuner10")
assert t.status == "STOPPED", oracle._current_sweep
def test_hyperband_integration(tmp_path):
tuner = hyperband_module.Hyperband(
objective="val_loss",
hypermodel=build_model,
hyperband_iterations=2,
max_epochs=6,
factor=3,
directory=tmp_path,
)
x, y = np.ones((2, INPUT_DIM)), np.ones((2, 1))
tuner.search(x, y, validation_data=(x, y))
# Make sure Oracle is registering new HPs.
updated_hps = tuner.oracle.get_space().values
assert "units1" in updated_hps
assert "bias1" in updated_hps
best_score = tuner.oracle.get_best_trials()[0].score
best_model = tuner.get_best_models()[0]
assert best_model.evaluate(x, y) == best_score
def test_hyperband_save_and_restore(tmp_path):
tuner = hyperband_module.Hyperband(
objective="val_loss",
hypermodel=build_model,
hyperband_iterations=1,
max_epochs=7,
factor=2,
directory=tmp_path,
)
x, y = np.ones((2, INPUT_DIM)), np.ones((2, 1))
tuner.search(x, y, validation_data=(x, y))
num_trials = len(tuner.oracle.trials)
assert num_trials > 0
assert tuner.oracle._current_iteration == 0
tuner.save()
tuner.trials = {}
tuner.oracle._current_iteration = 0
tuner.reload()
assert len(tuner.oracle.trials) == num_trials
assert tuner.oracle._current_iteration == 0
def test_hyperband_load_weights(tmp_path):
tuner = hyperband_module.Hyperband(
objective="val_loss",
hypermodel=build_model,
hyperband_iterations=1,
max_epochs=2,
factor=2,
directory=tmp_path,
)
x, y = np.ones((2, INPUT_DIM)), np.ones((2, 1))
nb_brackets = tuner.oracle._get_num_brackets()
assert nb_brackets == 2
nb_models_round_0 = tuner.oracle._get_size(bracket_num=1, round_num=0)
assert nb_models_round_0 == 2
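    # With max_epochs=2 and factor=2 there are two brackets; the larger one
    # (bracket_num=1) starts by training 2 models for 1 epoch each, then
    # re-trains the better of the two for the full 2 epochs in round 1.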
# run the trials for the round 0 (from scratch)
for _ in range(nb_models_round_0):
trial = tuner.oracle.create_trial("tuner0")
result = tuner.run_trial(trial, x, y, validation_data=(x, y))
tuner.oracle.update_trial(
trial.trial_id,
tuner_utils.convert_to_metrics_dict(result, tuner.oracle.objective),
tuner_utils.get_best_step(result, tuner.oracle.objective),
)
trial.status = "COMPLETED"
tuner.oracle.end_trial(trial)
# ensure the model run in round 1 is loaded from the best model in round 0
trial = tuner.oracle.create_trial("tuner0")
hp = trial.hyperparameters
assert "tuner/trial_id" in hp
new_model = tuner._try_build(hp)
assert new_model.predict(x).shape == y.shape
# get new model weights
new_model_weights = new_model.weights.copy()
# get weights from the best model in round 0
best_trial_round_0_id = hp["tuner/trial_id"]
best_hp_round_0 = tuner.oracle.trials[best_trial_round_0_id].hyperparameters
best_model_round_0 = tuner._try_build(best_hp_round_0)
if backend.config.multi_backend():
best_model_round_0.build_from_config(
utils.load_json(
tuner._get_build_config_fname(best_trial_round_0_id)
)
)
best_model_round_0.load_weights(
tuner._get_checkpoint_fname(best_trial_round_0_id)
)
assert best_model_round_0.predict(x).shape == y.shape
best_model_round_0_weights = best_model_round_0.weights.copy()
# compare the weights
assert len(new_model_weights) == len(best_model_round_0_weights)
assert all(
np.all(np.array(new_weight) == np.array(best_old_weight))
for new_weight, best_old_weight in zip(
new_model_weights, best_model_round_0_weights
)
)
def test_factor_less_than_2_error():
with pytest.raises(ValueError, match="factor needs"):
hyperband_module.HyperbandOracle(
objective=keras_tuner.Objective("score", "max"),
hyperband_iterations=1,
max_epochs=8,
factor=1,
)
def test_exhausted_values_during_init_bracket(tmp_path):
oracle = hyperband_module.HyperbandOracle(
objective=keras_tuner.Objective("score", "max"),
hyperband_iterations=1,
max_epochs=8,
factor=3,
)
oracle._set_project_dir(tmp_path, "untitled")
hp = oracle.get_space()
hp.Boolean("bool")
oracle.update_space(hp)
trial_1 = oracle.create_trial("a")
trial_2 = oracle.create_trial("b")
trial_3 = oracle.create_trial("c")
assert trial_3.status == "IDLE"
print(oracle.trials)
oracle.update_trial(
trial_id=trial_1.trial_id,
metrics={oracle.objective.name: np.random.rand()},
)
oracle.end_trial(trial_1)
oracle.update_trial(
trial_id=trial_2.trial_id,
metrics={oracle.objective.name: np.random.rand()},
)
oracle.end_trial(trial_2)
trial_1 = oracle.create_trial("a")
assert trial_1.status == "STOPPED"
| keras-tuner/keras_tuner/tuners/hyperband_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/tuners/hyperband_test.py",
"repo_id": "keras-tuner",
"token_count": 4663
} | 135 |
isort keras_tuner
black keras_tuner
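# Prepend the Apache copyright header (shell/copyright.txt) to every Python
# file under keras_tuner/ that does not already contain a "Copyright" line.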
for i in $(find keras_tuner -name '*.py') # or whatever other pattern...
do
if ! grep -q Copyright $i
then
echo $i
cat shell/copyright.txt $i >$i.new && mv $i.new $i
fi
done
flake8 keras_tuner
| keras-tuner/shell/format.sh/0 | {
"file_path": "keras-tuner/shell/format.sh",
"repo_id": "keras-tuner",
"token_count": 102
} | 136 |