text (stringlengths 5-261k) | id (stringlengths 16-106) | metadata (dict) | __index_level_0__ (int64, 0-266)
---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tensorflow.keras.callbacks import Callback
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import ops
from keras_cv.metrics.coco import compute_pycoco_metrics
from keras_cv.models.object_detection.__internal__ import unpack_input
from keras_cv.utils.conditional_imports import assert_pycocotools_installed
@keras_cv_export("keras_cv.callbacks.PyCOCOCallback")
class PyCOCOCallback(Callback):
def __init__(
self, validation_data, bounding_box_format, cache=True, **kwargs
):
"""Creates a callback to evaluate PyCOCO metrics on a validation
dataset.
Args:
validation_data: a tf.data.Dataset containing validation data.
Entries should have the form ```(images, {"boxes": boxes,
"classes": classes})```.
bounding_box_format: the KerasCV bounding box format used in the
validation dataset (e.g. "xywh")
cache: whether the callback should cache the dataset between
iterations. Note that if the validation dataset has shuffling of
any kind (e.g. from `shuffle_files=True` in a call to
`tfds.load()`, or a call to `tf.data.Dataset.shuffle()` with
`reshuffle_each_iteration=True`), you **must** cache the dataset
to preserve iteration order. This will store your entire dataset
in main memory, so for large datasets consider avoiding shuffle
operations and passing `cache=False`.
"""
assert_pycocotools_installed("PyCOCOCallback")
self.val_data = validation_data
if cache:
# We cache the dataset to preserve a consistent iteration order.
self.val_data = self.val_data.cache()
self.bounding_box_format = bounding_box_format
super().__init__(**kwargs)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def images_only(data, maybe_boxes=None):
if maybe_boxes is None:
images, boxes = unpack_input(data)
else:
images = data
return images
def boxes_only(data, maybe_boxes=None):
if maybe_boxes is None:
images, boxes = unpack_input(data)
else:
boxes = maybe_boxes
return boxes
images_only_ds = self.val_data.map(images_only)
y_pred = self.model.predict(images_only_ds)
box_pred = y_pred["boxes"]
cls_pred = ops.convert_to_numpy(y_pred["classes"])
confidence_pred = ops.convert_to_numpy(y_pred["confidence"])
valid_det = ops.convert_to_numpy(y_pred["num_detections"])
gt = [boxes for boxes in self.val_data.map(boxes_only)]
gt_boxes = ops.concatenate(
[ops.convert_to_numpy(boxes["boxes"]) for boxes in gt],
axis=0,
)
gt_classes = ops.concatenate(
[ops.convert_to_numpy(boxes["classes"]) for boxes in gt],
axis=0,
)
first_image_batch = next(iter(images_only_ds))
height = first_image_batch.shape[1]
width = first_image_batch.shape[2]
total_images = gt_boxes.shape[0]
gt_boxes = bounding_box.convert_format(
gt_boxes, source=self.bounding_box_format, target="yxyx"
)
source_ids = np.char.mod(
"%d", np.linspace(1, total_images, total_images)
)
num_detections = ops.sum(ops.cast(gt_classes > 0, "int32"), axis=-1)
ground_truth = {
"source_id": [source_ids],
"height": [
ops.convert_to_numpy(
ops.tile(ops.array([height]), [total_images])
)
],
"width": [
ops.convert_to_numpy(
ops.tile(ops.array([width]), [total_images])
)
],
"num_detections": [ops.convert_to_numpy(num_detections)],
"boxes": [ops.convert_to_numpy(gt_boxes)],
"classes": [ops.convert_to_numpy(gt_classes)],
}
box_pred = bounding_box.convert_format(
box_pred, source=self.bounding_box_format, target="yxyx"
)
predictions = {
"source_id": [source_ids],
"detection_boxes": [ops.convert_to_numpy(box_pred)],
"detection_classes": [cls_pred],
"detection_scores": [confidence_pred],
"num_detections": [valid_det],
}
metrics = compute_pycoco_metrics(ground_truth, predictions)
# Mark these as validation metrics by prepending a val_ prefix
metrics = {"val_" + name: val for name, val in metrics.items()}
logs.update(metrics)
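A hypothetical wiring of the callback above into `model.fit()`. Here `detection_model`, `train_ds`, and `val_ds` are placeholders: the model is assumed to be a compiled KerasCV object-detection model whose `predict()` returns decoded boxes, and `val_ds` is a `tf.data.Dataset` yielding the `(images, {"boxes": ..., "classes": ...})` batches described in the docstring.

```python
coco_callback = PyCOCOCallback(
    validation_data=val_ds,      # placeholder validation tf.data.Dataset
    bounding_box_format="xywh",  # must match the format of the boxes in val_ds
)
detection_model.fit(train_ds, epochs=10, callbacks=[coco_callback])
# After each epoch, the training logs are extended with "val_"-prefixed COCO
# metrics produced by compute_pycoco_metrics.
```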
| keras-cv/keras_cv/callbacks/pycoco_callback.py/0 | {
"file_path": "keras-cv/keras_cv/callbacks/pycoco_callback.py",
"repo_id": "keras-cv",
"token_count": 2440
} | 42 |
/* Copyright 2022 The Keras CV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
#define THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace kerascv {
namespace box {
// A vertex with (x, y) coordinates and an optional z component.
//
// This is an internal implementation detail of RotatedBox2D.
struct Vertex {
// Creates an empty Vertex.
Vertex() = default;
Vertex(const double x, const double y) : x(x), y(y) {}
Vertex(const double x, const double y, const double z) : x(x), y(y), z(z) {}
double x = 0;
double y = 0;
double z = 0;
};
// A rotated 2D bounding box represented as (cx, cy, w, h, heading). cx, cy are
// the box center coordinates; w, h are the box width and height; heading is the
// rotation angle in radians relative to the 'positive x' direction.
class RotatedBox2D {
public:
// Creates an empty rotated 2D box.
RotatedBox2D() : RotatedBox2D(0, 0, 0, 0, 0) {}
RotatedBox2D(const double cx, const double cy, const double w, const double h,
const double heading);
// Returns the area of the box.
double Area() const;
// Returns the intersection area between this box and the given box.
double Intersection(const RotatedBox2D& other) const;
// Returns the IoU between this box and the given box.
double IoU(const RotatedBox2D& other) const;
// Returns true if the box is valid (width and height are not extremely
// large or small).
bool NonZeroAndValid() const;
double MinX() const;
double MaxX() const;
double MinY() const;
double MaxY() const;
bool WithinBox2D(const Vertex& point) const;
private:
bool left_hand_side(const Vertex& point, const Vertex& v1,
const Vertex& v2) const;
// Computes / caches box_vertices_ calculation.
const std::vector<Vertex>& box_vertices() const;
// Returns true if this box and 'other' might intersect.
//
// If this returns false, the two boxes definitely do not intersect. If this
// returns true, it is still possible that the two boxes do not intersect, and
// the more expensive intersection code will be called.
bool MaybeIntersects(const RotatedBox2D& other) const;
double cx_ = 0;
double cy_ = 0;
double w_ = 0;
double h_ = 0;
double heading_ = 0;
// Loose boundaries for fast intersection test.
double loose_min_x_ = -1;
double loose_max_x_ = -1;
double loose_min_y_ = -1;
double loose_max_y_ = -1;
// True if the dimensions of the box are very small or very large in any
// dimension.
bool extreme_box_dim_ = false;
// The following fields are computed on demand. They are logically
// const.
// Cached area. Access via Area() public API.
mutable double area_ = -1;
// Stores the vertices of the box. Access via box_vertices().
mutable std::vector<Vertex> box_vertices_;
};
// A 3D box of 7-DOFs: only allows rotation around the z-axis.
struct Upright3DBox {
RotatedBox2D rbox = RotatedBox2D();
double z_min = 0;
double z_max = 0;
// Creates an empty rotated 3D box.
Upright3DBox() = default;
// Creates a 3D box from the raw input data with size 7. The data format is
// (center_x, center_y, center_z, dimension_x, dimension_y, dimension_z,
// heading)
Upright3DBox(const std::vector<double>& raw)
: rbox(raw[0], raw[1], raw[3], raw[4], raw[6]),
z_min(raw[2] - raw[5] / 2.0),
z_max(raw[2] + raw[5] / 2.0) {}
Upright3DBox(const RotatedBox2D& rb, const double z_min, const double z_max)
: rbox(rb), z_min(z_min), z_max(z_max) {}
// Computes intersection over union (of the volume).
double IoU(const Upright3DBox& other) const;
// Computes overlap: intersection of this box and the given box normalized
// over the volume of this box.
double Overlap(const Upright3DBox& other) const;
// Returns true if the box is valid (width and height are not extremely
// large or small, and zmin < zmax).
bool NonZeroAndValid() const;
bool WithinBox3D(const Vertex& point) const;
};
// Converts a [N, 7] tensor to a vector of N Upright3DBox objects.
std::vector<Upright3DBox> ParseBoxesFromTensor(const Tensor& boxes_tensor);
// Converts a [N, 3] tensor to a vector of N Vertex objects.
std::vector<Vertex> ParseVerticesFromTensor(const Tensor& points_tensor);
std::vector<int> GetMinXIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMaxXIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMinYIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
std::vector<int> GetMaxYIndexFromBoxes(std::vector<Upright3DBox>& box,
std::vector<double>& points);
} // namespace box
} // namespace kerascv
} // namespace tensorflow
#endif // THIRD_PARTY_PY_KERAS_CV_OPS_BOX_UTIL_H_
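The header above decomposes the volume IoU of two upright boxes into a 2D intersection area times a z-overlap. The sketch below illustrates that decomposition in Python for the simplified case of axis-aligned boxes (heading = 0); it is not the library implementation, which handles rotated rectangles through `RotatedBox2D`.

```python
def upright_box_iou_axis_aligned(a, b):
    """Volume IoU of two (cx, cy, cz, dx, dy, dz) boxes with heading == 0."""

    def overlap_1d(c1, d1, c2, d2):
        # Overlap of two centered intervals [c - d/2, c + d/2].
        lo = max(c1 - d1 / 2.0, c2 - d2 / 2.0)
        hi = min(c1 + d1 / 2.0, c2 + d2 / 2.0)
        return max(hi - lo, 0.0)

    # intersection volume = 2D intersection area * z overlap
    intersection = (
        overlap_1d(a[0], a[3], b[0], b[3])
        * overlap_1d(a[1], a[4], b[1], b[4])
        * overlap_1d(a[2], a[5], b[2], b[5])
    )
    vol_a = a[3] * a[4] * a[5]
    vol_b = b[3] * b[4] * b[5]
    union = vol_a + vol_b - intersection
    return intersection / union if union > 0 else 0.0


same = upright_box_iou_axis_aligned((0, 0, 0, 2, 2, 2), (0, 0, 0, 2, 2, 2))
shifted = upright_box_iou_axis_aligned((0, 0, 0, 2, 2, 2), (1, 0, 0, 2, 2, 2))
assert abs(same - 1.0) < 1e-6           # identical boxes -> IoU 1
assert abs(shifted - 1.0 / 3.0) < 1e-6  # half-overlapping in x -> IoU 1/3
```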
| keras-cv/keras_cv/custom_ops/box_util.h/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/box_util.h",
"repo_id": "keras-cv",
"token_count": 2003
} | 43 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
EPSILON = 1e-8
@keras_cv_export("keras_cv.layers.NonMaxSuppression")
class NonMaxSuppression(keras.layers.Layer):
"""A Keras layer that decodes predictions of an object detection model.
Args:
bounding_box_format: The format of bounding boxes of input dataset. Refer
[to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/)
for more details on supported bounding box
formats.
from_logits: boolean, True means the input scores are logits, False means
they are already confidences.
iou_threshold: a float value in the range [0, 1] representing the minimum
IoU threshold at which two boxes are considered duplicates and the
lower-scoring one is suppressed. Defaults to 0.5.
confidence_threshold: a float value in the range [0, 1]. All boxes with
confidence below this value will be discarded, defaults to 0.5.
max_detections: the maximum number of detections to keep after NMS is
applied. A large number may incur significant memory overhead,
defaults to 100.
""" # noqa: E501
def __init__(
self,
bounding_box_format,
from_logits,
iou_threshold=0.5,
confidence_threshold=0.5,
max_detections=100,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.from_logits = from_logits
self.iou_threshold = iou_threshold
self.confidence_threshold = confidence_threshold
self.max_detections = max_detections
self.built = True
def call(
self, box_prediction, class_prediction, images=None, image_shape=None
):
"""Accepts images and raw predictions, and returns bounding box
predictions.
Args:
box_prediction: Dense Tensor of shape [batch, boxes, 4] in the
`bounding_box_format` specified in the constructor.
class_prediction: Dense Tensor of shape [batch, boxes, num_classes].
"""
target_format = "yxyx"
if bounding_box.is_relative(self.bounding_box_format):
target_format = bounding_box.as_relative(target_format)
box_prediction = bounding_box.convert_format(
box_prediction,
source=self.bounding_box_format,
target=target_format,
images=images,
image_shape=image_shape,
)
if self.from_logits:
class_prediction = ops.sigmoid(class_prediction)
confidence_prediction = ops.max(class_prediction, axis=-1)
if not keras_3() or keras.backend.backend() == "tensorflow":
idx, valid_det = tf.image.non_max_suppression_padded(
box_prediction,
confidence_prediction,
max_output_size=self.max_detections,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_threshold,
pad_to_max_output_size=True,
sorted_input=False,
)
elif keras.backend.backend() == "torch":
# Since TorchVision has a nice efficient NMS op, we might as well
# use it!
import torchvision
batch_size = box_prediction.shape[0]
idx = ops.zeros((batch_size, self.max_detections))
valid_det = ops.zeros((batch_size), "int32")
for batch_idx in range(batch_size):
conf_mask = (
confidence_prediction[batch_idx] > self.confidence_threshold
)
conf_mask_idx = ops.squeeze(ops.nonzero(conf_mask), axis=0)
conf_i = confidence_prediction[batch_idx][conf_mask]
box_i = box_prediction[batch_idx][conf_mask]
idx_i = torchvision.ops.nms(
box_i, conf_i, iou_threshold=self.iou_threshold
)
idx_i = conf_mask_idx[idx_i]
num_boxes = idx_i.shape[0]
if num_boxes >= self.max_detections:
idx_i = idx_i[: self.max_detections]
num_boxes = self.max_detections
valid_det[batch_idx] = ops.cast(ops.size(idx_i), "int32")
idx[batch_idx, :num_boxes] = idx_i
else:
idx, valid_det = non_max_suppression(
box_prediction,
confidence_prediction,
max_output_size=self.max_detections,
iou_threshold=self.iou_threshold,
score_threshold=self.confidence_threshold,
)
box_prediction = ops.take_along_axis(
box_prediction, ops.expand_dims(idx, axis=-1), axis=1
)
box_prediction = ops.reshape(
box_prediction, (-1, self.max_detections, 4)
)
confidence_prediction = ops.take_along_axis(
confidence_prediction, idx, axis=1
)
class_prediction = ops.take_along_axis(
class_prediction, ops.expand_dims(idx, axis=-1), axis=1
)
box_prediction = bounding_box.convert_format(
box_prediction,
source=target_format,
target=self.bounding_box_format,
images=images,
image_shape=image_shape,
)
bounding_boxes = {
"boxes": box_prediction,
"confidence": confidence_prediction,
"classes": ops.argmax(class_prediction, axis=-1),
"num_detections": valid_det,
}
# this is required to comply with KerasCV bounding box format.
return bounding_box.mask_invalid_detections(
bounding_boxes, output_ragged=False
)
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"from_logits": self.from_logits,
"iou_threshold": self.iou_threshold,
"confidence_threshold": self.confidence_threshold,
"max_detections": self.max_detections,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def non_max_suppression(
boxes,
scores,
max_output_size,
iou_threshold=0.5,
score_threshold=0.0,
tile_size=512,
):
# Box format must be yxyx
"""Non-maximum suppression.
Ported from https://github.com/tensorflow/tensorflow/blob/v2.12.0/tensorflow/python/ops/image_ops_impl.py#L5368-L5458
Args:
boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4].
Dimensions except the last two are batch dimensions. The last dimension
represents box coordinates in yxyx format.
scores: a tensor of rank 1 or higher with a shape of [..., num_boxes].
max_output_size: a scalar integer tensor representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IoU (intersection over union).
score_threshold: a float representing the threshold for box scores. Boxes
with a score that is not larger than this threshold will be suppressed.
tile_size: an integer representing the number of boxes in a tile, i.e.,
the maximum number of boxes per image that can be used to suppress other
boxes in parallel; larger tile_size means larger parallelism and
potentially more redundant work.
Returns:
idx: a tensor with a shape of [..., num_boxes] representing the
indices selected by non-max suppression. The leading dimensions
are the batch dimensions of the input boxes. All numbers are within
[0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i]
indices (i.e., idx[i][:num_valid[i]]) are valid.
num_valid: a tensor of rank 0 or higher with a shape of [...]
representing the number of valid indices in idx. Its dimensions are the
batch dimensions of the input boxes.
""" # noqa: E501
def _sort_scores_and_boxes(scores, boxes):
"""Sort boxes based their score from highest to lowest.
Args:
scores: a tensor with a shape of [batch_size, num_boxes] representing
the scores of boxes.
boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing
the boxes.
Returns:
sorted_scores: a tensor with a shape of [batch_size, num_boxes]
representing the sorted scores.
sorted_boxes: a tensor representing the sorted boxes.
sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes]
representing the index of the scores in a sorted descending order.
""" # noqa: E501
with ops.name_scope("sort_scores_and_boxes"):
sorted_scores_indices = ops.flip(
ops.cast(ops.argsort(scores, axis=1), "int32"), axis=1
)
sorted_scores = ops.take_along_axis(
scores,
sorted_scores_indices,
axis=1,
)
sorted_boxes = ops.take_along_axis(
boxes,
ops.expand_dims(sorted_scores_indices, axis=-1),
axis=1,
)
return sorted_scores, sorted_boxes, sorted_scores_indices
batch_dims = ops.shape(boxes)[:-2]
num_boxes = boxes.shape[-2]
boxes = ops.reshape(boxes, [-1, num_boxes, 4])
scores = ops.reshape(scores, [-1, num_boxes])
batch_size = boxes.shape[0]
if score_threshold != float("-inf"):
with ops.name_scope("filter_by_score"):
score_mask = ops.cast(scores > score_threshold, scores.dtype)
scores *= score_mask
box_mask = ops.expand_dims(ops.cast(score_mask, boxes.dtype), 2)
boxes *= box_mask
scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes)
pad = (
math.ceil(max(num_boxes, max_output_size) / tile_size) * tile_size
- num_boxes
)
boxes = ops.pad(ops.cast(boxes, "float32"), [[0, 0], [0, pad], [0, 0]])
scores = ops.pad(ops.cast(scores, "float32"), [[0, 0], [0, pad]])
num_boxes_after_padding = num_boxes + pad
num_iterations = num_boxes_after_padding // tile_size
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return ops.logical_and(
ops.min(output_size) < ops.cast(max_output_size, "int32"),
ops.cast(idx, "int32") < num_iterations,
)
def suppression_loop_body(boxes, iou_threshold, output_size, idx):
return _suppression_loop_body(
boxes, iou_threshold, output_size, idx, tile_size
)
selected_boxes, _, output_size, _ = ops.while_loop(
_loop_cond,
suppression_loop_body,
[
boxes,
iou_threshold,
ops.zeros([batch_size], "int32"),
ops.array(0),
],
)
num_valid = ops.minimum(output_size, max_output_size)
idx = num_boxes_after_padding - ops.cast(
ops.top_k(
ops.cast(ops.any(selected_boxes > 0, [2]), "int32")
* ops.cast(
ops.expand_dims(ops.arange(num_boxes_after_padding, 0, -1), 0),
"int32",
),
max_output_size,
)[0],
"int32",
)
idx = ops.minimum(idx, num_boxes - 1)
index_offsets = ops.cast(ops.arange(batch_size) * num_boxes, "int32")
take_along_axis_idx = ops.reshape(
idx + ops.expand_dims(index_offsets, 1), [-1]
)
# TODO(ianstenbit): Fix bug in tfnp.take_along_axis that causes this hack.
# (This will be removed anyway when we use built-in NMS for TF.)
if keras_3() and keras.backend.backend() != "tensorflow":
idx = ops.take_along_axis(
ops.reshape(sorted_indices, [-1]), take_along_axis_idx
)
else:
import tensorflow as tf
idx = tf.gather(ops.reshape(sorted_indices, [-1]), take_along_axis_idx)
idx = ops.reshape(idx, [batch_size, -1])
invalid_index = ops.zeros([batch_size, max_output_size], dtype="int32")
idx_index = ops.cast(
ops.expand_dims(ops.arange(max_output_size), 0), "int32"
)
num_valid_expanded = ops.expand_dims(num_valid, 1)
idx = ops.where(idx_index < num_valid_expanded, idx, invalid_index)
num_valid = ops.reshape(num_valid, batch_dims)
return idx, num_valid
def _bbox_overlap(boxes_a, boxes_b):
"""Calculates the overlap (iou - intersection over union) between boxes_a and boxes_b.
Args:
boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of
boxes per image. The last dimension is the pixel coordinates in
[ymin, xmin, ymax, xmax] form.
boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of
boxes. The last dimension is the pixel coordinates in
[ymin, xmin, ymax, xmax] form.
Returns:
intersection_over_union: a tensor with a shape of [batch_size, N, M],
representing the ratio of intersection area over union area (IoU) between
two boxes.
""" # noqa: E501
with ops.name_scope("bbox_overlap"):
if len(boxes_a.shape) == 4:
boxes_a = ops.squeeze(boxes_a, axis=0)
a_y_min, a_x_min, a_y_max, a_x_max = ops.split(boxes_a, 4, axis=2)
b_y_min, b_x_min, b_y_max, b_x_max = ops.split(boxes_b, 4, axis=2)
# Calculates the intersection area.
i_xmin = ops.maximum(a_x_min, ops.transpose(b_x_min, [0, 2, 1]))
i_xmax = ops.minimum(a_x_max, ops.transpose(b_x_max, [0, 2, 1]))
i_ymin = ops.maximum(a_y_min, ops.transpose(b_y_min, [0, 2, 1]))
i_ymax = ops.minimum(a_y_max, ops.transpose(b_y_max, [0, 2, 1]))
i_area = ops.maximum((i_xmax - i_xmin), 0) * ops.maximum(
(i_ymax - i_ymin), 0
)
# Calculates the union area.
a_area = (a_y_max - a_y_min) * (a_x_max - a_x_min)
b_area = (b_y_max - b_y_min) * (b_x_max - b_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = a_area + ops.transpose(b_area, [0, 2, 1]) - i_area + EPSILON
intersection_over_union = i_area / u_area
return intersection_over_union
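# Worked example of the arithmetic above (illustrative): for yxyx boxes
# a = [0, 0, 2, 2] and b = [1, 1, 3, 3], the intersection area is 1 * 1 = 1 and
# the union area is 4 + 4 - 1 = 7, so the returned IoU is roughly 1 / 7 ≈ 0.14
# (the small EPSILON in the denominator only guards against division by zero).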
def _self_suppression(iou, _, iou_sum, iou_threshold):
"""Suppress boxes in the same tile.
Compute boxes that cannot be suppressed by others (i.e.,
can_suppress_others), and then use them to suppress boxes in the same tile.
Args:
iou: a tensor of shape [batch_size, num_boxes_with_padding] representing
intersection over union.
iou_sum: a scalar tensor.
iou_threshold: a scalar tensor.
Returns:
iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding].
iou_diff: a scalar tensor representing whether any box is suppressed in
this step.
iou_sum_new: a scalar tensor of shape [batch_size] that represents
the iou sum after suppression.
iou_threshold: a scalar tensor.
""" # noqa: E501
batch_size = ops.shape(iou)[0]
can_suppress_others = ops.cast(
ops.reshape(ops.max(iou, 1) < iou_threshold, [batch_size, -1, 1]),
iou.dtype,
)
iou_after_suppression = (
ops.reshape(
ops.cast(
ops.max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype
),
[batch_size, -1, 1],
)
* iou
)
iou_sum_new = ops.sum(iou_after_suppression, [1, 2])
return [
iou_after_suppression,
ops.any(iou_sum - iou_sum_new > iou_threshold),
iou_sum_new,
iou_threshold,
]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size):
"""Suppress boxes between different tiles.
Args:
boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4]
box_slice: a tensor of shape [batch_size, tile_size, 4]
iou_threshold: a scalar tensor
inner_idx: a scalar tensor representing the tile index of the tile
that is used to suppress box_slice
tile_size: an integer representing the number of boxes in a tile
Returns:
boxes: unchanged boxes as input
box_slice_after_suppression: box_slice after suppression
iou_threshold: unchanged
"""
slice_index = ops.expand_dims(
ops.expand_dims(
ops.cast(
ops.linspace(
inner_idx * tile_size,
(inner_idx + 1) * tile_size - 1,
tile_size,
),
"int32",
),
axis=0,
),
axis=-1,
)
new_slice = ops.expand_dims(
ops.take_along_axis(boxes, slice_index, axis=1), 0
)
iou = _bbox_overlap(new_slice, box_slice)
box_slice_after_suppression = (
ops.expand_dims(
ops.cast(ops.all(iou < iou_threshold, [1]), box_slice.dtype), 2
)
* box_slice
)
return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):
"""Process boxes in the range [idx*tile_size, (idx+1)*tile_size).
Args:
boxes: a tensor with a shape of [batch_size, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [batch_size], representing the number
of selected boxes for each batch.
idx: an integer scalar representing induction variable.
tile_size: an integer representing the number of boxes in a tile
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
""" # noqa: E501
with ops.name_scope("suppression_loop_body"):
num_tiles = boxes.shape[1] // tile_size
batch_size = boxes.shape[0]
def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):
return _cross_suppression(
boxes, box_slice, iou_threshold, inner_idx, tile_size
)
# Iterates over tiles that can possibly suppress the current tile.
slice_index = ops.expand_dims(
ops.expand_dims(
ops.cast(
ops.linspace(
idx * tile_size, (idx + 1) * tile_size - 1, tile_size
),
"int32",
),
axis=0,
),
axis=-1,
)
box_slice = ops.take_along_axis(boxes, slice_index, axis=1)
_, box_slice, _, _ = ops.while_loop(
lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
cross_suppression_func,
[boxes, box_slice, iou_threshold, ops.array(0)],
)
# Iterates over the current tile to compute self-suppression.
iou = _bbox_overlap(box_slice, box_slice)
mask = ops.expand_dims(
ops.reshape(ops.arange(tile_size), [1, -1])
> ops.reshape(ops.arange(tile_size), [-1, 1]),
0,
)
iou *= ops.cast(ops.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _, _ = ops.while_loop(
lambda _iou, loop_condition, _iou_sum, _: loop_condition,
_self_suppression,
[iou, ops.array(True), ops.sum(iou, [1, 2]), iou_threshold],
)
suppressed_box = ops.sum(suppressed_iou, 1) > 0
box_slice *= ops.expand_dims(
1.0 - ops.cast(suppressed_box, box_slice.dtype), 2
)
# Uses box_slice to update the input boxes.
mask = ops.reshape(
ops.cast(ops.equal(ops.arange(num_tiles), idx), boxes.dtype),
[1, -1, 1, 1],
)
boxes = ops.tile(
ops.expand_dims(box_slice, 1), [1, num_tiles, 1, 1]
) * mask + ops.reshape(boxes, [batch_size, num_tiles, tile_size, 4]) * (
1 - mask
)
boxes = ops.reshape(boxes, [batch_size, -1, 4])
# Updates output_size.
output_size += ops.cast(
ops.sum(ops.any(box_slice > 0, [2]), [1]), "int32"
)
return boxes, iou_threshold, output_size, idx + 1
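Two short, hypothetical usage sketches for this module, using random, illustrative data: the first decodes a batch of raw predictions with the `NonMaxSuppression` layer; the second calls the lower-level `non_max_suppression` function directly to show the `idx` / `num_valid` contract described in its docstring.

```python
import numpy as np

from keras_cv.backend import ops
from keras_cv.layers import NonMaxSuppression
from keras_cv.layers.object_detection.non_max_suppression import (
    non_max_suppression,
)

# 1) Layer usage: random logits and well-formed "xyxy" boxes (x1 < x2, y1 < y2).
corners = np.random.uniform(0.0, 100.0, size=(2, 100, 2))
sizes = np.random.uniform(1.0, 28.0, size=(2, 100, 2))
boxes = np.concatenate([corners, corners + sizes], axis=-1).astype("float32")
logits = np.random.uniform(-4.0, 4.0, size=(2, 100, 20)).astype("float32")

decoder = NonMaxSuppression(
    bounding_box_format="xyxy",
    from_logits=True,
    iou_threshold=0.5,
    confidence_threshold=0.3,
    max_detections=10,
)
detections = decoder(boxes, logits)
# `detections` is a dict with "boxes", "confidence", "classes", "num_detections".

# 2) Function usage: boxes must already be in "yxyx" format. The duplicate of
# box 0 should be suppressed, so num_valid[0] is expected to be 2.
yxyx = ops.array(
    [[[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]]]
)
scores = ops.array([[0.9, 0.8, 0.7]])
idx, num_valid = non_max_suppression(
    yxyx, scores, max_output_size=3, iou_threshold=0.5, score_threshold=0.1
)
# Only the first num_valid[0] entries of idx[0] are meaningful.
```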
| keras-cv/keras_cv/layers/object_detection/non_max_suppression.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/non_max_suppression.py",
"repo_id": "keras-cv",
"token_count": 9826
} | 44 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection_3d import voxel_utils
def decode_bin_heading(predictions, num_bin):
"""Decode bin heading.
Computes the box heading (orientation) by decoding the bin predictions. The
predictions should contain bin classification scores (first num_bin scores)
and corresponding bin residuals (the following num_bin scores).
Args:
predictions: Prediction scores tensor with size [N, num_bin*2]
predictions = [:, bin_1, bin_2, ..., bin_k, res_1, res_2, ..., res_k],
where k is the number of bins and N is the number of boxes.
num_bin: A constant showing the number of bins used in heading bin loss.
Returns:
heading: Decoded heading tensor with size [N] in which heading values are
in the [-pi, pi] range.
Raises:
ValueError: If the rank of `predictions` is not 2, i.e. the tensor does
not have the expected number of dimensions.
"""
with keras.backend.name_scope("decode_bin_heading"):
if len(predictions.shape) != 2:
raise ValueError(
"The rank of the prediction tensor is expected to be 2. "
f"Instead it is : {len(predictions.shape)}."
)
# Get the index of the bin with the maximum score to build a tensor of
# [N].
bin_idx = ops.cast(
ops.argmax(predictions[:, 0:num_bin], axis=-1), "int32"
)
bin_idx_float = ops.cast(bin_idx, dtype=predictions.dtype)
residual_norm = ops.take_along_axis(
predictions[:, num_bin : num_bin * 2],
ops.expand_dims(bin_idx, axis=-1),
axis=-1,
)[:, 0]
# Divide 2pi into equal sized bins to compute the angle per class/bin.
angle_per_class = (2 * np.pi) / num_bin
residual_angle = residual_norm * (angle_per_class / 2)
# bin_center is computed using the bin_idx and angle_per class,
# (e.g., 0, 30, 60, 90, 120, ..., 270, 300, 330). Then residual is
# added.
heading = ops.mod(
bin_idx_float * angle_per_class + residual_angle, 2 * np.pi
)
heading_mask = heading > np.pi
heading = ops.where(heading_mask, heading - 2 * np.pi, heading)
return heading
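# Worked example of the decoding above (illustrative values): with num_bin=12,
# each bin spans 2 * pi / 12 = pi / 6 radians. A prediction whose best bin is
# index 7 with a normalized residual of 0.5 decodes to
# 7 * (pi / 6) + 0.5 * (pi / 12) ≈ 3.80 rad, which the final wrap maps into
# [-pi, pi] as 3.80 - 2 * pi ≈ -2.49 rad.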
def decode_bin_box(pd, num_head_bin, anchor_size):
"""Decode bin based box encoding."""
with keras.backend.name_scope("decode_bin_box"):
delta = []
start = 0
for dim in [0, 1, 2]:
delta.append(pd[:, start])
start = start + 1
heading = decode_bin_heading(pd[:, start:], num_head_bin)
start = start + num_head_bin * 2
size_res_norm = pd[:, start : start + 3]
# [N,3]
lwh = ops.cast(
size_res_norm
* ops.array(list(anchor_size), dtype=size_res_norm.dtype)
+ ops.array(list(anchor_size), dtype=size_res_norm.dtype),
pd.dtype,
)
loc = ops.stack(delta, axis=-1)
box = ops.concatenate(
[loc, lwh, ops.expand_dims(heading, axis=-1)], axis=-1
)
return box
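# Column layout consumed by decode_bin_box (for reference): with K heading bins,
# the input `pd` has 3 + 2 * K + 3 features per box, ordered as
# [dx, dy, dz | K bin scores | K bin residuals | 3 size residuals]. The decoded
# output is a [N, 7] tensor of the three location offsets, the three decoded
# sizes (in the order of `anchor_size`), and the heading.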
@keras_cv_export("keras_cv.layers.HeatmapDecoder")
class HeatmapDecoder(keras.layers.Layer):
"""A Keras layer that decodes predictions of a 3d object detection model.
Args:
class_id: the integer index for a particular class.
num_head_bin: the number of bins used to discretize the heading angle.
anchor_size: the anchor size along each of the x, y, z dimensions.
max_pool_size: the 2d pooling size applied to the heatmap.
max_num_box: the maximum number of boxes selected from the heatmap.
heatmap_threshold: the threshold above which a heatmap location is treated
as positive.
voxel_size: the x, y, z dimension of each voxel.
spatial_size: the x, y, z boundary of voxels.
"""
def __init__(
self,
class_id,
num_head_bin,
anchor_size,
max_pool_size,
max_num_box,
heatmap_threshold,
voxel_size,
spatial_size,
**kwargs,
):
super().__init__(**kwargs)
self.class_id = class_id
self.num_head_bin = num_head_bin
self.anchor_size = anchor_size
self.max_pool_size = max_pool_size
self.max_num_box = max_num_box
self.heatmap_threshold = heatmap_threshold
self.voxel_size = voxel_size
self.spatial_size = spatial_size
self.built = True
def call(self, prediction):
"""Accepts raw predictions, and returns decoded boxes.
Args:
prediction: float Tensor.
"""
heatmap = ops.softmax(prediction[..., :2])[..., 1:2]
heatmap_pool = ops.max_pool(heatmap, self.max_pool_size, 1, "same")
heatmap_mask = heatmap > self.heatmap_threshold
heatmap_local_maxima_mask = ops.equal(heatmap, heatmap_pool)
# [B, H, W, 1]
heatmap_mask = ops.logical_and(heatmap_mask, heatmap_local_maxima_mask)
# [B, H, W, 1]
heatmap = ops.where(heatmap_mask, heatmap, 0)
# [B, H, W]
heatmap = ops.squeeze(heatmap, axis=-1)
b, h, w = ops.shape(heatmap)
heatmap = ops.reshape(heatmap, [b, h * w])
_, top_index = ops.top_k(heatmap, k=self.max_num_box)
# [B, H, W, ?]
box_prediction = prediction[:, :, :, 2:]
f = box_prediction.shape[-1]
box_prediction = ops.reshape(box_prediction, [b, h * w, f])
heatmap = ops.reshape(heatmap, [b, h * w])
# [B, max_num_box, ?]
box_prediction = ops.take_along_axis(
box_prediction, ops.expand_dims(top_index, axis=-1), axis=1
)
# [B, max_num_box]
box_score = ops.take_along_axis(heatmap, top_index, axis=1)
box_class = ops.ones_like(box_score, "int32") * self.class_id
# [B*max_num_box, ?]
f = ops.shape(box_prediction)[-1]
box_prediction_reshape = ops.reshape(
box_prediction, [b * self.max_num_box, f]
)
# [B*max_num_box, 7]
box_decoded = decode_bin_box(
box_prediction_reshape, self.num_head_bin, self.anchor_size
)
# [B, max_num_box, 7]
box_decoded = ops.reshape(box_decoded, [b, self.max_num_box, 7])
global_xyz = ops.zeros([b, 3])
ref_xyz = voxel_utils.compute_feature_map_ref_xyz(
self.voxel_size, self.spatial_size, global_xyz
)
# [B, H, W, 3]
ref_xyz = ops.squeeze(ref_xyz, axis=-2)
f = list(ref_xyz.shape)[-1]
ref_xyz = ops.reshape(ref_xyz, [b, h * w, f])
# [B, max_num_box, 3]
ref_xyz = ops.take_along_axis(
ref_xyz, ops.expand_dims(top_index, axis=-1), axis=1
)
box_decoded_cxyz = ops.cast(
ref_xyz + box_decoded[:, :, :3], box_decoded.dtype
)
box_decoded = ops.concatenate(
[box_decoded_cxyz, box_decoded[:, :, 3:]], axis=-1
)
return box_decoded, box_class, box_score
def get_config(self):
config = {
"class_id": self.class_id,
"num_head_bin": self.num_head_bin,
"anchor_size": self.anchor_size,
"max_pool_size": self.max_pool_size,
"max_num_box": self.max_num_box,
"heatmap_threshold": self.heatmap_threshold,
"voxel_size": self.voxel_size,
"spatial_size": self.spatial_size,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
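A construction-only sketch for the decoder above; the argument values are illustrative. The prediction tensor passed to `call` is assumed to have `2 + 3 + 2 * num_head_bin + 3` channels (heatmap logits, xyz offsets, heading bin scores and residuals, size residuals), on a spatial grid consistent with `voxel_size` and `spatial_size`.

```python
decoder = HeatmapDecoder(
    class_id=1,
    num_head_bin=12,
    anchor_size=[4.5, 2.0, 1.6],  # illustrative (x, y, z) anchor size in meters
    max_pool_size=[3, 3],
    max_num_box=64,
    heatmap_threshold=0.2,
    voxel_size=[0.32, 0.32, 6.0],
    spatial_size=[-40.96, 40.96, -40.96, 40.96, -3.0, 3.0],
)
# boxes, classes, scores = decoder(prediction)  # prediction: [B, H, W, 32] here
```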
| keras-cv/keras_cv/layers/object_detection_3d/heatmap_decoder.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/heatmap_decoder.py",
"repo_id": "keras-cv",
"token_count": 3810
} | 45 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
@keras_cv_export("keras_cv.layers.CutMix")
class CutMix(BaseImageAugmentationLayer):
"""CutMix implements the CutMix data augmentation technique.
Args:
alpha: Float. Concentration parameter of the Beta(alpha, alpha)
distribution (sampled internally as a ratio of two gamma samples) from
which the mixing ratio `lambda` is drawn. Defaults to 1.0, which is a
recommended value when training an imagenet1k classification model.
seed: Integer. Used to create a random seed.
References:
- [CutMix paper]( https://arxiv.org/abs/1905.04899).
"""
def __init__(
self,
alpha=1.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.alpha = alpha
self.seed = seed
def _sample_from_beta(self, alpha, beta, shape):
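        # Draws X ~ Gamma(alpha, 1) and Y ~ Gamma(beta, 1); the ratio
        # X / (X + Y) below is then a Beta(alpha, beta) sample, used as the
        # CutMix mixing ratio.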
sample_alpha = tf.random.gamma(
shape,
alpha=alpha,
)
sample_beta = tf.random.gamma(
shape,
alpha=beta,
)
return sample_alpha / (sample_alpha + sample_beta)
def _batch_augment(self, inputs):
self._validate_inputs(inputs)
images = inputs.get("images", None)
labels = inputs.get("labels", None)
segmentation_masks = inputs.get("segmentation_masks", None)
(
images,
lambda_sample,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
) = self._cutmix(images)
if labels is not None:
labels = self._update_labels(
labels, lambda_sample, permutation_order
)
inputs["labels"] = labels
if segmentation_masks is not None:
segmentation_masks = self._update_segmentation_masks(
segmentation_masks,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
)
inputs["segmentation_masks"] = segmentation_masks
inputs["images"] = images
return inputs
def _augment(self, inputs):
raise ValueError(
"CutMix received a single image to `call`. The layer relies on "
"combining multiple examples, and as such will not behave as "
"expected. Please call the layer with 2 or more samples."
)
def _cutmix(self, images):
"""Apply cutmix."""
input_shape = tf.shape(images)
batch_size, image_height, image_width = (
input_shape[0],
input_shape[1],
input_shape[2],
)
permutation_order = tf.random.shuffle(
tf.range(0, batch_size), seed=self.seed
)
lambda_sample = self._sample_from_beta(
self.alpha, self.alpha, (batch_size,)
)
ratio = tf.math.sqrt(1 - lambda_sample)
cut_height = tf.cast(
ratio * tf.cast(image_height, dtype=tf.float32), dtype=tf.int32
)
cut_width = tf.cast(
ratio * tf.cast(image_width, dtype=tf.float32), dtype=tf.int32
)
random_center_height = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_height, dtype=tf.int32
)
random_center_width = tf.random.uniform(
shape=[batch_size], minval=0, maxval=image_width, dtype=tf.int32
)
bounding_box_area = cut_height * cut_width
lambda_sample = 1.0 - bounding_box_area / (image_height * image_width)
lambda_sample = tf.cast(lambda_sample, dtype=self.compute_dtype)
images = fill_utils.fill_rectangle(
images,
random_center_width,
random_center_height,
cut_width,
cut_height,
tf.gather(images, permutation_order),
)
return (
images,
lambda_sample,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
)
def _update_labels(self, labels, lambda_sample, permutation_order):
cutout_labels = tf.gather(labels, permutation_order)
lambda_sample = tf.reshape(lambda_sample, [-1, 1])
labels = lambda_sample * labels + (1.0 - lambda_sample) * cutout_labels
return labels
def _update_segmentation_masks(
self,
segmentation_masks,
permutation_order,
random_center_height,
random_center_width,
cut_width,
cut_height,
):
cutout_segmentation_masks = tf.gather(
segmentation_masks, permutation_order
)
segmentation_masks = fill_utils.fill_rectangle(
segmentation_masks,
random_center_width,
random_center_height,
cut_width,
cut_height,
cutout_segmentation_masks,
)
return segmentation_masks
def _validate_inputs(self, inputs):
images = inputs.get("images", None)
labels = inputs.get("labels", None)
segmentation_masks = inputs.get("segmentation_masks", None)
if images is None or (labels is None and segmentation_masks is None):
raise ValueError(
"CutMix expects inputs in a dictionary with format "
'{"images": images, "labels": labels}. or'
'{"images": images, "segmentation_masks": segmentation_masks}. '
f"Got: inputs = {inputs}."
)
if labels is not None and not labels.dtype.is_floating:
raise ValueError(
f"CutMix received labels with type {labels.dtype}. "
"Labels must be of type float."
)
def get_config(self):
config = {
"alpha": self.alpha,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
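A small, self-contained usage sketch for the layer above with random data; labels are one-hot floats, as the validation logic requires.

```python
import tensorflow as tf

images = tf.random.uniform((8, 64, 64, 3), 0, 255)
labels = tf.one_hot(tf.random.uniform((8,), 0, 10, dtype=tf.int32), 10)

cut_mix = CutMix(alpha=1.0, seed=42)
outputs = cut_mix({"images": images, "labels": labels})
# outputs["images"] contains rectangles pasted from shuffled batch partners;
# outputs["labels"] are blended according to the cut-area fraction (lambda).
```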
| keras-cv/keras_cv/layers/preprocessing/cut_mix.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/cut_mix.py",
"repo_id": "keras-cv",
"token_count": 3181
} | 46 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils.preprocessing import transform_value_range
@keras_cv_export("keras_cv.layers.Posterization")
class Posterization(BaseImageAugmentationLayer):
"""Reduces the number of bits for each color channel.
References:
- [AutoAugment: Learning Augmentation Policies from Data](https://arxiv.org/abs/1805.09501)
- [RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719)
Args:
value_range: a tuple or a list of two elements. The first value
represents the lower bound for values in passed images, the second
represents the upper bound. Images passed to the layer should have
values within `value_range`. Defaults to `(0, 255)`.
bits: integer, the number of bits to keep for each channel. Must be a
value between 1-8.
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
print(images[0, 0, 0])
# [59 62 63]
# Note that images are Tensors with values in the range [0, 255] and uint8
dtype
posterization = Posterization(bits=4, value_range=[0, 255])
images = posterization(images)
print(images[0, 0, 0])
# [48., 48., 48.]
# NOTE: the layer will output values in tf.float32, regardless of input
dtype.
```
Call arguments:
inputs: input tensor in two possible formats:
1. single 3D (HWC) image or 4D (NHWC) batch of images.
2. A dict of tensors where the images are under `"images"` key.
""" # noqa: E501
def __init__(self, value_range, bits, **kwargs):
super().__init__(**kwargs)
if not len(value_range) == 2:
raise ValueError(
"value_range must be a sequence of two elements. "
f"Received: {value_range}"
)
if not (0 < bits < 9):
raise ValueError(
f"Bits value must be between 1-8. Received bits: {bits}."
)
self._shift = 8 - bits
self._value_range = value_range
def augment_image(self, image, **kwargs):
image = transform_value_range(
images=image,
original_range=self._value_range,
target_range=[0, 255],
)
image = tf.cast(image, tf.uint8)
image = self._posterize(image)
image = tf.cast(image, self.compute_dtype)
return transform_value_range(
images=image,
original_range=[0, 255],
target_range=self._value_range,
dtype=self.compute_dtype,
)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
def _batch_augment(self, inputs):
# Skip the use of vectorized_map or map_fn as the implementation is
# already vectorized
return self._augment(inputs)
def _posterize(self, image):
return tf.bitwise.left_shift(
tf.bitwise.right_shift(image, self._shift), self._shift
)
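    # Worked example of the shift above: with bits=4 the shift is 8 - 4 = 4, so
    # a uint8 value of 59 (0b00111011) becomes (59 >> 4) << 4 = 3 << 4 = 48,
    # matching the docstring example where [59, 62, 63] maps to [48, 48, 48].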
def augment_label(self, label, transformation=None, **kwargs):
return label
def get_config(self):
config = {"bits": 8 - self._shift, "value_range": self._value_range}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
| keras-cv/keras_cv/layers/preprocessing/posterization.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/posterization.py",
"repo_id": "keras-cv",
"token_count": 1668
} | 47 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv import layers
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.tests.test_case import TestCase
class AddOneToInputs(BaseImageAugmentationLayer):
"""Add 1 to all image values, for testing purposes."""
def __init__(self):
super(AddOneToInputs, self).__init__()
self.call_counter = tf.Variable(initial_value=0)
def augment_image(self, image, transformation=None, **kwargs):
self.call_counter.assign_add(1)
return image + 1
class RandomChoiceTest(TestCase):
def test_calls_layer_augmentation_per_image(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
@pytest.mark.tf_keras_only
def test_calls_layer_augmentation_in_graph(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
@tf.function()
def call_pipeline(xs):
return pipeline(xs)
xs = tf.random.uniform((2, 5, 5, 3), 0, 100, dtype=tf.float32)
os = call_pipeline(xs)
self.assertAllClose(xs + 1, os)
def test_batchwise(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer], batchwise=True)
xs = tf.random.uniform((4, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
# Ensure the layer is only called once for the entire batch
self.assertEqual(layer.call_counter, 1)
def test_works_with_cutmix_mixup(self):
pipeline = layers.RandomChoice(
layers=[layers.CutMix(), layers.MixUp()], batchwise=True
)
xs = {
"images": tf.random.uniform((4, 5, 5, 3), 0, 100, dtype=tf.float32),
"labels": tf.random.uniform((4, 10), 0, 1, dtype=tf.float32),
}
pipeline(xs)
def test_calls_layer_augmentation_single_image(self):
layer = AddOneToInputs()
pipeline = layers.RandomChoice(layers=[layer])
xs = tf.random.uniform((5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
def test_calls_choose_one_layer_augmentation(self):
batch_size = 10
pipeline = layers.RandomChoice(
layers=[AddOneToInputs(), AddOneToInputs()]
)
xs = tf.random.uniform((batch_size, 5, 5, 3), 0, 100, dtype=tf.float32)
os = pipeline(xs)
self.assertAllClose(xs + 1, os)
total_calls = (
pipeline.layers[0].call_counter + pipeline.layers[1].call_counter
)
self.assertEqual(total_calls, batch_size)
| keras-cv/keras_cv/layers/preprocessing/random_choice_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_choice_test.py",
"repo_id": "keras-cv",
"token_count": 1425
} | 48 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.tests.test_case import TestCase
class RandomZoomTest(TestCase):
@parameterized.named_parameters(
("random_zoom_in_4_by_6", -0.4, -0.6),
("random_zoom_in_2_by_3", -0.2, -0.3),
("random_zoom_in_tuple_factor", (-0.4, -0.5), (-0.2, -0.3)),
("random_zoom_out_4_by_6", 0.4, 0.6),
("random_zoom_out_2_by_3", 0.2, 0.3),
("random_zoom_out_tuple_factor", (0.4, 0.5), (0.2, 0.3)),
)
def test_output_shapes(self, height_factor, width_factor):
np.random.seed(1337)
num_samples = 2
orig_height = 5
orig_width = 8
channels = 3
input = {
"images": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
),
"segmentation_masks": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, 1],
minval=0,
maxval=2,
),
}
layer = RandomZoom(height_factor, width_factor)
actual_output = layer(input)
expected_output = {
"images": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, channels],
),
"segmentation_masks": tf.random.uniform(
shape=[num_samples, orig_height, orig_width, 1],
minval=0,
maxval=2,
),
}
# Check output shape of images
self.assertAllEqual(
expected_output["images"].shape, actual_output["images"].shape
)
# Check output shape of segmentation masks
self.assertAllEqual(
expected_output["segmentation_masks"].shape,
actual_output["segmentation_masks"].shape,
)
def test_random_zoom_in_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(-0.5, -0.5), (-0.5, -0.5), interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(0.5, 0.5),
(0.8, 0.8),
fill_mode="constant",
interpolation="nearest",
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 5, 7, 9, 0],
[0, 10, 12, 14, 0],
[0, 20, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_out_numeric_preserve_aspect_ratio(self):
for dtype in (np.int64, np.float32):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(dtype)
layer = RandomZoom(
(0.5, 0.5), fill_mode="constant", interpolation="nearest"
)
output_image = layer(np.expand_dims(input_image, axis=0))
expected_output = np.asarray(
[
[0, 0, 0, 0, 0],
[0, 6, 7, 9, 0],
[0, 11, 12, 14, 0],
[0, 21, 22, 24, 0],
[0, 0, 0, 0, 0],
]
).astype(dtype)
expected_output = np.reshape(expected_output, (1, 5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_random_zoom_on_batched_images_independently(self):
image = tf.random.uniform(shape=(100, 100, 3))
input_images = tf.stack([image, image], axis=0)
layer = RandomZoom(
height_factor=(-0.4, -0.5), width_factor=(-0.2, -0.3)
)
results = layer(input_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = RandomZoom(0.5, 0.6, name="image_preproc")
config = layer.get_config()
layer_1 = RandomZoom.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_unbatched_image(self):
input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64)
layer = RandomZoom((-0.5, -0.5), (-0.5, -0.5), interpolation="nearest")
output_image = layer(input_image)
expected_output = np.asarray(
[
[6, 7, 7, 8, 8],
[11, 12, 12, 13, 13],
[11, 12, 12, 13, 13],
[16, 17, 17, 18, 18],
[16, 17, 17, 18, 18],
]
).astype(np.int64)
expected_output = np.reshape(expected_output, (5, 5, 1))
self.assertAllEqual(expected_output, output_image)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = RandomZoom(0.5, 0.5)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = RandomZoom(0.5, 0.5, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
| keras-cv/keras_cv/layers/preprocessing/random_zoom_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_zoom_test.py",
"repo_id": "keras-cv",
"token_count": 3492
} | 49 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import random
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class RandomAddLayer(base_augmentation_layer_3d.BaseAugmentationLayer3D):
def __init__(self, translate_noise=(0.0, 0.0, 0.0), **kwargs):
super().__init__(**kwargs)
self._translate_noise = translate_noise
def get_random_transformation(self, **kwargs):
random_x = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[0],
seed=self._random_generator,
)
random_y = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[1],
seed=self._random_generator,
)
random_z = random.normal(
(),
mean=0.0,
stddev=self._translate_noise[2],
seed=self._random_generator,
)
return {
"pose": tf.stack([random_x, random_y, random_z, 0, 0, 0], axis=0)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
point_clouds_xyz = point_clouds[..., :3]
point_clouds_xyz += transformation["pose"][:3]
bounding_boxes_xyz = bounding_boxes[..., :3]
bounding_boxes_xyz += transformation["pose"][:3]
return (
tf.concat([point_clouds_xyz, point_clouds[..., 3:]], axis=-1),
tf.concat([bounding_boxes_xyz, bounding_boxes[..., 3:]], axis=-1),
)
class VectorizeDisabledLayer(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
def __init__(self, **kwargs):
self.auto_vectorize = False
super().__init__(**kwargs)
class BaseImageAugmentationLayerTest(TestCase):
def test_auto_vectorize_disabled(self):
vectorize_disabled_layer = VectorizeDisabledLayer()
self.assertFalse(vectorize_disabled_layer.auto_vectorize)
self.assertEqual(vectorize_disabled_layer._map_fn, tf.map_fn)
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_leaves_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
dummy = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy": dummy,
}
outputs = add_layer(inputs)
self.assertAllEqual(inputs["dummy"], outputs["dummy"])
self.assertNotAllClose(inputs, outputs)
def test_augment_leaves_batched_extra_dict_entries_unmodified(self):
add_layer = RandomAddLayer(translate_noise=(1.0, 1.0, 1.0))
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
dummy = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
"dummy": dummy,
}
outputs = add_layer(inputs)
self.assertAllEqual(inputs["dummy"], outputs["dummy"])
self.assertNotAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d_test.py",
"repo_id": "keras-cv",
"token_count": 2198
} | 50 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import coordinate_transform
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomTranslation")
class GlobalRandomTranslation(
base_augmentation_layer_3d.BaseAugmentationLayer3D
):
"""A preprocessing layer which randomly translates point clouds and bounding
boxes along X, Y, and Z axes during training.
    This layer will randomly translate the whole scene along the X, Y, and Z axes
based on three randomly sampled translation factors following three normal
distributions centered at 0 with standard deviation [x_stddev, y_stddev,
z_stddev].
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
      x_stddev: A float scalar setting the translation noise standard
        deviation along the X axis.
      y_stddev: A float scalar setting the translation noise standard
        deviation along the Y axis.
      z_stddev: A float scalar setting the translation noise standard
        deviation along the Z axis.
"""
def __init__(self, x_stddev=None, y_stddev=None, z_stddev=None, **kwargs):
super().__init__(**kwargs)
x_stddev = x_stddev if x_stddev else 0.0
y_stddev = y_stddev if y_stddev else 0.0
z_stddev = z_stddev if z_stddev else 0.0
if x_stddev < 0 or y_stddev < 0 or z_stddev < 0:
raise ValueError("x_stddev, y_stddev, and z_stddev must be >=0.")
self._x_stddev = x_stddev
self._y_stddev = y_stddev
self._z_stddev = z_stddev
def get_config(self):
return {
"x_stddev": self._x_stddev,
"y_stddev": self._x_stddev,
"z_stddev": self._z_stddev,
}
def get_random_transformation(self, **kwargs):
random_x_translation = random.normal(
(),
mean=0.0,
stddev=self._x_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_y_translation = random.normal(
(),
mean=0.0,
stddev=self._y_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_z_translation = random.normal(
(),
mean=0.0,
stddev=self._z_stddev,
dtype=self.compute_dtype,
seed=self._random_generator,
)
return {
"pose": tf.stack(
[
random_x_translation,
random_y_translation,
random_z_translation,
0.0,
0.0,
0.0,
],
axis=0,
)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
pose = transformation["pose"]
point_clouds_xyz = coordinate_transform(point_clouds[..., :3], pose)
point_clouds = tf.concat(
[point_clouds_xyz, point_clouds[..., 3:]], axis=-1
)
bounding_boxes_xyz = coordinate_transform(
bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.Z + 1], pose
)
bounding_boxes = tf.concat(
[
bounding_boxes_xyz,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.DX :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
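# Illustrative usage sketch (added for exposition; not part of the original
# module). It reuses the POINT_CLOUDS / BOUNDING_BOXES keys defined above and
# arbitrary toy shapes: 2 frames, 50 points with 10 features, and 10 boxes in
# CENTER_XYZ_DXDYDZ_PHI format.
def _example_global_random_translation():
    import numpy as np
    layer = GlobalRandomTranslation(x_stddev=0.2, y_stddev=0.2, z_stddev=0.1)
    point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
    bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
    # The whole scene is shifted by a single randomly sampled offset.
    return layer({POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes})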
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_translation.py",
"repo_id": "keras-cv",
"token_count": 2151
} | 51 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.bounding_box.iou import compute_ciou
@keras_cv_export("keras_cv.losses.CIoULoss")
class CIoULoss(keras.losses.Loss):
"""Implements the Complete IoU (CIoU) Loss
CIoU loss is an extension of GIoU loss, which further improves the IoU
optimization for object detection. CIoU loss not only penalizes the
bounding box coordinates but also considers the aspect ratio and center
distance of the boxes. The length of the last dimension should be 4 to
represent the bounding boxes.
Args:
bounding_box_format: a case-insensitive string (for example, "xyxy").
Each bounding box is defined by these 4 values. For detailed
information on the supported formats, see the [KerasCV bounding box
documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
eps: A small value added to avoid division by zero and stabilize
calculations.
References:
- [CIoU paper](https://arxiv.org/pdf/2005.03572.pdf)
Sample Usage:
```python
    y_true = np.random.uniform(
        size=(5, 10, 4),
        low=0,
        high=10)
    y_pred = np.random.uniform(
        size=(5, 10, 4),
        low=0,
        high=10)
loss = keras_cv.losses.CIoULoss()
loss(y_true, y_pred).numpy()
```
Usage with the `compile()` API:
```python
model.compile(optimizer='adam', loss=CIoULoss())
```
"""
def __init__(self, bounding_box_format, eps=1e-7, **kwargs):
super().__init__(**kwargs)
self.eps = eps
self.bounding_box_format = bounding_box_format
def call(self, y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] != 4:
raise ValueError(
"CIoULoss expects y_pred.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_pred.shape[-1]={y_pred.shape[-1]}."
)
if y_true.shape[-1] != 4:
raise ValueError(
"CIoULoss expects y_true.shape[-1] to be 4 to represent the "
f"bounding boxes. Received y_true.shape[-1]={y_true.shape[-1]}."
)
if y_true.shape[-2] != y_pred.shape[-2]:
raise ValueError(
"CIoULoss expects number of boxes in y_pred to be equal to the "
"number of boxes in y_true. Received number of boxes in "
f"y_true={y_true.shape[-2]} and number of boxes in "
f"y_pred={y_pred.shape[-2]}."
)
ciou = compute_ciou(y_true, y_pred, self.bounding_box_format)
return 1 - ciou
def get_config(self):
config = super().get_config()
config.update(
{
"eps": self.eps,
}
)
return config
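# Illustrative sketch (added for exposition; not part of the original module).
# It shows the loss on toy "xyxy" boxes: identical boxes have a CIoU of 1 and
# hence zero loss, while shifted boxes are penalized. `reduction="none"` is
# the standard `keras.losses.Loss` argument, used here to see per-box values.
def _example_ciou_loss():
    import numpy as np
    ciou_loss = CIoULoss(bounding_box_format="xyxy", reduction="none")
    boxes = np.array(
        [[[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]]], dtype="float32"
    )
    zero_loss = ciou_loss(boxes, boxes)  # ~0 for identical boxes
    positive_loss = ciou_loss(boxes, boxes + 1.0)  # > 0 for shifted boxes
    return zero_loss, positive_loss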
| keras-cv/keras_cv/losses/ciou_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/ciou_loss.py",
"repo_id": "keras-cv",
"token_count": 1538
} | 52 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetSBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetTinyBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetXLBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class CSPDarkNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = CSPDarkNetLBackbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = CSPDarkNetBackbone(
stackwise_channels=[48, 96, 192, 384],
stackwise_depth=[1, 3, 3, 1],
include_rescaling=True,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "csp_darknet_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, CSPDarkNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = CSPDarkNetLBackbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "csp_darknet_backbone.keras"
)
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, CSPDarkNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = CSPDarkNetLBackbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
        self.assertEqual(list(outputs.keys()), levels)
        self.assertEqual(
            outputs["P2"].shape,
            (None, input_size // 2**2, input_size // 2**2, 128),
        )
        self.assertEqual(
            outputs["P3"].shape,
            (None, input_size // 2**3, input_size // 2**3, 256),
        )
        self.assertEqual(
            outputs["P4"].shape,
            (None, input_size // 2**4, input_size // 2**4, 512),
        )
        self.assertEqual(
            outputs["P5"].shape,
            (None, input_size // 2**5, input_size // 2**5, 1024),
        )
@parameterized.named_parameters(
("Tiny", CSPDarkNetTinyBackbone),
("S", CSPDarkNetSBackbone),
("M", CSPDarkNetMBackbone),
("L", CSPDarkNetLBackbone),
("XL", CSPDarkNetXLBackbone),
)
def test_specific_arch_forward_pass(self, arch_class):
backbone = arch_class()
backbone(np.random.uniform(size=(2, 256, 256, 3)))
@parameterized.named_parameters(
("Tiny", CSPDarkNetTinyBackbone),
("S", CSPDarkNetSBackbone),
("M", CSPDarkNetMBackbone),
("L", CSPDarkNetLBackbone),
("XL", CSPDarkNetXLBackbone),
)
def test_specific_arch_presets(self, arch_class):
self.assertDictEqual(
arch_class.presets, arch_class.presets_with_weights
)
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2540
} | 53 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_cv.backend import keras
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
@keras.saving.register_keras_serializable(package="keras_cv.models")
class EfficientNetV1Backbone(Backbone):
"""Instantiates the EfficientNetV1 architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
- [Based on the original keras.applications EfficientNet](https://github.com/keras-team/keras/blob/master/keras/applications/efficientnet.py)
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections. The default
value is set to 0.2.
depth_divisor: integer, a unit of network width. The default value is
set to 8.
activation: activation function to use between each convolutional layer.
input_shape: optional shape tuple, it should have exactly 3 input
channels.
        input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`) to
use as image input for the model.
stackwise_kernel_sizes: list of ints, the kernel sizes used for each
conv block.
stackwise_num_repeats: list of ints, number of times to repeat each
conv block.
stackwise_input_filters: list of ints, number of input filters for
each conv block.
stackwise_output_filters: list of ints, number of output filters for
each stack in the conv blocks model.
stackwise_expansion_ratios: list of floats, expand ratio passed to the
squeeze and excitation blocks.
stackwise_strides: list of ints, stackwise_strides for each conv block.
        stackwise_squeeze_and_excite_ratios: list of floats, the squeeze and
            excite ratios passed to the squeeze and excitation blocks.
Usage:
```python
# Construct an EfficientNetV1 from a preset:
efficientnet = keras_cv.models.EfficientNetV1Backbone.from_preset(
"efficientnetv1_b0"
)
images = np.ones((1, 256, 256, 3))
outputs = efficientnet.predict(images)
# Alternatively, you can also customize the EfficientNetV1 architecture:
model = EfficientNetV1Backbone(
stackwise_kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
stackwise_num_repeats=[1, 2, 2, 3, 3, 4, 1],
stackwise_input_filters=[32, 16, 24, 40, 80, 112, 192],
stackwise_output_filters=[16, 24, 40, 80, 112, 192, 320],
stackwise_expansion_ratios=[1, 6, 6, 6, 6, 6, 6],
stackwise_strides=[1, 2, 2, 2, 1, 2, 1],
stackwise_squeeze_and_excite_ratios=[
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
0.25,
],
width_coefficient=1.0,
depth_coefficient=1.0,
include_rescaling=False,
)
images = np.ones((1, 256, 256, 3))
    outputs = model.predict(images)
```
""" # noqa: E501
def __init__(
self,
*,
include_rescaling,
width_coefficient,
depth_coefficient,
stackwise_kernel_sizes,
stackwise_num_repeats,
stackwise_input_filters,
stackwise_output_filters,
stackwise_expansion_ratios,
stackwise_strides,
stackwise_squeeze_and_excite_ratios,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
input_shape=(None, None, 3),
input_tensor=None,
activation="swish",
**kwargs,
):
img_input = utils.parse_model_inputs(input_shape, input_tensor)
x = img_input
if include_rescaling:
# Use common rescaling strategy across keras_cv
x = keras.layers.Rescaling(1.0 / 255.0)(x)
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, 3), name="stem_conv_pad"
)(x)
# Build stem
stem_filters = round_filters(
filters=stackwise_input_filters[0],
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
padding="valid",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name="stem_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="stem_bn",
)(x)
x = keras.layers.Activation(activation, name="stem_activation")(x)
# Build blocks
block_id = 0
blocks = float(sum(stackwise_num_repeats))
pyramid_level_inputs = []
for i in range(len(stackwise_kernel_sizes)):
num_repeats = stackwise_num_repeats[i]
input_filters = stackwise_input_filters[i]
output_filters = stackwise_output_filters[i]
# Update block input and output filters based on depth multiplier.
input_filters = round_filters(
filters=input_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
output_filters = round_filters(
filters=output_filters,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
repeats = round_repeats(
repeats=num_repeats,
depth_coefficient=depth_coefficient,
)
strides = stackwise_strides[i]
squeeze_and_excite_ratio = stackwise_squeeze_and_excite_ratios[i]
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
strides = 1
input_filters = output_filters
if strides != 1:
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# 97 is the start of the lowercase alphabet.
letter_identifier = chr(j + 97)
x = apply_efficientnet_block(
inputs=x,
filters_in=input_filters,
filters_out=output_filters,
kernel_size=stackwise_kernel_sizes[i],
strides=strides,
expand_ratio=stackwise_expansion_ratios[i],
se_ratio=squeeze_and_excite_ratio,
activation=activation,
dropout_rate=drop_connect_rate * block_id / blocks,
name="block{}{}_".format(i + 1, letter_identifier),
)
block_id += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
divisor=depth_divisor,
)
x = keras.layers.Conv2D(
filters=top_filters,
kernel_size=1,
padding="same",
strides=1,
kernel_initializer=conv_kernel_initializer(),
use_bias=False,
name="top_conv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name="top_bn",
)(x)
x = keras.layers.Activation(
activation=activation, name="top_activation"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
# Create model.
super().__init__(inputs=img_input, outputs=x, **kwargs)
self.include_rescaling = include_rescaling
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.depth_divisor = depth_divisor
self.activation = activation
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
self.stackwise_kernel_sizes = stackwise_kernel_sizes
self.stackwise_num_repeats = stackwise_num_repeats
self.stackwise_input_filters = stackwise_input_filters
self.stackwise_output_filters = stackwise_output_filters
self.stackwise_expansion_ratios = stackwise_expansion_ratios
self.stackwise_strides = stackwise_strides
self.stackwise_squeeze_and_excite_ratios = (
stackwise_squeeze_and_excite_ratios
)
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
"width_coefficient": self.width_coefficient,
"depth_coefficient": self.depth_coefficient,
"dropout_rate": self.dropout_rate,
"drop_connect_rate": self.drop_connect_rate,
"depth_divisor": self.depth_divisor,
"activation": self.activation,
"input_tensor": self.input_tensor,
"input_shape": self.input_shape[1:],
"trainable": self.trainable,
"stackwise_kernel_sizes": self.stackwise_kernel_sizes,
"stackwise_num_repeats": self.stackwise_num_repeats,
"stackwise_input_filters": self.stackwise_input_filters,
"stackwise_output_filters": self.stackwise_output_filters,
"stackwise_expansion_ratios": self.stackwise_expansion_ratios,
"stackwise_strides": self.stackwise_strides,
"stackwise_squeeze_and_excite_ratios": (
self.stackwise_squeeze_and_excite_ratios
),
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
def conv_kernel_initializer(scale=2.0):
return keras.initializers.VarianceScaling(
scale=scale, mode="fan_out", distribution="truncated_normal"
)
def round_filters(filters, width_coefficient, divisor):
"""Round number of filters based on depth multiplier.
Args:
filters: int, number of filters for Conv layer
width_coefficient: float, denotes the scaling coefficient of network
width
divisor: int, a unit of network width
Returns:
int, new rounded filters value for Conv layer
"""
filters *= width_coefficient
new_filters = max(divisor, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
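# Worked example (added for clarity; the numbers are illustrative and not from
# the original file): with width_coefficient=1.1 and depth_divisor=8, scaled
# filter counts are snapped to a multiple of 8 unless that would lose more
# than 10% of the scaled value.
def _example_round_filters():
    # 32 * 1.1 = 35.2 snaps down to 32; 192 * 1.1 = 211.2 snaps down to 208.
    assert round_filters(32, width_coefficient=1.1, divisor=8) == 32
    assert round_filters(192, width_coefficient=1.1, divisor=8) == 208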
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier.
Args:
repeats: int, number of repeats of efficientnet block
depth_coefficient: float, denotes the scaling coefficient of network
depth
Returns:
int, rounded repeats
"""
return int(math.ceil(depth_coefficient * repeats))
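# Worked example (added for clarity; the numbers are illustrative): depth
# scaling always rounds up, so 3 repeats become 4 at depth_coefficient=1.2
# and 6 at depth_coefficient=2.0.
def _example_round_repeats():
    assert round_repeats(3, depth_coefficient=1.2) == 4
    assert round_repeats(3, depth_coefficient=2.0) == 6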
def apply_efficientnet_block(
inputs,
filters_in=32,
filters_out=16,
kernel_size=3,
strides=1,
activation="swish",
expand_ratio=1,
se_ratio=0.0,
dropout_rate=0.0,
name="",
):
"""An inverted residual block.
Args:
inputs: Tensor, The input tensor of the block
filters_in: integer, the number of input filters.
filters_out: integer, the number of output filters.
kernel_size: integer, the dimension of the convolution window.
strides: integer, the stride of the convolution.
activation: activation function to use between each convolutional layer.
expand_ratio: integer, scaling coefficient for the input filters.
se_ratio: float between 0 and 1, fraction to squeeze the input filters.
dropout_rate: float between 0 and 1, fraction of the input units to drop.
name: string, block label.
Returns:
output tensor for the block.
""" # noqa: E501
filters = filters_in * expand_ratio
if expand_ratio != 1:
x = keras.layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "expand_conv",
)(inputs)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "expand_bn",
)(x)
x = keras.layers.Activation(
activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Depthwise Convolution
if strides == 2:
x = keras.layers.ZeroPadding2D(
padding=utils.correct_pad_downsample(x, kernel_size),
name=name + "dwconv_pad",
)(x)
conv_pad = "valid"
else:
conv_pad = "same"
x = keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
padding=conv_pad,
use_bias=False,
depthwise_initializer=conv_kernel_initializer(),
name=name + "dwconv",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "dwconv_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "dwconv_activation")(x)
# Squeeze and Excitation phase
if 0 < se_ratio <= 1:
filters_se = max(1, int(filters_in * se_ratio))
se = keras.layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
se_shape = (1, 1, filters)
se = keras.layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = keras.layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=conv_kernel_initializer(),
name=name + "se_reduce",
)(se)
se = keras.layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=conv_kernel_initializer(),
name=name + "se_expand",
)(se)
x = keras.layers.multiply([x, se], name=name + "se_excite")
# Output phase
x = keras.layers.Conv2D(
filters=filters_out,
kernel_size=1,
strides=1,
padding="same",
use_bias=False,
kernel_initializer=conv_kernel_initializer(),
name=name + "project",
)(x)
x = keras.layers.BatchNormalization(
axis=3,
name=name + "project_bn",
)(x)
x = keras.layers.Activation(activation, name=name + "project_activation")(x)
if strides == 1 and filters_in == filters_out:
if dropout_rate > 0:
x = keras.layers.Dropout(
dropout_rate,
noise_shape=(None, 1, 1, 1),
name=name + "drop",
)(x)
x = keras.layers.Add(name=name + "add")([x, inputs])
return x
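# Illustrative sketch (added for exposition; not part of the original module).
# It wires a single inverted-residual block into a tiny functional model; the
# input shape and hyperparameters are arbitrary example values.
def _example_efficientnet_block():
    inputs = keras.Input(shape=(64, 64, 32))
    # filters_in == filters_out and strides == 1, so the residual add and the
    # dropout branch before it are exercised.
    outputs = apply_efficientnet_block(
        inputs,
        filters_in=32,
        filters_out=32,
        kernel_size=3,
        strides=1,
        expand_ratio=6,
        se_ratio=0.25,
        dropout_rate=0.1,
        name="example_block_",
    )
    return keras.Model(inputs, outputs)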
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_backbone.py",
"repo_id": "keras-cv",
"token_count": 7637
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v2.resnet_v2_aliases import (
ResNet50V2Backbone,
)
from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone import (
ResNetV2Backbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class ResNetV2PresetSmokeTest(TestCase):
"""
A smoke test for ResNetV2 presets we run continuously.
This only tests the smallest weights we have available. Run with:
    `pytest keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py --run_large`
""" # noqa: E501
def setUp(self):
self.input_batch = np.ones(shape=(8, 224, 224, 3))
@parameterized.named_parameters(
("preset_with_weights", "resnet50_v2_imagenet"),
("preset_no_weights", "resnet50_v2"),
)
def test_backbone_output(self, preset):
model = ResNetV2Backbone.from_preset(preset)
outputs = model(self.input_batch)
if preset == "resnet50_v2_imagenet":
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
outputs = outputs[0, 0, 0, :5]
expected = [1.051145, 0, 0, 1.16328, 0]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs), expected, atol=0.01, rtol=0.01
)
def test_applications_model_output(self):
model = ResNet50V2Backbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = ResNet50V2Backbone.from_preset("resnet50_v2_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in ResNetV2Backbone.presets:
self.assertRegex(ResNetV2Backbone.from_preset.__doc__, name)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
ResNetV2Backbone.from_preset("resnet50_v2_clowntown")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
ResNetV2Backbone.from_preset("resnet50_v2", load_weights=True)
@pytest.mark.extra_large
class ResNetV2PresetFullTest(TestCase):
"""
    Test the full enumeration of our presets.
    This tests every preset for ResNetV2 and is only run manually.
Run with:
    `pytest keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py --run_extra_large`
""" # noqa: E501
def test_load_resnetv2(self):
input_data = np.ones(shape=(8, 224, 224, 3))
for preset in ResNetV2Backbone.presets:
model = ResNetV2Backbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1493
} | 55 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import keras
from keras_cv.backend import ops
def get_initializer(initializer_range=0.02):
"""
Creates a `keras.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (*float*, defaults to 0.02): Standard deviation of the
initializer range.
Returns:
`keras.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return keras.initializers.TruncatedNormal(stddev=initializer_range)
class QuickGELU(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, x):
return x * ops.sigmoid(1.702 * x)
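# Illustrative sketch (added for exposition; not part of the original module).
# QuickGELU is the sigmoid approximation x * sigmoid(1.702 * x) used by the
# original CLIP implementation; it stays within a few hundredths of the exact
# GELU activation.
def _example_quick_gelu():
    x = ops.convert_to_tensor([-2.0, -1.0, 0.0, 1.0, 2.0])
    approximate = QuickGELU()(x)
    exact = keras.activations.gelu(x)
    return approximate, exact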
class ResidualAttention(keras.layers.Layer):
def __init__(
self,
proj_dim,
num_heads,
num_hidden_layers,
**kwargs,
):
super().__init__(**kwargs)
self.proj_dim = proj_dim
self.num_heads = num_heads
self.num_hidden_layers = num_hidden_layers
self.fc_std = np.power(2 * self.proj_dim, -0.5) * 0.02
self.in_proj_std = (
np.power(self.proj_dim, -0.5)
* (np.power(2 * self.num_hidden_layers, -0.5))
* 0.02
)
self.attn = CLIPAttention(
self.proj_dim,
self.num_heads,
self.num_hidden_layers,
name="multi_head_attention",
)
self.ln_1 = keras.layers.LayerNormalization(epsilon=1e-5, name="ln_1")
self.mlp_dense_1 = keras.layers.Dense(
self.proj_dim * 4,
name="c_fc",
)
self.mlp_activation = QuickGELU(name="gelu")
self.mlp_dense_2 = keras.layers.Dense(
self.proj_dim,
name="c_proj",
)
self.ln_2 = keras.layers.LayerNormalization(epsilon=1e-5, name="ln_2")
def attention(self, x, causal_attention_mask=None, attention_mask=None):
        mask = None
        if causal_attention_mask is not None:
            mask = ops.cast(causal_attention_mask, dtype=x.dtype)
        if attention_mask is not None:
            attention_mask = ops.cast(attention_mask, dtype=x.dtype)
            mask = (
                attention_mask
                if mask is None
                else ops.add(mask, attention_mask)
            )
return self.attn(
x,
attention_mask=mask,
)[0]
def build(self, input_shape):
super().build(input_shape)
self.attn.build(None)
self.ln_1.build([None, None, self.proj_dim])
self.mlp_dense_1.build([None, None, self.proj_dim])
self.mlp_dense_2.build([None, None, self.proj_dim * 4])
self.ln_2.build([None, None, self.proj_dim])
def call(self, x, causal_attention_mask=None, attention_mask=None):
residual = x
x = self.ln_1(x)
x = self.attention(
x,
causal_attention_mask=causal_attention_mask,
attention_mask=attention_mask,
)
x = x + residual
residual = x
x = self.mlp_dense_1(self.ln_2(residual))
x = self.mlp_activation(x)
x = self.mlp_dense_2(x)
x = residual + x
return x
def compute_output_shape(self, inputs_shape):
return inputs_shape
def get_config(self):
config = super().get_config()
config.update(
{
"proj_dim": self.proj_dim,
"num_heads": self.num_heads,
"num_hidden_layers": self.num_hidden_layers,
}
)
return config
class CLIPEncoder(keras.layers.Layer):
def __init__(self, width, num_layers, heads, **kwargs):
super().__init__(**kwargs)
self.width = width
self.num_layers = num_layers
self.heads = heads
self.resblocks = [
ResidualAttention(
self.width,
self.heads,
self.num_layers,
)
for _ in range(self.num_layers)
]
def build(self, input_shape):
super().build(input_shape)
for block in self.resblocks:
block.build(input_shape)
def call(
self,
x,
causal_attention_mask=None,
attention_mask=None,
):
for block in self.resblocks:
x = block(
x,
causal_attention_mask=causal_attention_mask,
attention_mask=attention_mask,
)
return x
def compute_output_shape(self, inputs_shape):
return inputs_shape
def get_config(self):
config = super().get_config()
config.update(
{
"width": self.width,
"num_layers": self.num_layers,
"heads": self.heads,
}
)
return config
class CLIPAttention(keras.layers.Layer):
"""
Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/modeling_clip.py # noqa: E501
"""
def __init__(
self, proj_dim, num_heads, num_hidden_layers, dropout=0.0, **kwargs
):
super().__init__(**kwargs)
self.proj_dim = proj_dim
self.num_heads = num_heads
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.head_dim = self.proj_dim // self.num_heads
if self.head_dim * self.num_heads != self.proj_dim:
raise ValueError(
f"proj_dim must be divisible by num_heads (got `proj_dim`"
f": {self.proj_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
in_proj_std = (
(self.proj_dim**-0.5)
* ((2 * self.num_hidden_layers) ** -0.5)
* 0.02
)
out_proj_std = (self.proj_dim**-0.5) * 0.02
self.q_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="q_proj",
)
self.k_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="k_proj",
)
self.v_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(in_proj_std),
name="v_proj",
)
self.out_proj = keras.layers.Dense(
units=self.proj_dim,
kernel_initializer=get_initializer(out_proj_std),
name="out_proj",
)
def build(self, input_shape):
super().build(input_shape)
self.q_proj.build([None, None, self.proj_dim])
self.k_proj.build([None, None, self.proj_dim])
self.v_proj.build([None, None, self.proj_dim])
self.out_proj.build([None, None, self.proj_dim])
def _transpose_for_scores(self, tensor, batch_size):
"""
Adapted from https://github.com/huggingface/transformers/blob/8e164c5400b7b413c7b8fb32e35132001effc970/src/transformers/models/bert/modeling_tf_bert.py#L252 # noqa: E501
"""
# [batch_size, seq_len, all_head_dim] ->
# [batch_size, seq_len, num_heads, head_dim]
tensor = ops.reshape(
tensor, (batch_size, -1, self.num_heads, self.head_dim)
)
# [batch_size, seq_len, num_heads, head_dim] ->
# [batch_size, num_heads, seq_len, head_dim]
return ops.transpose(tensor, axes=[0, 2, 1, 3])
def call(
self,
x,
attention_mask=None,
output_attentions=None,
training=False,
):
batch_size = ops.shape(x)[0]
mixed_query_layer = self.q_proj(inputs=x)
mixed_key_layer = self.k_proj(inputs=x)
mixed_value_layer = self.v_proj(inputs=x)
query_layer = self._transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self._transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self._transpose_for_scores(mixed_value_layer, batch_size)
# Scaled dot product between key and query = raw attention scores.
attention_scores = ops.matmul(
query_layer, ops.transpose(key_layer, axes=[0, 1, 3, 2])
)
dk = ops.cast(ops.sqrt(self.head_dim), dtype=attention_scores.dtype)
attention_scores = ops.divide(
attention_scores, dk
) # (batch_size, num_heads, seq_len_q, seq_len_k)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the
# call() function)
attention_scores = ops.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = ops.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
dropout_attention_probs = keras.layers.Dropout(self.dropout)(
inputs=attention_probs, training=training
)
attn_output = ops.matmul(dropout_attention_probs, value_layer)
attn_output = ops.transpose(attn_output, axes=[0, 2, 1, 3])
# (batch_size, seq_len_q, proj_dim)
attn_output = ops.reshape(attn_output, (batch_size, -1, self.proj_dim))
attn_output = self.out_proj(attn_output, training=training)
outputs = (
(attn_output, attention_probs)
if output_attentions
else (attn_output,)
)
return outputs
def get_config(self):
config = super().get_config()
config.update(
{
"proj_dim": self.proj_dim,
"num_heads": self.num_heads,
"num_hidden_layers": self.num_hidden_layers,
"dropout": self.dropout,
}
)
return config
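# Illustrative sketch (added for exposition; not part of the original module).
# CLIPAttention keeps the (batch, seq_len, proj_dim) shape, splitting proj_dim
# across num_heads internally. Sizes below are arbitrary example values; the
# manual `build(None)` call mirrors how ResidualAttention builds this layer.
def _example_clip_attention():
    attention = CLIPAttention(proj_dim=64, num_heads=4, num_hidden_layers=2)
    attention.build(None)
    x = ops.ones((2, 7, 64))
    (output,) = attention(x)
    return output  # shape: (2, 7, 64)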
| keras-cv/keras_cv/models/feature_extractor/clip/clip_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/feature_extractor/clip/clip_encoder.py",
"repo_id": "keras-cv",
"token_count": 5332
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLP Mixer models for KerasCV.
Reference:
- [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
MODEL_CONFIGS = {
"MLPMixerB16": {
"patch_size": 16,
"num_blocks": 12,
"hidden_dim": 768,
"tokens_mlp_dim": 384,
"channels_mlp_dim": 3072,
},
"MLPMixerB32": {
"patch_size": 32,
"num_blocks": 12,
"hidden_dim": 768,
"tokens_mlp_dim": 384,
"channels_mlp_dim": 3072,
},
"MLPMixerL16": {
"patch_size": 16,
"num_blocks": 24,
"hidden_dim": 1024,
"tokens_mlp_dim": 512,
"channels_mlp_dim": 4096,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)
This class represents a Keras {name} model.
For transfer learning use cases, make sure to read the [guide to transfer
learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, num_classes must be provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
name: string, optional name to pass to the model, defaults to "{name}".
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
Returns:
A `keras.Model` instance.
""" # noqa: E501
def apply_mlp_block(x, mlp_dim, name=None):
"""An MLP block consisting of two linear layers with GELU activation in
between.
Args:
x: input tensor.
mlp_dim: integer, the number of units to be present in the first layer.
name: string, block label.
Returns:
the updated input tensor.
"""
if name is None:
name = f"mlp_block_{backend.get_uid('mlp_block')}"
y = layers.Dense(mlp_dim, name=f"{name}_dense_1")(x)
y = layers.Activation("gelu", name=f"{name}_gelu")(y)
return layers.Dense(x.shape[-1], name=f"{name}_dense_2")(y)
def apply_mixer_block(x, tokens_mlp_dim, channels_mlp_dim, name=None):
"""A mixer block.
Args:
x: input tensor.
tokens_mlp_dim: integer, number of units to be present in the MLP block
dealing with tokens.
channels_mlp_dim: integer, number of units to be present in the MLP block
dealing with channels.
name: string, block label.
Returns:
the updated input tensor.
"""
if name is None:
name = f"mixer_block_{backend.get_uid('mlp_block')}"
y = layers.LayerNormalization()(x)
y = layers.Permute((2, 1))(y)
y = apply_mlp_block(y, tokens_mlp_dim, name=f"{name}_token_mixing")
y = layers.Permute((2, 1))(y)
x = layers.Add()([x, y])
y = layers.LayerNormalization()(x)
y = apply_mlp_block(y, channels_mlp_dim, name=f"{name}_channel_mixing")
return layers.Add()([x, y])
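# Illustrative sketch (added for exposition; not part of the original module).
# A mixer block preserves the (num_patches, channels) shape: token mixing runs
# the MLP across patches after a transpose, channel mixing runs it across
# channels. The 196 x 768 size below is an arbitrary example (14x14 patches).
def _example_mixer_block():
    tokens = layers.Input(shape=(196, 768))
    mixed = apply_mixer_block(
        tokens,
        tokens_mlp_dim=384,
        channels_mlp_dim=3072,
        name="example_mixer_block",
    )
    return keras.Model(tokens, mixed)  # output shape: (None, 196, 768)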
@keras.utils.register_keras_serializable(package="keras_cv.models")
class MLPMixer(keras.Model):
"""Instantiates the MLP Mixer architecture.
Args:
input_shape: tuple denoting the input shape, (224, 224, 3) for example.
patch_size: integer denoting the size of the patches to be extracted
from the inputs (16 for extracting 16x16 patches for example).
num_blocks: integer, number of mixer blocks.
hidden_dim: integer, dimension to which the patches will be linearly
projected.
tokens_mlp_dim: integer, dimension of the MLP block responsible for
tokens.
channels_mlp_dim: integer, dimension of the MLP block responsible for
channels.
include_rescaling: whether to rescale the inputs. If set to True,
inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the fully-connected
layer at the top of the network. If provided, num_classes must be
provided.
num_classes: integer, optional number of classes to classify images
into. Only to be specified if `include_top` is True.
weights: one of `None` (random initialization) or a pretrained
weight file path.
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: string, optional name to pass to the model, defaults to "MLPMixer".
Returns:
A `keras.Model` instance.
"""
def __init__(
self,
input_shape,
patch_size,
num_blocks,
hidden_dim,
tokens_mlp_dim,
channels_mlp_dim,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
classifier_activation="softmax",
name="MLPMixer",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either "
"`None` or the path to the weights file to be loaded. "
f"Weights file not found at location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, "
"you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if not isinstance(input_shape, tuple):
raise ValueError("`input_shape` needs to be tuple.")
if len(input_shape) != 3:
raise ValueError(
"`input_shape` needs to contain dimensions for three"
" axes: height, width, and channel ((224, 224, 3) for example)."
)
if input_shape[0] != input_shape[1]:
raise ValueError("Non-uniform resolutions are not supported.")
if input_shape[0] % patch_size != 0:
raise ValueError(
"Input resolution should be divisible by the patch size."
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = layers.Conv2D(
filters=hidden_dim,
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
padding="valid",
name="patchify_and_projection",
)(x)
x = layers.Reshape((x.shape[1] * x.shape[2], x.shape[3]))(x)
for i in range(num_blocks):
x = apply_mixer_block(
x, tokens_mlp_dim, channels_mlp_dim, name=f"mixer_block_{i}"
)
x = layers.LayerNormalization()(x)
if include_top:
x = layers.GlobalAveragePooling1D(name="avg_pool")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
elif pooling == "avg":
x = layers.GlobalAveragePooling1D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling1D(name="max_pool")(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.patch_size = patch_size
self.num_blocks = num_blocks
self.hidden_dim = hidden_dim
self.tokens_mlp_dim = tokens_mlp_dim
self.channels_mlp_dim = channels_mlp_dim
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"input_shape": self.input_shape[1:],
"patch_size": self.patch_size,
"num_blocks": self.num_blocks,
"hidden_dim": self.hidden_dim,
"tokens_mlp_dim": self.tokens_mlp_dim,
"channels_mlp_dim": self.channels_mlp_dim,
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"num_classes": self.num_classes,
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"classifier_activation": self.classifier_activation,
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def MLPMixerB16(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerB16",
**kwargs,
):
"""Instantiates the MLPMixerB16 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerB16"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerB16"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerB16"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerB16"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerB16"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
def MLPMixerB32(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerB32",
**kwargs,
):
"""Instantiates the MLPMixerB32 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerB32"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerB32"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerB32"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerB32"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerB32"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
def MLPMixerL16(
input_shape,
*,
include_rescaling,
include_top,
num_classes=None,
input_tensor=None,
weights=None,
pooling=None,
name="MLPMixerL16",
**kwargs,
):
"""Instantiates the MLPMixerL16 architecture."""
return MLPMixer(
input_shape=input_shape,
patch_size=MODEL_CONFIGS["MLPMixerL16"]["patch_size"],
num_blocks=MODEL_CONFIGS["MLPMixerL16"]["num_blocks"],
hidden_dim=MODEL_CONFIGS["MLPMixerL16"]["hidden_dim"],
tokens_mlp_dim=MODEL_CONFIGS["MLPMixerL16"]["tokens_mlp_dim"],
channels_mlp_dim=MODEL_CONFIGS["MLPMixerL16"]["channels_mlp_dim"],
include_rescaling=include_rescaling,
include_top=include_top,
num_classes=num_classes,
input_tensor=input_tensor,
weights=weights,
pooling=pooling,
name=name,
**kwargs,
)
setattr(MLPMixerB16, "__doc__", BASE_DOCSTRING.format(name="MLPMixerB16"))
setattr(MLPMixerB32, "__doc__", BASE_DOCSTRING.format(name="MLPMixerB32"))
setattr(MLPMixerL16, "__doc__", BASE_DOCSTRING.format(name="MLPMixerL16"))
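# Illustrative sketch (added for exposition; not part of the original module).
# It builds a headless MLPMixerB16 feature extractor; the 224x224 input and
# average pooling are arbitrary example choices.
def _example_mlp_mixer_b16():
    model = MLPMixerB16(
        input_shape=(224, 224, 3),
        include_rescaling=True,
        include_top=False,
        pooling="avg",
    )
    return model  # produces a (None, 768) feature vector per image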
| keras-cv/keras_cv/models/legacy/mlp_mixer.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/mlp_mixer.py",
"repo_id": "keras-cv",
"token_count": 6411
} | 57 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ViT (Vision Transformer) models for Keras.
Reference:
- [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929v2)
(ICLR 2021)
- [How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers](https://arxiv.org/abs/2106.10270)
(CoRR 2021)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.layers import TransformerEncoder
from keras_cv.layers.vit_layers import PatchingAndEmbedding
from keras_cv.models.legacy import utils
from keras_cv.models.legacy.weights import parse_weights
MODEL_CONFIGS = {
"ViTTiny16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 192,
"mlp_dim": 768,
"num_heads": 3,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTS16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 384,
"mlp_dim": 1536,
"num_heads": 6,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTB16": {
"patch_size": 16,
"transformer_layer_num": 12,
"project_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTL16": {
"patch_size": 16,
"transformer_layer_num": 24,
"project_dim": 1024,
"mlp_dim": 4096,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTH16": {
"patch_size": 16,
"transformer_layer_num": 32,
"project_dim": 1280,
"mlp_dim": 5120,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTTiny32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 192,
"mlp_dim": 768,
"num_heads": 3,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTS32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 384,
"mlp_dim": 1536,
"num_heads": 6,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTB32": {
"patch_size": 32,
"transformer_layer_num": 12,
"project_dim": 768,
"mlp_dim": 3072,
"num_heads": 12,
"mlp_dropout": 0.0,
"attention_dropout": 0.0,
},
"ViTL32": {
"patch_size": 32,
"transformer_layer_num": 24,
"project_dim": 1024,
"mlp_dim": 4096,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
"ViTH32": {
"patch_size": 32,
"transformer_layer_num": 32,
"project_dim": 1280,
"mlp_dim": 5120,
"num_heads": 16,
"mlp_dropout": 0.1,
"attention_dropout": 0.0,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929v2)
(ICLR 2021)
This function returns a Keras {name} model.
    The naming convention of ViT models follows: ViT<Size><Patch-size>
    (e.g. ViTS16).
The following sizes were released in the original paper:
- S (Small)
- B (Base)
- L (Large)
But subsequent work from the same authors introduced:
- Ti (Tiny)
- H (Huge)
The parameter configurations for all of these sizes, at patch sizes 16 and
32 are made available, following the naming convention laid out above.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1./255.0)`
layer. Note that ViTs expect an input range of `[0..1]` if rescaling
isn't used. Regardless of whether you supply `[0..1]` or the input
is rescaled to `[0..1]`, the inputs will further be rescaled to
`[-1..1]`.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, num_classes must be provided.
num_classes: optional int, number of classes to classify images into,
only to be specified if `include_top` is True.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights
(e.g. 'imagenet/classification') (see available pre-trained weights
in weights.py). Note that the 'imagenet' weights only work on an
input shape of (224, 224, 3) due to the input shape dependent
patching and flattening logic.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
        - `token_pooling`, default, means that the token at the start of the
            sequence is used instead of regular pooling.
name: (Optional) name to pass to the model, defaults to "{name}".
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
Returns:
A `keras.Model` instance.
""" # noqa: E501
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ViT(keras.Model):
"""Instantiates the ViT architecture.
Args:
mlp_dim: the dimensionality of the hidden Dense layer in the transformer
MLP head
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
name: string, model name.
include_top: bool, whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
or the path to the weights file to be loaded.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
            - `token_pooling`, default, means that the token at the start of the
                sequence is used instead of regular pooling.
num_classes: optional number of classes to classify images
into, only to be specified if `include_top` is True.
project_dim: the latent dimensionality to be projected into in the
output of each stacked transformer encoder
activation: the activation function to use in the first `layers.Dense`
layer in the MLP head of the transformer encoder
attention_dropout: the dropout rate to apply to the `MultiHeadAttention`
in each transformer encoder
mlp_dropout: the dropout rate to apply between `layers.Dense` layers
in the MLP head of the transformer encoder
num_heads: the number of heads to use in the `MultiHeadAttention` layer
of each transformer encoder
transformer_layer_num: the number of transformer encoder layers to stack
in the Vision Transformer
patch_size: the patch size to be supplied to the Patching layer to turn
input images into a flattened sequence of patches
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
**kwargs: Pass-through keyword arguments to `keras.Model`.
"""
def __init__(
self,
include_rescaling,
include_top,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
patch_size=None,
transformer_layer_num=None,
num_heads=None,
mlp_dropout=None,
attention_dropout=None,
activation=None,
project_dim=None,
mlp_dim=None,
classifier_activation="softmax",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either `None` or the path "
"to the weights file to be loaded. Weights file not found at "
"location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
if include_top and pooling:
raise ValueError(
f"`pooling` must be `None` when `include_top=True`."
f"Received pooling={pooling} and include_top={include_top}. "
)
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1.0 / 255.0, name="rescaling")(x)
# The previous layer rescales [0..255] to [0..1] if applicable
# This one rescales [0..1] to [-1..1] since ViTs expect [-1..1]
x = layers.Rescaling(scale=1.0 / 0.5, offset=-1.0, name="rescaling_2")(
x
)
encoded_patches = PatchingAndEmbedding(project_dim, patch_size)(x)
encoded_patches = layers.Dropout(mlp_dropout)(encoded_patches)
for _ in range(transformer_layer_num):
encoded_patches = TransformerEncoder(
project_dim=project_dim,
mlp_dim=mlp_dim,
num_heads=num_heads,
mlp_dropout=mlp_dropout,
attention_dropout=attention_dropout,
activation=activation,
)(encoded_patches)
output = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
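        # The first token in the sequence is the class token; it is what the
        # classification head and the `token_pooling` option consume below.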
if include_top:
output = output[:, 0]
output = layers.Dense(
num_classes, activation=classifier_activation
)(output)
elif pooling == "token_pooling":
output = output[:, 0]
elif pooling == "avg":
output = layers.GlobalAveragePooling1D()(output)
# Create model.
super().__init__(inputs=inputs, outputs=output, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.patch_size = patch_size
self.transformer_layer_num = transformer_layer_num
self.num_heads = num_heads
self.mlp_dropout = mlp_dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.project_dim = project_dim
self.mlp_dim = mlp_dim
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"patch_size": self.patch_size,
"transformer_layer_num": self.transformer_layer_num,
"num_heads": self.num_heads,
"mlp_dropout": self.mlp_dropout,
"attention_dropout": self.attention_dropout,
"activation": self.activation,
"project_dim": self.project_dim,
"mlp_dim": self.mlp_dim,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def ViTTiny16(
*,
include_rescaling,
include_top,
name="ViTTiny16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTTiny16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vittiny16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTTiny16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTTiny16"][
"transformer_layer_num"
],
project_dim=MODEL_CONFIGS["ViTTiny16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTTiny16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTTiny16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTTiny16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTTiny16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTS16(
*,
include_rescaling,
include_top,
name="ViTS16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTS16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vits16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTS16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTS16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTS16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTS16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTS16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTS16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTB16(
*,
include_rescaling,
include_top,
name="ViTB16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTB16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitb16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTB16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTB16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTB16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTB16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTB16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTB16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTL16(
*,
include_rescaling,
include_top,
name="ViTL16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTL16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitl16"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTL16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTL16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTL16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTL16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTL16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTL16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTL16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTH16(
*,
include_rescaling,
include_top,
name="ViTH16",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTH16 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTH16"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTH16"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTH16"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTH16"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTH16"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTH16"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTH16"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTTiny32(
*,
include_rescaling,
include_top,
name="ViTTiny32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTTiny32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTTiny32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTTiny32"][
"transformer_layer_num"
],
project_dim=MODEL_CONFIGS["ViTTiny32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTTiny32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTTiny32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTTiny32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTTiny32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTS32(
*,
include_rescaling,
include_top,
name="ViTS32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTS32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vits32"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTS32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTS32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTS32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTS32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTS32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTS32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTS32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTB32(
*,
include_rescaling,
include_top,
name="ViTB32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTB32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=parse_weights(weights, include_top, "vitb32"),
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTB32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTB32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTB32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTB32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTB32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTB32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTB32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTL32(
*,
include_rescaling,
include_top,
name="ViTL32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTL32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTL32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTL32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTL32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTL32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTL32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTL32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTL32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
def ViTH32(
*,
include_rescaling,
include_top,
name="ViTH32",
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
activation=keras.activations.gelu,
classifier_activation="softmax",
**kwargs,
):
"""Instantiates the ViTH32 architecture."""
return ViT(
include_rescaling,
include_top,
name=name,
weights=weights,
input_shape=input_shape,
input_tensor=input_tensor,
pooling=pooling,
num_classes=num_classes,
patch_size=MODEL_CONFIGS["ViTH32"]["patch_size"],
transformer_layer_num=MODEL_CONFIGS["ViTH32"]["transformer_layer_num"],
project_dim=MODEL_CONFIGS["ViTH32"]["project_dim"],
mlp_dim=MODEL_CONFIGS["ViTH32"]["mlp_dim"],
num_heads=MODEL_CONFIGS["ViTH32"]["num_heads"],
mlp_dropout=MODEL_CONFIGS["ViTH32"]["mlp_dropout"],
attention_dropout=MODEL_CONFIGS["ViTH32"]["attention_dropout"],
activation=activation,
classifier_activation=classifier_activation,
**kwargs,
)
setattr(ViTTiny16, "__doc__", BASE_DOCSTRING.format(name="ViTTiny16"))
setattr(ViTS16, "__doc__", BASE_DOCSTRING.format(name="ViTS16"))
setattr(ViTB16, "__doc__", BASE_DOCSTRING.format(name="ViTB16"))
setattr(ViTL16, "__doc__", BASE_DOCSTRING.format(name="ViTL16"))
setattr(ViTH16, "__doc__", BASE_DOCSTRING.format(name="ViTH16"))
setattr(ViTTiny32, "__doc__", BASE_DOCSTRING.format(name="ViTTiny32"))
setattr(ViTS32, "__doc__", BASE_DOCSTRING.format(name="ViTS32"))
setattr(ViTB32, "__doc__", BASE_DOCSTRING.format(name="ViTB32"))
setattr(ViTL32, "__doc__", BASE_DOCSTRING.format(name="ViTL32"))
setattr(ViTH32, "__doc__", BASE_DOCSTRING.format(name="ViTH32"))
| keras-cv/keras_cv/models/legacy/vit.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/vit.py",
"repo_id": "keras-cv",
"token_count": 11951
} | 58 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone_presets import (
backbone_presets,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_conv_bn,
)
from keras_cv.models.object_detection.yolo_v8.yolo_v8_layers import (
apply_csp_block,
)
from keras_cv.utils.python_utils import classproperty
def apply_spatial_pyramid_pooling_fast(
inputs, pool_size=5, activation="swish", name="spp_fast"
):
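    # Three chained max-pools with a single pool size emulate the parallel
    # 5/9/13 pooling of classic SPP at a lower computational cost.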
channel_axis = -1
input_channels = inputs.shape[channel_axis]
hidden_channels = int(input_channels // 2)
x = apply_conv_bn(
inputs,
hidden_channels,
kernel_size=1,
activation=activation,
name=f"{name}_pre",
)
pool_1 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool1"
)(x)
pool_2 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool2"
)(pool_1)
pool_3 = keras.layers.MaxPooling2D(
pool_size=pool_size, strides=1, padding="same", name=f"{name}_pool3"
)(pool_2)
out = ops.concatenate([x, pool_1, pool_2, pool_3], axis=channel_axis)
out = apply_conv_bn(
out,
input_channels,
kernel_size=1,
activation=activation,
name=f"{name}_output",
)
return out
@keras_cv_export("keras_cv.models.YOLOV8Backbone")
class YOLOV8Backbone(Backbone):
"""Implements the YOLOV8 backbone for object detection.
This backbone is a variant of the `CSPDarkNetBackbone` architecture.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
stackwise_channels: A list of ints, the number of channels for each dark
level in the model.
stackwise_depth: A list of ints, the depth for each dark level in the
model.
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
activation: String. The activation functions to use in the backbone to
use in the CSPDarkNet blocks. Defaults to "swish".
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
Returns:
A `keras.Model` instance.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained backbone
model = keras_cv.models.YOLOV8Backbone.from_preset(
"yolo_v8_xs_backbone_coco"
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.YOLOV8Backbone(
stackwise_channels=[128, 256, 512, 1024],
stackwise_depth=[3, 9, 9, 3],
include_rescaling=False,
)
output = model(input_data)
```
""" # noqa: E501
def __init__(
self,
stackwise_channels,
stackwise_depth,
include_rescaling,
activation="swish",
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(1 / 255.0)(x)
""" Stem """
stem_width = stackwise_channels[0]
x = apply_conv_bn(
x,
stem_width // 2,
kernel_size=3,
strides=2,
activation=activation,
name="stem_1",
)
x = apply_conv_bn(
x,
stem_width,
kernel_size=3,
strides=2,
activation=activation,
name="stem_2",
)
""" blocks """
pyramid_level_inputs = {"P1": utils.get_tensor_input_name(x)}
for stack_id, (channel, depth) in enumerate(
zip(stackwise_channels, stackwise_depth)
):
stack_name = f"stack{stack_id + 1}"
if stack_id >= 1:
x = apply_conv_bn(
x,
channel,
kernel_size=3,
strides=2,
activation=activation,
name=f"{stack_name}_downsample",
)
x = apply_csp_block(
x,
depth=depth,
expansion=0.5,
activation=activation,
name=f"{stack_name}_c2f",
)
if stack_id == len(stackwise_depth) - 1:
x = apply_spatial_pyramid_pooling_fast(
x,
pool_size=5,
activation=activation,
name=f"{stack_name}_spp_fast",
)
pyramid_level_inputs[f"P{stack_id + 2}"] = (
utils.get_tensor_input_name(x)
)
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.pyramid_level_inputs = pyramid_level_inputs
self.stackwise_channels = stackwise_channels
self.stackwise_depth = stackwise_depth
self.include_rescaling = include_rescaling
self.activation = activation
def get_config(self):
config = super().get_config()
config.update(
{
"include_rescaling": self.include_rescaling,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"stackwise_channels": self.stackwise_channels,
"stackwise_depth": self.stackwise_depth,
"activation": self.activation,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
| keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolo_v8/yolo_v8_backbone.py",
"repo_id": "keras-cv",
"token_count": 3298
} | 59 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv.models.object_detection.yolox.layers import YoloXPAFPN
from keras_cv.tests.test_case import TestCase
class YoloXPAFPNTest(TestCase):
def test_num_parameters(self):
input1 = keras.Input((80, 80, 256))
input2 = keras.Input((40, 40, 512))
input3 = keras.Input((20, 20, 1024))
output = YoloXPAFPN()({3: input1, 4: input2, 5: input3})
model = keras.models.Model(
inputs=[input1, input2, input3], outputs=output
)
keras_params = sum(
[keras.backend.count_params(p) for p in model.trainable_weights]
)
# taken from original implementation
original_params = 19523072
self.assertEqual(keras_params, original_params)
def test_output_shape(self):
inputs = {
3: tf.random.uniform((3, 80, 80, 256)),
4: tf.random.uniform((3, 40, 40, 512)),
5: tf.random.uniform((3, 20, 20, 1024)),
}
output1, output2, output3 = YoloXPAFPN()(inputs)
self.assertEqual(output1.shape, [3, 80, 80, 256])
self.assertEqual(output2.shape, [3, 40, 40, 512])
self.assertEqual(output3.shape, [3, 20, 20, 1024])
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn_test.py",
"repo_id": "keras-cv",
"token_count": 732
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.layers import preprocessing
from keras_cv.training import ContrastiveTrainer
class SimCLRTrainer(ContrastiveTrainer):
"""Creates a SimCLRTrainer.
References:
- [SimCLR paper](https://arxiv.org/pdf/2002.05709)
Args:
encoder: a `keras.Model` to be pre-trained. In most cases, this encoder
should not include a top dense layer.
augmenter: a SimCLRAugmenter layer to randomly augment input
images for contrastive learning
projection_width: the width of the two-layer dense model used for
projection in the SimCLR paper
"""
def __init__(self, encoder, augmenter, projection_width=128, **kwargs):
super().__init__(
encoder=encoder,
augmenter=augmenter,
projector=keras.Sequential(
[
layers.Dense(projection_width, activation="relu"),
layers.Dense(projection_width),
layers.BatchNormalization(),
],
name="projector",
),
**kwargs,
)
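# A minimal usage sketch (the encoder is assumed to be any headless
# `keras.Model`, e.g. a ResNet backbone without its classification top):
#   augmenter = SimCLRAugmenter(value_range=(0, 255))
#   trainer = SimCLRTrainer(encoder=encoder, augmenter=augmenter,
#                           projection_width=128)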
class SimCLRAugmenter(keras.Sequential):
def __init__(
self,
value_range,
height=128,
width=128,
crop_area_factor=(0.08, 1.0),
aspect_ratio_factor=(3 / 4, 4 / 3),
grayscale_rate=0.2,
color_jitter_rate=0.8,
brightness_factor=0.2,
contrast_factor=0.8,
saturation_factor=(0.3, 0.7),
hue_factor=0.2,
**kwargs,
):
        super().__init__(
[
preprocessing.RandomFlip("horizontal"),
preprocessing.RandomCropAndResize(
target_size=(height, width),
crop_area_factor=crop_area_factor,
aspect_ratio_factor=aspect_ratio_factor,
),
preprocessing.RandomApply(
preprocessing.Grayscale(output_channels=3),
rate=grayscale_rate,
),
preprocessing.RandomApply(
preprocessing.RandomColorJitter(
value_range=value_range,
brightness_factor=brightness_factor,
contrast_factor=contrast_factor,
saturation_factor=saturation_factor,
hue_factor=hue_factor,
),
rate=color_jitter_rate,
),
],
**kwargs,
)
| keras-cv/keras_cv/training/contrastive/simclr_trainer.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/simclr_trainer.py",
"repo_id": "keras-cv",
"token_count": 1539
} | 61 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.backend import ops
def to_numpy(x):
if x is None:
return None
if isinstance(x, tf.RaggedTensor):
x = x.to_tensor(-1)
x = ops.convert_to_numpy(x)
# Important for consistency when working with visualization utilities
return np.ascontiguousarray(x)
| keras-cv/keras_cv/utils/to_numpy.py/0 | {
"file_path": "keras-cv/keras_cv/utils/to_numpy.py",
"repo_id": "keras-cv",
"token_count": 287
} | 62 |
# functional APIでKerasを始めてみよう
functional APIは,複数の出力があるモデルや有向非巡回グラフ,共有レイヤーを持ったモデルなどの複雑なモデルを定義するためのインターフェースです.
ここでは`Sequential`モデルについて既に知識があることを前提として説明します.
シンプルな例から見ていきましょう.
-----
## 例1: 全結合ネットワーク
下記のネットワークは`Sequential`モデルによっても定義可能ですが,
functional APIを使ったシンプルな例を見ていきましょう.
- レイヤーのインスタンスは関数呼び出し可能で,戻り値としてテンソルを返します
- `Model`を定義することで入力と出力のテンソルは接続されます
- 上記で定義したモデルは`Sequential`と同様に利用可能です
```python
from keras.layers import Input, Dense
from keras.models import Model
# This returns a tensor
inputs = Input(shape=(784,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels) # starts training
```
-----
## 全てのモデルはレイヤーと同じように関数呼び出し可能です
functional APIを利用することで,訓練済みモデルの再利用が簡単になります:全てのモデルを,テンソルを引数としたlayerのように扱うことができます.これにより,モデル構造だけでなく,モデルの重みも再利用できます.
```python
x = Input(shape=(784,))
# This works, and returns the 10-way softmax we defined above.
y = model(x)
```
一連のシーケンスを処理するモデルを簡単に設計できます.
例えば画像識別モデルをたった1行で動画識別モデルに応用できます.
```python
from keras.layers import TimeDistributed
# Input tensor for sequences of 20 timesteps,
# each containing a 784-dimensional vector
input_sequences = Input(shape=(20, 784))
# This applies our previous model to every timestep in the input sequences.
# the output of the previous model was a 10-way softmax,
# so the output of the layer below will be a sequence of 20 vectors of size 10.
processed_sequences = TimeDistributed(model)(input_sequences)
```
-----
## 多入力多出力モデル
functional APIは複数の入出力を持ったモデルに最適です.
複数の複雑なデータストリームを簡単に扱うことができます.
Twitterの新しいニュースヘッドラインを受信した際,そのツイートのリツイートやライクの回数を予測する例を考えます.主な入力はヘッドラインの単語のシーケンスですが,スパイスとして,ヘッドラインの投稿時間などのデータを入力として追加します.
このモデルは2つの損失関数によって訓練されます.モデルにおける初期の主損失関数を使うことは,深い層を持つモデルにとっては良い正則化の構造です.
以下がモデルの図になります.
<img src="https://s3.amazonaws.com/keras.io/img/multi-input-multi-output-graph.png" alt="multi-input-multi-output-graph" style="width: 400px;"/>
functional APIを利用してこのネットワークを実装してみましょう.
main inputはヘッドラインを整数のシーケンス(それぞれの整数は単語をエンコードしたもの)として受け取ります.
整数の範囲は1から10000となり(単語数は10000語),各シーケンスは長さ100単語で構成されます.
```python
from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model
# Headline input: meant to receive sequences of 100 integers, between 1 and 10000.
# Note that we can name any layer by passing it a "name" argument.
main_input = Input(shape=(100,), dtype='int32', name='main_input')
# This embedding layer will encode the input sequence
# into a sequence of dense 512-dimensional vectors.
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
# A LSTM will transform the vector sequence into a single vector,
# containing information about the entire sequence
lstm_out = LSTM(32)(x)
```
ここでは補助損失を追加し,モデルの主損失がはるかに大きくなっても,LSTMとEmbeddingレイヤーをスムーズに訓練できるようにします.
```python
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
```
この時点で,auxiliary_inputをLSTM出力と連結してモデルに入力します.
```python
auxiliary_input = Input(shape=(5,), name='aux_input')
x = keras.layers.concatenate([lstm_out, auxiliary_input])
# We stack a deep densely-connected network on top
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
# And finally we add the main logistic regression layer
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
```
2つの入力と2つの出力を持ったモデルを定義します.
```python
model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output])
```
モデルをコンパイルし,補助損失に0.2の重み付けを行います.
様々な`loss_weights`や`loss`を対応付けるためにリストもしくは辞書を利用します.
`loss`に1つの損失関数を与えた場合,全ての出力に対して同一の損失関数が適用されます.
```python
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
loss_weights=[1., 0.2])
```
モデルに入力と教師データをリストで渡すことで訓練できます.
```python
model.fit([headline_data, additional_data], [labels, labels],
epochs=50, batch_size=32)
```
入力と出力に名前付けを行っていれば("name"引数を利用),下記のような方法でモデルをコンパイルできます.
```python
model.compile(optimizer='rmsprop',
loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'},
loss_weights={'main_output': 1., 'aux_output': 0.2})
# And trained it via:
model.fit({'main_input': headline_data, 'aux_input': additional_data},
{'main_output': labels, 'aux_output': labels},
epochs=50, batch_size=32)
```
-----
## 共有レイヤー
その他のfunctional APIの利用例として,共有レイヤーがあります.
共有レイヤーについて考えてみましょう.
ツイートのデータセットの例を考えてみましょう.2つのツイートが同じ人物からつぶやかれたかどうかを判定するモデルを作りたいとします.(例えばこれによりユーザーの類似度を比較できます)
これを実現する一つの方法として,2つのツイートを2つのベクトルにエンコードし,それらをマージした後,ロジスティクス回帰を行うことで,その2つのツイートが同じ人物から投稿されたかどうかの確率を出力できます.
このモデルはポジティブなツイートのペアとネガティブなツイートのペアを用いて訓練できます.
問題はシンメトリックであるため,1つめのツイートのエンコードメカニズムは2つめのツイートのエンコード時に再利用出来ます.
ここではLSTMの共有レイヤーによりツイートをエンコードします.
functional APIでこのモデルを作成してみましょう.
入力として`(280, 256)`のバイナリー行列をとります.
サイズが256の280個のシーケンスで,256次元のベクトルの各次元は文字(アルファベット以外も含めた256文字の出現頻度の高いもの)の有無を表します.
```python
import keras
from keras.layers import Input, LSTM, Dense
from keras.models import Model
tweet_a = Input(shape=(280, 256))
tweet_b = Input(shape=(280, 256))
```
それぞれの入力間でレイヤーを共有するために,1つのレイヤーを生成し,そのレイヤーを用いて複数の入力を処理します.
```python
# This layer can take as input a matrix
# and will return a vector of size 64
shared_lstm = LSTM(64)
# When we reuse the same layer instance
# multiple times, the weights of the layer
# are also being reused
# (it is effectively *the same* layer)
encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)
# We can then concatenate the two vectors:
merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1)
# And add a logistic regression on top
predictions = Dense(1, activation='sigmoid')(merged_vector)
# We define a trainable model linking the
# tweet inputs to the predictions
model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit([data_a, data_b], labels, epochs=10)
```
共有レイヤーの出力や出力のshapeを見てみましょう.
-----
## "ノード"の概念
ある入力を用いてレイヤーを関数呼び出しするときは常に新しいテンソル(レイヤーの出力)を生成しており,レイヤーにノードを追加すると入力のテンソルと出力のテンソルはリンクされます.
同じレイヤーを複数回呼び出す際,そのレイヤーは0, 1, 2...とインデックスされた複数のノードを所有することになります.
以前のバージョンのKerasでは,`layer.get_output()`によって出力のテンソルを取得でき,`layer.output_shape`によって形を取得できました.
もちろん現在のバージョンでもこれらは利用可能です(`get_output()`は`output`というプロパティーに変更されました).
しかし複数の入力が接続されているレイヤーはどうしたらよいでしょうか?
1つのレイヤーに1つの入力しかない場合は問題なく,`.output`がそのレイヤーの単一の出力を返します.
```python
a = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
assert lstm.output == encoded_a
```
複数の入力がある場合はそうはなりません.
```python
a = Input(shape=(280, 256))
b = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
encoded_b = lstm(b)
lstm.output
```
```
>> AttributeError: Layer lstm_1 has multiple inbound nodes,
hence the notion of "layer output" is ill-defined.
Use `get_output_at(node_index)` instead.
```
下記は正常に動作します.
```python
assert lstm.get_output_at(0) == encoded_a
assert lstm.get_output_at(1) == encoded_b
```
シンプルですね.
`input_shape`と`output_shape`についても同じことが言えます.
レイヤーが1つのノードしか持っていない,もしくは全てのノードが同じ入出力のshapeであれば,レイヤーの入出力のshapeが一意に定まり,`layer.output_shape`/`layer.input_shape`によって1つのshapeを返します.しかしながら,1つの`Conv2D`レイヤーに`(32, 32, 3)`の入力と`(64, 64, 32)`の入力を行った場合,そのレイヤーは複数のinput/output shapeを持つことになるため,それぞれのshapeはノードのインデックスを指定することで取得できます.
```python
a = Input(shape=(32, 32, 3))
b = Input(shape=(64, 64, 3))
conv = Conv2D(16, (3, 3), padding='same')
conved_a = conv(a)
# Only one input so far, the following will work:
assert conv.input_shape == (None, 32, 32, 3)
conved_b = conv(b)
# now the `.input_shape` property wouldn't work, but this does:
assert conv.get_input_shape_at(0) == (None, 32, 32, 3)
assert conv.get_input_shape_at(1) == (None, 64, 64, 3)
```
-----
## その他の例
コード例から学び始めることは最良の手法です.
その他の例も見てみましょう.
### Inception module
Inceptionモデルについての詳細は[Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842)を参照.
```python
from keras.layers import Conv2D, MaxPooling2D, Input
input_img = Input(shape=(256, 256, 3))
tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)
tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)
output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)
```
### Residual connection on a convolution layer
Residual networksモデルについての詳細は[Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385)を参照してください.
```python
from keras.layers import Conv2D, Input
# input tensor for a 3-channel 256x256 image
x = Input(shape=(256, 256, 3))
# 3x3 conv with 3 output channels (same as input channels)
y = Conv2D(3, (3, 3), padding='same')(x)
# this returns x + y.
z = keras.layers.add([x, y])
```
### Shared vision model
このモデルでは,2つのMNISTの数字が同じものかどうかを識別するために,同じ画像処理のモジュールを2つの入力で再利用しています.
```python
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten
from keras.models import Model
# First, define the vision modules
digit_input = Input(shape=(27, 27, 1))
x = Conv2D(64, (3, 3))(digit_input)
x = Conv2D(64, (3, 3))(x)
x = MaxPooling2D((2, 2))(x)
out = Flatten()(x)
vision_model = Model(digit_input, out)
# Then define the tell-digits-apart model
digit_a = Input(shape=(27, 27, 1))
digit_b = Input(shape=(27, 27, 1))
# The vision model will be shared, weights and all
out_a = vision_model(digit_a)
out_b = vision_model(digit_b)
concatenated = keras.layers.concatenate([out_a, out_b])
out = Dense(1, activation='sigmoid')(concatenated)
classification_model = Model([digit_a, digit_b], out)
```
### Visual question answering model
このモデルは写真に対する自然言語の質問に対して1単語の解答を選択できます.
質問と画像をそれぞれベクトルにエンコードし,それらを1つに結合して,解答となる語彙を正解データとしたロジスティック回帰を訓練させることで実現できます.
```python
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import Input, LSTM, Embedding, Dense
from keras.models import Model, Sequential
# First, let's define a vision model using a Sequential model.
# This model will encode an image into a vector.
vision_model = Sequential()
vision_model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(224, 224, 3)))
vision_model.add(Conv2D(64, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
vision_model.add(Conv2D(128, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Flatten())
# Now let's get a tensor with the output of our vision model:
image_input = Input(shape=(224, 224, 3))
encoded_image = vision_model(image_input)
# Next, let's define a language model to encode the question into a vector.
# Each question will be at most 100 words long,
# and we will index words as integers from 1 to 9999.
question_input = Input(shape=(100,), dtype='int32')
embedded_question = Embedding(input_dim=10000, output_dim=256, input_length=100)(question_input)
encoded_question = LSTM(256)(embedded_question)
# Let's concatenate the question vector and the image vector:
merged = keras.layers.concatenate([encoded_question, encoded_image])
# And let's train a logistic regression over 1000 words on top:
output = Dense(1000, activation='softmax')(merged)
# This is our final model:
vqa_model = Model(inputs=[image_input, question_input], outputs=output)
# The next stage would be training this model on actual data.
```
### Video question answering model
画像のQAモデルを訓練したので,そのモデルを応用して動画のQA modelを作成してみましょう.
適切な訓練を行うことで,短い動画や(例えば,100フレームの人物行動)や動画を用いた自然言語のQAへ応用することができます(例えば,「その少年は何のスポーツをしていますか?」「サッカーです」).
```python
from keras.layers import TimeDistributed
video_input = Input(shape=(100, 224, 224, 3))
# This is our video encoded via the previously trained vision_model (weights are reused)
encoded_frame_sequence = TimeDistributed(vision_model)(video_input) # the output will be a sequence of vectors
encoded_video = LSTM(256)(encoded_frame_sequence) # the output will be a vector
# This is a model-level representation of the question encoder, reusing the same weights as before:
question_encoder = Model(inputs=question_input, outputs=encoded_question)
# Let's use it to encode the question:
video_question_input = Input(shape=(100,), dtype='int32')
encoded_video_question = question_encoder(video_question_input)
# And this is our video question answering model:
merged = keras.layers.concatenate([encoded_video, encoded_video_question])
output = Dense(1000, activation='softmax')(merged)
video_qa_model = Model(inputs=[video_input, video_question_input], outputs=output)
```
| keras-docs-ja/sources/getting-started/functional-api-guide.md/0 | {
"file_path": "keras-docs-ja/sources/getting-started/functional-api-guide.md",
"repo_id": "keras-docs-ja",
"token_count": 7598
} | 63 |
# Kerasレイヤーを作成
シンプルで状態を持たない独自演算では,`layers.core.Lambda`を用いるべきでしょう.
しかし,学習可能な重みを持つ独自演算は,自身でレイヤーを実装する必要があります.
以下に__Keras 2.0__でのレイヤーの枠組みを示します(古いバージョンを使っている場合は,更新してください).
実装する必要のあるメソッドは3つだけです.
- `build(input_shape)`: これは重みを定義するメソッドです.このメソッドは,`self.built = True`をセットしなければいけません,これは`super([Layer], self).build()`を呼び出しでできます.
- `call(x)`: ここではレイヤーのロジックを記述します.オリジナルのレイヤーでマスキングをサポートしない限り,第1引数である入力テンソルが`call`に渡されることに気を付けてください.
- `compute_output_shape(input_shape)`: 作成したレイヤーの内部で入力のshapeを変更する場合には,ここでshape変換のロジックを指定する必要があります.こうすることでKerasが自動的にshapeを推定します.
```python
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
super(MyLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
```
既存のKerasレイヤーは何を実装するにしても十分な例を提供しています.なので,躊躇せずソースコードを読んでください!
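以下は,上記で定義した`MyLayer`をモデルに組み込む簡単な使用例です(入力の次元数は説明のための仮の値です).
```python
from keras.layers import Input
from keras.models import Model
inputs = Input(shape=(784,))
outputs = MyLayer(10)(inputs)  # 出力のshapeは (batch_size, 10) になります
model = Model(inputs, outputs)
```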
| keras-docs-ja/sources/layers/writing-your-own-keras-layers.md/0 | {
"file_path": "keras-docs-ja/sources/layers/writing-your-own-keras-layers.md",
"repo_id": "keras-docs-ja",
"token_count": 1062
} | 64 |
## 제약의 사용법<sub>Usage of constraints</sub>
`constraints` 모듈의 함수는 최적화 과정에서 네트워크 매개변수에 제약(예시: 음이 아닌<sub>non-negativity</sub>)을 설정할 수 있습니다.
페널티는 각 층별로 적용됩니다. API의 구체사항은 층마다 다를 수 있지만, `Dense`, `Conv1D`, `Conv2D` 그리고 `Conv3D` 층은 통일된 API를 가집니다.
이러한 층들은 2가지 키워드 인자를 가집니다.
- 주요 가중치 행렬에 대한 `kernel_constraint`
- 편향에 대한 `bias_constraint`
```python
from keras.constraints import max_norm
model.add(Dense(64, kernel_constraint=max_norm(2.)))
```
---
## 사용가능한 제약<sub>Available constraints</sub>
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L22)</span>
### MaxNorm
```python
keras.constraints.MaxNorm(max_value=2, axis=0)
```
MaxNorm 가중치 제약.
각 은닉층에 대응하는 가중치를 제약해서
가중치의 노름이 특정 값 이하가 되도록 합니다.
__인자__
- __max_value__: 입력 가중치의 최대 노름
- __axis__: 정수, 가중치 노름을 계산할 축.
예를 들어, 어느 `Dense` 층의 가중치 행렬이
`(input_dim, output_dim)`의 형태를 취할 때,
`axis`를 `0`으로 설정해서 `(input_dim,)`의 길이를 갖는
각 가중치 벡터를 제약할 수 있습니다.
`data_format="channels_last"`의 데이터 포맷을 갖는 `Conv2D` 층의 경우,
가중치 텐서는
`(rows, cols, input_depth, output_depth)`의 형태를 가지며,
`axis`를 `[0, 1, 2]`로 설정하여
`(rows, cols, input_depth)`의 형태를 갖는
각 필터 텐서의 가중치를 제약할 수 있습니다.
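다음은 `Dense` 층과 `Conv2D` 층에 `MaxNorm` 제약을 적용하는 간단한 예시입니다(층의 크기는 설명을 위한 임의의 값입니다).
```python
from keras.constraints import MaxNorm
model.add(Dense(64, kernel_constraint=MaxNorm(max_value=2., axis=0)))
model.add(Conv2D(32, (3, 3),
                 kernel_constraint=MaxNorm(max_value=2., axis=[0, 1, 2])))
```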
__참고__
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L61)</span>
### NonNeg
```python
keras.constraints.NonNeg()
```
가중치가 비음수가 되도록 제약합니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L69)</span>
### UnitNorm
```python
keras.constraints.UnitNorm(axis=0)
```
각 은닉 유닛에 대응하는 가중치가 단위 노름을 가지도록 제약합니다.
__인자__
- __axis__: 정수,가중치 노름을 계산할 축.
예를 들어, 어느 `Dense` 층의 가중치 행렬이
`(input_dim, output_dim)`의 형태를 취할 때,
`axis`를 `0`으로 설정해서 `(input_dim,)`의 길이를 갖는
각 가중치 벡터를 제약할 수 있습니다.
`data_format="channels_last"`의 데이터 포맷을 갖는 `Conv2D` 층의 경우,
가중치 텐서는
`(rows, cols, input_depth, output_depth)`의 형태를 가지며,
`axis`를 `[0, 1, 2]`로 설정하여
`(rows, cols, input_depth)`의 형태를 갖는
각 필터 텐서의 가중치를 제약할 수 있습니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/constraints.py#L98)</span>
### MinMaxNorm
```python
keras.constraints.MinMaxNorm(min_value=0.0, max_value=1.0, rate=1.0, axis=0)
```
MinMaxNorm 가중치 제약.
각 은닉 유닛에 대응하는 가중치를 제약해서
가중치의 노름이 상한과 하한 사이의 값을 가지도록 합니다.
__인자__
- __min_value__: 입력 가중치의 최소 노름.
- __max_value__: 입력 가중치의 최대 노름.
- __rate__: 제약을 시행하는 속도:
가중치를 리스케일하여
`(1 - rate) * norm + rate * norm.clip(min_value, max_value)`의 값을 산출하도록 합니다.
이는 실질적으로 rate=1.0의 경우 제약을 엄격하게
실행함을 의미하고, 반대로 rate<1.0의 경우
매 단계마다 가중치가 리스케일되어
원하는 간격 사이의 값에 천천히 가까워지도록 함을 말합니다.
- __axis__: 정수, 가중치 노름을 계산할 축.
예를 들어, 어느 `Dense` 층의 가중치 행렬이
`(input_dim, output_dim)`의 형태를 취할 때,
`axis`를 `0`으로 설정해서 `(input_dim,)`의 길이를 갖는
각 가중치 벡터를 제약할 수 있습니다.
`data_format="channels_last"`의 데이터 포맷을 갖는 `Conv2D` 층의 경우,
가중치 텐서는
`(rows, cols, input_depth, output_depth)`의 형태를 가지며,
`axis`를 `[0, 1, 2]`로 설정하여
`(rows, cols, input_depth)`의 형태를 갖는
각 필터 텐서의 가중치를 제약할 수 있습니다.
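다음은 `MinMaxNorm`을 `Dense` 층에 적용하는 간단한 예시입니다(값은 설명을 위한 임의의 값입니다).
```python
from keras.constraints import MinMaxNorm
model.add(Dense(64,
                kernel_constraint=MinMaxNorm(min_value=0.5, max_value=1.5,
                                             rate=0.5, axis=0)))
```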
---
| keras-docs-ko/sources/constraints.md/0 | {
"file_path": "keras-docs-ko/sources/constraints.md",
"repo_id": "keras-docs-ko",
"token_count": 3290
} | 65 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/normalization.py#L16)</span>
### BatchNormalization
```python
keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)
```
배치 정규화 레이어입니다. (Ioffe and Szegedy, 2014)
각 배치에서 이전 레이어의 activations를 정규화합니다. 즉, activation의 평균은 0에 가깝도록 하고 표준 편차는 1에 가깝게 유지하는 변환을 적용합니다.
__인자 설명__
- __axis__: Integer, 정규화되어야 하는 축을 의미합니다.(일반적으로 feature axis입니다.) 예를 들어, `data_format="channels_first"`가 있는 `Conv2D`레이어 다음에 `axis=1`인 `BatchNormalization`을 설정할 수 있습니다.
- __momentum__: 이동 평균(moving mean) 및 이동 분산(moving variance)에 대한 모멘텀을 의미합니다.
- __epsilon__: 0으로 나누기를 방지하기 위해 분산에 추가되는 작은 float값 입니다.
- __center__: True일 경우, 정규화된 텐서에 `beta`만큼의 거리(offset)를 추가합니다. False인 경우 `beta`는 무시됩니다.
- __scale__: True일 경우, `gamma`를 곱합니다. False인 경우 `gamma`는 사용되지 않습니다. 다음 레이어가 선형(예를 들어, `nn.relu`)일때, Scaling이 다음 레이어에서 수행될 것이기 때문에 사용되지 않을 수 있습니다.
- __beta_initializer__: beta weight를 위한 초기값 설정기입니다.
- __gamma_initializer__: gamma weight를 위한 초기값 설정기입니다.
- __moving_mean_initializer__: 이동 평균(moving mean)을 위한 초기값 설정기입니다.
- __moving_variance_initializer__: 이동 분산(moving variance)을 위한 초기값 설정기입니다.
- __beta_regularizer__: beta weight를 위해 선택적으로 사용 가능한 규제기입니다.
- __gamma_regularizer__: gamma weight를 위해 선택적으로 사용 가능한 규제기입니다.
- __beta_constraint__: beta weight를 위해 선택적으로 적용 가능한 제약조건입니다.
- __gamma_constraint__: gamma weight를 위해 선택적으로 적용 가능한 제약조건입니다.
__입력 크기__
임의입니다. 이 레이어를 모델의 첫 번째 레이어로 사용할 때, 키워드 인자 `input_shape` (정수 튜플, 샘플 축 미포함)를 사용하십시오.
__출력 크기__
입력 크기와 동일합니다.
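__예시__
다음은 `data_format="channels_first"`를 사용하는 `Conv2D` 층 뒤에 `axis=1`로 배치 정규화를 적용하는 간단한 예시입니다(입력 크기는 설명을 위한 임의의 값입니다).
```python
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
model = Sequential()
model.add(Conv2D(32, (3, 3), data_format='channels_first',
                 input_shape=(3, 32, 32)))
model.add(BatchNormalization(axis=1))
```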
__참고 자료__
- [Batch Normalization: Accelerating Deep Network Training by
Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
| keras-docs-ko/sources/layers/normalization.md/0 | {
"file_path": "keras-docs-ko/sources/layers/normalization.md",
"repo_id": "keras-docs-ko",
"token_count": 1781
} | 66 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py#L21)</span>
### CustomObjectScope
```python
keras.utils.CustomObjectScope()
```
`_GLOBAL_CUSTOM_OBJECTS`에 대한 변경 사항이 벗어날 수 없는 유효범위를 제공합니다.
`with` 명령문 내의 코드는 문자열 이름을 통해 커스텀 객체에 접근할 수 있습니다.
글로벌 커스텀 객체로의 변환은
`with` 명령문의 영역 내에서 유효합니다. `with` 명령문이 끝나면,
글로벌 커스텀 객체는
`with` 명령문 시작 상태로 되돌아갑니다.
__예시__
`MyObject`(예: 클래스)라는 커스텀 객체에 대해, 다음과 같이 접근할 수 있습니다.
```python
with CustomObjectScope({'MyObject': MyObject}):
# save, load 등의 함수들은 문자열 이름으로 커스텀 객체에 접근할 수 있습니다.
layer = Dense(..., kernel_regularizer='MyObject')
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/io_utils.py#L25)</span>
### HDF5Matrix
```python
keras.utils.HDF5Matrix(datapath, dataset, start=0, end=None, normalizer=None)
```
NumPy 배열을 대체하는 HDF5 데이터 세트 표현양식.
__예시__
```python
x_data = HDF5Matrix('input/file.hdf5', 'data')
model.predict(x_data)
```
`start`와 `end` 인자를 통해 가져올 데이터 범위의 시작과 끝을 지정할 수 있습니다.
추가적으로 정규화 함수(혹은 람다)를 사용할 수 있습니다.
이는 가져온 모든 데이터에 대해 호출됩니다.
__인자__
- __datapath__: `str`, HDF5 파일의 경로입니다.
- __dataset__: `str`, `datapath`에 명시된 파일 내 HDF5 데이터 세트의 이름입니다.
- __start__: `int`, 데이터 세트에서 자르기 원하는 부분의 시작점입니다.
- __end__: `int`, 데이터 세트에서 자르기 원하는 부분의 끝점입니다.
- __normalizer__: 데이터가 회수될 때 데이터에 대해서 호출할 함수입니다.
__반환값__
배열과 유사한 형태의 HDF5 데이터 세트.
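예를 들어, `start`, `end` 그리고 `normalizer`를 다음과 같이 함께 사용할 수 있습니다(파일 경로와 데이터 세트 이름은 설명을 위한 임의의 값입니다).
```python
normalizer = lambda x: x / 255.0
x_train = HDF5Matrix('input/file.hdf5', 'data',
                     start=0, end=1000, normalizer=normalizer)
model.predict(x_train)
```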
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/data_utils.py#L305)</span>
### Sequence
```python
keras.utils.Sequence()
```
데이터 세트 등의 데이터 시퀀스를 학습하기 위한 베이스 객체.
모든 `Sequence`는 `__getitem__`과 `__len__` 메소드를 구현해야 합니다.
`on_epoch_end` 메소드를 사용하면, 매 에폭의 끝에서 데이터 세트를 수정할 수 있습니다.
`__getitem__` 메소드는 완전한 배치를 반환해야 합니다.
__안내__
`Sequence`는 멀티프로세싱을 보다 안전하게 실행합니다.
생성기와는 다르게 네트워크가 각 에폭당 한 샘플을 한 번만 학습하도록
보장해줍니다.
__예시__
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# 여기서 `x_set`은 이미지 파일 경로의 목록입니다.
# 그리고 `y_set`은 관련 클래스입니다.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
----
### to_categorical
```python
keras.utils.to_categorical(y, num_classes=None, dtype='float32')
```
정수로 이루어진 벡터를 이진 클래스 행렬로 변환합니다(예: `categorical_crossentropy`와 함께 사용할 수 있습니다).
__인자__
- __y__: 행렬로 변환할 클래스 벡터(0부터 `num_classes`까지의 정수).
- __num_classes__: 클래스의 총 개수.
- __dtype__: 문자열로 표현된 입력값의 데이터 자료형 (`'float32'`, `'float64'`, `'int32'`, ...).
__반환값__
입력값의 이진행렬 표현.
클래스 축이 마지막에 위치합니다.
__예시__
```python
# 3 클래스 {0, 1, 2}에 대한 5개의 레이블로 이루어진 배열을 생각해봅시다.
> labels
array([0, 2, 1, 2, 0])
# `to_categorical`은 이를 클래스 수 만큼의 열을 가진
# 행렬로 변환합니다. 행의 수는
# 변하지 않습니다.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
----
### normalize
```python
keras.utils.normalize(x, axis=-1, order=2)
```
NumPy 배열을 정규화합니다.
__인자__
- __x__: 정규화할 NumPy 배열.
- __axis__: 정규화를 적용할 축.
- __order__: 정규화 계수 (예: L2 노름의 경우 2).
__반환값__
정규화된 배열의 복사본.
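__예시__
다음은 각 행을 L2 노름으로 정규화하는 간단한 예시입니다.
```python
import numpy as np
from keras.utils import normalize
x = np.array([[3., 4.], [1., 2.]])
x_normalized = normalize(x, axis=-1, order=2)  # 각 행의 L2 노름이 1이 됩니다
```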
----
### get_file
```python
keras.utils.get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None)
```
캐시에 파일이 존재하지 않으면 URL에서 파일을 다운로드 합니다.
기본적으로 URL `origin`에서 `cache_subdir`인 `'datasets'` 내 위치한
`cache_dir`인 `~/.keras`로 파일이 다운로드되고
`fname`으로 파일 이름이 붙습니다. 그러므로
`example.txt` 파일의 최종위치는 `~/.keras/datasets/example.txt`가 됩니다.
tar, tar.gz, tar.bz, 그리고 zip 형식의 파일도 추출 가능합니다.
파일 해시를 인자로 전달하면 다운로드 후 파일의 유효성을 검증할 수 있습니다.
명령줄 프로그램인 `shasum`과 `sha256sum`으로 해시를 계산할 수 있습니다.
__인자__
- __fname__: 파일의 이름. 절대 경로 `/path/to/file.txt`가 명시된 경우 해당 경로에 파일이 저장됩니다.
- __origin__: 파일의 본래 URL.
- __untar__: `untar`대신 `extract`를 권장합니다.
`bool` 형식의 입력으로 파일의 압축 해제 여부를 선택합니다.
- __md5_hash__: `md5_hash`대신 `file_hash`를 권장합니다.
파일 검사용 md5 해시.
- __file_hash__: 다운로드 후 예상되는 파일의 해시 문자열.
해시 알고리즘인 sha256과 md5 둘 모두 지원됩니다.
- __cache_subdir__: 파일이 저장되는 케라스 캐시 디렉토리 내 하위 디렉토리.
절대 경로 `/path/to/folder`가 명시된 경우 해당 경로에 파일이 저장됩니다.
- __hash_algorithm__: 파일을 검사하기 위한 해시 알고리즘을 선택합니다.
`'md5'`, `'sha256'`, 그리고 `'auto'`를 선택할 수 있습니다.
기본값인 `'auto'`는 사용중인 해시 알고리즘을 감지합니다.
- __extract__: `True`인 경우 tar 혹은 zip처럼 Archive로 파일을 추출합니다.
- __archive_format__: 파일 추출을 시도할 Archive 형식.
`'auto'`, `'tar'`, `'zip'`, 그리고 `None`을 선택할 수 있습니다.
`'tar'`는 tar, tar.gz, 그리고 tar.bz 파일을 포함합니다.
`'auto'`를 선택할 경우 기본값으로 ['tar', 'zip']가 선택됩니다.
`None` 혹은 빈 목록는 '발견된 매치가 없음'을 메시지를 반환합니다.
- __cache_dir__: 캐시된 파일을 저장할 위치.
`None`일 경우 기본값은 [케라스 디렉토리](/faq/#where-is-the-keras-configuration-filed-stored)입니다.
__반환값__
다운로드된 파일의 경로.
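__예시__
다음은 원격 파일을 다운로드하고 압축을 해제하는 간단한 예시입니다(URL은 설명을 위한 가상의 주소입니다).
```python
from keras.utils import get_file
path = get_file('example.tar.gz',
                origin='https://example.com/example.tar.gz',
                extract=True,
                cache_subdir='datasets')
```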
----
### print_summary
```python
keras.utils.print_summary(model, line_length=None, positions=None, print_fn=None)
```
모델을 요약하여 출력합니다.
__인자__
- __model__: 케라스 모델 인스턴스.
- __line_length__: 출력 결과의 가로 길이
(예: 터미널 창의 가로 길이에 맞도록
이 값을 설정합니다).
- __positions__: 각 라인의 로그 요소의 절대적 혹은 상대적 위치.
값을 특정하지 않으면 기본값인 `[.33, .55, .67, 1.]`로 설정됩니다.
- __print_fn__: 사용할 출력 함수.
모델 요약의 각 라인마다 호출됩니다.
문자열 요약을 캡처하려면
이 값을 커스텀 함수로 설정할 수 있습니다.
기본값은 `print`(stdout으로 출력)입니다.
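__예시__
다음은 `print_fn`을 이용해 모델 요약을 문자열로 캡처하는 간단한 예시입니다(`model`은 이미 정의된 케라스 모델이라고 가정합니다).
```python
from keras.utils import print_summary
lines = []
print_summary(model, print_fn=lines.append)
summary_text = '\n'.join(lines)
```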
----
### plot_model
```python
keras.utils.plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96)
```
케라스 모델을 도트 형식으로 변환하고 파일에 저장합니다.
__인자__
- __model__: 케라스 모델 인스턴스.
- __to_file__: 플롯 이미지의 파일 이름.
- __show_shapes__: 입출력 형태 정보를 보여줄지 여부.
- __show_layer_names__: 층 이름을 보여줄지 여부.
- __rankdir__: `rankdir` 인자가,
플롯의 형식을 결정하는 문자열인 PyDot으로 전달됩니다:
`'TB'`는 세로 플롯;
`'LR'`는 가로 플롯을 생성합니다.
- __expand_nested__: 중첩된 모델을 클러스터로 확장할지 여부.
- __dpi__: 도트 DPI.
__반환값__
주피터가 설치된 경우 주피터 노트북 이미지 객체를 반환합니다.
이 메소드를 이용하면 주피터 노트북에서 모델을 바로 출력하여 확인할 수 있습니다.
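__예시__
다음은 간단한 사용 예시입니다(이 기능을 사용하려면 pydot과 graphviz가 설치되어 있어야 합니다).
```python
from keras.utils import plot_model
plot_model(model, to_file='model.png',
           show_shapes=True, show_layer_names=True)
```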
----
### multi_gpu_model
```python
keras.utils.multi_gpu_model(model, gpus=None, cpu_merge=True, cpu_relocation=False)
```
다른 GPU에 모델을 복제합니다.
이 함수는 구체적으로 단일 기계 다중 GPU 데이터 병렬처리를 실행합니다.
다음과 같은 방식으로 작동합니다:
- 모델의 입력을 여러 하위 배치로 나눕니다.
- 모델 복사본을 각 하위 배치에 적용합니다.
각 모델 복사본은 전용 GPU에서 실행됩니다.
- 결과물을 (CPU에서) 연결하여 하나의 큰 배치로 만듭니다.
예. `batch_size`가 64이고 `gpus=2`라면,
입력이 각 32개의 샘플로 구성된 2개의 하위 배치로 나뉘고,
한 GPU 당 각각의 하위 배치가 처리된 후,
64개의 처리된 샘플로 구성된 완전한 배치를 반환합니다.
8개의 GPU까지는 거의 선형적으로 속도 향상이 예상됩니다.
이 함수는 현재 TensorFlow 백엔드에서만
사용가능합니다.
__인자__
- __model__: 케라스 모델 인스턴스.
메모리 부족 오류를 피하기 위해서 이 모델을 CPU에 생성해두는 방법이 있습니다
(아래의 사용법 예시를 참고하십시오).
- __gpus__: 정수 >= 2 혹은 정수 리스트,
생성된 모델 복사본을 위치시킬 GPU의 개수 혹은 GPU ID의 목록.
- __cpu_merge__: 모델 가중치 병합을 CPU의 유효범위 내에서
강제할지 여부를 명시하는 불리언 값.
- __cpu_relocation__: 모델 가중치를 CPU의 유효범위 내에서
생성할지 여부를 명시하는 불리언 값.
만약 이전의 어떤 장치의 유효범위에도 모델이 정의되지 않았다면,
이 옵션을 활성화시켜 문제를 해결할 수 있습니다.
__반환값__
초기 `model` 인자와 완벽하게 동일하게 사용가능하되,
작업부하를 여러 GPU에 분산시키는 케라스 `Model` 인스턴스.
__예시__
예시 1 - CPU에서 가중치를 병합하는 모델 학습
```python
import tensorflow as tf
from keras.applications import Xception
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
# 베이스 모델(혹은 "템플릿" 모델)을 인스턴스화합니다.
# 모델의 가중치가 CPU 메모리에 저장될 수 있도록,
# CPU 장치 유효범위 내에서 이 작업을 진행하는 것을 권합니다.
# 그렇지 않은 경우, 가중치가 GPU에 저장되어
# 가중치 공유작업이 원활치 않을 수 있습니다.
with tf.device('/cpu:0'):
model = Xception(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# 모델을 8개의 GPU에 복제합니다.
# 이는 컴퓨터에 8개의 사용가능한 GPU가 있다고 가정하는 것입니다.
parallel_model = multi_gpu_model(model, gpus=8)
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# 가짜 데이터를 생성합니다.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# 이 `fit` 호출은 8개의 GPU에 분산됩니다.
# 배치 크기가 256이므로, 각 GPU는 32샘플을 처리합니다.
parallel_model.fit(x, y, epochs=20, batch_size=256)
# (같은 가중치를 공유하는) 템플릿 모델을 통해서 모델을 저장합니다:
model.save('my_model.h5')
```
예시 2 - cpu_relocation을 사용해 CPU에서 가중치를 병합하는 모델 학습
```python
..
# 모델 정의를 위해서 장치 유효범위를 바꿀 필요는 없습니다:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_relocation=True)
print("Training using multiple GPUs..")
except ValueError:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
예시 3 - GPU에서 가중치를 병합하는 모델 학습 (NV-link에 권장됩니다)
```python
..
# 모델 정의를 위해서 장치 유효범위를 바꿀 필요는 없습니다:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_merge=False)
print("Training using multiple GPUs..")
except:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
__모델 저장하기__
다중-GPU 모델을 저장하려면, `multi_gpu_model`에 의해서 반환되는 모델보다는
템플릿 모델(`multi_gpu_model`에 전달되는 인자)과 함께
`.save(fname)` 혹은 `.save_weights(fname)`를 사용하면 됩니다.
| keras-docs-ko/sources/utils.md/0 | {
"file_path": "keras-docs-ko/sources/utils.md",
"repo_id": "keras-docs-ko",
"token_count": 9790
} | 67 |
## 激活函数的用法
激活函数可以通过设置单独的 `Activation` 层实现,也可以在构造层对象时通过传递 `activation` 参数实现:
```python
from keras.layers import Activation, Dense
model.add(Dense(64))
model.add(Activation('tanh'))
```
等价于:
```python
model.add(Dense(64, activation='tanh'))
```
你也可以通过传递一个逐元素运算的 Theano/TensorFlow/CNTK 函数来作为激活函数:
```python
from keras import backend as K
model.add(Dense(64, activation=K.tanh))
model.add(Activation(K.tanh))
```
## 预定义激活函数
### elu
```python
keras.activations.elu(x, alpha=1.0)
```
指数线性单元。
__参数__
- __x__:输入张量。
- __alpha__:一个标量,表示负数部分的斜率。
__返回__
线性指数激活:如果 `x > 0`,返回值为 `x`;如果 `x < 0` 返回值为 `alpha * (exp(x)-1)`
__参考文献__
- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
----
### softmax
```python
keras.activations.softmax(x, axis=-1)
```
Softmax 激活函数。
__参数__
- __x__:输入张量。
- __axis__:整数,代表 softmax 所作用的维度。
__返回__
softmax 变换后的张量。
__异常__
- __ValueError__:如果 `dim(x) == 1`。
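下面是一个在张量上沿指定轴计算 softmax 的简单示例(数值仅用于说明)。
```python
from keras import backend as K
from keras.activations import softmax
x = K.constant([[1., 2., 3.], [1., 2., 3.]])
y = K.eval(softmax(x, axis=-1))  # 每一行的和为 1
```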
----
### selu
```python
keras.activations.selu(x)
```
可伸缩的指数线性单元(SELU)。
SELU 等同于:`scale * elu(x, alpha)`,其中 alpha 和 scale 是预定义的常量。只要正确初始化权重(参见 `lecun_normal` 初始化方法)并且输入的数量「足够大」(参见参考文献获得更多信息),选择合适的 alpha 和 scale 的值,就可以在两个连续层之间保留输入的均值和方差。
__参数__
- __x__: 一个用来用于计算激活函数的张量或变量。
__返回__
可伸缩的指数线性激活:`scale * elu(x, alpha)`。
__注意__
- 与「lecun_normal」初始化方法一起使用。
- 与 dropout 的变种「AlphaDropout」一起使用。
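下面是一个配合 `lecun_normal` 初始化和 `AlphaDropout` 使用 SELU 的简单示例(层的大小仅用于说明)。
```python
from keras.layers import Dense, AlphaDropout
model.add(Dense(64, activation='selu',
                kernel_initializer='lecun_normal'))
model.add(AlphaDropout(0.1))
```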
__参考文献__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
----
### softplus
```python
keras.activations.softplus(x)
```
Softplus 激活函数。
__参数__
- __x__: 输入张量。
__返回__
Softplus 激活:`log(exp(x) + 1)`。
----
### softsign
```python
keras.activations.softsign(x)
```
Softsign 激活函数。
__参数__
- __x__: 输入张量。
__返回__
Softsign 激活:`x / (abs(x) + 1)`。
----
### relu
```python
keras.activations.relu(x, alpha=0.0, max_value=None, threshold=0.0)
```
整流线性单元。
使用默认值时,它返回逐元素的 `max(x, 0)`。
否则,它遵循:
- 如果 `x >= max_value`:`f(x) = max_value`,
- 如果 `threshold <= x < max_value`:`f(x) = x`,
- 否则:`f(x) = alpha * (x - threshold)`。
__参数__
- __x__: 输入张量。
- __alpha__:负数部分的斜率。默认为 0。
- __max_value__:输出的最大值。
- __threshold__: 浮点数。Thresholded activation 的阈值。
__返回__
一个张量。
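下面是一个同时使用 `alpha`、`max_value` 和 `threshold` 的简单示例(数值仅用于说明)。
```python
from keras import backend as K
from keras.activations import relu
x = K.constant([-10., -1., 0., 5., 10.])
K.eval(relu(x, alpha=0.1, max_value=6., threshold=0.))
# 结果为 [-1. , -0.1,  0. ,  5. ,  6. ]
```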
----
### tanh
```python
keras.activations.tanh(x)
```
双曲正切激活函数。
__参数__
- __x__: 输入张量。
__返回__
双曲正切激活函数:
`tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))`
----
### sigmoid
```python
keras.activations.sigmoid(x)
```
Sigmoid 激活函数。
__参数__
- __x__: 输入张量.
__返回__
Sigmoid激活函数: `1 / (1 + exp(-x))`.
----
### hard_sigmoid
```python
hard_sigmoid(x)
```
Hard sigmoid 激活函数。
计算速度比 sigmoid 激活函数更快。
__参数__
- __x__: 输入张量。
__返回__
Hard sigmoid 激活函数:
- 如果 `x < -2.5`,返回 `0`。
- 如果 `x > 2.5`,返回 `1`。
- 如果 `-2.5 <= x <= 2.5`,返回 `0.2 * x + 0.5`。
----
### exponential
```python
keras.activations.exponential(x)
```
自然数指数激活函数。
----
### linear
```python
keras.activations.linear(x)
```
线性激活函数(即不做任何改变)
__参数__
- __x__: 输入张量。
__返回__
输入张量,不变。
## 高级激活函数
对于 Theano/TensorFlow/CNTK 不能表达的复杂激活函数,如含有可学习参数的激活函数,可通过[高级激活函数](layers/advanced-activations.md)实现,可以在 `keras.layers.advanced_activations` 模块中找到。 这些高级激活函数包括 `PReLU` 和 `LeakyReLU`。
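例如,可以将 `LeakyReLU` 作为独立的层使用:
```python
from keras.layers import Dense, LeakyReLU
model.add(Dense(64))
model.add(LeakyReLU(alpha=0.3))
```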
| keras-docs-zh/sources/activations.md/0 | {
"file_path": "keras-docs-zh/sources/activations.md",
"repo_id": "keras-docs-zh",
"token_count": 2621
} | 68 |
# 此脚本演示了卷积LSTM网络的使用。
该网络用于预测包含移动方块的人工生成的电影的下一帧。
```python
from keras.models import Sequential
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
import numpy as np
import pylab as plt
# 我们创建一个网络层,以尺寸为 (n_frames,width,height,channels) 的电影作为输入,并返回相同尺寸的电影。
seq = Sequential()
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
input_shape=(None, 40, 40, 1),
padding='same', return_sequences=True))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same', return_sequences=True))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same', return_sequences=True))
seq.add(BatchNormalization())
seq.add(ConvLSTM2D(filters=40, kernel_size=(3, 3),
padding='same', return_sequences=True))
seq.add(BatchNormalization())
seq.add(Conv3D(filters=1, kernel_size=(3, 3, 3),
activation='sigmoid',
padding='same', data_format='channels_last'))
seq.compile(loss='binary_crossentropy', optimizer='adadelta')
# 人工数据生成:
# 生成内部有3到7个移动方块的电影。
# 方块的尺寸为 1x1 或 2x2 像素,
# 随着时间的推移线性移动。
# 为方便起见,我们首先创建宽度和高度较大的电影(80x80),最后选择 40x40 的窗口。
def generate_movies(n_samples=1200, n_frames=15):
row = 80
col = 80
    noisy_movies = np.zeros((n_samples, n_frames, row, col, 1), dtype=float)
    shifted_movies = np.zeros((n_samples, n_frames, row, col, 1),
                              dtype=float)
for i in range(n_samples):
# 添加 3 到 7 个移动方块
n = np.random.randint(3, 8)
for j in range(n):
# 初始位置
xstart = np.random.randint(20, 60)
ystart = np.random.randint(20, 60)
# 运动方向
directionx = np.random.randint(0, 3) - 1
directiony = np.random.randint(0, 3) - 1
# 方块尺寸
w = np.random.randint(2, 4)
for t in range(n_frames):
x_shift = xstart + directionx * t
y_shift = ystart + directiony * t
noisy_movies[i, t, x_shift - w: x_shift + w,
y_shift - w: y_shift + w, 0] += 1
# 通过添加噪音使其更加健壮。
                # 这个想法是,如果在推理期间,像素的值不是正好等于 1,
# 我们需要训练更加健壮的网络,并仍然将其视为属于方块的像素。
if np.random.randint(0, 2):
noise_f = (-1)**np.random.randint(0, 2)
noisy_movies[i, t,
x_shift - w - 1: x_shift + w + 1,
y_shift - w - 1: y_shift + w + 1,
0] += noise_f * 0.1
# Shift the ground truth by 1
x_shift = xstart + directionx * (t + 1)
y_shift = ystart + directiony * (t + 1)
shifted_movies[i, t, x_shift - w: x_shift + w,
y_shift - w: y_shift + w, 0] += 1
# 裁剪为 40x40 窗口
noisy_movies = noisy_movies[::, ::, 20:60, 20:60, ::]
shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::]
noisy_movies[noisy_movies >= 1] = 1
shifted_movies[shifted_movies >= 1] = 1
return noisy_movies, shifted_movies
# 训练网络
noisy_movies, shifted_movies = generate_movies(n_samples=1200)
seq.fit(noisy_movies[:1000], shifted_movies[:1000], batch_size=10,
epochs=300, validation_split=0.05)
# 在一部电影上测试网络
# 用前 7 个位置训练它,然后预测新的位置
which = 1004
track = noisy_movies[which][:7, ::, ::, ::]
for j in range(16):
new_pos = seq.predict(track[np.newaxis, ::, ::, ::, ::])
new = new_pos[::, -1, ::, ::, ::]
track = np.concatenate((track, new), axis=0)
# 然后将预测与实际进行比较
track2 = noisy_movies[which][::, ::, ::, ::]
for i in range(15):
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121)
if i >= 7:
ax.text(1, 3, 'Predictions !', fontsize=20, color='w')
else:
ax.text(1, 3, 'Initial trajectory', fontsize=20)
toplot = track[i, ::, ::, 0]
plt.imshow(toplot)
ax = fig.add_subplot(122)
plt.text(1, 3, 'Ground truth', fontsize=20)
toplot = track2[i, ::, ::, 0]
if i >= 2:
toplot = shifted_movies[which][i - 1, ::, ::, 0]
plt.imshow(toplot)
plt.savefig('%i_animate.png' % (i + 1))
```
| keras-docs-zh/sources/examples/conv_lstm.md/0 | {
"file_path": "keras-docs-zh/sources/examples/conv_lstm.md",
"repo_id": "keras-docs-zh",
"token_count": 2748
} | 69 |
这是由 Quoc V. Le, Navdeep Jaitly, Geoffrey E. Hinton 撰写的 "A Simple Way to Initialize Recurrent Networks of Rectified Linear Units" 中逐像素顺序 MNIST 的 IRNN 实验的复现。
arxiv:1504.00941v2 [cs.NE] 7 Apr 2015
http://arxiv.org/pdf/1504.00941v2.pdf
优化器换成了 RMSprop,从而获得了更加稳定、平稳的改进。
900 个轮次后达到 0.93 的训练/测试精度
(这大致相当于原始论文中的 1687500 步。)
```python
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import SimpleRNN
from keras import initializers
from keras.optimizers import RMSprop
batch_size = 32
num_classes = 10
epochs = 200
hidden_units = 100
learning_rate = 1e-6
clip_norm = 1.0
# 数据,分为训练集和测试集
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], -1, 1)
x_test = x_test.reshape(x_test.shape[0], -1, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# 将类向量转换为二进制类矩阵
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('Evaluate IRNN...')
model = Sequential()
model.add(SimpleRNN(hidden_units,
kernel_initializer=initializers.RandomNormal(stddev=0.001),
recurrent_initializer=initializers.Identity(gain=1.0),
activation='relu',
input_shape=x_train.shape[1:]))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
scores = model.evaluate(x_test, y_test, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])
``` | keras-docs-zh/sources/examples/mnist_irnn.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_irnn.md",
"repo_id": "keras-docs-zh",
"token_count": 1061
} | 70 |
# 开始使用 Keras Sequential 顺序模型
顺序模型是多个网络层的线性堆叠。
你可以通过将网络层实例的列表传递给 `Sequential` 的构造器,来创建一个 `Sequential` 模型:
```python
from keras.models import Sequential
from keras.layers import Dense, Activation
model = Sequential([
Dense(32, input_shape=(784,)),
Activation('relu'),
Dense(10),
Activation('softmax'),
])
```
也可以简单地使用 `.add()` 方法将各层添加到模型中:
```python
model = Sequential()
model.add(Dense(32, input_dim=784))
model.add(Activation('relu'))
```
----
## 指定输入数据的尺寸
模型需要知道它所期望的输入的尺寸。出于这个原因,顺序模型中的第一层(且只有第一层,因为下面的层可以自动地推断尺寸)需要接收关于其输入尺寸的信息。有几种方法来做到这一点:
- 传递一个 `input_shape` 参数给第一层。它是一个表示尺寸的元组 (一个由整数或 `None` 组成的元组,其中 `None` 表示可能为任何正整数)。在 `input_shape` 中不包含数据的 batch 大小。
- 某些 2D 层,例如 `Dense`,支持通过参数 `input_dim` 指定输入尺寸,某些 3D 时序层支持 `input_dim` 和 `input_length` 参数。
- 如果你需要为你的输入指定一个固定的 batch 大小(这对 stateful RNNs 很有用),你可以传递一个 `batch_size` 参数给一个层。如果你同时将 `batch_size=32` 和 `input_shape=(6, 8)` 传递给一个层,那么每一批输入的尺寸就为 `(32, 6, 8)`(示例见下文)。
因此,下面的代码片段是等价的:
```python
model = Sequential()
model.add(Dense(32, input_shape=(784,)))
```
```python
model = Sequential()
model.add(Dense(32, input_dim=784))
```
----
## 模型编译
在训练模型之前,您需要配置学习过程,这是通过 `compile` 方法完成的。它接收三个参数:
- 优化器 optimizer。它可以是现有优化器的字符串标识符,如 `rmsprop` 或 `adagrad`,也可以是 Optimizer 类的实例。详见:[optimizers](/optimizers)。
- 损失函数 loss,模型试图最小化的目标函数。它可以是现有损失函数的字符串标识符,如 `categorical_crossentropy` 或 `mse`,也可以是一个目标函数。详见:[losses](/losses)。
- 评估标准 metrics。对于任何分类问题,你都希望将其设置为 `metrics = ['accuracy']`。评估标准可以是现有的标准的字符串标识符,也可以是自定义的评估标准函数。详见: [metrics](/metrics)。
```python
# 多分类问题
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# 二分类问题
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# 均方误差回归问题
model.compile(optimizer='rmsprop',
loss='mse')
# 自定义评估标准函数
import keras.backend as K
def mean_pred(y_true, y_pred):
return K.mean(y_pred)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy', mean_pred])
```
----
## 模型训练
Keras 模型在输入数据和标签的 Numpy 矩阵上进行训练。为了训练一个模型,你通常会使用 `fit` 函数。[文档详见此处](/models/sequential)。
```python
# 对于具有 2 个类的单输入模型(二进制分类):
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# 生成虚拟数据
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(2, size=(1000, 1))
# 训练模型,以 32 个样本为一个 batch 进行迭代
model.fit(data, labels, epochs=10, batch_size=32)
```
```python
# 对于具有 10 个类的单输入模型(多分类分类):
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# 生成虚拟数据
import numpy as np
data = np.random.random((1000, 100))
labels = np.random.randint(10, size=(1000, 1))
# 将标签转换为分类的 one-hot 编码
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
# 训练模型,以 32 个样本为一个 batch 进行迭代
model.fit(data, one_hot_labels, epochs=10, batch_size=32)
```
----
## 示例
这里有几个可以帮助你起步的例子!
在 [examples](https://github.com/keras-team/keras/tree/master/examples) 目录中,你可以找到真实数据集的示例模型:
- CIFAR10 小图片分类:具有实时数据增强的卷积神经网络 (CNN)
- IMDB 电影评论情感分类:基于词序列的 LSTM
- Reuters 新闻主题分类:多层感知器 (MLP)
- MNIST 手写数字分类:MLP & CNN
- 基于 LSTM 的字符级文本生成
...以及更多。
### 基于多层感知器 (MLP) 的 softmax 多分类:
```python
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
# 生成虚拟数据
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) 是一个具有 64 个隐藏神经元的全连接层。
# 在第一层必须指定所期望的输入数据尺寸:
# 在这里,是一个 20 维的向量。
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### 基于多层感知器的二分类:
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout
# 生成虚拟数据
x_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))
x_test = np.random.random((100, 20))
y_test = np.random.randint(2, size=(100, 1))
model = Sequential()
model.add(Dense(64, input_dim=20, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train,
epochs=20,
batch_size=128)
score = model.evaluate(x_test, y_test, batch_size=128)
```
### 类似 VGG 的卷积神经网络:
```python
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
# 生成虚拟数据
x_train = np.random.random((100, 100, 100, 3))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
x_test = np.random.random((20, 100, 100, 3))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(20, 1)), num_classes=10)
model = Sequential()
# 输入: 3 通道 100x100 像素图像 -> (100, 100, 3) 张量。
# 使用 32 个大小为 3x3 的卷积滤波器。
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(x_train, y_train, batch_size=32, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=32)
```
### 基于 LSTM 的序列分类:
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
max_features = 1024
model = Sequential()
model.add(Embedding(max_features, output_dim=256))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### 基于 1D 卷积的序列分类:
```python
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
seq_length = 64
model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(seq_length, 100)))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Conv1D(128, 3, activation='relu'))
model.add(Conv1D(128, 3, activation='relu'))
model.add(GlobalAveragePooling1D())
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, y_test, batch_size=16)
```
### 基于栈式 LSTM 的序列分类
在这个模型中,我们将 3 个 LSTM 层叠在一起,使模型能够学习更高层次的时间表示。
前两个 LSTM 返回完整的输出序列,但最后一个只返回输出序列的最后一步,从而降低了时间维度(即将输入序列转换成单个向量)。
<img src="/img/regular_stacked_lstm.png" alt="stacked LSTM" style="width: 300px;"/>
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
num_classes = 10
# 期望输入数据尺寸: (batch_size, timesteps, data_dim)
model = Sequential()
model.add(LSTM(32, return_sequences=True,
input_shape=(timesteps, data_dim))) # 返回维度为 32 的向量序列
model.add(LSTM(32, return_sequences=True)) # 返回维度为 32 的向量序列
model.add(LSTM(32)) # 返回维度为 32 的单个向量
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 生成虚拟训练数据
x_train = np.random.random((1000, timesteps, data_dim))
y_train = np.random.random((1000, num_classes))
# 生成虚拟验证数据
x_val = np.random.random((100, timesteps, data_dim))
y_val = np.random.random((100, num_classes))
model.fit(x_train, y_train,
batch_size=64, epochs=5,
validation_data=(x_val, y_val))
```
### "stateful" 渲染的的栈式 LSTM 模型
有状态 (stateful) 的循环神经网络模型中,在一个 batch 的样本处理完成后,其内部状态(记忆)会被记录并作为下一个 batch 的样本的初始状态。这允许处理更长的序列,同时保持计算复杂度的可控性。
[你可以在 FAQ 中查找更多关于 stateful RNNs 的信息。](/getting-started/faq/#how-can-i-use-stateful-rnns)
```python
from keras.models import Sequential
from keras.layers import LSTM, Dense
import numpy as np
data_dim = 16
timesteps = 8
num_classes = 10
batch_size = 32
# 期望输入数据尺寸: (batch_size, timesteps, data_dim)
# 请注意,我们必须提供完整的 batch_input_shape,因为网络是有状态的。
# 第 k 批数据的第 i 个样本是第 k-1 批数据的第 i 个样本的后续。
model = Sequential()
model.add(LSTM(32, return_sequences=True, stateful=True,
batch_input_shape=(batch_size, timesteps, data_dim)))
model.add(LSTM(32, return_sequences=True, stateful=True))
model.add(LSTM(32, stateful=True))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 生成虚拟训练数据
x_train = np.random.random((batch_size * 10, timesteps, data_dim))
y_train = np.random.random((batch_size * 10, num_classes))
# 生成虚拟验证数据
x_val = np.random.random((batch_size * 3, timesteps, data_dim))
y_val = np.random.random((batch_size * 3, num_classes))
model.fit(x_train, y_train,
batch_size=batch_size, epochs=5, shuffle=False,
validation_data=(x_val, y_val))
```
| keras-docs-zh/sources/getting-started/sequential-model-guide.md/0 | {
"file_path": "keras-docs-zh/sources/getting-started/sequential-model-guide.md",
"repo_id": "keras-docs-zh",
"token_count": 6793
} | 71 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L19)</span>
### LocallyConnected1D
```python
keras.layers.LocallyConnected1D(filters, kernel_size, strides=1, padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
1D 输入的局部连接层。
`LocallyConnected1D` 层与 `Conv1D` 层的工作方式相同,除了权值不共享外,
也就是说,在输入的每个不同部分应用不同的一组过滤器。
__示例__
```python
# 将长度为 3 的非共享权重 1D 卷积应用于
# 具有 10 个时间步长的序列,并使用 64个 输出滤波器
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# 现在 model.output_shape == (None, 8, 64)
# 在上面再添加一个新的 conv1d
model.add(LocallyConnected1D(32, 3))
# 现在 model.output_shape == (None, 6, 32)
```
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者单个整数表示的元组或列表,
指明 1D 卷积窗口的长度。
- __strides__: 一个整数,或者单个整数表示的元组或列表,
指明卷积的步长。
指定任何 `stride!=1` 与指定 `dilation_rate!=1` 两者不兼容。
- __padding__: 当前仅支持 `"valid"` (大小写敏感)。
`"same"` 可能会在未来支持。
- __data_format__: 字符串,`channels_first`, `channels_last` 之一。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
3D 张量,尺寸为: `(batch_size, steps, input_dim)`。
__输出尺寸__
3D 张量 ,尺寸为:`(batch_size, new_steps, filters)`,
`steps` 值可能因填充或步长而改变。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/local.py#L183)</span>
### LocallyConnected2D
```python
keras.layers.LocallyConnected2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
2D 输入的局部连接层。
`LocallyConnected2D` 层与 `Conv2D` 层的工作方式相同,除了权值不共享外,
也就是说,在输入的每个不同部分应用不同的一组过滤器。
__示例__
```python
# 在 32x32 图像上应用 3x3 非共享权值和64个输出过滤器的卷积
# 数据格式 `data_format="channels_last"`:
model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# 现在 model.output_shape == (None, 30, 30, 64)
# 注意这一层的参数数量为 (30*30)*(3*3*3*64) + (30*30)*64
# 在上面再加一个 3x3 非共享权值和 32 个输出滤波器的卷积:
model.add(LocallyConnected2D(32, (3, 3)))
# 现在 model.output_shape == (None, 28, 28, 32)
```
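下面的小片段(仅作示意)可以用来验证上面注释中的参数数量计算:

```python
from keras.models import Sequential
from keras.layers import LocallyConnected2D

model = Sequential()
model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
# (30*30)*(3*3*3*64) + (30*30)*64 = 1612800
print(model.count_params())
```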
__参数__
- __filters__: 整数,输出空间的维度
(即卷积中滤波器的输出数量)。
- __kernel_size__: 一个整数,或者 2 个整数表示的元组或列表,
指明 2D 卷积窗口的宽度和高度。
可以是一个整数,为所有空间维度指定相同的值。
- __strides__: 一个整数,或者 2 个整数表示的元组或列表,
指明卷积沿宽度和高度方向的步长。
可以是一个整数,为所有空间维度指定相同的值。
- __padding__: 当前仅支持 `"valid"` (大小写敏感)。
`"same"` 可能会在未来支持。
- __data_format__: 字符串,
`channels_last` (默认) 或 `channels_first` 之一。
输入中维度的顺序。
`channels_last` 对应输入尺寸为 `(batch, height, width, channels)`,
`channels_first` 对应输入尺寸为 `(batch, channels, height, width)`。
它默认为从 Keras 配置文件 `~/.keras/keras.json` 中
找到的 `image_data_format` 值。
如果你从未设置它,将使用 "channels_last"。
- __activation__: 要使用的激活函数
(详见 [activations](../activations.md))。
如果你不指定,则不使用激活函数
(即线性激活: `a(x) = x`)。
- __use_bias__: 布尔值,该层是否使用偏置向量。
- __kernel_initializer__: `kernel` 权值矩阵的初始化器
(详见 [initializers](../initializers.md))。
- __bias_initializer__: 偏置向量的初始化器
(详见 [initializers](../initializers.md))。
- __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数
(详见 [regularizer](../regularizers.md))。
- __bias_regularizer__: 运用到偏置向量的正则化函数
(详见 [regularizer](../regularizers.md))。
- __activity_regularizer__: 运用到层输出(它的激活值)的正则化函数
(详见 [regularizer](../regularizers.md))。
- __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数
(详见 [constraints](../constraints.md))。
- __bias_constraint__: 运用到偏置向量的约束函数
(详见 [constraints](../constraints.md))。
__输入尺寸__
4D 张量,尺寸为:
`(samples, channels, rows, cols)`,如果 `data_format='channels_first'`;
或者 4D 张量,尺寸为:
`(samples, rows, cols, channels)`,如果 `data_format='channels_last'`。
__输出尺寸__
4D 张量,尺寸为:
`(samples, filters, new_rows, new_cols)`,如果 `data_format='channels_first'`;
或者 4D 张量,尺寸为:
`(samples, new_rows, new_cols, filters)`,如果 `data_format='channels_last'`。
`rows` 和 `cols` 的值可能因填充而改变。
| keras-docs-zh/sources/layers/local.md/0 | {
"file_path": "keras-docs-zh/sources/layers/local.md",
"repo_id": "keras-docs-zh",
"token_count": 3641
} | 72 |
### Text Preprocessing
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/text.py#L139)</span>
### Tokenizer
```python
keras.preprocessing.text.Tokenizer(num_words=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' ',
char_level=False,
oov_token=None,
document_count=0)
```
文本标记实用类。
该类允许使用两种方法向量化一个文本语料库:
将每个文本转化为一个整数序列(每个整数都是词典中标记的索引);
或者将其转化为一个向量,其中每个标记的系数可以是二进制值、词频、TF-IDF 权重等。
__参数__
- __num_words__: 需要保留的最大词数,基于词频。只有最常出现的 `num_words-1` 词会被保留。
- __filters__: 一个字符串,其中每个元素是一个将从文本中过滤掉的字符。默认值是所有标点符号,加上制表符和换行符,减去 `'` 字符。
- __lower__: 布尔值。是否将文本转换为小写。
- __split__: 字符串。按该字符串切割文本。
- __char_level__: 如果为 True,则每个字符都将被视为标记。
- __oov_token__: 如果给出,它将被添加到 word_index 中,并用于在 `texts_to_sequences` 调用期间替换词汇表外的单词。
默认情况下,删除所有标点符号,将文本转换为空格分隔的单词序列(单词可能包含 `'` 字符)。
这些序列然后被分割成标记列表。然后它们将被索引或向量化。
`0` 是不会被分配给任何单词的保留索引。
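下面是一个最简用法示意(文本内容只是示例):

```python
from keras.preprocessing.text import Tokenizer

texts = ['The cat sat on the mat.',
         'The dog ate my homework.']

tokenizer = Tokenizer(num_words=100)
tokenizer.fit_on_texts(texts)                            # 建立词索引
print(tokenizer.word_index)                              # 例如 {'the': 1, ...}
print(tokenizer.texts_to_sequences(texts))               # 整数序列
print(tokenizer.texts_to_matrix(texts, mode='binary'))   # 二进制向量
```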
----
### hashing_trick
```python
keras.preprocessing.text.hashing_trick(text,
n,
hash_function=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' ')
```
将文本转换为固定大小散列空间中的索引序列。
__参数__
- __text__: 输入文本(字符串)。
- __n__: 散列空间维度。
- __hash_function__: 默认为 python 散列函数,可以是 'md5' 或任意接受输入字符串并返回整数的函数。注意 'hash' 不是稳定的散列函数,所以它在不同的运行中不一致,而 'md5' 是一个稳定的散列函数。
- __filters__: 要过滤的字符列表(或连接),如标点符号。默认:``!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n``,包含基本标点符号,制表符和换行符。
- __lower__: 布尔值。是否将文本转换为小写。
- __split__: 字符串。按该字符串切割文本。
__返回__
整数词索引列表(唯一性无法保证)。
`0` 是不会被分配给任何单词的保留索引。
由于哈希函数可能发生冲突,可能会将两个或更多字分配给同一索引。
碰撞的[概率](https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)与散列空间的维度和不同对象的数量有关。
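下面是一个最简用法示意(文本内容只是示例;输出为长度等于词数的整数列表,具体数值取决于散列结果):

```python
from keras.preprocessing.text import hashing_trick

text = 'The cat sat on the mat.'
# 使用稳定的 md5 散列函数,把每个词映射到 1 到 n-1 之间的索引
print(hashing_trick(text, 50, hash_function='md5'))
```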
----
### one_hot
```python
keras.preprocessing.text.one_hot(text,
n,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' ')
```
One-hot 将文本编码为大小为 n 的词汇表中的单词索引列表。
这是 `hashing_trick` 函数的一个封装,
使用 `hash` 作为散列函数;不保证单词到索引映射的唯一性。
__参数__
- __text__: 输入文本(字符串)。
- __n__: 整数。词汇表尺寸。
- __filters__: 要过滤的字符列表(或连接),如标点符号。默认:``!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n``,包含基本标点符号,制表符和换行符。
- __lower__: 布尔值。是否将文本转换为小写。
- __split__: 字符串。按该字符串切割文本。
__返回__
[1, n] 之间的整数列表。每个整数编码一个词(唯一性无法保证)。
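下面是一个最简用法示意(文本内容只是示例;具体数值取决于散列结果):

```python
from keras.preprocessing.text import one_hot

# 两个 'the' 会被映射到同一个索引
print(one_hot('The cat sat on the mat.', 50))
```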
----
### text_to_word_sequence
```python
keras.preprocessing.text.text_to_word_sequence(text,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' ')
```
将文本转换为单词(或标记)的序列。
__参数__
- __text__: 输入文本(字符串)。
- __filters__: 要过滤的字符列表(或连接),如标点符号。默认:``!"#$%&()*+,-./:;<=>?@[\]^_`{|}~\t\n``,包含基本标点符号,制表符和换行符。
- __lower__: 布尔值。是否将文本转换为小写。
- __split__: 字符串。按该字符串切割文本。
__返回__
词或标记的列表。
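下面是一个最简用法示意(文本内容只是示例):

```python
from keras.preprocessing.text import text_to_word_sequence

text = 'The quick brown fox jumped over the lazy dog.'
print(text_to_word_sequence(text))
# ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']
```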
| keras-docs-zh/sources/preprocessing/text.md/0 | {
"file_path": "keras-docs-zh/sources/preprocessing/text.md",
"repo_id": "keras-docs-zh",
"token_count": 3368
} | 73 |
# Keras.io documentation generator
This repository hosts the code used to generate the [keras.io](https://keras.io) website.
## Generating a local copy of the website
```
pip install -r requirements.txt
# Update Keras version to 3
pip install keras==3.0.2
cd scripts
python autogen.py make
python autogen.py serve
```
If you have Docker (you don't need the gpu version of Docker), you can run instead:
```
docker build -t keras-io . && docker run --rm -p 8000:8000 keras-io
```
It will take a while the first time because it needs to pull the
image and the dependencies, but subsequent runs will be much faster.
Another way of testing using Docker is via our Makefile:
```
make container-test
```
This command will build a Docker image with a documentation server and run it.
## Call for examples
Are you interested in submitting new examples for publication on keras.io?
We welcome your contributions!
Please read the information below about adding new code examples.
We are currently interested in [the following examples](https://github.com/keras-team/keras-io/blob/master/call_for_contributions.md).
## Fixing something in an existing code example
### Fixing typos
If your fix is very simple, please send out a PR simultaneously updating
the `.py`, the `.md`, and the `.ipynb` files for the example.
### More extensive fixes
For larger fixes, please send a PR that only includes the `.py` file,
so we only update the other two files once the code has been reviewed
and approved.
## Adding a new code example
Keras code examples are implemented as **tutobooks**.
A tutobook is a script available simultaneously as a notebook,
as a Python file, and as a nicely-rendered webpage.
Its source of truth (for manual editing and version control) is
its Python script form, but you can also create one by starting
from a notebook and converting it with the command `nb2py`.
Text cells are stored in markdown-formatted comment blocks.
The first line (starting with `"""`) may optionally contain a special
annotation, one of:
- `shell`: execute this block while prefixing each line with `!`.
- `invisible`: do not render this block.
The script form should start with a header with the following fields:
```
Title: (title)
Author: (could be `Authors`: as well, and may contain markdown links)
Date created: (date in yyyy/mm/dd format)
Last modified: (date in yyyy/mm/dd format)
Description: (one-line text description)
Accelerator: (could be GPU, TPU, or None)
```
To see examples of tutobooks, you can check out any `.py` file in `examples/` or `guides/`.
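For illustration, here is a minimal, hypothetical tutobook skeleton (the title, author, and content are made up) showing the header and a markdown text cell:

```
"""
Title: A minimal example
Author: Jane Doe
Date created: 2023/01/01
Last modified: 2023/01/01
Description: A one-line description of the example.
Accelerator: GPU
"""

"""
## A markdown text cell

This comment block is rendered as markdown text.
"""

import keras  # code cells are regular Python code

print(keras.__version__)
```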
### Creating a new example starting from a `ipynb` file
1. Save the `ipynb` file to local disk.
2. Convert the file to a tutobook by running:
(assuming you are in the `scripts/` directory)
```
python tutobooks.py nb2py path_to_your_nb.ipynb ../examples/vision/script_name.py
```
This will create the file `examples/vision/script_name.py`.
3. Open it, fill in the headers, and generally edit it so that it looks nice.
NOTE THAT THE CONVERSION SCRIPT MAY MAKE MISTAKES IN ITS ATTEMPTS
TO SHORTEN LINES. MAKE SURE TO PROOFREAD THE GENERATED .py IN FULL.
Or alternatively, make sure to keep your lines reasonably-sized (<90 char)
to start with, so that the script won't have to shorten them.
4. Run `python autogen.py add_example vision/script_name`. This will generate an ipynb and markdown
rendering of your example, creating files in `examples/vision/ipynb`,
`examples/vision/md`, and `examples/vision/img`. Do not modify any of these files by hand; only the
original Python script should ever be edited manually.
5. Submit a PR adding `examples/vision/script_name.py` (only the `.py`, not the generated files). Get a review and approval.
6. Once the PR is approved, add to the PR the files created by the `add_example` command. Then we will merge the PR.
### Creating a new example starting from a Python script
1. Format the script with `black`: `black script_name.py`
2. Add tutobook header
3. Put the script in the relevant subfolder of `examples/` (e.g. `examples/vision/script_name`)
4. Run `python autogen.py add_example vision/script_name`. This will generate an ipynb and markdown
rendering of your example, creating files in `examples/vision/ipynb`,
`examples/vision/md`, and `examples/vision/img`. Do not modify any of these files by hand; only the
original Python script should ever be edited manually.
5. Submit a PR adding `examples/vision/script_name.py` (only the `.py`, not the generated files). Get a review and approval.
6. Once the PR is approved, add to the PR the files created by the `add_example` command. Then we will merge the PR.
### Previewing a new example
You can locally preview what the example looks like by running:
```
cd scripts
python autogen.py add_example vision/script_name
```
(Assuming the tutobook file is `examples/vision/script_name.py`.)
NOTE THAT THIS COMMAND WILL ERROR OUT IF ANY CELLS TAKES TOO LONG
TO EXECUTE. In that case, make your code lighter/faster.
Remember that examples are meant to demonstrate workflows, not
train state-of-the-art models. They should
stay very lightweight.
Then serving the website:
```
python autogen.py make
python autogen.py serve
```
And navigating to `0.0.0.0:8000/examples`.
## Read-only autogenerated files
The contents of the following folders should **not** be modified by hand:
- `site/*`
- `sources/*`
- `templates/examples/*`
- `templates/guides/*`
- `examples/*/md/*`, `examples/*/ipynb/*`, `examples/*/img/*`
- `guides/md/*`, `guides/ipynb/*`, `guides/img/*`
## Modifiable files
These are the only files that should be edited by hand:
- `templates/*.md`, with the exception of `templates/examples/*` and `templates/guides/*`
- `examples/*/*.py`
- `guides/*.py`
- `theme/*`
- `scripts/*.py`
| keras-io/README.md/0 | {
"file_path": "keras-io/README.md",
"repo_id": "keras-io",
"token_count": 1714
} | 74 |
# MelGAN-based spectrogram inversion using feature matching
**Author:** [Darshan Deshpande](https://twitter.com/getdarshan)<br>
**Date created:** 02/09/2021<br>
**Last modified:** 15/09/2021<br>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/audio/ipynb/melgan_spectrogram_inversion.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/audio/melgan_spectrogram_inversion.py)
**Description:** Inversion of audio from mel-spectrograms using the MelGAN architecture and feature matching.
---
## Introduction
Autoregressive vocoders have been ubiquitous for a majority of the history of speech processing,
but for most of their existence they have lacked parallelism.
[MelGAN](https://arxiv.org/pdf/1910.06711v3.pdf) is a
non-autoregressive, fully convolutional vocoder architecture used for purposes ranging
from spectral inversion and speech enhancement to present-day state-of-the-art
speech synthesis when used as a decoder
with models like Tacotron2 or FastSpeech that convert text to mel spectrograms.
In this tutorial, we will have a look at the MelGAN architecture and how it can achieve
fast spectral inversion, i.e. conversion of spectrograms to audio waves. The MelGAN
implemented in this tutorial is similar to the original implementation, with the only
difference being the padding method for the convolutions: we use 'same' padding
instead of reflect padding.
---
## Importing and Defining Hyperparameters
```python
!pip install -qqq tensorflow_addons
!pip install -qqq tensorflow-io
```
```python
import tensorflow as tf
import tensorflow_io as tfio
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow_addons import layers as addon_layers
# Setting logger level to avoid input shape warnings
tf.get_logger().setLevel("ERROR")
# Defining hyperparameters
DESIRED_SAMPLES = 8192
LEARNING_RATE_GEN = 1e-5
LEARNING_RATE_DISC = 1e-6
BATCH_SIZE = 16
mse = keras.losses.MeanSquaredError()
mae = keras.losses.MeanAbsoluteError()
```
<div class="k-default-codeblock">
```
|████████████████████████████████| 1.1 MB 5.1 MB/s
|████████████████████████████████| 22.7 MB 1.7 MB/s
|████████████████████████████████| 2.1 MB 36.2 MB/s
```
</div>
---
## Loading the Dataset
This example uses the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/).
The LJSpeech dataset is primarily used for text-to-speech and consists of 13,100 discrete
speech samples taken from 7 non-fiction books, having a total length of approximately 24
hours. The MelGAN training is only concerned with the audio waves so we process only the
WAV files and ignore the audio annotations.
```python
!wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
!tar -xf /content/LJSpeech-1.1.tar.bz2
```
<div class="k-default-codeblock">
```
--2021-09-16 11:45:24-- https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
Resolving data.keithito.com (data.keithito.com)... 174.138.79.61
Connecting to data.keithito.com (data.keithito.com)|174.138.79.61|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2748572632 (2.6G) [application/octet-stream]
Saving to: ‘LJSpeech-1.1.tar.bz2’
```
</div>
<div class="k-default-codeblock">
```
LJSpeech-1.1.tar.bz 100%[===================>] 2.56G 68.3MB/s in 36s
```
</div>
<div class="k-default-codeblock">
```
2021-09-16 11:46:01 (72.2 MB/s) - ‘LJSpeech-1.1.tar.bz2’ saved [2748572632/2748572632]
```
</div>
We create a `tf.data.Dataset` to load and process the audio files on the fly.
The `preprocess()` function takes the file path as input and returns two instances of the
wave, one for input and one as the ground truth for comparison. The input wave will be
mapped to a spectrogram using the custom `MelSpec` layer as shown later in this example.
```python
# Splitting the dataset into training and testing splits
wavs = tf.io.gfile.glob("LJSpeech-1.1/wavs/*.wav")
print(f"Number of audio files: {len(wavs)}")
# Mapper function for loading the audio. This function returns two instances of the wave
def preprocess(filename):
audio = tf.audio.decode_wav(tf.io.read_file(filename), 1, DESIRED_SAMPLES).audio
return audio, audio
# Create tf.data.Dataset objects and apply preprocessing
train_dataset = tf.data.Dataset.from_tensor_slices((wavs,))
train_dataset = train_dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
```
<div class="k-default-codeblock">
```
Number of audio files: 13100
```
</div>
---
## Defining custom layers for MelGAN
The MelGAN architecture consists of 3 main modules:
1. The residual block
2. Dilated convolutional block
3. Discriminator block

Since the network takes a mel-spectrogram as input, we will create an additional custom
layer
which can convert the raw audio wave to a spectrogram on-the-fly. We use the raw audio
tensor from `train_dataset` and map it to a mel-spectrogram using the `MelSpec` layer
below.
```python
# Custom keras layer for on-the-fly audio to spectrogram conversion
class MelSpec(layers.Layer):
def __init__(
self,
frame_length=1024,
frame_step=256,
fft_length=None,
sampling_rate=22050,
num_mel_channels=80,
freq_min=125,
freq_max=7600,
**kwargs,
):
super().__init__(**kwargs)
self.frame_length = frame_length
self.frame_step = frame_step
self.fft_length = fft_length
self.sampling_rate = sampling_rate
self.num_mel_channels = num_mel_channels
self.freq_min = freq_min
self.freq_max = freq_max
# Defining mel filter. This filter will be multiplied with the STFT output
self.mel_filterbank = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=self.num_mel_channels,
num_spectrogram_bins=self.frame_length // 2 + 1,
sample_rate=self.sampling_rate,
lower_edge_hertz=self.freq_min,
upper_edge_hertz=self.freq_max,
)
def call(self, audio, training=True):
# We will only perform the transformation during training.
if training:
# Taking the Short Time Fourier Transform. Ensure that the audio is padded.
# In the paper, the STFT output is padded using the 'REFLECT' strategy.
stft = tf.signal.stft(
tf.squeeze(audio, -1),
self.frame_length,
self.frame_step,
self.fft_length,
pad_end=True,
)
# Taking the magnitude of the STFT output
magnitude = tf.abs(stft)
# Multiplying the Mel-filterbank with the magnitude and scaling it using the db scale
mel = tf.matmul(tf.square(magnitude), self.mel_filterbank)
log_mel_spec = tfio.audio.dbscale(mel, top_db=80)
return log_mel_spec
else:
return audio
def get_config(self):
config = super().get_config()
config.update(
{
"frame_length": self.frame_length,
"frame_step": self.frame_step,
"fft_length": self.fft_length,
"sampling_rate": self.sampling_rate,
"num_mel_channels": self.num_mel_channels,
"freq_min": self.freq_min,
"freq_max": self.freq_max,
}
)
return config
```
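As a quick sanity check, we can pass a dummy batch of audio through the `MelSpec` layer defined above and verify the output shape (assuming the default hyperparameters; with `frame_step=256` and `pad_end=True`, 8192 samples yield 32 frames):

```python
# Illustrative shape check for the MelSpec layer
dummy_audio = tf.random.uniform((1, DESIRED_SAMPLES, 1), minval=-1.0, maxval=1.0)
mel = MelSpec()(dummy_audio, training=True)
print(mel.shape)  # (1, 32, 80): 8192 / 256 = 32 frames, 80 mel channels
```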
The residual convolutional block extensively uses dilations and has a total receptive
field of 27 timesteps per block. The dilations must grow as a power of the `kernel_size`
to ensure reduction of hissing noise in the output. The network proposed by the paper is
as follows:

```python
# Creating the residual stack block
def residual_stack(input, filters):
"""Convolutional residual stack with weight normalization.
Args:
filters: int, determines filter size for the residual stack.
Returns:
Residual stack output.
"""
c1 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(c1)
c2 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu1)
add1 = layers.Add()([c2, input])
lrelu2 = layers.LeakyReLU()(add1)
c3 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=3, padding="same"), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(c3)
c4 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu3)
add2 = layers.Add()([add1, c4])
lrelu4 = layers.LeakyReLU()(add2)
c5 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=9, padding="same"), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(c5)
c6 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu5)
add3 = layers.Add()([c6, add2])
return add3
```
Each convolutional block uses the dilations offered by the residual stack
and upsamples the input data by the `upsampling_factor`.
```python
# Dilated convolutional block consisting of the Residual stack
def conv_block(input, conv_dim, upsampling_factor):
"""Dilated Convolutional Block with weight normalization.
Args:
conv_dim: int, determines filter size for the block.
upsampling_factor: int, scale for upsampling.
Returns:
Dilated convolution block.
"""
conv_t = addon_layers.WeightNormalization(
layers.Conv1DTranspose(conv_dim, 16, upsampling_factor, padding="same"),
data_init=False,
)(input)
lrelu1 = layers.LeakyReLU()(conv_t)
res_stack = residual_stack(lrelu1, conv_dim)
lrelu2 = layers.LeakyReLU()(res_stack)
return lrelu2
```
The discriminator block consists of convolutions and downsampling layers. This block is
essential for the implementation of the feature matching technique.
Each discriminator outputs a list of feature maps that will be compared during training
to compute the feature matching loss.
```python
def discriminator_block(input):
conv1 = addon_layers.WeightNormalization(
layers.Conv1D(16, 15, 1, "same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(conv1)
conv2 = addon_layers.WeightNormalization(
layers.Conv1D(64, 41, 4, "same", groups=4), data_init=False
)(lrelu1)
lrelu2 = layers.LeakyReLU()(conv2)
conv3 = addon_layers.WeightNormalization(
layers.Conv1D(256, 41, 4, "same", groups=16), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(conv3)
conv4 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=64), data_init=False
)(lrelu3)
lrelu4 = layers.LeakyReLU()(conv4)
conv5 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=256), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(conv5)
conv6 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 5, 1, "same"), data_init=False
)(lrelu5)
lrelu6 = layers.LeakyReLU()(conv6)
conv7 = addon_layers.WeightNormalization(
layers.Conv1D(1, 3, 1, "same"), data_init=False
)(lrelu6)
return [lrelu1, lrelu2, lrelu3, lrelu4, lrelu5, lrelu6, conv7]
```
### Create the generator
```python
def create_generator(input_shape):
inp = keras.Input(input_shape)
x = MelSpec()(inp)
x = layers.Conv1D(512, 7, padding="same")(x)
x = layers.LeakyReLU()(x)
x = conv_block(x, 256, 8)
x = conv_block(x, 128, 8)
x = conv_block(x, 64, 2)
x = conv_block(x, 32, 2)
x = addon_layers.WeightNormalization(
layers.Conv1D(1, 7, padding="same", activation="tanh")
)(x)
return keras.Model(inp, x)
# We use a dynamic input shape for the generator since the model is fully convolutional
generator = create_generator((None, 1))
generator.summary()
```
<div class="k-default-codeblock">
```
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, None, 1)] 0
__________________________________________________________________________________________________
mel_spec (MelSpec) (None, None, 80) 0 input_1[0][0]
__________________________________________________________________________________________________
conv1d (Conv1D) (None, None, 512) 287232 mel_spec[0][0]
__________________________________________________________________________________________________
leaky_re_lu (LeakyReLU) (None, None, 512) 0 conv1d[0][0]
__________________________________________________________________________________________________
weight_normalization (WeightNor (None, None, 256) 2097921 leaky_re_lu[0][0]
__________________________________________________________________________________________________
leaky_re_lu_1 (LeakyReLU) (None, None, 256) 0 weight_normalization[0][0]
__________________________________________________________________________________________________
weight_normalization_1 (WeightN (None, None, 256) 197121 leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_2 (LeakyReLU) (None, None, 256) 0 weight_normalization_1[0][0]
__________________________________________________________________________________________________
weight_normalization_2 (WeightN (None, None, 256) 197121 leaky_re_lu_2[0][0]
__________________________________________________________________________________________________
add (Add) (None, None, 256) 0 weight_normalization_2[0][0]
leaky_re_lu_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_3 (LeakyReLU) (None, None, 256) 0 add[0][0]
__________________________________________________________________________________________________
weight_normalization_3 (WeightN (None, None, 256) 197121 leaky_re_lu_3[0][0]
__________________________________________________________________________________________________
leaky_re_lu_4 (LeakyReLU) (None, None, 256) 0 weight_normalization_3[0][0]
__________________________________________________________________________________________________
weight_normalization_4 (WeightN (None, None, 256) 197121 leaky_re_lu_4[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, None, 256) 0 add[0][0]
weight_normalization_4[0][0]
__________________________________________________________________________________________________
leaky_re_lu_5 (LeakyReLU) (None, None, 256) 0 add_1[0][0]
__________________________________________________________________________________________________
weight_normalization_5 (WeightN (None, None, 256) 197121 leaky_re_lu_5[0][0]
__________________________________________________________________________________________________
leaky_re_lu_6 (LeakyReLU) (None, None, 256) 0 weight_normalization_5[0][0]
__________________________________________________________________________________________________
weight_normalization_6 (WeightN (None, None, 256) 197121 leaky_re_lu_6[0][0]
__________________________________________________________________________________________________
add_2 (Add) (None, None, 256) 0 weight_normalization_6[0][0]
add_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_7 (LeakyReLU) (None, None, 256) 0 add_2[0][0]
__________________________________________________________________________________________________
weight_normalization_7 (WeightN (None, None, 128) 524673 leaky_re_lu_7[0][0]
__________________________________________________________________________________________________
leaky_re_lu_8 (LeakyReLU) (None, None, 128) 0 weight_normalization_7[0][0]
__________________________________________________________________________________________________
weight_normalization_8 (WeightN (None, None, 128) 49409 leaky_re_lu_8[0][0]
__________________________________________________________________________________________________
leaky_re_lu_9 (LeakyReLU) (None, None, 128) 0 weight_normalization_8[0][0]
__________________________________________________________________________________________________
weight_normalization_9 (WeightN (None, None, 128) 49409 leaky_re_lu_9[0][0]
__________________________________________________________________________________________________
add_3 (Add) (None, None, 128) 0 weight_normalization_9[0][0]
leaky_re_lu_8[0][0]
__________________________________________________________________________________________________
leaky_re_lu_10 (LeakyReLU) (None, None, 128) 0 add_3[0][0]
__________________________________________________________________________________________________
weight_normalization_10 (Weight (None, None, 128) 49409 leaky_re_lu_10[0][0]
__________________________________________________________________________________________________
leaky_re_lu_11 (LeakyReLU) (None, None, 128) 0 weight_normalization_10[0][0]
__________________________________________________________________________________________________
weight_normalization_11 (Weight (None, None, 128) 49409 leaky_re_lu_11[0][0]
__________________________________________________________________________________________________
add_4 (Add) (None, None, 128) 0 add_3[0][0]
weight_normalization_11[0][0]
__________________________________________________________________________________________________
leaky_re_lu_12 (LeakyReLU) (None, None, 128) 0 add_4[0][0]
__________________________________________________________________________________________________
weight_normalization_12 (Weight (None, None, 128) 49409 leaky_re_lu_12[0][0]
__________________________________________________________________________________________________
leaky_re_lu_13 (LeakyReLU) (None, None, 128) 0 weight_normalization_12[0][0]
__________________________________________________________________________________________________
weight_normalization_13 (Weight (None, None, 128) 49409 leaky_re_lu_13[0][0]
__________________________________________________________________________________________________
add_5 (Add) (None, None, 128) 0 weight_normalization_13[0][0]
add_4[0][0]
__________________________________________________________________________________________________
leaky_re_lu_14 (LeakyReLU) (None, None, 128) 0 add_5[0][0]
__________________________________________________________________________________________________
weight_normalization_14 (Weight (None, None, 64) 131265 leaky_re_lu_14[0][0]
__________________________________________________________________________________________________
leaky_re_lu_15 (LeakyReLU) (None, None, 64) 0 weight_normalization_14[0][0]
__________________________________________________________________________________________________
weight_normalization_15 (Weight (None, None, 64) 12417 leaky_re_lu_15[0][0]
__________________________________________________________________________________________________
leaky_re_lu_16 (LeakyReLU) (None, None, 64) 0 weight_normalization_15[0][0]
__________________________________________________________________________________________________
weight_normalization_16 (Weight (None, None, 64) 12417 leaky_re_lu_16[0][0]
__________________________________________________________________________________________________
add_6 (Add) (None, None, 64) 0 weight_normalization_16[0][0]
leaky_re_lu_15[0][0]
__________________________________________________________________________________________________
leaky_re_lu_17 (LeakyReLU) (None, None, 64) 0 add_6[0][0]
__________________________________________________________________________________________________
weight_normalization_17 (Weight (None, None, 64) 12417 leaky_re_lu_17[0][0]
__________________________________________________________________________________________________
leaky_re_lu_18 (LeakyReLU) (None, None, 64) 0 weight_normalization_17[0][0]
__________________________________________________________________________________________________
weight_normalization_18 (Weight (None, None, 64) 12417 leaky_re_lu_18[0][0]
__________________________________________________________________________________________________
add_7 (Add) (None, None, 64) 0 add_6[0][0]
weight_normalization_18[0][0]
__________________________________________________________________________________________________
leaky_re_lu_19 (LeakyReLU) (None, None, 64) 0 add_7[0][0]
__________________________________________________________________________________________________
weight_normalization_19 (Weight (None, None, 64) 12417 leaky_re_lu_19[0][0]
__________________________________________________________________________________________________
leaky_re_lu_20 (LeakyReLU) (None, None, 64) 0 weight_normalization_19[0][0]
__________________________________________________________________________________________________
weight_normalization_20 (Weight (None, None, 64) 12417 leaky_re_lu_20[0][0]
__________________________________________________________________________________________________
add_8 (Add) (None, None, 64) 0 weight_normalization_20[0][0]
add_7[0][0]
__________________________________________________________________________________________________
leaky_re_lu_21 (LeakyReLU) (None, None, 64) 0 add_8[0][0]
__________________________________________________________________________________________________
weight_normalization_21 (Weight (None, None, 32) 32865 leaky_re_lu_21[0][0]
__________________________________________________________________________________________________
leaky_re_lu_22 (LeakyReLU) (None, None, 32) 0 weight_normalization_21[0][0]
__________________________________________________________________________________________________
weight_normalization_22 (Weight (None, None, 32) 3137 leaky_re_lu_22[0][0]
__________________________________________________________________________________________________
leaky_re_lu_23 (LeakyReLU) (None, None, 32) 0 weight_normalization_22[0][0]
__________________________________________________________________________________________________
weight_normalization_23 (Weight (None, None, 32) 3137 leaky_re_lu_23[0][0]
__________________________________________________________________________________________________
add_9 (Add) (None, None, 32) 0 weight_normalization_23[0][0]
leaky_re_lu_22[0][0]
__________________________________________________________________________________________________
leaky_re_lu_24 (LeakyReLU) (None, None, 32) 0 add_9[0][0]
__________________________________________________________________________________________________
weight_normalization_24 (Weight (None, None, 32) 3137 leaky_re_lu_24[0][0]
__________________________________________________________________________________________________
leaky_re_lu_25 (LeakyReLU) (None, None, 32) 0 weight_normalization_24[0][0]
__________________________________________________________________________________________________
weight_normalization_25 (Weight (None, None, 32) 3137 leaky_re_lu_25[0][0]
__________________________________________________________________________________________________
add_10 (Add) (None, None, 32) 0 add_9[0][0]
weight_normalization_25[0][0]
__________________________________________________________________________________________________
leaky_re_lu_26 (LeakyReLU) (None, None, 32) 0 add_10[0][0]
__________________________________________________________________________________________________
weight_normalization_26 (Weight (None, None, 32) 3137 leaky_re_lu_26[0][0]
__________________________________________________________________________________________________
leaky_re_lu_27 (LeakyReLU) (None, None, 32) 0 weight_normalization_26[0][0]
__________________________________________________________________________________________________
weight_normalization_27 (Weight (None, None, 32) 3137 leaky_re_lu_27[0][0]
__________________________________________________________________________________________________
add_11 (Add) (None, None, 32) 0 weight_normalization_27[0][0]
add_10[0][0]
__________________________________________________________________________________________________
leaky_re_lu_28 (LeakyReLU) (None, None, 32) 0 add_11[0][0]
__________________________________________________________________________________________________
weight_normalization_28 (Weight (None, None, 1) 452 leaky_re_lu_28[0][0]
==================================================================================================
Total params: 4,646,912
Trainable params: 4,646,658
Non-trainable params: 254
__________________________________________________________________________________________________
```
</div>
### Create the discriminator
```python
def create_discriminator(input_shape):
inp = keras.Input(input_shape)
out_map1 = discriminator_block(inp)
pool1 = layers.AveragePooling1D()(inp)
out_map2 = discriminator_block(pool1)
pool2 = layers.AveragePooling1D()(pool1)
out_map3 = discriminator_block(pool2)
return keras.Model(inp, [out_map1, out_map2, out_map3])
# We use a dynamic input shape for the discriminator
# This is done because the input shape for the generator is unknown
discriminator = create_discriminator((None, 1))
discriminator.summary()
```
<div class="k-default-codeblock">
```
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, None, 1)] 0
__________________________________________________________________________________________________
average_pooling1d (AveragePooli (None, None, 1) 0 input_2[0][0]
__________________________________________________________________________________________________
average_pooling1d_1 (AveragePoo (None, None, 1) 0 average_pooling1d[0][0]
__________________________________________________________________________________________________
weight_normalization_29 (Weight (None, None, 16) 273 input_2[0][0]
__________________________________________________________________________________________________
weight_normalization_36 (Weight (None, None, 16) 273 average_pooling1d[0][0]
__________________________________________________________________________________________________
weight_normalization_43 (Weight (None, None, 16) 273 average_pooling1d_1[0][0]
__________________________________________________________________________________________________
leaky_re_lu_29 (LeakyReLU) (None, None, 16) 0 weight_normalization_29[0][0]
__________________________________________________________________________________________________
leaky_re_lu_35 (LeakyReLU) (None, None, 16) 0 weight_normalization_36[0][0]
__________________________________________________________________________________________________
leaky_re_lu_41 (LeakyReLU) (None, None, 16) 0 weight_normalization_43[0][0]
__________________________________________________________________________________________________
weight_normalization_30 (Weight (None, None, 64) 10625 leaky_re_lu_29[0][0]
__________________________________________________________________________________________________
weight_normalization_37 (Weight (None, None, 64) 10625 leaky_re_lu_35[0][0]
__________________________________________________________________________________________________
weight_normalization_44 (Weight (None, None, 64) 10625 leaky_re_lu_41[0][0]
__________________________________________________________________________________________________
leaky_re_lu_30 (LeakyReLU) (None, None, 64) 0 weight_normalization_30[0][0]
__________________________________________________________________________________________________
leaky_re_lu_36 (LeakyReLU) (None, None, 64) 0 weight_normalization_37[0][0]
__________________________________________________________________________________________________
leaky_re_lu_42 (LeakyReLU) (None, None, 64) 0 weight_normalization_44[0][0]
__________________________________________________________________________________________________
weight_normalization_31 (Weight (None, None, 256) 42497 leaky_re_lu_30[0][0]
__________________________________________________________________________________________________
weight_normalization_38 (Weight (None, None, 256) 42497 leaky_re_lu_36[0][0]
__________________________________________________________________________________________________
weight_normalization_45 (Weight (None, None, 256) 42497 leaky_re_lu_42[0][0]
__________________________________________________________________________________________________
leaky_re_lu_31 (LeakyReLU) (None, None, 256) 0 weight_normalization_31[0][0]
__________________________________________________________________________________________________
leaky_re_lu_37 (LeakyReLU) (None, None, 256) 0 weight_normalization_38[0][0]
__________________________________________________________________________________________________
leaky_re_lu_43 (LeakyReLU) (None, None, 256) 0 weight_normalization_45[0][0]
__________________________________________________________________________________________________
weight_normalization_32 (Weight (None, None, 1024) 169985 leaky_re_lu_31[0][0]
__________________________________________________________________________________________________
weight_normalization_39 (Weight (None, None, 1024) 169985 leaky_re_lu_37[0][0]
__________________________________________________________________________________________________
weight_normalization_46 (Weight (None, None, 1024) 169985 leaky_re_lu_43[0][0]
__________________________________________________________________________________________________
leaky_re_lu_32 (LeakyReLU) (None, None, 1024) 0 weight_normalization_32[0][0]
__________________________________________________________________________________________________
leaky_re_lu_38 (LeakyReLU) (None, None, 1024) 0 weight_normalization_39[0][0]
__________________________________________________________________________________________________
leaky_re_lu_44 (LeakyReLU) (None, None, 1024) 0 weight_normalization_46[0][0]
__________________________________________________________________________________________________
weight_normalization_33 (Weight (None, None, 1024) 169985 leaky_re_lu_32[0][0]
__________________________________________________________________________________________________
weight_normalization_40 (Weight (None, None, 1024) 169985 leaky_re_lu_38[0][0]
__________________________________________________________________________________________________
weight_normalization_47 (Weight (None, None, 1024) 169985 leaky_re_lu_44[0][0]
__________________________________________________________________________________________________
leaky_re_lu_33 (LeakyReLU) (None, None, 1024) 0 weight_normalization_33[0][0]
__________________________________________________________________________________________________
leaky_re_lu_39 (LeakyReLU) (None, None, 1024) 0 weight_normalization_40[0][0]
__________________________________________________________________________________________________
leaky_re_lu_45 (LeakyReLU) (None, None, 1024) 0 weight_normalization_47[0][0]
__________________________________________________________________________________________________
weight_normalization_34 (Weight (None, None, 1024) 5244929 leaky_re_lu_33[0][0]
__________________________________________________________________________________________________
weight_normalization_41 (Weight (None, None, 1024) 5244929 leaky_re_lu_39[0][0]
__________________________________________________________________________________________________
weight_normalization_48 (Weight (None, None, 1024) 5244929 leaky_re_lu_45[0][0]
__________________________________________________________________________________________________
leaky_re_lu_34 (LeakyReLU) (None, None, 1024) 0 weight_normalization_34[0][0]
__________________________________________________________________________________________________
leaky_re_lu_40 (LeakyReLU) (None, None, 1024) 0 weight_normalization_41[0][0]
__________________________________________________________________________________________________
leaky_re_lu_46 (LeakyReLU) (None, None, 1024) 0 weight_normalization_48[0][0]
__________________________________________________________________________________________________
weight_normalization_35 (Weight (None, None, 1) 3075 leaky_re_lu_34[0][0]
__________________________________________________________________________________________________
weight_normalization_42 (Weight (None, None, 1) 3075 leaky_re_lu_40[0][0]
__________________________________________________________________________________________________
weight_normalization_49 (Weight (None, None, 1) 3075 leaky_re_lu_46[0][0]
==================================================================================================
Total params: 16,924,107
Trainable params: 16,924,086
Non-trainable params: 21
__________________________________________________________________________________________________
```
</div>
---
## Defining the loss functions
**Generator Loss**
The generator uses a combination of two losses:
1. Mean Squared Error:
This is the standard MSE adversarial loss for the generator, calculated between a tensor of
ones and the final outputs of the discriminator with _N_ layers.
<p align="center">
<img src="https://i.imgur.com/dz4JS3I.png" width=300px;></img>
</p>
2. Feature Matching Loss:
This loss involves extracting the outputs of every layer from the discriminator for both
the generated and the ground-truth audio, and comparing each pair of layer outputs _k_ using the Mean Absolute Error.
<p align="center">
<img src="https://i.imgur.com/gEpSBar.png" width=400px;></img>
</p>
**Discriminator Loss**
The discriminator compares the real-data predictions with ones and the generated
predictions with zeros, using the Mean Squared Error (as in the implementation below).
<p align="center">
<img src="https://i.imgur.com/bbEnJ3t.png" width=425px;></img>
</p>
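In case the formula images above do not render, here is a LaTeX transcription that mirrors the
implementation below, where `D_k` is the `k`-th discriminator scale, `D_k^(i)` its `i`-th
intermediate feature map, `x` the ground-truth audio, and `G(s)` the generated audio. All terms
are averaged over the discriminator scales `k` (the code averages rather than sums over layers,
which only changes a constant factor):
```
\mathcal{L}_{G}  = \mathbb{E}\big[(D_k(G(s)) - 1)^2\big]
\mathcal{L}_{FM} = \mathbb{E}\Big[\sum_i \big\lVert D_k^{(i)}(x) - D_k^{(i)}(G(s)) \big\rVert_1\Big]
\mathcal{L}_{D}  = \mathbb{E}\big[(D_k(x) - 1)^2\big] + \mathbb{E}\big[D_k(G(s))^2\big]
```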
```python
# Generator loss
def generator_loss(real_pred, fake_pred):
"""Loss function for the generator.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Loss for the generator.
"""
gen_loss = []
for i in range(len(fake_pred)):
gen_loss.append(mse(tf.ones_like(fake_pred[i][-1]), fake_pred[i][-1]))
return tf.reduce_mean(gen_loss)
def feature_matching_loss(real_pred, fake_pred):
"""Implements the feature matching loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Feature Matching Loss.
"""
fm_loss = []
for i in range(len(fake_pred)):
for j in range(len(fake_pred[i]) - 1):
fm_loss.append(mae(real_pred[i][j], fake_pred[i][j]))
return tf.reduce_mean(fm_loss)
def discriminator_loss(real_pred, fake_pred):
"""Implements the discriminator loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Discriminator Loss.
"""
real_loss, fake_loss = [], []
for i in range(len(real_pred)):
real_loss.append(mse(tf.ones_like(real_pred[i][-1]), real_pred[i][-1]))
fake_loss.append(mse(tf.zeros_like(fake_pred[i][-1]), fake_pred[i][-1]))
# Calculating the final discriminator loss after scaling
disc_loss = tf.reduce_mean(real_loss) + tf.reduce_mean(fake_loss)
return disc_loss
```
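As a quick, hypothetical smoke test (not part of the original example), note that all three
functions expect nested lists: one entry per discriminator scale, where each entry is that
scale's list of per-layer outputs with the final prediction last. A minimal sketch, assuming
`mse` and `mae` are the Keras `MeanSquaredError()` / `MeanAbsoluteError()` loss objects defined
earlier in this example:
```python
# Three discriminator scales, each with two intermediate feature maps and a
# final (batch, time, 1) prediction. The shapes here are arbitrary placeholders.
dummy_real = [
    [tf.random.normal((4, 32, 16)), tf.random.normal((4, 32, 64)), tf.random.normal((4, 32, 1))]
    for _ in range(3)
]
dummy_fake = [
    [tf.random.normal((4, 32, 16)), tf.random.normal((4, 32, 64)), tf.random.normal((4, 32, 1))]
    for _ in range(3)
]

print("generator loss:", float(generator_loss(dummy_real, dummy_fake)))
print("feature matching loss:", float(feature_matching_loss(dummy_real, dummy_fake)))
print("discriminator loss:", float(discriminator_loss(dummy_real, dummy_fake)))
```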
Next, we define the MelGAN model for training.
This `keras.Model` subclass overrides the `train_step()` method to implement the training logic.
```python
class MelGAN(keras.Model):
def __init__(self, generator, discriminator, **kwargs):
"""MelGAN trainer class
Args:
generator: keras.Model, Generator model
discriminator: keras.Model, Discriminator model
"""
super().__init__(**kwargs)
self.generator = generator
self.discriminator = discriminator
def compile(
self,
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
):
"""MelGAN compile method.
Args:
            gen_optimizer: keras.optimizer, optimizer to be used for training the generator
            disc_optimizer: keras.optimizer, optimizer to be used for training the discriminator
generator_loss: callable, loss function for generator
feature_matching_loss: callable, loss function for feature matching
discriminator_loss: callable, loss function for discriminator
"""
super().compile()
# Optimizers
self.gen_optimizer = gen_optimizer
self.disc_optimizer = disc_optimizer
# Losses
self.generator_loss = generator_loss
self.feature_matching_loss = feature_matching_loss
self.discriminator_loss = discriminator_loss
# Trackers
self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")
    def train_step(self, batch):
        x_batch_train, y_batch_train = batch
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            # Generating the audio wave
            gen_audio_wave = self.generator(x_batch_train, training=True)
            # Generating the features using the discriminator
            real_pred = self.discriminator(y_batch_train)
            fake_pred = self.discriminator(gen_audio_wave)
            # Calculating the generator losses
            gen_loss = self.generator_loss(real_pred, fake_pred)
            fm_loss = self.feature_matching_loss(real_pred, fake_pred)
            # Calculating final generator loss (adversarial loss + 10 x feature matching loss)
            gen_fm_loss = gen_loss + 10 * fm_loss
            # Calculating the discriminator losses
            disc_loss = self.discriminator_loss(real_pred, fake_pred)
        # Calculating and applying the gradients for generator and discriminator
        grads_gen = gen_tape.gradient(gen_fm_loss, self.generator.trainable_weights)
        grads_disc = disc_tape.gradient(disc_loss, self.discriminator.trainable_weights)
        self.gen_optimizer.apply_gradients(zip(grads_gen, self.generator.trainable_weights))
        self.disc_optimizer.apply_gradients(
            zip(grads_disc, self.discriminator.trainable_weights)
        )
        self.gen_loss_tracker.update_state(gen_fm_loss)
        self.disc_loss_tracker.update_state(disc_loss)
        return {
            "gen_loss": self.gen_loss_tracker.result(),
            "disc_loss": self.disc_loss_tracker.result(),
        }
```
---
## Training
The paper suggests that training with dynamic shapes takes around 400,000 steps (~500
epochs). For this example, we will run it for only a single epoch (819 steps).
Longer training (more than 300 epochs) will almost certainly provide better results.
```python
gen_optimizer = keras.optimizers.Adam(
LEARNING_RATE_GEN, beta_1=0.5, beta_2=0.9, clipnorm=1
)
disc_optimizer = keras.optimizers.Adam(
LEARNING_RATE_DISC, beta_1=0.5, beta_2=0.9, clipnorm=1
)
# Start training
generator = create_generator((None, 1))
discriminator = create_discriminator((None, 1))
mel_gan = MelGAN(generator, discriminator)
mel_gan.compile(
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
)
mel_gan.fit(
train_dataset.shuffle(200).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE), epochs=1
)
```
<div class="k-default-codeblock">
```
819/819 [==============================] - 641s 696ms/step - gen_loss: 0.9761 - disc_loss: 0.9350
<keras.callbacks.History at 0x7f8f702fe050>
```
</div>
---
## Testing the model
The trained model can now be used for real-time spectrogram-to-audio conversion in
text-to-speech pipelines.
To test how fast MelGAN inference can be, let us take a sample mel-spectrogram
and convert it. Note that the actual model pipeline will not include the `MelSpec` layer,
so this layer will be disabled during inference. The inference input will be a
mel-spectrogram processed similarly to the `MelSpec` layer configuration.
To test this, we will create a uniformly distributed random tensor to simulate the
behavior of the inference pipeline.
```python
# Sampling a random tensor to mimic a batch of 128 spectrograms of shape [50, 80]
audio_sample = tf.random.uniform([128, 50, 80])
```
Timing the inference speed for this batch of samples. Running this, you can see that the
average inference time per spectrogram ranges from 8 to 10 milliseconds on a K80 GPU, which
is quite fast.
```python
pred = generator.predict(audio_sample, batch_size=32, verbose=1)
```
<div class="k-default-codeblock">
```
4/4 [==============================] - 5s 280ms/step
```
</div>
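If you want to reproduce the per-spectrogram timing yourself, a minimal sketch (assuming the
`generator` and `audio_sample` defined above, and that the warm-up `predict()` call above has
already been run so graph tracing is excluded) could look like this:
```python
import time

# Time a single predict() pass over the 128 simulated spectrograms.
start = time.time()
generator.predict(audio_sample, batch_size=32, verbose=0)
elapsed = time.time() - start
print(f"~{elapsed / audio_sample.shape[0] * 1000:.2f} ms per spectrogram")
```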
---
## Conclusion
MelGAN is a highly effective architecture for spectral inversion, achieving a Mean
Opinion Score (MOS) of 3.61 and considerably outperforming the Griffin-Lim
algorithm, which has a MOS of just 1.57. MelGAN is also competitive with
the state-of-the-art WaveGlow and WaveNet architectures on text-to-speech and speech
enhancement tasks on the LJSpeech and VCTK datasets <sup>[1]</sup>.
This tutorial highlights:
1. The advantages of using dilated convolutions whose dilation rates grow with the filter
size (see the short receptive-field sketch after this list)
2. Implementation of a custom layer for on-the-fly conversion of audio waves to
mel-spectrograms
3. Effectiveness of using the feature matching loss function for training GAN generators.
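To illustrate point 1, here is a back-of-the-envelope sketch of how the receptive field of a
stack of stride-1 dilated `Conv1D` layers grows. The kernel sizes and dilation rates below
(kernel size 3, dilations 1, 3, 9) are only an illustrative configuration, not necessarily the
exact one used above:
```python
def receptive_field(kernel_sizes, dilation_rates):
    """Receptive field (in samples) of a stack of stride-1 1D convolutions."""
    rf = 1
    for k, d in zip(kernel_sizes, dilation_rates):
        rf += (k - 1) * d
    return rf

# Three kernel-size-3 convolutions with exponentially growing dilation rates
# cover 27 samples, versus only 7 for the same stack without dilation.
print(receptive_field([3, 3, 3], [1, 3, 9]))  # 27
print(receptive_field([3, 3, 3], [1, 1, 1]))  # 7
```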
Further reading
1. [MelGAN paper](https://arxiv.org/pdf/1910.06711v3.pdf) (Kundan Kumar et al.) to
understand the reasoning behind the architecture and training process
2. For in-depth understanding of the feature matching loss, you can refer to [Improved
Techniques for Training GANs](https://arxiv.org/pdf/1606.03498v1.pdf) (Tim Salimans et
al.).
Example available on HuggingFace
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/MelGAN-spectrogram-inversion) | [](https://huggingface.co/spaces/keras-io/MelGAN-spectrogram-inversion) |
| keras-io/examples/audio/md/melgan_spectrogram_inversion.md/0 | {
"file_path": "keras-io/examples/audio/md/melgan_spectrogram_inversion.md",
"repo_id": "keras-io",
"token_count": 17184
} | 75 |
<jupyter_start><jupyter_text>Denoising Diffusion Probabilistic Model**Author:** [A_K_Nain](https://twitter.com/A_K_Nain)**Date created:** 2022/11/30**Last modified:** 2022/12/07**Description:** Generating images of flowers with denoising diffusion probabilistic models. IntroductionGenerative modeling experienced tremendous growth in the last five years. Models likeVAEs, GANs, and flow-based models proved to be a great success in generatinghigh-quality content, especially images. Diffusion models are a new type of generativemodel that has proven to be better than previous approaches.Diffusion models are inspired by non-equilibrium thermodynamics, and they learn togenerate by denoising. Learning by denoising consists of two processes,each of which is a Markov Chain. These are:1. The forward process: In the forward process, we slowly add random noise to the datain a series of time steps `(t1, t2, ..., tn )`. Samples at the current time step aredrawn from a Gaussian distribution where the mean of the distribution is conditionedon the sample at the previous time step, and the variance of the distribution followsa fixed schedule. At the end of the forward process, the samples end up with a purenoise distribution.2. The reverse process: During the reverse process, we try to undo the added noise atevery time step. We start with the pure noise distribution (the last step of theforward process) and try to denoise the samples in the backward direction`(tn, tn-1, ..., t1)`.We implement the [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239)paper or DDPMs for short in this code example. It was the first paper demonstratingthe use of diffusion models for generating high-quality images. The authors provedthat a certain parameterization of diffusion models reveals an equivalence withdenoising score matching over multiple noise levels during training and with annealedLangevin dynamics during sampling that generates the best quality results.This paper replicates both the Markov chains (forward process and reverse process)involved in the diffusion process but for images. The forward process is fixed andgradually adds Gaussian noise to the images according to a fixed variance scheduledenoted by beta in the paper. This is what the diffusion process looks like in caseof images: (image -> noise::noise -> image)The paper describes two algorithms, one for training the model, and the other forsampling from the trained model. Training is performed by optimizing the usualvariational bound on negative log-likelihood. The objective function is furthersimplified, and the network is treated as a noise prediction network. Once optimized,we can sample from the network to generate new images from noise samples. Here is anoverview of both algorithms as presented in the paper:**Note:** DDPM is just one way of implementing a diffusion model. Also, the samplingalgorithm in the DDPM replicates the complete Markov chain. Hence, it's slow ingenerating new samples compared to other generative models like GANs. Lots of researchefforts have been made to address this issue. One such example is Denoising DiffusionImplicit Models, or DDIM for short, where the authors replaced the Markov chain with anon-Markovian process to sample faster. You can find the code example for DDIM[here](https://keras.io/examples/generative/ddim/)Implementing a DDPM model is simple. We define a model that takestwo inputs: Images and the randomly sampled time steps. 
At each training step, weperform the following operations to train our model:1. Sample random noise to be added to the inputs.2. Apply the forward process to diffuse the inputs with the sampled noise.3. Your model takes these noisy samples as inputs and outputs the noiseprediction for each time step.4. Given true noise and predicted noise, we calculate the loss values5. We then calculate the gradients and update the model weights.Given that our model knows how to denoise a noisy sample at a given time step,we can leverage this idea to generate new samples, starting from a pure noisedistribution. Setup<jupyter_code>import math
import numpy as np
import matplotlib.pyplot as plt
# Requires TensorFlow >=2.11 for the GroupNormalization layer.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>Hyperparameters<jupyter_code>batch_size = 32
num_epochs = 1 # Just for the sake of demonstration
total_timesteps = 1000
norm_groups = 8 # Number of groups used in GroupNormalization layer
learning_rate = 2e-4
img_size = 64
img_channels = 3
clip_min = -1.0
clip_max = 1.0
first_conv_channels = 64
channel_multiplier = [1, 2, 4, 8]
widths = [first_conv_channels * mult for mult in channel_multiplier]
has_attention = [False, False, True, True]
num_res_blocks = 2 # Number of residual blocks
dataset_name = "oxford_flowers102"
splits = ["train"]<jupyter_output><empty_output><jupyter_text>DatasetWe use the [Oxford Flowers 102](https://www.tensorflow.org/datasets/catalog/oxford_flowers102)dataset for generating images of flowers. In terms of preprocessing, we use centercropping for resizing the images to the desired image size, and we rescale the pixelvalues in the range `[-1.0, 1.0]`. This is in line with the range of the pixel values thatwas applied by the authors of the [DDPMs paper](https://arxiv.org/abs/2006.11239). Foraugmenting training data, we randomly flip the images left/right.<jupyter_code># Load the dataset
(ds,) = tfds.load(dataset_name, split=splits, with_info=False, shuffle_files=True)
def augment(img):
"""Flips an image left/right randomly."""
return tf.image.random_flip_left_right(img)
def resize_and_rescale(img, size):
"""Resize the image to the desired size first and then
rescale the pixel values in the range [-1.0, 1.0].
Args:
img: Image tensor
size: Desired image size for resizing
Returns:
Resized and rescaled image tensor
"""
height = tf.shape(img)[0]
width = tf.shape(img)[1]
crop_size = tf.minimum(height, width)
img = tf.image.crop_to_bounding_box(
img,
(height - crop_size) // 2,
(width - crop_size) // 2,
crop_size,
crop_size,
)
# Resize
img = tf.cast(img, dtype=tf.float32)
img = tf.image.resize(img, size=size, antialias=True)
# Rescale the pixel values
img = img / 127.5 - 1.0
img = tf.clip_by_value(img, clip_min, clip_max)
return img
def train_preprocessing(x):
img = x["image"]
img = resize_and_rescale(img, size=(img_size, img_size))
img = augment(img)
return img
train_ds = (
ds.map(train_preprocessing, num_parallel_calls=tf.data.AUTOTUNE)
.batch(batch_size, drop_remainder=True)
.shuffle(batch_size * 2)
.prefetch(tf.data.AUTOTUNE)
)<jupyter_output><empty_output><jupyter_text>Gaussian diffusion utilitiesWe define the forward process and the reverse processas a separate utility. Most of the code in this utility has been borrowedfrom the original implementation with some slight modifications.<jupyter_code>class GaussianDiffusion:
"""Gaussian diffusion utility.
Args:
beta_start: Start value of the scheduled variance
beta_end: End value of the scheduled variance
        timesteps: Number of time steps in the forward process
        clip_min: Minimum value of the clipped denoised samples
        clip_max: Maximum value of the clipped denoised samples
    """
def __init__(
self,
beta_start=1e-4,
beta_end=0.02,
timesteps=1000,
clip_min=-1.0,
clip_max=1.0,
):
self.beta_start = beta_start
self.beta_end = beta_end
self.timesteps = timesteps
self.clip_min = clip_min
self.clip_max = clip_max
# Define the linear variance schedule
self.betas = betas = np.linspace(
beta_start,
beta_end,
timesteps,
dtype=np.float64, # Using float64 for better precision
)
self.num_timesteps = int(timesteps)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])
self.betas = tf.constant(betas, dtype=tf.float32)
self.alphas_cumprod = tf.constant(alphas_cumprod, dtype=tf.float32)
self.alphas_cumprod_prev = tf.constant(alphas_cumprod_prev, dtype=tf.float32)
# Calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = tf.constant(
np.sqrt(alphas_cumprod), dtype=tf.float32
)
self.sqrt_one_minus_alphas_cumprod = tf.constant(
np.sqrt(1.0 - alphas_cumprod), dtype=tf.float32
)
self.log_one_minus_alphas_cumprod = tf.constant(
np.log(1.0 - alphas_cumprod), dtype=tf.float32
)
self.sqrt_recip_alphas_cumprod = tf.constant(
np.sqrt(1.0 / alphas_cumprod), dtype=tf.float32
)
self.sqrt_recipm1_alphas_cumprod = tf.constant(
np.sqrt(1.0 / alphas_cumprod - 1), dtype=tf.float32
)
# Calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = (
betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
)
self.posterior_variance = tf.constant(posterior_variance, dtype=tf.float32)
# Log calculation clipped because the posterior variance is 0 at the beginning
# of the diffusion chain
self.posterior_log_variance_clipped = tf.constant(
np.log(np.maximum(posterior_variance, 1e-20)), dtype=tf.float32
)
self.posterior_mean_coef1 = tf.constant(
betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod),
dtype=tf.float32,
)
self.posterior_mean_coef2 = tf.constant(
(1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod),
dtype=tf.float32,
)
def _extract(self, a, t, x_shape):
"""Extract some coefficients at specified timesteps,
        then reshape to [batch_size, 1, 1, 1] for broadcasting purposes.
Args:
a: Tensor to extract from
t: Timestep for which the coefficients are to be extracted
x_shape: Shape of the current batched samples
"""
batch_size = x_shape[0]
out = tf.gather(a, t)
return tf.reshape(out, [batch_size, 1, 1, 1])
def q_mean_variance(self, x_start, t):
"""Extracts the mean, and the variance at current timestep.
Args:
x_start: Initial sample (before the first diffusion step)
t: Current timestep
"""
x_start_shape = tf.shape(x_start)
mean = self._extract(self.sqrt_alphas_cumprod, t, x_start_shape) * x_start
variance = self._extract(1.0 - self.alphas_cumprod, t, x_start_shape)
log_variance = self._extract(
self.log_one_minus_alphas_cumprod, t, x_start_shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise):
"""Diffuse the data.
Args:
x_start: Initial sample (before the first diffusion step)
t: Current timestep
noise: Gaussian noise to be added at the current timestep
Returns:
Diffused samples at timestep `t`
"""
x_start_shape = tf.shape(x_start)
return (
self._extract(self.sqrt_alphas_cumprod, t, tf.shape(x_start)) * x_start
+ self._extract(self.sqrt_one_minus_alphas_cumprod, t, x_start_shape)
* noise
)
def predict_start_from_noise(self, x_t, t, noise):
x_t_shape = tf.shape(x_t)
return (
self._extract(self.sqrt_recip_alphas_cumprod, t, x_t_shape) * x_t
- self._extract(self.sqrt_recipm1_alphas_cumprod, t, x_t_shape) * noise
)
def q_posterior(self, x_start, x_t, t):
"""Compute the mean and variance of the diffusion
posterior q(x_{t-1} | x_t, x_0).
Args:
            x_start: Starting point (sample) for the posterior computation
x_t: Sample at timestep `t`
t: Current timestep
Returns:
Posterior mean and variance at current timestep
"""
x_t_shape = tf.shape(x_t)
posterior_mean = (
self._extract(self.posterior_mean_coef1, t, x_t_shape) * x_start
+ self._extract(self.posterior_mean_coef2, t, x_t_shape) * x_t
)
posterior_variance = self._extract(self.posterior_variance, t, x_t_shape)
posterior_log_variance_clipped = self._extract(
self.posterior_log_variance_clipped, t, x_t_shape
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, pred_noise, x, t, clip_denoised=True):
x_recon = self.predict_start_from_noise(x, t=t, noise=pred_noise)
if clip_denoised:
x_recon = tf.clip_by_value(x_recon, self.clip_min, self.clip_max)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
x_start=x_recon, x_t=x, t=t
)
return model_mean, posterior_variance, posterior_log_variance
def p_sample(self, pred_noise, x, t, clip_denoised=True):
"""Sample from the diffusion model.
Args:
pred_noise: Noise predicted by the diffusion model
x: Samples at a given timestep for which the noise was predicted
t: Current timestep
            clip_denoised (bool): Whether to clip the denoised reconstruction
                (predicted x_start) within the specified range or not.
"""
model_mean, _, model_log_variance = self.p_mean_variance(
pred_noise, x=x, t=t, clip_denoised=clip_denoised
)
noise = tf.random.normal(shape=x.shape, dtype=x.dtype)
# No noise when t == 0
nonzero_mask = tf.reshape(
1 - tf.cast(tf.equal(t, 0), tf.float32), [tf.shape(x)[0], 1, 1, 1]
)
return model_mean + nonzero_mask * tf.exp(0.5 * model_log_variance) * noise<jupyter_output><empty_output><jupyter_text>Network architectureU-Net, originally developed for semantic segmentation, is an architecture that iswidely used for implementing diffusion models but with some slight modifications:1. The network accepts two inputs: Image and time step2. Self-attention between the convolution blocks once we reach a specific resolution(16x16 in the paper)3. Group Normalization instead of weight normalizationWe implement most of the things as used in the original paper. We use the`swish` activation function throughout the network. We use the variance scalingkernel initializer.The only difference here is the number of groups used for the`GroupNormalization` layer. For the flowers dataset,we found that a value of `groups=8` produces better resultscompared to the default value of `groups=32`. Dropout is optional and should beused where chances of over fitting is high. In the paper, the authors used dropoutonly when training on CIFAR10.<jupyter_code># Kernel initializer to use
def kernel_init(scale):
scale = max(scale, 1e-10)
return keras.initializers.VarianceScaling(
scale, mode="fan_avg", distribution="uniform"
)
class AttentionBlock(layers.Layer):
"""Applies self-attention.
Args:
units: Number of units in the dense layers
groups: Number of groups to be used for GroupNormalization layer
"""
def __init__(self, units, groups=8, **kwargs):
self.units = units
self.groups = groups
super().__init__(**kwargs)
self.norm = layers.GroupNormalization(groups=groups)
self.query = layers.Dense(units, kernel_initializer=kernel_init(1.0))
self.key = layers.Dense(units, kernel_initializer=kernel_init(1.0))
self.value = layers.Dense(units, kernel_initializer=kernel_init(1.0))
self.proj = layers.Dense(units, kernel_initializer=kernel_init(0.0))
def call(self, inputs):
batch_size = tf.shape(inputs)[0]
height = tf.shape(inputs)[1]
width = tf.shape(inputs)[2]
scale = tf.cast(self.units, tf.float32) ** (-0.5)
inputs = self.norm(inputs)
q = self.query(inputs)
k = self.key(inputs)
v = self.value(inputs)
attn_score = tf.einsum("bhwc, bHWc->bhwHW", q, k) * scale
attn_score = tf.reshape(attn_score, [batch_size, height, width, height * width])
attn_score = tf.nn.softmax(attn_score, -1)
attn_score = tf.reshape(attn_score, [batch_size, height, width, height, width])
proj = tf.einsum("bhwHW,bHWc->bhwc", attn_score, v)
proj = self.proj(proj)
return inputs + proj
class TimeEmbedding(layers.Layer):
def __init__(self, dim, **kwargs):
super().__init__(**kwargs)
self.dim = dim
self.half_dim = dim // 2
self.emb = math.log(10000) / (self.half_dim - 1)
self.emb = tf.exp(tf.range(self.half_dim, dtype=tf.float32) * -self.emb)
def call(self, inputs):
inputs = tf.cast(inputs, dtype=tf.float32)
emb = inputs[:, None] * self.emb[None, :]
emb = tf.concat([tf.sin(emb), tf.cos(emb)], axis=-1)
return emb
def ResidualBlock(width, groups=8, activation_fn=keras.activations.swish):
def apply(inputs):
x, t = inputs
input_width = x.shape[3]
if input_width == width:
residual = x
else:
residual = layers.Conv2D(
width, kernel_size=1, kernel_initializer=kernel_init(1.0)
)(x)
temb = activation_fn(t)
temb = layers.Dense(width, kernel_initializer=kernel_init(1.0))(temb)[
:, None, None, :
]
x = layers.GroupNormalization(groups=groups)(x)
x = activation_fn(x)
x = layers.Conv2D(
width, kernel_size=3, padding="same", kernel_initializer=kernel_init(1.0)
)(x)
x = layers.Add()([x, temb])
x = layers.GroupNormalization(groups=groups)(x)
x = activation_fn(x)
x = layers.Conv2D(
width, kernel_size=3, padding="same", kernel_initializer=kernel_init(0.0)
)(x)
x = layers.Add()([x, residual])
return x
return apply
def DownSample(width):
def apply(x):
x = layers.Conv2D(
width,
kernel_size=3,
strides=2,
padding="same",
kernel_initializer=kernel_init(1.0),
)(x)
return x
return apply
def UpSample(width, interpolation="nearest"):
def apply(x):
x = layers.UpSampling2D(size=2, interpolation=interpolation)(x)
x = layers.Conv2D(
width, kernel_size=3, padding="same", kernel_initializer=kernel_init(1.0)
)(x)
return x
return apply
def TimeMLP(units, activation_fn=keras.activations.swish):
def apply(inputs):
temb = layers.Dense(
units, activation=activation_fn, kernel_initializer=kernel_init(1.0)
)(inputs)
temb = layers.Dense(units, kernel_initializer=kernel_init(1.0))(temb)
return temb
return apply
def build_model(
img_size,
img_channels,
widths,
has_attention,
num_res_blocks=2,
norm_groups=8,
interpolation="nearest",
activation_fn=keras.activations.swish,
):
image_input = layers.Input(
shape=(img_size, img_size, img_channels), name="image_input"
)
time_input = keras.Input(shape=(), dtype=tf.int64, name="time_input")
x = layers.Conv2D(
first_conv_channels,
kernel_size=(3, 3),
padding="same",
kernel_initializer=kernel_init(1.0),
)(image_input)
temb = TimeEmbedding(dim=first_conv_channels * 4)(time_input)
temb = TimeMLP(units=first_conv_channels * 4, activation_fn=activation_fn)(temb)
skips = [x]
# DownBlock
for i in range(len(widths)):
for _ in range(num_res_blocks):
x = ResidualBlock(
widths[i], groups=norm_groups, activation_fn=activation_fn
)([x, temb])
if has_attention[i]:
x = AttentionBlock(widths[i], groups=norm_groups)(x)
skips.append(x)
if widths[i] != widths[-1]:
x = DownSample(widths[i])(x)
skips.append(x)
# MiddleBlock
x = ResidualBlock(widths[-1], groups=norm_groups, activation_fn=activation_fn)(
[x, temb]
)
x = AttentionBlock(widths[-1], groups=norm_groups)(x)
x = ResidualBlock(widths[-1], groups=norm_groups, activation_fn=activation_fn)(
[x, temb]
)
# UpBlock
for i in reversed(range(len(widths))):
for _ in range(num_res_blocks + 1):
x = layers.Concatenate(axis=-1)([x, skips.pop()])
x = ResidualBlock(
widths[i], groups=norm_groups, activation_fn=activation_fn
)([x, temb])
if has_attention[i]:
x = AttentionBlock(widths[i], groups=norm_groups)(x)
if i != 0:
x = UpSample(widths[i], interpolation=interpolation)(x)
# End block
x = layers.GroupNormalization(groups=norm_groups)(x)
x = activation_fn(x)
x = layers.Conv2D(3, (3, 3), padding="same", kernel_initializer=kernel_init(0.0))(x)
return keras.Model([image_input, time_input], x, name="unet")<jupyter_output><empty_output><jupyter_text>TrainingWe follow the same setup for training the diffusion model as describedin the paper. We use `Adam` optimizer with a learning rate of `2e-4`.We use EMA on model parameters with a decay factor of 0.999. Wetreat our model as noise prediction network i.e. at every training step, weinput a batch of images and corresponding time steps to our UNet,and the network outputs the noise as predictions.The only difference is that we aren't using the Kernel Inception Distance (KID)or Frechet Inception Distance (FID) for evaluating the quality of generatedsamples during training. This is because both these metrics are compute heavyand are skipped for the brevity of implementation.**Note: ** We are using mean squared error as the loss function which is aligned withthe paper, and theoretically makes sense. In practice, though, it is also common touse mean absolute error or Huber loss as the loss function.<jupyter_code>class DiffusionModel(keras.Model):
def __init__(self, network, ema_network, timesteps, gdf_util, ema=0.999):
super().__init__()
self.network = network
self.ema_network = ema_network
self.timesteps = timesteps
self.gdf_util = gdf_util
self.ema = ema
def train_step(self, images):
# 1. Get the batch size
batch_size = tf.shape(images)[0]
# 2. Sample timesteps uniformly
t = tf.random.uniform(
minval=0, maxval=self.timesteps, shape=(batch_size,), dtype=tf.int64
)
with tf.GradientTape() as tape:
# 3. Sample random noise to be added to the images in the batch
noise = tf.random.normal(shape=tf.shape(images), dtype=images.dtype)
# 4. Diffuse the images with noise
images_t = self.gdf_util.q_sample(images, t, noise)
# 5. Pass the diffused images and time steps to the network
pred_noise = self.network([images_t, t], training=True)
# 6. Calculate the loss
loss = self.loss(noise, pred_noise)
# 7. Get the gradients
gradients = tape.gradient(loss, self.network.trainable_weights)
# 8. Update the weights of the network
self.optimizer.apply_gradients(zip(gradients, self.network.trainable_weights))
        # 9. Update the EMA network weights with an exponential moving average of the network weights
for weight, ema_weight in zip(self.network.weights, self.ema_network.weights):
ema_weight.assign(self.ema * ema_weight + (1 - self.ema) * weight)
# 10. Return loss values
return {"loss": loss}
def generate_images(self, num_images=16):
# 1. Randomly sample noise (starting point for reverse process)
samples = tf.random.normal(
shape=(num_images, img_size, img_size, img_channels), dtype=tf.float32
)
# 2. Sample from the model iteratively
for t in reversed(range(0, self.timesteps)):
tt = tf.cast(tf.fill(num_images, t), dtype=tf.int64)
pred_noise = self.ema_network.predict(
[samples, tt], verbose=0, batch_size=num_images
)
samples = self.gdf_util.p_sample(
pred_noise, samples, tt, clip_denoised=True
)
# 3. Return generated samples
return samples
def plot_images(
self, epoch=None, logs=None, num_rows=2, num_cols=8, figsize=(12, 5)
):
"""Utility to plot images using the diffusion model during training."""
generated_samples = self.generate_images(num_images=num_rows * num_cols)
generated_samples = (
tf.clip_by_value(generated_samples * 127.5 + 127.5, 0.0, 255.0)
.numpy()
.astype(np.uint8)
)
_, ax = plt.subplots(num_rows, num_cols, figsize=figsize)
for i, image in enumerate(generated_samples):
if num_rows == 1:
ax[i].imshow(image)
ax[i].axis("off")
else:
ax[i // num_cols, i % num_cols].imshow(image)
ax[i // num_cols, i % num_cols].axis("off")
plt.tight_layout()
plt.show()
# Build the unet model
network = build_model(
img_size=img_size,
img_channels=img_channels,
widths=widths,
has_attention=has_attention,
num_res_blocks=num_res_blocks,
norm_groups=norm_groups,
activation_fn=keras.activations.swish,
)
ema_network = build_model(
img_size=img_size,
img_channels=img_channels,
widths=widths,
has_attention=has_attention,
num_res_blocks=num_res_blocks,
norm_groups=norm_groups,
activation_fn=keras.activations.swish,
)
ema_network.set_weights(network.get_weights()) # Initially the weights are the same
# Get an instance of the Gaussian Diffusion utilities
gdf_util = GaussianDiffusion(timesteps=total_timesteps)
# Get the model
model = DiffusionModel(
network=network,
ema_network=ema_network,
gdf_util=gdf_util,
timesteps=total_timesteps,
)
# Compile the model
model.compile(
loss=keras.losses.MeanSquaredError(),
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
)
# Train the model
model.fit(
train_ds,
epochs=num_epochs,
batch_size=batch_size,
callbacks=[keras.callbacks.LambdaCallback(on_epoch_end=model.plot_images)],
)<jupyter_output><empty_output><jupyter_text>ResultsWe trained this model for 800 epochs on a V100 GPU,and each epoch took almost 8 seconds to finish. We load those weightshere, and we generate a few samples starting from pure noise.<jupyter_code>!curl -LO https://github.com/AakashKumarNain/ddpms/releases/download/v3.0.0/checkpoints.zip
!unzip -qq checkpoints.zip
# Load the model weights
model.ema_network.load_weights("checkpoints/diffusion_model_checkpoint")
# Generate and plot some samples
model.plot_images(num_rows=4, num_cols=8)<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/ddpm.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/ddpm.ipynb",
"repo_id": "keras-io",
"token_count": 11121
} | 76 |
<jupyter_start><jupyter_text>GPT text generation from scratch with KerasNLP**Author:** [Jesse Chan](https://github.com/jessechancy)**Date created:** 2022/07/25**Last modified:** 2022/07/25**Description:** Using KerasNLP to train a mini-GPT model for text generation. IntroductionIn this example, we will use KerasNLP to build a scaled down GenerativePre-Trained (GPT) model. GPT is a Transformer-based model that allows you to generatesophisticated text from a prompt.We will train the model on the [simplebooks-92](https://arxiv.org/abs/1911.12391) corpus,which is a dataset made from several novels. It is a good dataset for this example sinceit has a small vocabulary and high word frequency, which is beneficial when training amodel with few parameters.This example combines concepts from[Text generation with a miniature GPT](https://keras.io/examples/generative/text_generation_with_miniature_gpt/)with KerasNLP abstractions. We will demonstrate how KerasNLP tokenization, layers andmetrics simplify the trainingprocess, and then show how to generate output text using the KerasNLP sampling utilities.Note: If you are running this example on a Colab,make sure to enable GPU runtime for faster training.This example requires KerasNLP. You can install it via the following command:`pip install keras-nlp` Setup<jupyter_code>!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
import os
import keras_nlp
import keras
import tensorflow.data as tf_data
import tensorflow.strings as tf_strings<jupyter_output><empty_output><jupyter_text>Settings & hyperparameters<jupyter_code># Data
BATCH_SIZE = 64
MIN_STRING_LEN = 512 # Strings shorter than this will be discarded
SEQ_LEN = 128 # Length of training sequences, in tokens
# Model
EMBED_DIM = 256
FEED_FORWARD_DIM = 128
NUM_HEADS = 3
NUM_LAYERS = 2
VOCAB_SIZE = 5000 # Limits parameters in model.
# Training
EPOCHS = 5
# Inference
NUM_TOKENS_TO_GENERATE = 80<jupyter_output><empty_output><jupyter_text>Load the dataNow, let's download the dataset! The SimpleBooks dataset consists of 1,573 Gutenberg books, and hasone of the smallest vocabulary size to word-level tokens ratio. It has a vocabulary size of ~98k,a third of WikiText-103's, with around the same number of tokens (~100M). This makes it easy to fit a small model.<jupyter_code>keras.utils.get_file(
origin="https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip",
extract=True,
)
dir = os.path.expanduser("~/.keras/datasets/simplebooks/")
# Load simplebooks-92 train set and filter out short lines.
raw_train_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/train.txt")
.filter(lambda x: tf_strings.length(x) > MIN_STRING_LEN)
.batch(BATCH_SIZE)
.shuffle(buffer_size=256)
)
# Load simplebooks-92 validation set and filter out short lines.
raw_val_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/valid.txt")
.filter(lambda x: tf_strings.length(x) > MIN_STRING_LEN)
.batch(BATCH_SIZE)
)<jupyter_output><empty_output><jupyter_text>Train the tokenizerWe train the tokenizer from the training dataset for a vocabulary size of `VOCAB_SIZE`,which is a tuned hyperparameter. We want to limit the vocabulary as much as possible, aswe will see later onthat it has a large effect on the number of model parameters. We also don't want to include*too few* vocabulary terms, or there would be too many out-of-vocabulary (OOV) sub-words. Inaddition, three tokens are reserved in the vocabulary:- `"[PAD]"` for padding sequences to `SEQ_LEN`. This token has index 0 in both`reserved_tokens` and `vocab`, since `WordPieceTokenizer` (and other layers) consider`0`/`vocab[0]` as the default padding.- `"[UNK]"` for OOV sub-words, which should match the default `oov_token="[UNK]"` in`WordPieceTokenizer`.- `"[BOS]"` stands for beginning of sentence, but here technically it is a tokenrepresenting the beginning of each line of training data.<jupyter_code># Train tokenizer vocabulary
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
raw_train_ds,
vocabulary_size=VOCAB_SIZE,
lowercase=True,
reserved_tokens=["[PAD]", "[UNK]", "[BOS]"],
)<jupyter_output><empty_output><jupyter_text>Load tokenizerWe use the vocabulary data to initialize`keras_nlp.tokenizers.WordPieceTokenizer`. WordPieceTokenizer is an efficientimplementation of the WordPiece algorithm used by BERT and other models. It will strip,lower-case and do other irreversible preprocessing operations.<jupyter_code>tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab,
sequence_length=SEQ_LEN,
lowercase=True,
)<jupyter_output><empty_output><jupyter_text>Tokenize dataWe preprocess the dataset by tokenizing and splitting it into `features` and `labels`.<jupyter_code># packer adds a start token
start_packer = keras_nlp.layers.StartEndPacker(
sequence_length=SEQ_LEN,
start_value=tokenizer.token_to_id("[BOS]"),
)
def preprocess(inputs):
outputs = tokenizer(inputs)
features = start_packer(outputs)
labels = outputs
return features, labels
# Tokenize and split into train and label sequences.
train_ds = raw_train_ds.map(preprocess, num_parallel_calls=tf_data.AUTOTUNE).prefetch(
tf_data.AUTOTUNE
)
val_ds = raw_val_ds.map(preprocess, num_parallel_calls=tf_data.AUTOTUNE).prefetch(
tf_data.AUTOTUNE
)<jupyter_output><empty_output><jupyter_text>Build the modelWe create our scaled down GPT model with the following layers:- One `keras_nlp.layers.TokenAndPositionEmbedding` layer, which combines the embeddingfor the token and its position.- Multiple `keras_nlp.layers.TransformerDecoder` layers, with the default causal masking.The layer has no cross-attention when run with decoder sequence only.- One final dense linear layer<jupyter_code>inputs = keras.layers.Input(shape=(None,), dtype="int32")
# Embedding.
embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=VOCAB_SIZE,
sequence_length=SEQ_LEN,
embedding_dim=EMBED_DIM,
mask_zero=True,
)
x = embedding_layer(inputs)
# Transformer decoders.
for _ in range(NUM_LAYERS):
decoder_layer = keras_nlp.layers.TransformerDecoder(
num_heads=NUM_HEADS,
intermediate_dim=FEED_FORWARD_DIM,
)
x = decoder_layer(x) # Giving one argument only skips cross-attention.
# Output.
outputs = keras.layers.Dense(VOCAB_SIZE)(x)
model = keras.Model(inputs=inputs, outputs=outputs)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
perplexity = keras_nlp.metrics.Perplexity(from_logits=True, mask_token_id=0)
model.compile(optimizer="adam", loss=loss_fn, metrics=[perplexity])<jupyter_output><empty_output><jupyter_text>Let's take a look at our model summary - a large majority of theparameters are in the `token_and_position_embedding` and the output `dense` layer!This means that the vocabulary size (`VOCAB_SIZE`) has a large effect on the size of the model,while the number of Transformer decoder layers (`NUM_LAYERS`) doesn't affect it as much.<jupyter_code>model.summary()<jupyter_output><empty_output><jupyter_text>TrainingNow that we have our model, let's train it with the `fit()` method.<jupyter_code>model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>InferenceWith our trained model, we can test it out to gauge its performance. To do thiswe can seed our model with an input sequence starting with the `"[BOS]"` token,and progressively sample the model by making predictions for each subsequenttoken in a loop.To start lets build a prompt with the same shape as our model inputs, containingonly the `"[BOS]"` token.<jupyter_code># The "packer" layers adds the [BOS] token for us.
prompt_tokens = start_packer(tokenizer([""]))
prompt_tokens<jupyter_output><empty_output><jupyter_text>We will use the `keras_nlp.samplers` module for inference, which requires acallback function wrapping the model we just trained. This wrapper callsthe model and returns the logit predictions for the current token we aregenerating.Note: There are two pieces of more advanced functionality available whendefining your callback. The first is the ability to take in a `cache` of statescomputed in previous generation steps, which can be used to speed up generation.The second is the ability to output the final dense "hidden state" of eachgenerated token. This is used by `keras_nlp.samplers.ContrastiveSampler`, whichavoids repetition by penalizing repeated hidden states. Both are optional, andwe will ignore them for now.<jupyter_code>def next(prompt, cache, index):
logits = model(prompt)[:, index - 1, :]
# Ignore hidden states for now; only needed for contrastive search.
hidden_states = None
return logits, hidden_states, cache<jupyter_output><empty_output><jupyter_text>Creating the wrapper function is the most complex part of using these functions. Now thatit's done, let's test out the different utilities, starting with greedy search. Greedy searchWe greedily pick the most probable token at each timestep. In other words, we get theargmax of the model output.<jupyter_code>sampler = keras_nlp.samplers.GreedySampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1, # Start sampling immediately after the [BOS] token.
)
txt = tokenizer.detokenize(output_tokens)
print(f"Greedy search generated text: \n{txt}\n")<jupyter_output><empty_output><jupyter_text>As you can see, greedy search starts out making some sense, but quickly starts repeatingitself. This is a common problem with text generation that can be fixed by some of theprobabilistic text generation utilities shown later on! Beam searchAt a high-level, beam search keeps track of the `num_beams` most probable sequences ateach timestep, and predicts the best next token from all sequences. It is an improvementover greedy search since it stores more possibilities. However, it is less efficient thangreedy search since it has to compute and store multiple potential sequences.**Note:** beam search with `num_beams=1` is identical to greedy search.<jupyter_code>sampler = keras_nlp.samplers.BeamSampler(num_beams=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Beam search generated text: \n{txt}\n")<jupyter_output><empty_output><jupyter_text>Similar to greedy search, beam search quickly starts repeating itself, since it is stilla deterministic method. Random searchRandom search is our first probabilistic method. At each time step, it samples the nexttoken using the softmax probabilities provided by the model.<jupyter_code>sampler = keras_nlp.samplers.RandomSampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Random search generated text: \n{txt}\n")<jupyter_output><empty_output><jupyter_text>Voilà, no repetitions! However, with random search, we may see some nonsensical wordsappearing since any word in the vocabulary has a chance of appearing with this samplingmethod. This is fixed by our next search utility, top-k search. Top-K searchSimilar to random search, we sample the next token from the probability distributionprovided by the model. The only difference is that here, we select out the top `k` mostprobable tokens, and distribute the probability mass over them before sampling. This way,we won't be sampling from low probability tokens, and hence we would have lessnonsensical words!<jupyter_code>sampler = keras_nlp.samplers.TopKSampler(k=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")<jupyter_output><empty_output><jupyter_text>Top-P searchEven with the top-k search, there is something to improve upon. With top-k search, thenumber `k` is fixed, which means it selects the same number of tokens for any probabilitydistribution. Consider two scenarios, one where the probability mass is concentrated over2 words and another where the probability mass is evenly concentrated across 10. Shouldwe choose `k=2` or `k=10`? There is no one size that fits all `k` here.This is where top-p search comes in! Instead of choosing a `k`, we choose a probability`p` that we want the probabilities of the top tokens to sum up to. This way, we candynamically adjust the `k` based on the probability distribution. By setting `p=0.9`, if90% of the probability mass is concentrated on the top 2 tokens, we can filter out thetop 2 tokens to sample from. If instead the 90% is distributed over 10 tokens, it willsimilarly filter out the top 10 tokens to sample from.<jupyter_code>sampler = keras_nlp.samplers.TopPSampler(p=0.5)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-P search generated text: \n{txt}\n")<jupyter_output><empty_output><jupyter_text>Using callbacks for text generationWe can also wrap the utilities in a callback, which allows you to print out a predictionsequence for every epoch of the model! Here is an example of a callback for top-k search:<jupyter_code>class TopKTextGenerator(keras.callbacks.Callback):
"""A callback to generate text from a trained model using top-k."""
def __init__(self, k):
self.sampler = keras_nlp.samplers.TopKSampler(k)
def on_epoch_end(self, epoch, logs=None):
output_tokens = self.sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")
text_generation_callback = TopKTextGenerator(k=10)
# Dummy training loop to demonstrate callback.
model.fit(train_ds.take(1), verbose=2, epochs=2, callbacks=[text_generation_callback])<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/text_generation_gpt.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/text_generation_gpt.ipynb",
"repo_id": "keras-io",
"token_count": 4432
} | 77 |
"""
Title: Node Classification with Graph Neural Networks
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/05/30
Last modified: 2021/05/30
Description: Implementing a graph neural network model for predicting the topic of a paper given its citations.
Accelerator: GPU
"""
"""
## Introduction
Many datasets in various machine learning (ML) applications have structural relationships
between their entities, which can be represented as graphs. Such applications include
social and communication network analysis, traffic prediction, and fraud detection.
[Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/)
aims to build and train models for graph datasets to be used for a variety of ML tasks.
This example demonstrates a simple implementation of a [Graph Neural Network](https://arxiv.org/pdf/1901.00596.pdf)
(GNN) model. The model is used for a node classification task on the [Cora dataset](https://relational.fit.cvut.cz/dataset/CORA)
to predict the subject of a paper given its words and its citation network.
Note that **we implement a Graph Convolution layer from scratch** to provide a better
understanding of how it works. However, there are a number of specialized TensorFlow-based
libraries that provide rich GNN APIs, such as [Spectral](https://graphneural.network/),
[StellarGraph](https://stellargraph.readthedocs.io/en/stable/README.html), and
[GraphNets](https://github.com/deepmind/graph_nets).
"""
"""
## Setup
"""
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Prepare the Dataset
The Cora dataset consists of 2,708 scientific papers classified into one of seven classes.
The citation network consists of 5,429 links. Each paper has a binary word vector of size
1,433, indicating the presence of a corresponding word.
### Download the dataset
The dataset has two tab-separated files: `cora.cites` and `cora.content`.
1. The `cora.cites` includes the citation records with two columns:
`cited_paper_id` (target) and `citing_paper_id` (source).
2. The `cora.content` includes the paper content records with 1,435 columns:
`paper_id`, `subject`, and 1,433 binary features.
Let's download the dataset.
"""
zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
"""
### Process and visualize the dataset
Then we load the citations data into a Pandas DataFrame.
"""
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
print("Citations shape:", citations.shape)
"""
Now we display a sample of the `citations` DataFrame.
The `target` column includes the paper ids cited by the paper ids in the `source` column.
"""
citations.sample(frac=1).head()
"""
Now let's load the papers data into a Pandas DataFrame.
"""
column_names = ["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"]
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"),
sep="\t",
header=None,
names=column_names,
)
print("Papers shape:", papers.shape)
"""
Now we display a sample of the `papers` DataFrame. The DataFrame includes the `paper_id`
and the `subject` columns, as well as 1,433 binary columns representing whether a term exists
in the paper or not.
"""
print(papers.sample(5).T)
"""
Let's display the count of the papers in each subject.
"""
print(papers.subject.value_counts())
"""
We convert the paper ids and the subjects into zero-based indices.
"""
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
"""
Now let's visualize the citation graph. Each node in the graph represents a paper,
and the color of the node corresponds to its subject. Note that we only show a sample of
the papers in the dataset.
"""
plt.figure(figsize=(10, 10))
colors = papers["subject"].tolist()
cora_graph = nx.from_pandas_edgelist(citations.sample(n=1500))
subjects = list(papers[papers["paper_id"].isin(list(cora_graph.nodes))]["subject"])
nx.draw_spring(cora_graph, node_size=15, node_color=subjects)
"""
### Split the dataset into stratified train and test sets
"""
train_data, test_data = [], []
for _, group_data in papers.groupby("subject"):
# Select around 50% of the dataset for training.
random_selection = np.random.rand(len(group_data.index)) <= 0.5
train_data.append(group_data[random_selection])
test_data.append(group_data[~random_selection])
train_data = pd.concat(train_data).sample(frac=1)
test_data = pd.concat(test_data).sample(frac=1)
print("Train data shape:", train_data.shape)
print("Test data shape:", test_data.shape)
"""
## Implement Train and Evaluate Experiment
"""
hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
num_epochs = 300
batch_size = 256
"""
This function compiles and trains an input model using the given training data.
"""
def run_experiment(model, x_train, y_train):
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", patience=50, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
epochs=num_epochs,
batch_size=batch_size,
validation_split=0.15,
callbacks=[early_stopping],
)
return history
"""
This function displays the loss and accuracy curves of the model during training.
"""
def display_learning_curves(history):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
ax1.plot(history.history["loss"])
ax1.plot(history.history["val_loss"])
ax1.legend(["train", "test"], loc="upper right")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Loss")
ax2.plot(history.history["acc"])
ax2.plot(history.history["val_acc"])
ax2.legend(["train", "test"], loc="upper right")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Accuracy")
plt.show()
"""
## Implement Feedforward Network (FFN) Module
We will use this module in the baseline and the GNN models.
"""
def create_ffn(hidden_units, dropout_rate, name=None):
fnn_layers = []
for units in hidden_units:
fnn_layers.append(layers.BatchNormalization())
fnn_layers.append(layers.Dropout(dropout_rate))
fnn_layers.append(layers.Dense(units, activation=tf.nn.gelu))
return keras.Sequential(fnn_layers, name=name)
"""
## Build a Baseline Neural Network Model
### Prepare the data for the baseline model
"""
feature_names = list(set(papers.columns) - {"paper_id", "subject"})
num_features = len(feature_names)
num_classes = len(class_idx)
# Create train and test features as a numpy array.
x_train = train_data[feature_names].to_numpy()
x_test = test_data[feature_names].to_numpy()
# Create train and test targets as a numpy array.
y_train = train_data["subject"]
y_test = test_data["subject"]
"""
### Implement a baseline classifier
We add five FFN blocks with skip connections, so that we generate a baseline model with
roughly the same number of parameters as the GNN models to be built later.
"""
def create_baseline_model(hidden_units, num_classes, dropout_rate=0.2):
inputs = layers.Input(shape=(num_features,), name="input_features")
x = create_ffn(hidden_units, dropout_rate, name=f"ffn_block1")(inputs)
for block_idx in range(4):
# Create an FFN block.
x1 = create_ffn(hidden_units, dropout_rate, name=f"ffn_block{block_idx + 2}")(x)
# Add skip connection.
x = layers.Add(name=f"skip_connection{block_idx + 2}")([x, x1])
# Compute logits.
logits = layers.Dense(num_classes, name="logits")(x)
# Create the model.
return keras.Model(inputs=inputs, outputs=logits, name="baseline")
baseline_model = create_baseline_model(hidden_units, num_classes, dropout_rate)
baseline_model.summary()
"""
### Train the baseline classifier
"""
history = run_experiment(baseline_model, x_train, y_train)
"""
Let's plot the learning curves.
"""
display_learning_curves(history)
"""
Now we evaluate the baseline model on the test data split.
"""
_, test_accuracy = baseline_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
"""
### Examine the baseline model predictions
Let's create new data instances by randomly generating binary word vectors with respect to
the word presence probabilities.
"""
def generate_random_instances(num_instances):
token_probability = x_train.mean(axis=0)
instances = []
for _ in range(num_instances):
probabilities = np.random.uniform(size=len(token_probability))
instance = (probabilities <= token_probability).astype(int)
instances.append(instance)
return np.array(instances)
def display_class_probabilities(probabilities):
for instance_idx, probs in enumerate(probabilities):
print(f"Instance {instance_idx + 1}:")
for class_idx, prob in enumerate(probs):
print(f"- {class_values[class_idx]}: {round(prob * 100, 2)}%")
"""
Now we show the baseline model predictions given these randomly generated instances.
"""
new_instances = generate_random_instances(num_classes)
logits = baseline_model.predict(new_instances)
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
"""
## Build a Graph Neural Network Model
### Prepare the data for the graph model
Preparing and loading the graph data into the model for training is the most challenging
part of working with GNN models, and it is addressed in different ways by the specialised libraries.
In this example, we show a simple approach for preparing and using graph data that is suitable
if your dataset consists of a single graph that fits entirely in memory.
The graph data is represented by the `graph_info` tuple, which consists of the following
three elements:
1. `node_features`: This is a `[num_nodes, num_features]` NumPy array that includes the
node features. In this dataset, the nodes are the papers, and the `node_features` are the
word-presence binary vectors of each paper.
2. `edges`: This is a `[2, num_edges]` NumPy array representing a sparse
[adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix#:~:text=In%20graph%20theory%20and%20computer,with%20zeros%20on%20its%20diagonal.)
of the links between the nodes. In this example, the links are the citations between the papers.
3. `edge_weights` (optional): This is a `[num_edges]` NumPy array that includes the edge weights, which *quantify*
the relationships between nodes in the graph. In this example, there are no weights for the paper citations.
"""
# Create an edges array (sparse adjacency matrix) of shape [2, num_edges].
edges = citations[["source", "target"]].to_numpy().T
# Create an edge weights array of ones.
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features].
node_features = tf.cast(
papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.dtypes.float32
)
# Create graph info tuple with node_features, edges, and edge_weights.
graph_info = (node_features, edges, edge_weights)
print("Edges shape:", edges.shape)
print("Nodes shape:", node_features.shape)
"""
### Implement a graph convolution layer
We implement a graph convolution module as a [Keras Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer?version=nightly).
Our `GraphConvLayer` performs the following steps:
1. **Prepare**: The input node representations are processed using an FFN to produce a *message*. You can simplify
the processing by only applying a linear transformation to the representations.
2. **Aggregate**: The messages of the neighbours of each node are aggregated with
respect to the `edge_weights` using a *permutation invariant* pooling operation, such as *sum*, *mean*, or *max*,
to prepare a single aggregated message for each node. See, for example, [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum),
which is used to aggregate neighbour messages (a small demo follows below).
3. **Update**: The `node_repesentations` and `aggregated_messages`—both of shape `[num_nodes, representation_dim]`—
are combined and processed to produce the new state of the node representations (node embeddings).
If `combination_type` is `gru`, the `node_repesentations` and `aggregated_messages` are stacked to create a sequence,
then processed by a GRU layer. Otherwise, the `node_repesentations` and `aggregated_messages` are added
or concatenated, then processed using an FFN.
The technique implemented uses ideas from [Graph Convolutional Networks](https://arxiv.org/abs/1609.02907),
[GraphSage](https://arxiv.org/abs/1706.02216), [Graph Isomorphism Network](https://arxiv.org/abs/1810.00826),
[Simple Graph Networks](https://arxiv.org/abs/1902.07153), and
[Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493).
Two other key techniques that are not covered are [Graph Attention Networks](https://arxiv.org/abs/1710.10903)
and [Message Passing Neural Networks](https://arxiv.org/abs/1704.01212).
"""
def create_gru(hidden_units, dropout_rate):
inputs = keras.layers.Input(shape=(2, hidden_units[0]))
x = inputs
for units in hidden_units:
x = layers.GRU(
units=units,
activation="tanh",
recurrent_activation="sigmoid",
return_sequences=True,
dropout=dropout_rate,
return_state=False,
recurrent_dropout=dropout_rate,
)(x)
return keras.Model(inputs=inputs, outputs=x)
class GraphConvLayer(layers.Layer):
def __init__(
self,
hidden_units,
dropout_rate=0.2,
aggregation_type="mean",
combination_type="concat",
normalize=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.aggregation_type = aggregation_type
self.combination_type = combination_type
self.normalize = normalize
self.ffn_prepare = create_ffn(hidden_units, dropout_rate)
if self.combination_type == "gru":
self.update_fn = create_gru(hidden_units, dropout_rate)
else:
self.update_fn = create_ffn(hidden_units, dropout_rate)
def prepare(self, node_repesentations, weights=None):
# node_repesentations shape is [num_edges, embedding_dim].
messages = self.ffn_prepare(node_repesentations)
if weights is not None:
messages = messages * tf.expand_dims(weights, -1)
return messages
def aggregate(self, node_indices, neighbour_messages, node_repesentations):
# node_indices shape is [num_edges].
# neighbour_messages shape: [num_edges, representation_dim].
# node_repesentations shape is [num_nodes, representation_dim]
num_nodes = node_repesentations.shape[0]
if self.aggregation_type == "sum":
aggregated_message = tf.math.unsorted_segment_sum(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "mean":
aggregated_message = tf.math.unsorted_segment_mean(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "max":
aggregated_message = tf.math.unsorted_segment_max(
neighbour_messages, node_indices, num_segments=num_nodes
)
else:
raise ValueError(f"Invalid aggregation type: {self.aggregation_type}.")
return aggregated_message
def update(self, node_repesentations, aggregated_messages):
# node_repesentations shape is [num_nodes, representation_dim].
# aggregated_messages shape is [num_nodes, representation_dim].
if self.combination_type == "gru":
# Create a sequence of two elements for the GRU layer.
h = tf.stack([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "concat":
# Concatenate the node_repesentations and aggregated_messages.
h = tf.concat([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "add":
# Add node_repesentations and aggregated_messages.
h = node_repesentations + aggregated_messages
else:
raise ValueError(f"Invalid combination type: {self.combination_type}.")
# Apply the processing function.
node_embeddings = self.update_fn(h)
if self.combination_type == "gru":
node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]
if self.normalize:
node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
return node_embeddings
def call(self, inputs):
"""Process the inputs to produce the node_embeddings.
inputs: a tuple of three elements: node_repesentations, edges, edge_weights.
Returns: node_embeddings of shape [num_nodes, representation_dim].
"""
node_repesentations, edges, edge_weights = inputs
# Get node_indices (source) and neighbour_indices (target) from edges.
node_indices, neighbour_indices = edges[0], edges[1]
# neighbour_repesentations shape is [num_edges, representation_dim].
neighbour_repesentations = tf.gather(node_repesentations, neighbour_indices)
# Prepare the messages of the neighbours.
neighbour_messages = self.prepare(neighbour_repesentations, edge_weights)
# Aggregate the neighbour messages.
aggregated_messages = self.aggregate(
node_indices, neighbour_messages, node_repesentations
)
# Update the node embedding with the neighbour messages.
return self.update(node_repesentations, aggregated_messages)
"""
### Implement a graph neural network node classifier
The GNN classification model follows the [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843) approach,
as follows:
1. Apply preprocessing using an FFN to the node features to generate initial node representations.
2. Apply one or more graph convolutional layers, with skip connections, to the node representations
to produce node embeddings.
3. Apply post-processing using an FFN to the node embeddings to generate the final node embeddings.
4. Feed the node embeddings into a Softmax layer to predict the node class.
Each added graph convolutional layer captures information from a further level of neighbours.
However, adding many graph convolutional layers can cause oversmoothing, where the model
produces similar embeddings for all the nodes.
Note that the `graph_info` is passed to the constructor of the Keras model and used as a *property*
of the Keras model object, rather than as input data for training or prediction.
The model will accept a **batch** of `node_indices`, which are used to look up the
node features and neighbours from the `graph_info`.
"""
class GNNNodeClassifier(tf.keras.Model):
def __init__(
self,
graph_info,
num_classes,
hidden_units,
aggregation_type="sum",
combination_type="concat",
dropout_rate=0.2,
normalize=True,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
# Unpack graph_info to three elements: node_features, edges, and edge_weight.
node_features, edges, edge_weights = graph_info
self.node_features = node_features
self.edges = edges
self.edge_weights = edge_weights
# Set edge_weights to ones if not provided.
if self.edge_weights is None:
self.edge_weights = tf.ones(shape=edges.shape[1])
# Scale edge_weights to sum to 1.
self.edge_weights = self.edge_weights / tf.math.reduce_sum(self.edge_weights)
# Create a process layer.
self.preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess")
# Create the first GraphConv layer.
self.conv1 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv1",
)
# Create the second GraphConv layer.
self.conv2 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv2",
)
# Create a postprocess layer.
self.postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess")
# Create a compute logits layer.
self.compute_logits = layers.Dense(units=num_classes, name="logits")
def call(self, input_node_indices):
# Preprocess the node_features to produce node representations.
x = self.preprocess(self.node_features)
# Apply the first graph conv layer.
x1 = self.conv1((x, self.edges, self.edge_weights))
# Skip connection.
x = x1 + x
# Apply the second graph conv layer.
x2 = self.conv2((x, self.edges, self.edge_weights))
# Skip connection.
x = x2 + x
# Postprocess node embedding.
x = self.postprocess(x)
# Fetch node embeddings for the input node_indices.
node_embeddings = tf.gather(x, input_node_indices)
# Compute logits
return self.compute_logits(node_embeddings)
"""
Let's test instantiating and calling the GNN model.
Notice that if you provide `N` node indices, the output will be a tensor of shape `[N, num_classes]`,
regardless of the size of the graph.
"""
gnn_model = GNNNodeClassifier(
graph_info=graph_info,
num_classes=num_classes,
hidden_units=hidden_units,
dropout_rate=dropout_rate,
name="gnn_model",
)
print("GNN output shape:", gnn_model([1, 10, 100]))
gnn_model.summary()
"""
### Train the GNN model
Note that we use the standard *supervised* cross-entropy loss to train the model.
However, we can add another *self-supervised* loss term for the generated node embeddings
that encourages neighbouring nodes in the graph to have similar representations, while faraway
nodes have dissimilar representations (a sketch of one such loss follows below).
"""
x_train = train_data.paper_id.to_numpy()
history = run_experiment(gnn_model, x_train, y_train)
"""
Let's plot the learning curves.
"""
display_learning_curves(history)
"""
Now we evaluate the GNN model on the test data split.
The results may vary depending on the training sample; however, the GNN model consistently outperforms
the baseline model in terms of test accuracy.
"""
x_test = test_data.paper_id.to_numpy()
_, test_accuracy = gnn_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
"""
### Examine the GNN model predictions
Let's add the new instances as nodes to the `node_features`, and generate links
(citations) to existing nodes.
"""
# First we add the N new_instances as nodes to the graph
# by appending the new_instances to node_features.
num_nodes = node_features.shape[0]
new_node_features = np.concatenate([node_features, new_instances])
# Second we add the M edges (citations) from each new node to a set
# of existing nodes in a particular subject
new_node_indices = [i + num_nodes for i in range(num_classes)]
new_citations = []
for subject_idx, group in papers.groupby("subject"):
subject_papers = list(group.paper_id)
# Select random x papers from the specific subject.
selected_paper_indices1 = np.random.choice(subject_papers, 5)
# Select random y papers from any subject (where y < x).
selected_paper_indices2 = np.random.choice(list(papers.paper_id), 2)
# Merge the selected paper indices.
selected_paper_indices = np.concatenate(
[selected_paper_indices1, selected_paper_indices2], axis=0
)
# Create edges between a citing paper idx and the selected cited papers.
citing_paper_indx = new_node_indices[subject_idx]
for cited_paper_idx in selected_paper_indices:
new_citations.append([citing_paper_indx, cited_paper_idx])
new_citations = np.array(new_citations).T
new_edges = np.concatenate([edges, new_citations], axis=1)
"""
Now let's update the `node_features` and the `edges` in the GNN model.
"""
print("Original node_features shape:", gnn_model.node_features.shape)
print("Original edges shape:", gnn_model.edges.shape)
gnn_model.node_features = new_node_features
gnn_model.edges = new_edges
gnn_model.edge_weights = tf.ones(shape=new_edges.shape[1])
print("New node_features shape:", gnn_model.node_features.shape)
print("New edges shape:", gnn_model.edges.shape)
logits = gnn_model.predict(tf.convert_to_tensor(new_node_indices))
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
"""
Notice that the probabilities of the expected subjects
(to which several citations are added) are higher compared to the baseline model.
"""
| keras-io/examples/graph/gnn_citations.py/0 | {
"file_path": "keras-io/examples/graph/gnn_citations.py",
"repo_id": "keras-io",
"token_count": 9282
} | 78 |
# Node Classification with Graph Neural Networks
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2021/05/30<br>
**Last modified:** 2021/05/30<br>
**Description:** Implementing a graph neural network model for predicting the topic of a paper given its citations.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/graph/ipynb/gnn_citations.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/graph/gnn_citations.py)
---
## Introduction
Many datasets in various machine learning (ML) applications have structural relationships
between their entities, which can be represented as graphs. Such applications include
social and communication network analysis, traffic prediction, and fraud detection.
[Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/)
aims to build and train models for graph datasets to be used for a variety of ML tasks.
This example demonstrates a simple implementation of a [Graph Neural Network](https://arxiv.org/pdf/1901.00596.pdf)
(GNN) model. The model is used for a node prediction task on the [Cora dataset](https://relational.fit.cvut.cz/dataset/CORA)
to predict the subject of a paper given its words and citations network.
Note that **we implement a Graph Convolution Layer from scratch** to provide a better
understanding of how it works. However, there are a number of specialized TensorFlow-based
libraries that provide rich GNN APIs, such as [Spectral](https://graphneural.network/),
[StellarGraph](https://stellargraph.readthedocs.io/en/stable/README.html), and
[GraphNets](https://github.com/deepmind/graph_nets).
---
## Setup
```python
import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
```
---
## Prepare the Dataset
The Cora dataset consists of 2,708 scientific papers classified into one of seven classes.
The citation network consists of 5,429 links. Each paper has a binary word vector of size
1,433, indicating the presence of a corresponding word.
### Download the dataset
The dataset has two tab-separated files: `cora.cites` and `cora.content`.
1. The `cora.cites` file includes the citation records with two columns:
`cited_paper_id` (target) and `citing_paper_id` (source).
2. The `cora.content` file includes the paper content records with 1,435 columns:
`paper_id`, `subject`, and 1,433 binary features.
Let's download the dataset.
```python
zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
```
### Process and visualize the dataset
Then we load the citations data into a Pandas DataFrame.
```python
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
print("Citations shape:", citations.shape)
```
<div class="k-default-codeblock">
```
Citations shape: (5429, 2)
```
</div>
Now we display a sample of the `citations` DataFrame.
The `target` column includes the paper ids cited by the paper ids in the `source` column.
```python
citations.sample(frac=1).head()
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>target</th>
<th>source</th>
</tr>
</thead>
<tbody>
<tr>
<th>2581</th>
<td>28227</td>
<td>6169</td>
</tr>
<tr>
<th>1500</th>
<td>7297</td>
<td>7276</td>
</tr>
<tr>
<th>1194</th>
<td>6184</td>
<td>1105718</td>
</tr>
<tr>
<th>4221</th>
<td>139738</td>
<td>1108834</td>
</tr>
<tr>
<th>3707</th>
<td>79809</td>
<td>1153275</td>
</tr>
</tbody>
</table>
</div>
Now let's load the papers data into a Pandas DataFrame.
```python
column_names = ["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"]
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"), sep="\t", header=None, names=column_names,
)
print("Papers shape:", papers.shape)
```
<div class="k-default-codeblock">
```
Papers shape: (2708, 1435)
```
</div>
Now we display a sample of the `papers` DataFrame. The DataFrame includes the `paper_id`
and the `subject` columns, as well as 1,433 binary columns indicating whether a term exists
in the paper or not.
```python
print(papers.sample(5).T)
```
<div class="k-default-codeblock">
```
1 133 2425 \
paper_id 1061127 34355 1108389
term_0 0 0 0
term_1 0 0 0
term_2 0 0 0
term_3 0 0 0
... ... ... ...
term_1429 0 0 0
term_1430 0 0 0
term_1431 0 0 0
term_1432 0 0 0
subject Rule_Learning Neural_Networks Probabilistic_Methods
```
</div>
<div class="k-default-codeblock">
```
2103 1346
paper_id 1153942 80491
term_0 0 0
term_1 0 0
term_2 1 0
term_3 0 0
... ... ...
term_1429 0 0
term_1430 0 0
term_1431 0 0
term_1432 0 0
subject Genetic_Algorithms Neural_Networks
```
</div>
<div class="k-default-codeblock">
```
[1435 rows x 5 columns]
```
</div>
Let's display the count of the papers in each subject.
```python
print(papers.subject.value_counts())
```
<div class="k-default-codeblock">
```
Neural_Networks 818
Probabilistic_Methods 426
Genetic_Algorithms 418
Theory 351
Case_Based 298
Reinforcement_Learning 217
Rule_Learning 180
Name: subject, dtype: int64
```
</div>
We convert the paper ids and the subjects into zero-based indices.
```python
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
```
Now let's visualize the citation graph. Each node in the graph represents a paper,
and the color of the node corresponds to its subject. Note that we only show a sample of
the papers in the dataset.
```python
plt.figure(figsize=(10, 10))
colors = papers["subject"].tolist()
cora_graph = nx.from_pandas_edgelist(citations.sample(n=1500))
subjects = list(papers[papers["paper_id"].isin(list(cora_graph.nodes))]["subject"])
nx.draw_spring(cora_graph, node_size=15, node_color=subjects)
```

### Split the dataset into stratified train and test sets
```python
train_data, test_data = [], []
for _, group_data in papers.groupby("subject"):
# Select around 50% of the dataset for training.
random_selection = np.random.rand(len(group_data.index)) <= 0.5
train_data.append(group_data[random_selection])
test_data.append(group_data[~random_selection])
train_data = pd.concat(train_data).sample(frac=1)
test_data = pd.concat(test_data).sample(frac=1)
print("Train data shape:", train_data.shape)
print("Test data shape:", test_data.shape)
```
<div class="k-default-codeblock">
```
Train data shape: (1360, 1435)
Test data shape: (1348, 1435)
```
</div>
---
## Implement Train and Evaluate Experiment
```python
hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
num_epochs = 300
batch_size = 256
```
This function compiles and trains an input model using the given training data.
```python
def run_experiment(model, x_train, y_train):
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", patience=50, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
epochs=num_epochs,
batch_size=batch_size,
validation_split=0.15,
callbacks=[early_stopping],
)
return history
```
This function displays the loss and accuracy curves of the model during training.
```python
def display_learning_curves(history):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
ax1.plot(history.history["loss"])
ax1.plot(history.history["val_loss"])
ax1.legend(["train", "test"], loc="upper right")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Loss")
ax2.plot(history.history["acc"])
ax2.plot(history.history["val_acc"])
ax2.legend(["train", "test"], loc="upper right")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Accuracy")
plt.show()
```
---
## Implement Feedforward Network (FFN) Module
We will use this module in the baseline and the GNN models.
```python
def create_ffn(hidden_units, dropout_rate, name=None):
fnn_layers = []
for units in hidden_units:
fnn_layers.append(layers.BatchNormalization())
fnn_layers.append(layers.Dropout(dropout_rate))
fnn_layers.append(layers.Dense(units, activation=tf.nn.gelu))
return keras.Sequential(fnn_layers, name=name)
```
---
## Build a Baseline Neural Network Model
### Prepare the data for the baseline model
```python
feature_names = list(set(papers.columns) - {"paper_id", "subject"})
num_features = len(feature_names)
num_classes = len(class_idx)
# Create train and test features as a numpy array.
x_train = train_data[feature_names].to_numpy()
x_test = test_data[feature_names].to_numpy()
# Create train and test targets as a numpy array.
y_train = train_data["subject"]
y_test = test_data["subject"]
```
### Implement a baseline classifier
We add five FFN blocks with skip connections, so that we generate a baseline model with
roughly the same number of parameters as the GNN models to be built later.
```python
def create_baseline_model(hidden_units, num_classes, dropout_rate=0.2):
inputs = layers.Input(shape=(num_features,), name="input_features")
x = create_ffn(hidden_units, dropout_rate, name=f"ffn_block1")(inputs)
for block_idx in range(4):
# Create an FFN block.
x1 = create_ffn(hidden_units, dropout_rate, name=f"ffn_block{block_idx + 2}")(x)
# Add skip connection.
x = layers.Add(name=f"skip_connection{block_idx + 2}")([x, x1])
# Compute logits.
logits = layers.Dense(num_classes, name="logits")(x)
# Create the model.
return keras.Model(inputs=inputs, outputs=logits, name="baseline")
baseline_model = create_baseline_model(hidden_units, num_classes, dropout_rate)
baseline_model.summary()
```
<div class="k-default-codeblock">
```
Model: "baseline"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_features (InputLayer) [(None, 1433)] 0
__________________________________________________________________________________________________
ffn_block1 (Sequential) (None, 32) 52804 input_features[0][0]
__________________________________________________________________________________________________
ffn_block2 (Sequential) (None, 32) 2368 ffn_block1[0][0]
__________________________________________________________________________________________________
skip_connection2 (Add) (None, 32) 0 ffn_block1[0][0]
ffn_block2[0][0]
__________________________________________________________________________________________________
ffn_block3 (Sequential) (None, 32) 2368 skip_connection2[0][0]
__________________________________________________________________________________________________
skip_connection3 (Add) (None, 32) 0 skip_connection2[0][0]
ffn_block3[0][0]
__________________________________________________________________________________________________
ffn_block4 (Sequential) (None, 32) 2368 skip_connection3[0][0]
__________________________________________________________________________________________________
skip_connection4 (Add) (None, 32) 0 skip_connection3[0][0]
ffn_block4[0][0]
__________________________________________________________________________________________________
ffn_block5 (Sequential) (None, 32) 2368 skip_connection4[0][0]
__________________________________________________________________________________________________
skip_connection5 (Add) (None, 32) 0 skip_connection4[0][0]
ffn_block5[0][0]
__________________________________________________________________________________________________
logits (Dense) (None, 7) 231 skip_connection5[0][0]
==================================================================================================
Total params: 62,507
Trainable params: 59,065
Non-trainable params: 3,442
__________________________________________________________________________________________________
```
</div>
### Train the baseline classifier
```python
history = run_experiment(baseline_model, x_train, y_train)
```
<div class="k-default-codeblock">
```
Epoch 1/300
5/5 [==============================] - 3s 203ms/step - loss: 4.1695 - acc: 0.1660 - val_loss: 1.9008 - val_acc: 0.3186
Epoch 2/300
5/5 [==============================] - 0s 15ms/step - loss: 2.9269 - acc: 0.2630 - val_loss: 1.8906 - val_acc: 0.3235
Epoch 3/300
5/5 [==============================] - 0s 15ms/step - loss: 2.5669 - acc: 0.2424 - val_loss: 1.8713 - val_acc: 0.3186
Epoch 4/300
5/5 [==============================] - 0s 15ms/step - loss: 2.1377 - acc: 0.3147 - val_loss: 1.8687 - val_acc: 0.3529
Epoch 5/300
5/5 [==============================] - 0s 15ms/step - loss: 2.0256 - acc: 0.3297 - val_loss: 1.8285 - val_acc: 0.3235
Epoch 6/300
5/5 [==============================] - 0s 15ms/step - loss: 1.8148 - acc: 0.3495 - val_loss: 1.8000 - val_acc: 0.3235
Epoch 7/300
5/5 [==============================] - 0s 15ms/step - loss: 1.7216 - acc: 0.3883 - val_loss: 1.7771 - val_acc: 0.3333
Epoch 8/300
5/5 [==============================] - 0s 15ms/step - loss: 1.6941 - acc: 0.3910 - val_loss: 1.7528 - val_acc: 0.3284
Epoch 9/300
5/5 [==============================] - 0s 15ms/step - loss: 1.5690 - acc: 0.4358 - val_loss: 1.7128 - val_acc: 0.3333
Epoch 10/300
5/5 [==============================] - 0s 15ms/step - loss: 1.5139 - acc: 0.4367 - val_loss: 1.6650 - val_acc: 0.3676
Epoch 11/300
5/5 [==============================] - 0s 15ms/step - loss: 1.4370 - acc: 0.4930 - val_loss: 1.6145 - val_acc: 0.3775
Epoch 12/300
5/5 [==============================] - 0s 15ms/step - loss: 1.3696 - acc: 0.5109 - val_loss: 1.5787 - val_acc: 0.3873
Epoch 13/300
5/5 [==============================] - 0s 15ms/step - loss: 1.3979 - acc: 0.5341 - val_loss: 1.5564 - val_acc: 0.3922
Epoch 14/300
5/5 [==============================] - 0s 15ms/step - loss: 1.2681 - acc: 0.5599 - val_loss: 1.5547 - val_acc: 0.3922
Epoch 15/300
5/5 [==============================] - 0s 16ms/step - loss: 1.1970 - acc: 0.5807 - val_loss: 1.5735 - val_acc: 0.3873
Epoch 16/300
5/5 [==============================] - 0s 15ms/step - loss: 1.1555 - acc: 0.6032 - val_loss: 1.5131 - val_acc: 0.4216
Epoch 17/300
5/5 [==============================] - 0s 15ms/step - loss: 1.1234 - acc: 0.6130 - val_loss: 1.4385 - val_acc: 0.4608
Epoch 18/300
5/5 [==============================] - 0s 14ms/step - loss: 1.0507 - acc: 0.6306 - val_loss: 1.3929 - val_acc: 0.4804
Epoch 19/300
5/5 [==============================] - 0s 15ms/step - loss: 1.0341 - acc: 0.6393 - val_loss: 1.3628 - val_acc: 0.4902
Epoch 20/300
5/5 [==============================] - 0s 35ms/step - loss: 0.9457 - acc: 0.6693 - val_loss: 1.3383 - val_acc: 0.4902
Epoch 21/300
5/5 [==============================] - 0s 17ms/step - loss: 0.9054 - acc: 0.6756 - val_loss: 1.3365 - val_acc: 0.4951
Epoch 22/300
5/5 [==============================] - 0s 15ms/step - loss: 0.8952 - acc: 0.6854 - val_loss: 1.3228 - val_acc: 0.5049
Epoch 23/300
5/5 [==============================] - 0s 15ms/step - loss: 0.8413 - acc: 0.7217 - val_loss: 1.2924 - val_acc: 0.5294
Epoch 24/300
5/5 [==============================] - 0s 15ms/step - loss: 0.8543 - acc: 0.6998 - val_loss: 1.2379 - val_acc: 0.5490
Epoch 25/300
5/5 [==============================] - 0s 16ms/step - loss: 0.7632 - acc: 0.7376 - val_loss: 1.1516 - val_acc: 0.5833
Epoch 26/300
5/5 [==============================] - 0s 15ms/step - loss: 0.7189 - acc: 0.7496 - val_loss: 1.1296 - val_acc: 0.5931
Epoch 27/300
5/5 [==============================] - 0s 15ms/step - loss: 0.7433 - acc: 0.7482 - val_loss: 1.0937 - val_acc: 0.6127
Epoch 28/300
5/5 [==============================] - 0s 15ms/step - loss: 0.7310 - acc: 0.7440 - val_loss: 1.0950 - val_acc: 0.5980
Epoch 29/300
5/5 [==============================] - 0s 16ms/step - loss: 0.7059 - acc: 0.7654 - val_loss: 1.1343 - val_acc: 0.5882
Epoch 30/300
5/5 [==============================] - 0s 21ms/step - loss: 0.6831 - acc: 0.7645 - val_loss: 1.1938 - val_acc: 0.5686
Epoch 31/300
5/5 [==============================] - 0s 23ms/step - loss: 0.6741 - acc: 0.7788 - val_loss: 1.1281 - val_acc: 0.5931
Epoch 32/300
5/5 [==============================] - 0s 16ms/step - loss: 0.6344 - acc: 0.7753 - val_loss: 1.0870 - val_acc: 0.6029
Epoch 33/300
5/5 [==============================] - 0s 16ms/step - loss: 0.6052 - acc: 0.7876 - val_loss: 1.0947 - val_acc: 0.6127
Epoch 34/300
5/5 [==============================] - 0s 15ms/step - loss: 0.6313 - acc: 0.7908 - val_loss: 1.1186 - val_acc: 0.5882
Epoch 35/300
5/5 [==============================] - 0s 16ms/step - loss: 0.6163 - acc: 0.7955 - val_loss: 1.0899 - val_acc: 0.6176
Epoch 36/300
5/5 [==============================] - 0s 16ms/step - loss: 0.5388 - acc: 0.8203 - val_loss: 1.1222 - val_acc: 0.5882
Epoch 37/300
5/5 [==============================] - 0s 16ms/step - loss: 0.5487 - acc: 0.8080 - val_loss: 1.0205 - val_acc: 0.6127
Epoch 38/300
5/5 [==============================] - 0s 16ms/step - loss: 0.5885 - acc: 0.7903 - val_loss: 0.9268 - val_acc: 0.6569
Epoch 39/300
5/5 [==============================] - 0s 15ms/step - loss: 0.5541 - acc: 0.8025 - val_loss: 0.9367 - val_acc: 0.6471
Epoch 40/300
5/5 [==============================] - 0s 36ms/step - loss: 0.5594 - acc: 0.7935 - val_loss: 0.9688 - val_acc: 0.6275
Epoch 41/300
5/5 [==============================] - 0s 17ms/step - loss: 0.5255 - acc: 0.8169 - val_loss: 1.0076 - val_acc: 0.6324
Epoch 42/300
5/5 [==============================] - 0s 16ms/step - loss: 0.5284 - acc: 0.8180 - val_loss: 1.0106 - val_acc: 0.6373
Epoch 43/300
5/5 [==============================] - 0s 15ms/step - loss: 0.5141 - acc: 0.8188 - val_loss: 0.8842 - val_acc: 0.6912
Epoch 44/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4767 - acc: 0.8342 - val_loss: 0.8249 - val_acc: 0.7108
Epoch 45/300
5/5 [==============================] - 0s 15ms/step - loss: 0.5915 - acc: 0.8055 - val_loss: 0.8567 - val_acc: 0.6912
Epoch 46/300
5/5 [==============================] - 0s 15ms/step - loss: 0.5026 - acc: 0.8357 - val_loss: 0.9287 - val_acc: 0.6618
Epoch 47/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4859 - acc: 0.8304 - val_loss: 0.9044 - val_acc: 0.6667
Epoch 48/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4860 - acc: 0.8440 - val_loss: 0.8672 - val_acc: 0.6912
Epoch 49/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4723 - acc: 0.8358 - val_loss: 0.8717 - val_acc: 0.6863
Epoch 50/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4831 - acc: 0.8457 - val_loss: 0.8674 - val_acc: 0.6912
Epoch 51/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4873 - acc: 0.8353 - val_loss: 0.8587 - val_acc: 0.7010
Epoch 52/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4537 - acc: 0.8472 - val_loss: 0.8544 - val_acc: 0.7059
Epoch 53/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4684 - acc: 0.8425 - val_loss: 0.8423 - val_acc: 0.7206
Epoch 54/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4436 - acc: 0.8523 - val_loss: 0.8607 - val_acc: 0.6961
Epoch 55/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4589 - acc: 0.8335 - val_loss: 0.8462 - val_acc: 0.7059
Epoch 56/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4757 - acc: 0.8360 - val_loss: 0.8415 - val_acc: 0.7010
Epoch 57/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4270 - acc: 0.8593 - val_loss: 0.8094 - val_acc: 0.7255
Epoch 58/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4530 - acc: 0.8307 - val_loss: 0.8357 - val_acc: 0.7108
Epoch 59/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4370 - acc: 0.8453 - val_loss: 0.8804 - val_acc: 0.7108
Epoch 60/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4379 - acc: 0.8465 - val_loss: 0.8791 - val_acc: 0.7108
Epoch 61/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4254 - acc: 0.8615 - val_loss: 0.8355 - val_acc: 0.7059
Epoch 62/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3929 - acc: 0.8696 - val_loss: 0.8355 - val_acc: 0.7304
Epoch 63/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4039 - acc: 0.8516 - val_loss: 0.8576 - val_acc: 0.7353
Epoch 64/300
5/5 [==============================] - 0s 35ms/step - loss: 0.4220 - acc: 0.8596 - val_loss: 0.8848 - val_acc: 0.7059
Epoch 65/300
5/5 [==============================] - 0s 17ms/step - loss: 0.4091 - acc: 0.8521 - val_loss: 0.8560 - val_acc: 0.7108
Epoch 66/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4658 - acc: 0.8470 - val_loss: 0.8518 - val_acc: 0.7206
Epoch 67/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4269 - acc: 0.8437 - val_loss: 0.7878 - val_acc: 0.7255
Epoch 68/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4368 - acc: 0.8438 - val_loss: 0.7859 - val_acc: 0.7255
Epoch 69/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4113 - acc: 0.8452 - val_loss: 0.8056 - val_acc: 0.7402
Epoch 70/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4304 - acc: 0.8469 - val_loss: 0.8093 - val_acc: 0.7451
Epoch 71/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4159 - acc: 0.8585 - val_loss: 0.8090 - val_acc: 0.7451
Epoch 72/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4218 - acc: 0.8610 - val_loss: 0.8028 - val_acc: 0.7402
Epoch 73/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3632 - acc: 0.8714 - val_loss: 0.8153 - val_acc: 0.7304
Epoch 74/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3745 - acc: 0.8722 - val_loss: 0.8299 - val_acc: 0.7402
Epoch 75/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3997 - acc: 0.8680 - val_loss: 0.8445 - val_acc: 0.7255
Epoch 76/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4143 - acc: 0.8620 - val_loss: 0.8344 - val_acc: 0.7206
Epoch 77/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4006 - acc: 0.8616 - val_loss: 0.8358 - val_acc: 0.7255
Epoch 78/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4266 - acc: 0.8532 - val_loss: 0.8266 - val_acc: 0.7206
Epoch 79/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4337 - acc: 0.8523 - val_loss: 0.8181 - val_acc: 0.7206
Epoch 80/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3857 - acc: 0.8624 - val_loss: 0.8143 - val_acc: 0.7206
Epoch 81/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4146 - acc: 0.8567 - val_loss: 0.8192 - val_acc: 0.7108
Epoch 82/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3638 - acc: 0.8794 - val_loss: 0.8248 - val_acc: 0.7206
Epoch 83/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4126 - acc: 0.8678 - val_loss: 0.8565 - val_acc: 0.7255
Epoch 84/300
5/5 [==============================] - 0s 36ms/step - loss: 0.3941 - acc: 0.8530 - val_loss: 0.8624 - val_acc: 0.7206
Epoch 85/300
5/5 [==============================] - 0s 17ms/step - loss: 0.3843 - acc: 0.8786 - val_loss: 0.8389 - val_acc: 0.7255
Epoch 86/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3651 - acc: 0.8747 - val_loss: 0.8314 - val_acc: 0.7206
Epoch 87/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3911 - acc: 0.8657 - val_loss: 0.8736 - val_acc: 0.7255
Epoch 88/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3706 - acc: 0.8714 - val_loss: 0.9159 - val_acc: 0.7108
Epoch 89/300
5/5 [==============================] - 0s 15ms/step - loss: 0.4403 - acc: 0.8386 - val_loss: 0.9038 - val_acc: 0.7206
Epoch 90/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3865 - acc: 0.8668 - val_loss: 0.8733 - val_acc: 0.7206
Epoch 91/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3757 - acc: 0.8643 - val_loss: 0.8704 - val_acc: 0.7157
Epoch 92/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3828 - acc: 0.8669 - val_loss: 0.8786 - val_acc: 0.7157
Epoch 93/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3651 - acc: 0.8787 - val_loss: 0.8977 - val_acc: 0.7206
Epoch 94/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3913 - acc: 0.8614 - val_loss: 0.9415 - val_acc: 0.7206
Epoch 95/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3995 - acc: 0.8590 - val_loss: 0.9495 - val_acc: 0.7157
Epoch 96/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4228 - acc: 0.8508 - val_loss: 0.9490 - val_acc: 0.7059
Epoch 97/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3853 - acc: 0.8789 - val_loss: 0.9402 - val_acc: 0.7157
Epoch 98/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3711 - acc: 0.8812 - val_loss: 0.9283 - val_acc: 0.7206
Epoch 99/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3949 - acc: 0.8578 - val_loss: 0.9591 - val_acc: 0.7108
Epoch 100/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3563 - acc: 0.8780 - val_loss: 0.9744 - val_acc: 0.7206
Epoch 101/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3579 - acc: 0.8815 - val_loss: 0.9358 - val_acc: 0.7206
Epoch 102/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4069 - acc: 0.8698 - val_loss: 0.9245 - val_acc: 0.7157
Epoch 103/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3161 - acc: 0.8955 - val_loss: 0.9401 - val_acc: 0.7157
Epoch 104/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3346 - acc: 0.8910 - val_loss: 0.9517 - val_acc: 0.7157
Epoch 105/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4204 - acc: 0.8538 - val_loss: 0.9366 - val_acc: 0.7157
Epoch 106/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3492 - acc: 0.8821 - val_loss: 0.9424 - val_acc: 0.7353
Epoch 107/300
5/5 [==============================] - 0s 16ms/step - loss: 0.4002 - acc: 0.8604 - val_loss: 0.9842 - val_acc: 0.7157
Epoch 108/300
5/5 [==============================] - 0s 35ms/step - loss: 0.3701 - acc: 0.8736 - val_loss: 0.9999 - val_acc: 0.7010
Epoch 109/300
5/5 [==============================] - 0s 17ms/step - loss: 0.3391 - acc: 0.8866 - val_loss: 0.9768 - val_acc: 0.6961
Epoch 110/300
5/5 [==============================] - 0s 15ms/step - loss: 0.3857 - acc: 0.8739 - val_loss: 0.9953 - val_acc: 0.7255
Epoch 111/300
5/5 [==============================] - 0s 16ms/step - loss: 0.3822 - acc: 0.8731 - val_loss: 0.9817 - val_acc: 0.7255
Epoch 112/300
5/5 [==============================] - 0s 23ms/step - loss: 0.3211 - acc: 0.8887 - val_loss: 0.9781 - val_acc: 0.7108
Epoch 113/300
5/5 [==============================] - 0s 20ms/step - loss: 0.3473 - acc: 0.8715 - val_loss: 0.9927 - val_acc: 0.6912
Epoch 114/300
5/5 [==============================] - 0s 20ms/step - loss: 0.4026 - acc: 0.8621 - val_loss: 1.0002 - val_acc: 0.6863
Epoch 115/300
5/5 [==============================] - 0s 20ms/step - loss: 0.3413 - acc: 0.8837 - val_loss: 1.0031 - val_acc: 0.6912
Epoch 116/300
5/5 [==============================] - 0s 20ms/step - loss: 0.3653 - acc: 0.8765 - val_loss: 1.0065 - val_acc: 0.7010
Epoch 117/300
5/5 [==============================] - 0s 21ms/step - loss: 0.3147 - acc: 0.8974 - val_loss: 1.0206 - val_acc: 0.7059
Epoch 118/300
5/5 [==============================] - 0s 21ms/step - loss: 0.3639 - acc: 0.8783 - val_loss: 1.0206 - val_acc: 0.7010
Epoch 119/300
5/5 [==============================] - 0s 19ms/step - loss: 0.3660 - acc: 0.8696 - val_loss: 1.0260 - val_acc: 0.6912
Epoch 120/300
5/5 [==============================] - 0s 18ms/step - loss: 0.3624 - acc: 0.8708 - val_loss: 1.0619 - val_acc: 0.6814
```
</div>
Let's plot the learning curves.
```python
display_learning_curves(history)
```

Now we evaluate the baseline model on the test data split.
```python
_, test_accuracy = baseline_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
Test accuracy: 73.52%
```
</div>
### Examine the baseline model predictions
Let's create new data instances by randomly generating binary word vectors with respect to
the word presence probabilities.
```python
def generate_random_instances(num_instances):
token_probability = x_train.mean(axis=0)
instances = []
for _ in range(num_instances):
probabilities = np.random.uniform(size=len(token_probability))
instance = (probabilities <= token_probability).astype(int)
instances.append(instance)
return np.array(instances)
def display_class_probabilities(probabilities):
for instance_idx, probs in enumerate(probabilities):
print(f"Instance {instance_idx + 1}:")
for class_idx, prob in enumerate(probs):
print(f"- {class_values[class_idx]}: {round(prob * 100, 2)}%")
```
Now we show the baseline model predictions given these randomly generated instances.
```python
new_instances = generate_random_instances(num_classes)
logits = baseline_model.predict(new_instances)
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
```
<div class="k-default-codeblock">
```
Instance 1:
- Case_Based: 13.02%
- Genetic_Algorithms: 6.89%
- Neural_Networks: 23.32%
- Probabilistic_Methods: 47.89%
- Reinforcement_Learning: 2.66%
- Rule_Learning: 1.18%
- Theory: 5.03%
Instance 2:
- Case_Based: 1.64%
- Genetic_Algorithms: 59.74%
- Neural_Networks: 27.13%
- Probabilistic_Methods: 9.02%
- Reinforcement_Learning: 1.05%
- Rule_Learning: 0.12%
- Theory: 1.31%
Instance 3:
- Case_Based: 1.35%
- Genetic_Algorithms: 77.41%
- Neural_Networks: 9.56%
- Probabilistic_Methods: 7.89%
- Reinforcement_Learning: 0.42%
- Rule_Learning: 0.46%
- Theory: 2.92%
Instance 4:
- Case_Based: 0.43%
- Genetic_Algorithms: 3.87%
- Neural_Networks: 92.88%
- Probabilistic_Methods: 0.97%
- Reinforcement_Learning: 0.56%
- Rule_Learning: 0.09%
- Theory: 1.2%
Instance 5:
- Case_Based: 0.11%
- Genetic_Algorithms: 0.17%
- Neural_Networks: 10.26%
- Probabilistic_Methods: 0.5%
- Reinforcement_Learning: 0.35%
- Rule_Learning: 0.63%
- Theory: 87.97%
Instance 6:
- Case_Based: 0.98%
- Genetic_Algorithms: 23.37%
- Neural_Networks: 70.76%
- Probabilistic_Methods: 1.12%
- Reinforcement_Learning: 2.23%
- Rule_Learning: 0.21%
- Theory: 1.33%
Instance 7:
- Case_Based: 0.64%
- Genetic_Algorithms: 2.42%
- Neural_Networks: 27.19%
- Probabilistic_Methods: 14.07%
- Reinforcement_Learning: 1.62%
- Rule_Learning: 9.35%
- Theory: 44.7%
```
</div>
---
## Build a Graph Neural Network Model
### Prepare the data for the graph model
Preparing and loading the graph data into the model for training is the most challenging
part of working with GNN models, and it is addressed in different ways by the specialised libraries.
In this example, we show a simple approach for preparing and using graph data that is suitable
if your dataset consists of a single graph that fits entirely in memory.
The graph data is represented by the `graph_info` tuple, which consists of the following
three elements:
1. `node_features`: This is a `[num_nodes, num_features]` NumPy array that includes the
node features. In this dataset, the nodes are the papers, and the `node_features` are the
word-presence binary vectors of each paper.
2. `edges`: This is a `[2, num_edges]` NumPy array representing a sparse
[adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix#:~:text=In%20graph%20theory%20and%20computer,with%20zeros%20on%20its%20diagonal.)
of the links between the nodes (a tiny illustration follows below). In this example, the links are the citations between the papers.
3. `edge_weights` (optional): This is a `[num_edges]` NumPy array that includes the edge weights, which *quantify*
the relationships between nodes in the graph. In this example, there are no weights for the paper citations.
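To make the edge-list representation concrete, here is a tiny, hypothetical illustration (the three edges and the node count below are made up and unrelated to Cora) showing the same links as a `[2, num_edges]` edge list and as a dense adjacency matrix:
```python
import numpy as np

# Three hypothetical directed edges among three nodes: 0->1, 1->0, 1->2.
demo_edges = np.array([[0, 1, 1], [1, 0, 2]])  # row 0: source nodes, row 1: target nodes
demo_adjacency = np.zeros((3, 3))
demo_adjacency[demo_edges[0], demo_edges[1]] = 1.0
print(demo_adjacency)
# [[0. 1. 0.]
#  [1. 0. 1.]
#  [0. 0. 0.]]
```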
```python
# Create an edges array (sparse adjacency matrix) of shape [2, num_edges].
edges = citations[["source", "target"]].to_numpy().T
# Create an edge weights array of ones.
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features].
node_features = tf.cast(
papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.dtypes.float32
)
# Create graph info tuple with node_features, edges, and edge_weights.
graph_info = (node_features, edges, edge_weights)
print("Edges shape:", edges.shape)
print("Nodes shape:", node_features.shape)
```
<div class="k-default-codeblock">
```
Edges shape: (2, 5429)
Nodes shape: (2708, 1433)
```
</div>
### Implement a graph convolution layer
We implement a graph convolution module as a [Keras Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer?version=nightly).
Our `GraphConvLayer` performs the following steps:
1. **Prepare**: The input node representations are processed using an FFN to produce a *message*. You can simplify
the processing by only applying a linear transformation to the representations.
2. **Aggregate**: The messages of the neighbours of each node are aggregated with
respect to the `edge_weights` using a *permutation invariant* pooling operation, such as *sum*, *mean*, or *max*,
to prepare a single aggregated message for each node. See, for example, [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum),
which is used to aggregate neighbour messages.
3. **Update**: The `node_repesentations` and `aggregated_messages`—both of shape `[num_nodes, representation_dim]`—
are combined and processed to produce the new state of the node representations (node embeddings).
If `combination_type` is `gru`, the `node_repesentations` and `aggregated_messages` are stacked to create a sequence,
then processed by a GRU layer (a small shape check follows below). Otherwise, the `node_repesentations` and `aggregated_messages` are added
or concatenated, then processed using an FFN.
The technique implemented uses ideas from [Graph Convolutional Networks](https://arxiv.org/abs/1609.02907),
[GraphSage](https://arxiv.org/abs/1706.02216), [Graph Isomorphism Network](https://arxiv.org/abs/1810.00826),
[Simple Graph Networks](https://arxiv.org/abs/1902.07153), and
[Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493).
Two other key techniques that are not covered are [Graph Attention Networks](https://arxiv.org/abs/1710.10903)
and [Message Passing Neural Networks](https://arxiv.org/abs/1704.01212).
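As a quick, hedged shape check of the `gru` combination (the tensors below are random placeholders, not real node data), stacking the two inputs yields a length-2 sequence per node that the GRU can consume:
```python
import tensorflow as tf

num_nodes, representation_dim = 4, 32
demo_nodes = tf.random.normal(shape=(num_nodes, representation_dim))     # stand-in for node_repesentations
demo_messages = tf.random.normal(shape=(num_nodes, representation_dim))  # stand-in for aggregated_messages
demo_sequence = tf.stack([demo_nodes, demo_messages], axis=1)
print(demo_sequence.shape)  # (4, 2, 32): [num_nodes, sequence_length, representation_dim]
```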
```python
def create_gru(hidden_units, dropout_rate):
inputs = keras.layers.Input(shape=(2, hidden_units[0]))
x = inputs
for units in hidden_units:
x = layers.GRU(
units=units,
activation="tanh",
recurrent_activation="sigmoid",
return_sequences=True,
dropout=dropout_rate,
return_state=False,
recurrent_dropout=dropout_rate,
)(x)
return keras.Model(inputs=inputs, outputs=x)
class GraphConvLayer(layers.Layer):
def __init__(
self,
hidden_units,
dropout_rate=0.2,
aggregation_type="mean",
combination_type="concat",
normalize=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.aggregation_type = aggregation_type
self.combination_type = combination_type
self.normalize = normalize
self.ffn_prepare = create_ffn(hidden_units, dropout_rate)
if self.combination_type == "gru":
self.update_fn = create_gru(hidden_units, dropout_rate)
else:
self.update_fn = create_ffn(hidden_units, dropout_rate)
def prepare(self, node_repesentations, weights=None):
# node_repesentations shape is [num_edges, embedding_dim].
messages = self.ffn_prepare(node_repesentations)
if weights is not None:
messages = messages * tf.expand_dims(weights, -1)
return messages
def aggregate(self, node_indices, neighbour_messages, node_repesentations):
# node_indices shape is [num_edges].
# neighbour_messages shape: [num_edges, representation_dim].
# node_repesentations shape is [num_nodes, representation_dim]
num_nodes = node_repesentations.shape[0]
if self.aggregation_type == "sum":
aggregated_message = tf.math.unsorted_segment_sum(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "mean":
aggregated_message = tf.math.unsorted_segment_mean(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "max":
aggregated_message = tf.math.unsorted_segment_max(
neighbour_messages, node_indices, num_segments=num_nodes
)
else:
raise ValueError(f"Invalid aggregation type: {self.aggregation_type}.")
return aggregated_message
def update(self, node_repesentations, aggregated_messages):
# node_repesentations shape is [num_nodes, representation_dim].
# aggregated_messages shape is [num_nodes, representation_dim].
if self.combination_type == "gru":
# Create a sequence of two elements for the GRU layer.
h = tf.stack([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "concat":
# Concatenate the node_repesentations and aggregated_messages.
h = tf.concat([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "add":
# Add node_repesentations and aggregated_messages.
h = node_repesentations + aggregated_messages
else:
raise ValueError(f"Invalid combination type: {self.combination_type}.")
# Apply the processing function.
node_embeddings = self.update_fn(h)
if self.combination_type == "gru":
node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]
if self.normalize:
node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
return node_embeddings
def call(self, inputs):
"""Process the inputs to produce the node_embeddings.
inputs: a tuple of three elements: node_repesentations, edges, edge_weights.
Returns: node_embeddings of shape [num_nodes, representation_dim].
"""
node_repesentations, edges, edge_weights = inputs
# Get node_indices (source) and neighbour_indices (target) from edges.
node_indices, neighbour_indices = edges[0], edges[1]
# neighbour_repesentations shape is [num_edges, representation_dim].
neighbour_repesentations = tf.gather(node_repesentations, neighbour_indices)
# Prepare the messages of the neighbours.
neighbour_messages = self.prepare(neighbour_repesentations, edge_weights)
# Aggregate the neighbour messages.
aggregated_messages = self.aggregate(
node_indices, neighbour_messages, node_repesentations
)
# Update the node embedding with the neighbour messages.
return self.update(node_repesentations, aggregated_messages)
```
### Implement a graph neural network node classifier
The GNN classification model follows the [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843) approach,
as follows:
1. Apply preprocessing using FFN to the node features to generate initial node representations.
2. Apply one or more graph convolutional layers, with skip connections, to the node representations
to produce node embeddings.
3. Apply post-processing using FFN to the node embeddings to generate the final node embeddings.
4. Feed the node embeddings into a Softmax layer to predict the node class.
Each graph convolutional layer added captures information from a further level of neighbours.
However, adding many graph convolutional layers can cause oversmoothing, where the model
produces similar embeddings for all the nodes.
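One simple way to diagnose oversmoothing is to measure how similar the produced embeddings are to
each other. The sketch below is not part of this example; the `node_embeddings` tensor (of shape
`[num_nodes, representation_dim]`) and the sample size are assumptions for illustration only.
```python
import tensorflow as tf
def mean_pairwise_cosine_similarity(node_embeddings, sample_size=256):
    # Sample a subset of nodes to keep the pairwise computation cheap.
    num_nodes = tf.shape(node_embeddings)[0]
    indices = tf.random.uniform([sample_size], maxval=num_nodes, dtype=tf.int32)
    sampled = tf.math.l2_normalize(tf.gather(node_embeddings, indices), axis=-1)
    # Cosine similarity between every pair of sampled embeddings.
    similarity = tf.matmul(sampled, sampled, transpose_b=True)
    # Average the off-diagonal entries; values close to 1.0 mean the embeddings
    # have collapsed to nearly the same vector (oversmoothing).
    mask = 1.0 - tf.eye(sample_size)
    return tf.reduce_sum(similarity * mask) / tf.reduce_sum(mask)
```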
Note that the `graph_info` passed to the constructor of the Keras model is used as a *property*
of the Keras model object, rather than as input data for training or prediction.
The model will accept a **batch** of `node_indices`, which are used to look up the
node features and neighbours from the `graph_info`.
```python
class GNNNodeClassifier(tf.keras.Model):
def __init__(
self,
graph_info,
num_classes,
hidden_units,
aggregation_type="sum",
combination_type="concat",
dropout_rate=0.2,
normalize=True,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
# Unpack graph_info to three elements: node_features, edges, and edge_weight.
node_features, edges, edge_weights = graph_info
self.node_features = node_features
self.edges = edges
self.edge_weights = edge_weights
# Set edge_weights to ones if not provided.
if self.edge_weights is None:
self.edge_weights = tf.ones(shape=edges.shape[1])
# Scale edge_weights to sum to 1.
self.edge_weights = self.edge_weights / tf.math.reduce_sum(self.edge_weights)
# Create a process layer.
self.preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess")
# Create the first GraphConv layer.
self.conv1 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv1",
)
# Create the second GraphConv layer.
self.conv2 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv2",
)
# Create a postprocess layer.
self.postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess")
# Create a compute logits layer.
self.compute_logits = layers.Dense(units=num_classes, name="logits")
def call(self, input_node_indices):
# Preprocess the node_features to produce node representations.
x = self.preprocess(self.node_features)
# Apply the first graph conv layer.
x1 = self.conv1((x, self.edges, self.edge_weights))
# Skip connection.
x = x1 + x
# Apply the second graph conv layer.
x2 = self.conv2((x, self.edges, self.edge_weights))
# Skip connection.
x = x2 + x
# Postprocess node embedding.
x = self.postprocess(x)
# Fetch node embeddings for the input node_indices.
node_embeddings = tf.gather(x, input_node_indices)
# Compute logits
return self.compute_logits(node_embeddings)
```
Let's test instantiating and calling the GNN model.
Notice that if you provide `N` node indices, the output will be a tensor of shape `[N, num_classes]`,
regardless of the size of the graph.
```python
gnn_model = GNNNodeClassifier(
graph_info=graph_info,
num_classes=num_classes,
hidden_units=hidden_units,
dropout_rate=dropout_rate,
name="gnn_model",
)
print("GNN output shape:", gnn_model([1, 10, 100]))
gnn_model.summary()
```
<div class="k-default-codeblock">
```
GNN output shape: tf.Tensor(
[[ 0.00620723 0.06162593 0.0176599 0.00830251 -0.03019211 -0.00402163
0.00277454]
[ 0.01705155 -0.0467547 0.01400987 -0.02146192 -0.11757397 0.10820404
-0.0375765 ]
[-0.02516522 -0.05514468 -0.03842098 -0.0495692 -0.05128997 -0.02241635
-0.07738923]], shape=(3, 7), dtype=float32)
Model: "gnn_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
preprocess (Sequential) (2708, 32) 52804
_________________________________________________________________
graph_conv1 (GraphConvLayer) multiple 5888
_________________________________________________________________
graph_conv2 (GraphConvLayer) multiple 5888
_________________________________________________________________
postprocess (Sequential) (2708, 32) 2368
_________________________________________________________________
logits (Dense) multiple 231
=================================================================
Total params: 67,179
Trainable params: 63,481
Non-trainable params: 3,698
_________________________________________________________________
```
</div>
### Train the GNN model
Note that we use the standard *supervised* cross-entropy loss to train the model.
However, we can add another *self-supervised* loss term for the generated node embeddings
that makes sure that neighbouring nodes in the graph have similar representations, while faraway
nodes have dissimilar representations.
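A minimal sketch of what such a self-supervised term could look like is shown below. It is not used
in this example; the `node_embeddings` and `edges` arguments, as well as the random negative-sampling
strategy, are assumptions for illustration only.
```python
import tensorflow as tf
def neighbourhood_contrastive_loss(node_embeddings, edges):
    # edges has shape [2, num_edges]; node_embeddings has shape
    # [num_nodes, representation_dim].
    source = tf.cast(edges[0], "int32")
    target = tf.cast(edges[1], "int32")
    source_embeddings = tf.gather(node_embeddings, source)
    target_embeddings = tf.gather(node_embeddings, target)
    # Positive pairs: connected nodes should get a high dot-product score.
    positive_scores = tf.reduce_sum(source_embeddings * target_embeddings, axis=-1)
    # Negative pairs: replace each target with a randomly chosen node.
    num_nodes = tf.shape(node_embeddings)[0]
    random_targets = tf.random.uniform(tf.shape(source), maxval=num_nodes, dtype=tf.int32)
    negative_embeddings = tf.gather(node_embeddings, random_targets)
    negative_scores = tf.reduce_sum(source_embeddings * negative_embeddings, axis=-1)
    positive_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(positive_scores), logits=positive_scores
    )
    negative_loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(negative_scores), logits=negative_scores
    )
    return tf.reduce_mean(positive_loss) + tf.reduce_mean(negative_loss)
```
In this example, we keep only the standard supervised loss and train the model as follows: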
```python
x_train = train_data.paper_id.to_numpy()
history = run_experiment(gnn_model, x_train, y_train)
```
<div class="k-default-codeblock">
```
Epoch 1/300
5/5 [==============================] - 4s 188ms/step - loss: 2.2529 - acc: 0.1793 - val_loss: 1.8933 - val_acc: 0.2941
Epoch 2/300
5/5 [==============================] - 0s 83ms/step - loss: 1.9866 - acc: 0.2601 - val_loss: 1.8753 - val_acc: 0.3186
Epoch 3/300
5/5 [==============================] - 0s 77ms/step - loss: 1.8794 - acc: 0.2846 - val_loss: 1.8655 - val_acc: 0.3186
Epoch 4/300
5/5 [==============================] - 0s 74ms/step - loss: 1.8432 - acc: 0.3078 - val_loss: 1.8529 - val_acc: 0.3186
Epoch 5/300
5/5 [==============================] - 0s 69ms/step - loss: 1.8314 - acc: 0.3134 - val_loss: 1.8429 - val_acc: 0.3186
Epoch 6/300
5/5 [==============================] - 0s 68ms/step - loss: 1.8157 - acc: 0.3208 - val_loss: 1.8326 - val_acc: 0.3186
Epoch 7/300
5/5 [==============================] - 0s 94ms/step - loss: 1.8112 - acc: 0.3071 - val_loss: 1.8265 - val_acc: 0.3186
Epoch 8/300
5/5 [==============================] - 0s 67ms/step - loss: 1.8028 - acc: 0.3132 - val_loss: 1.8171 - val_acc: 0.3186
Epoch 9/300
5/5 [==============================] - 0s 68ms/step - loss: 1.8007 - acc: 0.3206 - val_loss: 1.7961 - val_acc: 0.3186
Epoch 10/300
5/5 [==============================] - 0s 68ms/step - loss: 1.7571 - acc: 0.3259 - val_loss: 1.7623 - val_acc: 0.3186
Epoch 11/300
5/5 [==============================] - 0s 68ms/step - loss: 1.7373 - acc: 0.3279 - val_loss: 1.7131 - val_acc: 0.3186
Epoch 12/300
5/5 [==============================] - 0s 76ms/step - loss: 1.7130 - acc: 0.3169 - val_loss: 1.6552 - val_acc: 0.3186
Epoch 13/300
5/5 [==============================] - 0s 70ms/step - loss: 1.6989 - acc: 0.3315 - val_loss: 1.6075 - val_acc: 0.3284
Epoch 14/300
5/5 [==============================] - 0s 79ms/step - loss: 1.6733 - acc: 0.3522 - val_loss: 1.6027 - val_acc: 0.3333
Epoch 15/300
5/5 [==============================] - 0s 75ms/step - loss: 1.6060 - acc: 0.3641 - val_loss: 1.6422 - val_acc: 0.3480
Epoch 16/300
5/5 [==============================] - 0s 68ms/step - loss: 1.5783 - acc: 0.3924 - val_loss: 1.6893 - val_acc: 0.3676
Epoch 17/300
5/5 [==============================] - 0s 70ms/step - loss: 1.5269 - acc: 0.4315 - val_loss: 1.7534 - val_acc: 0.3725
Epoch 18/300
5/5 [==============================] - 0s 77ms/step - loss: 1.4558 - acc: 0.4633 - val_loss: 1.7224 - val_acc: 0.4167
Epoch 19/300
5/5 [==============================] - 0s 75ms/step - loss: 1.4131 - acc: 0.4765 - val_loss: 1.6482 - val_acc: 0.4510
Epoch 20/300
5/5 [==============================] - 0s 70ms/step - loss: 1.3880 - acc: 0.4859 - val_loss: 1.4956 - val_acc: 0.4706
Epoch 21/300
5/5 [==============================] - 0s 73ms/step - loss: 1.3223 - acc: 0.5166 - val_loss: 1.5299 - val_acc: 0.4853
Epoch 22/300
5/5 [==============================] - 0s 75ms/step - loss: 1.3226 - acc: 0.5172 - val_loss: 1.6304 - val_acc: 0.4902
Epoch 23/300
5/5 [==============================] - 0s 75ms/step - loss: 1.2888 - acc: 0.5267 - val_loss: 1.6679 - val_acc: 0.5000
Epoch 24/300
5/5 [==============================] - 0s 69ms/step - loss: 1.2478 - acc: 0.5279 - val_loss: 1.6552 - val_acc: 0.4853
Epoch 25/300
5/5 [==============================] - 0s 70ms/step - loss: 1.1978 - acc: 0.5720 - val_loss: 1.6705 - val_acc: 0.4902
Epoch 26/300
5/5 [==============================] - 0s 70ms/step - loss: 1.1814 - acc: 0.5596 - val_loss: 1.6327 - val_acc: 0.5343
Epoch 27/300
5/5 [==============================] - 0s 68ms/step - loss: 1.1085 - acc: 0.5979 - val_loss: 1.5184 - val_acc: 0.5245
Epoch 28/300
5/5 [==============================] - 0s 69ms/step - loss: 1.0695 - acc: 0.6078 - val_loss: 1.5212 - val_acc: 0.4853
Epoch 29/300
5/5 [==============================] - 0s 70ms/step - loss: 1.1063 - acc: 0.6002 - val_loss: 1.5988 - val_acc: 0.4706
Epoch 30/300
5/5 [==============================] - 0s 68ms/step - loss: 1.0194 - acc: 0.6326 - val_loss: 1.5636 - val_acc: 0.4951
Epoch 31/300
5/5 [==============================] - 0s 70ms/step - loss: 1.0320 - acc: 0.6268 - val_loss: 1.5191 - val_acc: 0.5196
Epoch 32/300
5/5 [==============================] - 0s 82ms/step - loss: 0.9749 - acc: 0.6433 - val_loss: 1.5922 - val_acc: 0.5098
Epoch 33/300
5/5 [==============================] - 0s 85ms/step - loss: 0.9095 - acc: 0.6717 - val_loss: 1.5879 - val_acc: 0.5000
Epoch 34/300
5/5 [==============================] - 0s 78ms/step - loss: 0.9324 - acc: 0.6903 - val_loss: 1.5717 - val_acc: 0.4951
Epoch 35/300
5/5 [==============================] - 0s 80ms/step - loss: 0.8908 - acc: 0.6953 - val_loss: 1.5010 - val_acc: 0.5098
Epoch 36/300
5/5 [==============================] - 0s 99ms/step - loss: 0.8858 - acc: 0.6977 - val_loss: 1.5939 - val_acc: 0.5147
Epoch 37/300
5/5 [==============================] - 0s 79ms/step - loss: 0.8376 - acc: 0.6991 - val_loss: 1.4000 - val_acc: 0.5833
Epoch 38/300
5/5 [==============================] - 0s 75ms/step - loss: 0.8657 - acc: 0.7080 - val_loss: 1.3288 - val_acc: 0.5931
Epoch 39/300
5/5 [==============================] - 0s 86ms/step - loss: 0.9160 - acc: 0.6819 - val_loss: 1.1358 - val_acc: 0.6275
Epoch 40/300
5/5 [==============================] - 0s 80ms/step - loss: 0.8676 - acc: 0.7109 - val_loss: 1.0618 - val_acc: 0.6765
Epoch 41/300
5/5 [==============================] - 0s 72ms/step - loss: 0.8065 - acc: 0.7246 - val_loss: 1.0785 - val_acc: 0.6765
Epoch 42/300
5/5 [==============================] - 0s 76ms/step - loss: 0.8478 - acc: 0.7145 - val_loss: 1.0502 - val_acc: 0.6569
Epoch 43/300
5/5 [==============================] - 0s 78ms/step - loss: 0.8125 - acc: 0.7068 - val_loss: 0.9888 - val_acc: 0.6520
Epoch 44/300
5/5 [==============================] - 0s 68ms/step - loss: 0.7791 - acc: 0.7425 - val_loss: 0.9820 - val_acc: 0.6618
Epoch 45/300
5/5 [==============================] - 0s 69ms/step - loss: 0.7492 - acc: 0.7368 - val_loss: 0.9297 - val_acc: 0.6961
Epoch 46/300
5/5 [==============================] - 0s 71ms/step - loss: 0.7521 - acc: 0.7668 - val_loss: 0.9757 - val_acc: 0.6961
Epoch 47/300
5/5 [==============================] - 0s 71ms/step - loss: 0.7090 - acc: 0.7587 - val_loss: 0.9676 - val_acc: 0.7059
Epoch 48/300
5/5 [==============================] - 0s 68ms/step - loss: 0.7008 - acc: 0.7430 - val_loss: 0.9457 - val_acc: 0.7010
Epoch 49/300
5/5 [==============================] - 0s 69ms/step - loss: 0.6919 - acc: 0.7584 - val_loss: 0.9998 - val_acc: 0.6569
Epoch 50/300
5/5 [==============================] - 0s 68ms/step - loss: 0.7583 - acc: 0.7628 - val_loss: 0.9707 - val_acc: 0.6667
Epoch 51/300
5/5 [==============================] - 0s 69ms/step - loss: 0.6575 - acc: 0.7697 - val_loss: 0.9260 - val_acc: 0.6814
Epoch 52/300
5/5 [==============================] - 0s 78ms/step - loss: 0.6751 - acc: 0.7774 - val_loss: 0.9173 - val_acc: 0.6765
Epoch 53/300
5/5 [==============================] - 0s 92ms/step - loss: 0.6964 - acc: 0.7561 - val_loss: 0.8985 - val_acc: 0.6961
Epoch 54/300
5/5 [==============================] - 0s 77ms/step - loss: 0.6386 - acc: 0.7872 - val_loss: 0.9455 - val_acc: 0.6961
Epoch 55/300
5/5 [==============================] - 0s 77ms/step - loss: 0.6110 - acc: 0.8130 - val_loss: 0.9780 - val_acc: 0.6716
Epoch 56/300
5/5 [==============================] - 0s 76ms/step - loss: 0.6483 - acc: 0.7703 - val_loss: 0.9650 - val_acc: 0.6863
Epoch 57/300
5/5 [==============================] - 0s 78ms/step - loss: 0.6811 - acc: 0.7706 - val_loss: 0.9446 - val_acc: 0.6667
Epoch 58/300
5/5 [==============================] - 0s 76ms/step - loss: 0.6391 - acc: 0.7852 - val_loss: 0.9059 - val_acc: 0.7010
Epoch 59/300
5/5 [==============================] - 0s 76ms/step - loss: 0.6533 - acc: 0.7784 - val_loss: 0.8964 - val_acc: 0.7108
Epoch 60/300
5/5 [==============================] - 0s 101ms/step - loss: 0.6587 - acc: 0.7863 - val_loss: 0.8417 - val_acc: 0.7108
Epoch 61/300
5/5 [==============================] - 0s 84ms/step - loss: 0.5776 - acc: 0.8166 - val_loss: 0.8035 - val_acc: 0.7304
Epoch 62/300
5/5 [==============================] - 0s 80ms/step - loss: 0.6396 - acc: 0.7792 - val_loss: 0.8072 - val_acc: 0.7500
Epoch 63/300
5/5 [==============================] - 0s 67ms/step - loss: 0.6201 - acc: 0.7972 - val_loss: 0.7809 - val_acc: 0.7696
Epoch 64/300
5/5 [==============================] - 0s 68ms/step - loss: 0.6358 - acc: 0.7875 - val_loss: 0.7635 - val_acc: 0.7500
Epoch 65/300
5/5 [==============================] - 0s 70ms/step - loss: 0.5914 - acc: 0.8027 - val_loss: 0.8147 - val_acc: 0.7402
Epoch 66/300
5/5 [==============================] - 0s 69ms/step - loss: 0.5960 - acc: 0.7955 - val_loss: 0.9350 - val_acc: 0.7304
Epoch 67/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5752 - acc: 0.8001 - val_loss: 0.9849 - val_acc: 0.7157
Epoch 68/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5189 - acc: 0.8322 - val_loss: 1.0268 - val_acc: 0.7206
Epoch 69/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5413 - acc: 0.8078 - val_loss: 0.9132 - val_acc: 0.7549
Epoch 70/300
5/5 [==============================] - 0s 75ms/step - loss: 0.5231 - acc: 0.8222 - val_loss: 0.8673 - val_acc: 0.7647
Epoch 71/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5416 - acc: 0.8219 - val_loss: 0.8179 - val_acc: 0.7696
Epoch 72/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5060 - acc: 0.8263 - val_loss: 0.7870 - val_acc: 0.7794
Epoch 73/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5502 - acc: 0.8221 - val_loss: 0.7749 - val_acc: 0.7549
Epoch 74/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5111 - acc: 0.8434 - val_loss: 0.7830 - val_acc: 0.7549
Epoch 75/300
5/5 [==============================] - 0s 69ms/step - loss: 0.5119 - acc: 0.8386 - val_loss: 0.8140 - val_acc: 0.7451
Epoch 76/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4922 - acc: 0.8433 - val_loss: 0.8149 - val_acc: 0.7353
Epoch 77/300
5/5 [==============================] - 0s 71ms/step - loss: 0.5217 - acc: 0.8188 - val_loss: 0.7784 - val_acc: 0.7598
Epoch 78/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5027 - acc: 0.8410 - val_loss: 0.7660 - val_acc: 0.7696
Epoch 79/300
5/5 [==============================] - 0s 67ms/step - loss: 0.5307 - acc: 0.8265 - val_loss: 0.7217 - val_acc: 0.7696
Epoch 80/300
5/5 [==============================] - 0s 68ms/step - loss: 0.5164 - acc: 0.8239 - val_loss: 0.6974 - val_acc: 0.7647
Epoch 81/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4404 - acc: 0.8526 - val_loss: 0.6891 - val_acc: 0.7745
Epoch 82/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4565 - acc: 0.8449 - val_loss: 0.6839 - val_acc: 0.7696
Epoch 83/300
5/5 [==============================] - 0s 67ms/step - loss: 0.4759 - acc: 0.8491 - val_loss: 0.7162 - val_acc: 0.7745
Epoch 84/300
5/5 [==============================] - 0s 70ms/step - loss: 0.5154 - acc: 0.8476 - val_loss: 0.7889 - val_acc: 0.7598
Epoch 85/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4847 - acc: 0.8480 - val_loss: 0.7579 - val_acc: 0.7794
Epoch 86/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4519 - acc: 0.8592 - val_loss: 0.7056 - val_acc: 0.7941
Epoch 87/300
5/5 [==============================] - 0s 67ms/step - loss: 0.5038 - acc: 0.8472 - val_loss: 0.6725 - val_acc: 0.7794
Epoch 88/300
5/5 [==============================] - 0s 92ms/step - loss: 0.4729 - acc: 0.8454 - val_loss: 0.7057 - val_acc: 0.7745
Epoch 89/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4811 - acc: 0.8562 - val_loss: 0.6784 - val_acc: 0.7990
Epoch 90/300
5/5 [==============================] - 0s 70ms/step - loss: 0.4102 - acc: 0.8779 - val_loss: 0.6383 - val_acc: 0.8039
Epoch 91/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4493 - acc: 0.8703 - val_loss: 0.6574 - val_acc: 0.7941
Epoch 92/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4560 - acc: 0.8610 - val_loss: 0.6764 - val_acc: 0.7941
Epoch 93/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4465 - acc: 0.8626 - val_loss: 0.6628 - val_acc: 0.7892
Epoch 94/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4773 - acc: 0.8446 - val_loss: 0.6573 - val_acc: 0.7941
Epoch 95/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4313 - acc: 0.8734 - val_loss: 0.6875 - val_acc: 0.7941
Epoch 96/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4668 - acc: 0.8598 - val_loss: 0.6712 - val_acc: 0.8039
Epoch 97/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4329 - acc: 0.8696 - val_loss: 0.6274 - val_acc: 0.8088
Epoch 98/300
5/5 [==============================] - 0s 71ms/step - loss: 0.4223 - acc: 0.8542 - val_loss: 0.6259 - val_acc: 0.7990
Epoch 99/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4677 - acc: 0.8488 - val_loss: 0.6431 - val_acc: 0.8186
Epoch 100/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3933 - acc: 0.8753 - val_loss: 0.6559 - val_acc: 0.8186
Epoch 101/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3945 - acc: 0.8777 - val_loss: 0.6461 - val_acc: 0.8186
Epoch 102/300
5/5 [==============================] - 0s 70ms/step - loss: 0.4671 - acc: 0.8324 - val_loss: 0.6607 - val_acc: 0.7990
Epoch 103/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3890 - acc: 0.8762 - val_loss: 0.6792 - val_acc: 0.7941
Epoch 104/300
5/5 [==============================] - 0s 67ms/step - loss: 0.4336 - acc: 0.8646 - val_loss: 0.6854 - val_acc: 0.7990
Epoch 105/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4304 - acc: 0.8651 - val_loss: 0.6949 - val_acc: 0.8039
Epoch 106/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4043 - acc: 0.8723 - val_loss: 0.6941 - val_acc: 0.7892
Epoch 107/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4043 - acc: 0.8713 - val_loss: 0.6798 - val_acc: 0.8088
Epoch 108/300
5/5 [==============================] - 0s 70ms/step - loss: 0.4647 - acc: 0.8599 - val_loss: 0.6726 - val_acc: 0.8039
Epoch 109/300
5/5 [==============================] - 0s 73ms/step - loss: 0.3916 - acc: 0.8820 - val_loss: 0.6680 - val_acc: 0.8137
Epoch 110/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3990 - acc: 0.8875 - val_loss: 0.6580 - val_acc: 0.8137
Epoch 111/300
5/5 [==============================] - 0s 95ms/step - loss: 0.4240 - acc: 0.8786 - val_loss: 0.6487 - val_acc: 0.8137
Epoch 112/300
5/5 [==============================] - 0s 67ms/step - loss: 0.4050 - acc: 0.8633 - val_loss: 0.6471 - val_acc: 0.8186
Epoch 113/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4120 - acc: 0.8522 - val_loss: 0.6375 - val_acc: 0.8137
Epoch 114/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3802 - acc: 0.8793 - val_loss: 0.6454 - val_acc: 0.8137
Epoch 115/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4073 - acc: 0.8730 - val_loss: 0.6504 - val_acc: 0.8088
Epoch 116/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3573 - acc: 0.8948 - val_loss: 0.6501 - val_acc: 0.7990
Epoch 117/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4238 - acc: 0.8611 - val_loss: 0.7339 - val_acc: 0.7843
Epoch 118/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3565 - acc: 0.8832 - val_loss: 0.7533 - val_acc: 0.7941
Epoch 119/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3863 - acc: 0.8834 - val_loss: 0.7470 - val_acc: 0.8186
Epoch 120/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3935 - acc: 0.8768 - val_loss: 0.6778 - val_acc: 0.8333
Epoch 121/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3745 - acc: 0.8862 - val_loss: 0.6741 - val_acc: 0.8137
Epoch 122/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4152 - acc: 0.8647 - val_loss: 0.6594 - val_acc: 0.8235
Epoch 123/300
5/5 [==============================] - 0s 64ms/step - loss: 0.3987 - acc: 0.8813 - val_loss: 0.6478 - val_acc: 0.8235
Epoch 124/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4005 - acc: 0.8798 - val_loss: 0.6837 - val_acc: 0.8284
Epoch 125/300
5/5 [==============================] - 0s 68ms/step - loss: 0.4366 - acc: 0.8699 - val_loss: 0.6456 - val_acc: 0.8235
Epoch 126/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3544 - acc: 0.8852 - val_loss: 0.6967 - val_acc: 0.8088
Epoch 127/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3835 - acc: 0.8676 - val_loss: 0.7279 - val_acc: 0.8088
Epoch 128/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3932 - acc: 0.8723 - val_loss: 0.7471 - val_acc: 0.8137
Epoch 129/300
5/5 [==============================] - 0s 66ms/step - loss: 0.3788 - acc: 0.8822 - val_loss: 0.7028 - val_acc: 0.8284
Epoch 130/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3546 - acc: 0.8876 - val_loss: 0.6424 - val_acc: 0.8382
Epoch 131/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4244 - acc: 0.8784 - val_loss: 0.6478 - val_acc: 0.8382
Epoch 132/300
5/5 [==============================] - 0s 66ms/step - loss: 0.4120 - acc: 0.8689 - val_loss: 0.6834 - val_acc: 0.8186
Epoch 133/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3585 - acc: 0.8872 - val_loss: 0.6802 - val_acc: 0.8186
Epoch 134/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3782 - acc: 0.8788 - val_loss: 0.6936 - val_acc: 0.8235
Epoch 135/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3459 - acc: 0.8776 - val_loss: 0.6776 - val_acc: 0.8431
Epoch 136/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3176 - acc: 0.9108 - val_loss: 0.6881 - val_acc: 0.8382
Epoch 137/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3205 - acc: 0.9052 - val_loss: 0.6934 - val_acc: 0.8431
Epoch 138/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4079 - acc: 0.8782 - val_loss: 0.6830 - val_acc: 0.8431
Epoch 139/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3465 - acc: 0.8973 - val_loss: 0.6876 - val_acc: 0.8431
Epoch 140/300
5/5 [==============================] - 0s 95ms/step - loss: 0.3935 - acc: 0.8766 - val_loss: 0.7166 - val_acc: 0.8382
Epoch 141/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3905 - acc: 0.8868 - val_loss: 0.7320 - val_acc: 0.8284
Epoch 142/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3482 - acc: 0.8887 - val_loss: 0.7575 - val_acc: 0.8186
Epoch 143/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3567 - acc: 0.8820 - val_loss: 0.7537 - val_acc: 0.8235
Epoch 144/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3427 - acc: 0.8753 - val_loss: 0.7225 - val_acc: 0.8284
Epoch 145/300
5/5 [==============================] - 0s 72ms/step - loss: 0.3894 - acc: 0.8750 - val_loss: 0.7228 - val_acc: 0.8333
Epoch 146/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3585 - acc: 0.8938 - val_loss: 0.6870 - val_acc: 0.8284
Epoch 147/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3450 - acc: 0.8830 - val_loss: 0.6666 - val_acc: 0.8284
Epoch 148/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3174 - acc: 0.8929 - val_loss: 0.6683 - val_acc: 0.8382
Epoch 149/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3357 - acc: 0.9041 - val_loss: 0.6676 - val_acc: 0.8480
Epoch 150/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3597 - acc: 0.8792 - val_loss: 0.6913 - val_acc: 0.8235
Epoch 151/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3043 - acc: 0.9093 - val_loss: 0.7146 - val_acc: 0.8039
Epoch 152/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3935 - acc: 0.8814 - val_loss: 0.6716 - val_acc: 0.8382
Epoch 153/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3200 - acc: 0.8898 - val_loss: 0.6832 - val_acc: 0.8578
Epoch 154/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3738 - acc: 0.8809 - val_loss: 0.6622 - val_acc: 0.8529
Epoch 155/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3784 - acc: 0.8777 - val_loss: 0.6510 - val_acc: 0.8431
Epoch 156/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3565 - acc: 0.8962 - val_loss: 0.6600 - val_acc: 0.8333
Epoch 157/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2935 - acc: 0.9137 - val_loss: 0.6732 - val_acc: 0.8333
Epoch 158/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3130 - acc: 0.9060 - val_loss: 0.7070 - val_acc: 0.8284
Epoch 159/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3386 - acc: 0.8937 - val_loss: 0.6865 - val_acc: 0.8480
Epoch 160/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3310 - acc: 0.9038 - val_loss: 0.7082 - val_acc: 0.8382
Epoch 161/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3232 - acc: 0.8993 - val_loss: 0.7184 - val_acc: 0.8431
Epoch 162/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3062 - acc: 0.9036 - val_loss: 0.7070 - val_acc: 0.8382
Epoch 163/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3374 - acc: 0.8962 - val_loss: 0.7187 - val_acc: 0.8284
Epoch 164/300
5/5 [==============================] - 0s 94ms/step - loss: 0.3249 - acc: 0.8977 - val_loss: 0.7197 - val_acc: 0.8382
Epoch 165/300
5/5 [==============================] - 0s 69ms/step - loss: 0.4041 - acc: 0.8764 - val_loss: 0.7195 - val_acc: 0.8431
Epoch 166/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3356 - acc: 0.9015 - val_loss: 0.7114 - val_acc: 0.8333
Epoch 167/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3006 - acc: 0.9017 - val_loss: 0.6988 - val_acc: 0.8235
Epoch 168/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3368 - acc: 0.8970 - val_loss: 0.6795 - val_acc: 0.8284
Epoch 169/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3049 - acc: 0.9124 - val_loss: 0.6590 - val_acc: 0.8333
Epoch 170/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3652 - acc: 0.8900 - val_loss: 0.6538 - val_acc: 0.8431
Epoch 171/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3153 - acc: 0.9094 - val_loss: 0.6342 - val_acc: 0.8480
Epoch 172/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2881 - acc: 0.9038 - val_loss: 0.6242 - val_acc: 0.8382
Epoch 173/300
5/5 [==============================] - 0s 66ms/step - loss: 0.3764 - acc: 0.8824 - val_loss: 0.6220 - val_acc: 0.8480
Epoch 174/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3352 - acc: 0.8958 - val_loss: 0.6305 - val_acc: 0.8578
Epoch 175/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3450 - acc: 0.9026 - val_loss: 0.6426 - val_acc: 0.8578
Epoch 176/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3471 - acc: 0.8941 - val_loss: 0.6653 - val_acc: 0.8333
Epoch 177/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3373 - acc: 0.8970 - val_loss: 0.6941 - val_acc: 0.8137
Epoch 178/300
5/5 [==============================] - 0s 69ms/step - loss: 0.2986 - acc: 0.9092 - val_loss: 0.6841 - val_acc: 0.8137
Epoch 179/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3466 - acc: 0.9038 - val_loss: 0.6704 - val_acc: 0.8284
Epoch 180/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3661 - acc: 0.8998 - val_loss: 0.6995 - val_acc: 0.8235
Epoch 181/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3163 - acc: 0.8902 - val_loss: 0.6806 - val_acc: 0.8235
Epoch 182/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3278 - acc: 0.9025 - val_loss: 0.6815 - val_acc: 0.8284
Epoch 183/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3343 - acc: 0.8960 - val_loss: 0.6704 - val_acc: 0.8333
Epoch 184/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3172 - acc: 0.8906 - val_loss: 0.6434 - val_acc: 0.8333
Epoch 185/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3679 - acc: 0.8921 - val_loss: 0.6394 - val_acc: 0.8529
Epoch 186/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3030 - acc: 0.9079 - val_loss: 0.6677 - val_acc: 0.8480
Epoch 187/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3102 - acc: 0.8908 - val_loss: 0.6456 - val_acc: 0.8529
Epoch 188/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2763 - acc: 0.9140 - val_loss: 0.6151 - val_acc: 0.8431
Epoch 189/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3298 - acc: 0.8964 - val_loss: 0.6119 - val_acc: 0.8676
Epoch 190/300
5/5 [==============================] - 0s 69ms/step - loss: 0.2928 - acc: 0.9094 - val_loss: 0.6141 - val_acc: 0.8480
Epoch 191/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3066 - acc: 0.9093 - val_loss: 0.6393 - val_acc: 0.8480
Epoch 192/300
5/5 [==============================] - 0s 94ms/step - loss: 0.2988 - acc: 0.9060 - val_loss: 0.6380 - val_acc: 0.8431
Epoch 193/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3654 - acc: 0.8800 - val_loss: 0.6102 - val_acc: 0.8578
Epoch 194/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3482 - acc: 0.8981 - val_loss: 0.6396 - val_acc: 0.8480
Epoch 195/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3029 - acc: 0.9083 - val_loss: 0.6410 - val_acc: 0.8431
Epoch 196/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3276 - acc: 0.8931 - val_loss: 0.6209 - val_acc: 0.8529
Epoch 197/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3252 - acc: 0.8989 - val_loss: 0.6153 - val_acc: 0.8578
Epoch 198/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3542 - acc: 0.8917 - val_loss: 0.6079 - val_acc: 0.8627
Epoch 199/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3191 - acc: 0.9006 - val_loss: 0.6087 - val_acc: 0.8578
Epoch 200/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3077 - acc: 0.9008 - val_loss: 0.6209 - val_acc: 0.8529
Epoch 201/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3045 - acc: 0.9076 - val_loss: 0.6609 - val_acc: 0.8333
Epoch 202/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3053 - acc: 0.9058 - val_loss: 0.7324 - val_acc: 0.8284
Epoch 203/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3107 - acc: 0.8985 - val_loss: 0.7755 - val_acc: 0.8235
Epoch 204/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3047 - acc: 0.8995 - val_loss: 0.7936 - val_acc: 0.7941
Epoch 205/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3131 - acc: 0.9098 - val_loss: 0.6453 - val_acc: 0.8529
Epoch 206/300
5/5 [==============================] - 0s 71ms/step - loss: 0.3795 - acc: 0.8849 - val_loss: 0.6213 - val_acc: 0.8529
Epoch 207/300
5/5 [==============================] - 0s 70ms/step - loss: 0.2903 - acc: 0.9114 - val_loss: 0.6354 - val_acc: 0.8578
Epoch 208/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2599 - acc: 0.9164 - val_loss: 0.6390 - val_acc: 0.8676
Epoch 209/300
5/5 [==============================] - 0s 71ms/step - loss: 0.2954 - acc: 0.9041 - val_loss: 0.6376 - val_acc: 0.8775
Epoch 210/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3250 - acc: 0.9023 - val_loss: 0.6206 - val_acc: 0.8725
Epoch 211/300
5/5 [==============================] - 0s 69ms/step - loss: 0.2694 - acc: 0.9149 - val_loss: 0.6177 - val_acc: 0.8676
Epoch 212/300
5/5 [==============================] - 0s 71ms/step - loss: 0.2920 - acc: 0.9054 - val_loss: 0.6438 - val_acc: 0.8627
Epoch 213/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2861 - acc: 0.9048 - val_loss: 0.7128 - val_acc: 0.8480
Epoch 214/300
5/5 [==============================] - 0s 65ms/step - loss: 0.2916 - acc: 0.9083 - val_loss: 0.7030 - val_acc: 0.8431
Epoch 215/300
5/5 [==============================] - 0s 91ms/step - loss: 0.3288 - acc: 0.8887 - val_loss: 0.6593 - val_acc: 0.8529
Epoch 216/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3802 - acc: 0.8875 - val_loss: 0.6165 - val_acc: 0.8578
Epoch 217/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2905 - acc: 0.9175 - val_loss: 0.6141 - val_acc: 0.8725
Epoch 218/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3078 - acc: 0.9104 - val_loss: 0.6158 - val_acc: 0.8676
Epoch 219/300
5/5 [==============================] - 0s 66ms/step - loss: 0.2757 - acc: 0.9214 - val_loss: 0.6195 - val_acc: 0.8578
Epoch 220/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3159 - acc: 0.8958 - val_loss: 0.6375 - val_acc: 0.8578
Epoch 221/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3348 - acc: 0.8944 - val_loss: 0.6839 - val_acc: 0.8431
Epoch 222/300
5/5 [==============================] - 0s 70ms/step - loss: 0.3239 - acc: 0.8936 - val_loss: 0.6450 - val_acc: 0.8578
Epoch 223/300
5/5 [==============================] - 0s 73ms/step - loss: 0.2783 - acc: 0.9081 - val_loss: 0.6163 - val_acc: 0.8627
Epoch 224/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2852 - acc: 0.9165 - val_loss: 0.6495 - val_acc: 0.8431
Epoch 225/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3073 - acc: 0.8902 - val_loss: 0.6622 - val_acc: 0.8529
Epoch 226/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3127 - acc: 0.9102 - val_loss: 0.6652 - val_acc: 0.8431
Epoch 227/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3248 - acc: 0.9067 - val_loss: 0.6475 - val_acc: 0.8529
Epoch 228/300
5/5 [==============================] - 0s 69ms/step - loss: 0.3155 - acc: 0.9089 - val_loss: 0.6263 - val_acc: 0.8382
Epoch 229/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3585 - acc: 0.8898 - val_loss: 0.6308 - val_acc: 0.8578
Epoch 230/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2812 - acc: 0.9180 - val_loss: 0.6201 - val_acc: 0.8529
Epoch 231/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3070 - acc: 0.8984 - val_loss: 0.6170 - val_acc: 0.8431
Epoch 232/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3433 - acc: 0.8909 - val_loss: 0.6568 - val_acc: 0.8431
Epoch 233/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2844 - acc: 0.9085 - val_loss: 0.6571 - val_acc: 0.8529
Epoch 234/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3122 - acc: 0.9044 - val_loss: 0.6516 - val_acc: 0.8480
Epoch 235/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3047 - acc: 0.9232 - val_loss: 0.6505 - val_acc: 0.8480
Epoch 236/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2913 - acc: 0.9192 - val_loss: 0.6432 - val_acc: 0.8529
Epoch 237/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2505 - acc: 0.9322 - val_loss: 0.6462 - val_acc: 0.8627
Epoch 238/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3033 - acc: 0.9085 - val_loss: 0.6378 - val_acc: 0.8627
Epoch 239/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3418 - acc: 0.8975 - val_loss: 0.6232 - val_acc: 0.8578
Epoch 240/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3167 - acc: 0.9051 - val_loss: 0.6284 - val_acc: 0.8627
Epoch 241/300
5/5 [==============================] - 0s 69ms/step - loss: 0.2637 - acc: 0.9145 - val_loss: 0.6427 - val_acc: 0.8627
Epoch 242/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2678 - acc: 0.9227 - val_loss: 0.6492 - val_acc: 0.8578
Epoch 243/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2730 - acc: 0.9113 - val_loss: 0.6736 - val_acc: 0.8578
Epoch 244/300
5/5 [==============================] - 0s 93ms/step - loss: 0.3013 - acc: 0.9077 - val_loss: 0.7138 - val_acc: 0.8333
Epoch 245/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3151 - acc: 0.9096 - val_loss: 0.7278 - val_acc: 0.8382
Epoch 246/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3307 - acc: 0.9058 - val_loss: 0.6944 - val_acc: 0.8627
Epoch 247/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2631 - acc: 0.9236 - val_loss: 0.6789 - val_acc: 0.8529
Epoch 248/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3215 - acc: 0.9027 - val_loss: 0.6790 - val_acc: 0.8529
Epoch 249/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2968 - acc: 0.9038 - val_loss: 0.6864 - val_acc: 0.8480
Epoch 250/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2998 - acc: 0.9078 - val_loss: 0.7079 - val_acc: 0.8480
Epoch 251/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2375 - acc: 0.9197 - val_loss: 0.7252 - val_acc: 0.8529
Epoch 252/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2955 - acc: 0.9178 - val_loss: 0.7298 - val_acc: 0.8284
Epoch 253/300
5/5 [==============================] - 0s 69ms/step - loss: 0.2946 - acc: 0.9039 - val_loss: 0.7172 - val_acc: 0.8284
Epoch 254/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3051 - acc: 0.9087 - val_loss: 0.6861 - val_acc: 0.8382
Epoch 255/300
5/5 [==============================] - 0s 67ms/step - loss: 0.3563 - acc: 0.8882 - val_loss: 0.6739 - val_acc: 0.8480
Epoch 256/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3144 - acc: 0.8969 - val_loss: 0.6970 - val_acc: 0.8382
Epoch 257/300
5/5 [==============================] - 0s 68ms/step - loss: 0.3210 - acc: 0.9152 - val_loss: 0.7106 - val_acc: 0.8333
Epoch 258/300
5/5 [==============================] - 0s 67ms/step - loss: 0.2523 - acc: 0.9214 - val_loss: 0.7111 - val_acc: 0.8431
Epoch 259/300
5/5 [==============================] - 0s 68ms/step - loss: 0.2552 - acc: 0.9236 - val_loss: 0.7258 - val_acc: 0.8382
```
</div>
Let's plot the learning curves.
```python
display_learning_curves(history)
```

Now we evaluate the GNN model on the test data split.
The results may vary depending on the training sample; however, the GNN model always outperforms
the baseline model in terms of test accuracy.
```python
x_test = test_data.paper_id.to_numpy()
_, test_accuracy = gnn_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
Test accuracy: 80.19%
```
</div>
### Examine the GNN model predictions
Let's add the new instances as nodes to the `node_features`, and generate links
(citations) to existing nodes.
```python
# First we add the N new_instances as nodes to the graph
# by appending the new_instance to node_features.
num_nodes = node_features.shape[0]
new_node_features = np.concatenate([node_features, new_instances])
# Second we add the M edges (citations) from each new node to a set
# of existing nodes in a particular subject
new_node_indices = [i + num_nodes for i in range(num_classes)]
new_citations = []
for subject_idx, group in papers.groupby("subject"):
subject_papers = list(group.paper_id)
# Select x random papers from the specific subject.
selected_paper_indices1 = np.random.choice(subject_papers, 5)
# Select random y papers from any subject (where y < x).
selected_paper_indices2 = np.random.choice(list(papers.paper_id), 2)
# Merge the selected paper indices.
selected_paper_indices = np.concatenate(
[selected_paper_indices1, selected_paper_indices2], axis=0
)
# Create edges between a citing paper idx and the selected cited papers.
citing_paper_indx = new_node_indices[subject_idx]
for cited_paper_idx in selected_paper_indices:
new_citations.append([citing_paper_indx, cited_paper_idx])
new_citations = np.array(new_citations).T
new_edges = np.concatenate([edges, new_citations], axis=1)
```
Now let's update the `node_features` and the `edges` in the GNN model.
```python
print("Original node_features shape:", gnn_model.node_features.shape)
print("Original edges shape:", gnn_model.edges.shape)
gnn_model.node_features = new_node_features
gnn_model.edges = new_edges
gnn_model.edge_weights = tf.ones(shape=new_edges.shape[1])
print("New node_features shape:", gnn_model.node_features.shape)
print("New edges shape:", gnn_model.edges.shape)
logits = gnn_model.predict(tf.convert_to_tensor(new_node_indices))
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)
```
<div class="k-default-codeblock">
```
Original node_features shape: (2708, 1433)
Original edges shape: (2, 5429)
New node_features shape: (2715, 1433)
New edges shape: (2, 5478)
Instance 1:
- Case_Based: 4.35%
- Genetic_Algorithms: 4.19%
- Neural_Networks: 1.49%
- Probabilistic_Methods: 1.68%
- Reinforcement_Learning: 21.34%
- Rule_Learning: 52.82%
- Theory: 14.14%
Instance 2:
- Case_Based: 0.01%
- Genetic_Algorithms: 99.88%
- Neural_Networks: 0.03%
- Probabilistic_Methods: 0.0%
- Reinforcement_Learning: 0.07%
- Rule_Learning: 0.0%
- Theory: 0.01%
Instance 3:
- Case_Based: 0.1%
- Genetic_Algorithms: 59.18%
- Neural_Networks: 39.17%
- Probabilistic_Methods: 0.38%
- Reinforcement_Learning: 0.55%
- Rule_Learning: 0.08%
- Theory: 0.54%
Instance 4:
- Case_Based: 0.14%
- Genetic_Algorithms: 10.44%
- Neural_Networks: 84.1%
- Probabilistic_Methods: 3.61%
- Reinforcement_Learning: 0.71%
- Rule_Learning: 0.16%
- Theory: 0.85%
Instance 5:
- Case_Based: 0.27%
- Genetic_Algorithms: 0.15%
- Neural_Networks: 0.48%
- Probabilistic_Methods: 0.23%
- Reinforcement_Learning: 0.79%
- Rule_Learning: 0.45%
- Theory: 97.63%
Instance 6:
- Case_Based: 3.12%
- Genetic_Algorithms: 1.35%
- Neural_Networks: 19.72%
- Probabilistic_Methods: 0.48%
- Reinforcement_Learning: 39.56%
- Rule_Learning: 28.0%
- Theory: 7.77%
Instance 7:
- Case_Based: 1.6%
- Genetic_Algorithms: 34.76%
- Neural_Networks: 4.45%
- Probabilistic_Methods: 9.59%
- Reinforcement_Learning: 2.97%
- Rule_Learning: 4.05%
- Theory: 42.6%
```
</div>
Notice that the probabilities of the expected subjects
(to which several citations are added) are higher compared to the baseline model.
| keras-io/examples/graph/md/gnn_citations.md/0 | {
"file_path": "keras-io/examples/graph/md/gnn_citations.md",
"repo_id": "keras-io",
"token_count": 35230
} | 79 |
<jupyter_start><jupyter_text>Approximating non-Function Mappings with Mixture Density Networks**Author:** [lukewood](https://twitter.com/luke_wood_ml)**Date created:** 2023/07/15**Last modified:** 2023/07/15**Description:** Approximate non one to one mapping using mixture density networks. Approximating NonFunctionsNeural networks are universal function approximators. Key word: function!While powerful function approximators, neural networks are not able toapproximate non-functions.One important restriction to remember about functions - they have one input, oneoutput!Neural networks suffer greatly when the training set has multiple values of Y for a single X.In this guide I'll show you how to approximate the class of non-functionsconsisting of mappings from `x -> y` such that multiple `y` may exist for agiven `x`. We'll use a class of neural networks called"Mixture Density Networks".I'm going to use the new[multibackend Keras Core project](https://github.com/keras-team/keras-core) tobuild my Mixture Density networks.Great job to the Keras team on the project - it's awesome to be able to swapframeworks in one line of code.Some bad news: I use TensorFlow probability in this guide... so it doesn'tactually work with other backends.Anyways, let's start by installing dependencies and sorting out imports:<jupyter_code>!pip install -q --upgrade tensorflow-probability keras-core
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from keras_core import callbacks
import keras_core
import tensorflow as tf
from keras_core import layers
from keras_core import optimizers
from tensorflow_probability import distributions as tfd<jupyter_output><empty_output><jupyter_text>Next, lets generate a noisy spiral that we're going to attempt to approximate.I've defined a few functions below to do this:<jupyter_code>def normalize(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def create_noisy_spiral(n, jitter_std=0.2, revolutions=2):
angle = np.random.uniform(0, 2 * np.pi * revolutions, [n])
r = angle
x = r * np.cos(angle)
y = r * np.sin(angle)
result = np.stack([x, y], axis=1)
result = result + np.random.normal(scale=jitter_std, size=[n, 2])
result = 5 * normalize(result)
return result<jupyter_output><empty_output><jupyter_text>Next, lets invoke this function many times to construct a sample dataset:<jupyter_code>xy = create_noisy_spiral(10000)
x, y = xy[:, 0:1], xy[:, 1:]
plt.scatter(x, y)
plt.show()<jupyter_output><empty_output><jupyter_text>As you can see, there's multiple possible values for Y with respect to a givenX. Normal neural networks will simply learn the mean of these points withrespect to geometric space.We can quickly show this with a simple linear model:<jupyter_code>N_HIDDEN = 128
model = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(1),
]
)<jupyter_output><empty_output><jupyter_text>Let's use mean squared error as well as the adam optimizer.These tend to be reasonable prototyping choices:<jupyter_code>model.compile(optimizer="adam", loss="mse")<jupyter_output><empty_output><jupyter_text>We can fit this model quite easy<jupyter_code>model.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[callbacks.EarlyStopping(monitor="val_loss", patience=10)],
)<jupyter_output><empty_output><jupyter_text>And let's check out the result:<jupyter_code>y_pred = model.predict(x)<jupyter_output><empty_output><jupyter_text>As expected, the model learns the geometric mean of all points in `y` for agiven `x`.<jupyter_code>plt.scatter(x, y)
plt.scatter(x, y_pred)
plt.show()<jupyter_output><empty_output><jupyter_text>Mixture Density NetworksMixture Density networks can alleviate this problem.A Mixture density is a class of complicated densities expressible in terms of simpler densities.They are effectively the sum of a ton of probability distributions.Mixture Density networks learn to parameterize a mixture density distributionbased on a given training set.As a practitioner, all you need to know, is that Mixture Density Networks solvethe problem of multiple values of Y for a given X.I'm hoping to add a tool to your kit- but I'm not going to formally explain thederivation of Mixture Density networks in this guide.The most important thing to know is that a Mixture Density network learns toparameterize a mixture density distribution.This is done by computing a special loss with respect to both the provided`y_i` label as well as the predicted distribution for the corresponding `x_i`.This loss function operates by computing the probability that `y_i` would bedrawn from the predicted mixture distribution.Let's implement a Mixture density network.Below, a ton of helper functions are defined based on an old Keras library[`Keras Mixture Density Network Layer`](https://github.com/cpmpercussion/keras-mdn-layer).I've adapted the code for use with Keras core.Lets start writing a Mixture Density Network!First, we need a special activation function: ELU plus a tiny epsilon.This helps prevent ELU from outputting 0 which causes NaNs in Mixture DensityNetwork loss evaluation.<jupyter_code>def elu_plus_one_plus_epsilon(x):
return keras_core.activations.elu(x) + 1 + keras_core.backend.epsilon()<jupyter_output><empty_output><jupyter_text>Next, lets actually define a MixtureDensity layer that outputs all values neededto sample from the learned mixture distribution:<jupyter_code>class MixtureDensityOutput(layers.Layer):
def __init__(self, output_dimension, num_mixtures, **kwargs):
super().__init__(**kwargs)
self.output_dim = output_dimension
self.num_mix = num_mixtures
self.mdn_mus = layers.Dense(
self.num_mix * self.output_dim, name="mdn_mus"
) # mix*output vals, no activation
self.mdn_sigmas = layers.Dense(
self.num_mix * self.output_dim,
activation=elu_plus_one_plus_epsilon,
name="mdn_sigmas",
) # mix*output vals exp activation
self.mdn_pi = layers.Dense(self.num_mix, name="mdn_pi") # mix vals, logits
def build(self, input_shape):
self.mdn_mus.build(input_shape)
self.mdn_sigmas.build(input_shape)
self.mdn_pi.build(input_shape)
super().build(input_shape)
@property
def trainable_weights(self):
return (
self.mdn_mus.trainable_weights
+ self.mdn_sigmas.trainable_weights
+ self.mdn_pi.trainable_weights
)
@property
def non_trainable_weights(self):
return (
self.mdn_mus.non_trainable_weights
+ self.mdn_sigmas.non_trainable_weights
+ self.mdn_pi.non_trainable_weights
)
def call(self, x, mask=None):
return layers.concatenate(
[self.mdn_mus(x), self.mdn_sigmas(x), self.mdn_pi(x)], name="mdn_outputs"
)<jupyter_output><empty_output><jupyter_text>Lets construct an Mixture Density Network using our new layer:<jupyter_code>OUTPUT_DIMS = 1
N_MIXES = 20
mdn_network = keras_core.Sequential(
[
layers.Dense(N_HIDDEN, activation="relu"),
layers.Dense(N_HIDDEN, activation="relu"),
MixtureDensityOutput(OUTPUT_DIMS, N_MIXES),
]
)<jupyter_output><empty_output><jupyter_text>Next, let's implement a custom loss function to train the Mixture DensityNetwork layer based on the true values and our expected outputs:<jupyter_code>def get_mixture_loss_func(output_dim, num_mixes):
def mdn_loss_func(y_true, y_pred):
# Reshape inputs in case this is used in a TimeDistributed layer
y_pred = tf.reshape(
y_pred,
[-1, (2 * num_mixes * output_dim) + num_mixes],
name="reshape_ypreds",
)
y_true = tf.reshape(y_true, [-1, output_dim], name="reshape_ytrue")
# Split the inputs into parameters
out_mu, out_sigma, out_pi = tf.split(
y_pred,
num_or_size_splits=[
num_mixes * output_dim,
num_mixes * output_dim,
num_mixes,
],
axis=-1,
name="mdn_coef_split",
)
# Construct the mixture models
cat = tfd.Categorical(logits=out_pi)
component_splits = [output_dim] * num_mixes
mus = tf.split(out_mu, num_or_size_splits=component_splits, axis=1)
sigs = tf.split(out_sigma, num_or_size_splits=component_splits, axis=1)
coll = [
tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
for loc, scale in zip(mus, sigs)
]
mixture = tfd.Mixture(cat=cat, components=coll)
loss = mixture.log_prob(y_true)
loss = tf.negative(loss)
loss = tf.reduce_mean(loss)
return loss
return mdn_loss_func
mdn_network.compile(loss=get_mixture_loss_func(OUTPUT_DIMS, N_MIXES), optimizer="adam")<jupyter_output><empty_output><jupyter_text>Finally, we can call `model.fit()` like any other Keras model.<jupyter_code>mdn_network.fit(
x,
y,
epochs=300,
batch_size=128,
validation_split=0.15,
callbacks=[
callbacks.EarlyStopping(monitor="loss", patience=10, restore_best_weights=True),
callbacks.ReduceLROnPlateau(monitor="loss", patience=5),
],
)<jupyter_output><empty_output><jupyter_text>Let's make some predictions!<jupyter_code>y_pred_mixture = mdn_network.predict(x)
print(y_pred_mixture.shape)<jupyter_output><empty_output><jupyter_text>The MDN does not output a single value; instead it outputs values toparameterize a mixture distribution.To visualize these outputs, lets sample from the distribution.Note that sampling is a lossy process.If you want to preserve all information as part of a greater latentrepresentation (i.e. for downstream processing) I recommend you simply keep thedistribution parameters in place.<jupyter_code>def split_mixture_params(params, output_dim, num_mixes):
mus = params[: num_mixes * output_dim]
sigs = params[num_mixes * output_dim : 2 * num_mixes * output_dim]
pi_logits = params[-num_mixes:]
return mus, sigs, pi_logits
def softmax(w, t=1.0):
e = np.array(w) / t # adjust temperature
e -= e.max() # subtract max to protect from exploding exp values.
e = np.exp(e)
dist = e / np.sum(e)
return dist
def sample_from_categorical(dist):
r = np.random.rand(1) # uniform random number in [0,1]
accumulate = 0
for i in range(0, dist.size):
accumulate += dist[i]
if accumulate >= r:
return i
tf.get_logger().info("Error sampling categorical model.")
return -1
def sample_from_output(params, output_dim, num_mixes, temp=1.0, sigma_temp=1.0):
mus, sigs, pi_logits = split_mixture_params(params, output_dim, num_mixes)
pis = softmax(pi_logits, t=temp)
m = sample_from_categorical(pis)
# Alternative way to sample from categorical:
# m = np.random.choice(range(len(pis)), p=pis)
mus_vector = mus[m * output_dim : (m + 1) * output_dim]
sig_vector = sigs[m * output_dim : (m + 1) * output_dim]
scale_matrix = np.identity(output_dim) * sig_vector # scale matrix from diag
cov_matrix = np.matmul(scale_matrix, scale_matrix.T) # cov is scale squared.
cov_matrix = cov_matrix * sigma_temp # adjust for sigma temperature
sample = np.random.multivariate_normal(mus_vector, cov_matrix, 1)
return sample<jupyter_output><empty_output><jupyter_text>Next lets use our sampling function:<jupyter_code># Sample from the predicted distributions
y_samples = np.apply_along_axis(
sample_from_output, 1, y_pred_mixture, 1, N_MIXES, temp=1.0
)<jupyter_output><empty_output><jupyter_text>Finally, we can visualize our network outputs<jupyter_code>plt.scatter(x, y, alpha=0.05, color="blue", label="Ground Truth")
plt.scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
plt.show()<jupyter_output><empty_output><jupyter_text>Beautiful. Love to see it ConclusionsNeural Networks are universal function approximators - but they can onlyapproximate functions. Mixture Density networks can approximate arbitraryx->y mappings using some neat probability tricks.For more examples with `tensorflow_probability`[start here](https://www.tensorflow.org/probability/examples/Probabilistic_Layers_Regression).One more pretty graphic for the road:<jupyter_code>fig, axs = plt.subplots(1, 3)
fig.set_figheight(3)
fig.set_figwidth(12)
axs[0].set_title("Ground Truth")
axs[0].scatter(x, y, alpha=0.05, color="blue")
xlim = axs[0].get_xlim()
ylim = axs[0].get_ylim()
axs[1].set_title("Normal Model prediction")
axs[1].scatter(x, y_pred, alpha=0.05, color="red")
axs[1].set_xlim(xlim)
axs[1].set_ylim(ylim)
axs[2].scatter(
x,
y_samples[:, :, 0],
color="green",
alpha=0.05,
label="Mixture Density Network prediction",
)
axs[2].set_title("Mixture Density Network prediction")
axs[2].set_xlim(xlim)
axs[2].set_ylim(ylim)
plt.show()<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/approximating_non_function_mappings.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/approximating_non_function_mappings.ipynb",
"repo_id": "keras-io",
"token_count": 4979
} | 80 |
# Simple custom layer example: Antirectifier
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2016/01/06<br>
**Last modified:** 2023/11/20<br>
**Description:** Demonstration of custom layer creation.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/antirectifier.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/antirectifier.py)
---
## Introduction
This example shows how to create custom layers, using the Antirectifier layer
(originally proposed as a Keras example script in January 2016), an alternative
to ReLU. Instead of zeroing-out the negative part of the input, it splits the negative
and positive parts and returns the concatenation of the absolute value
of both. This avoids loss of information, at the cost of an increase in dimensionality.
To fix the dimensionality increase, we linearly combine the
features back to a space of the original size.
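To make the transformation concrete, here is a small NumPy-only sketch (independent of the Keras
layer implemented below; the toy input values are made up):
```python
import numpy as np
x = np.array([[1.0, -2.0, 3.0, -4.0]])
# Center the features, as the layer below does.
x = x - x.mean(axis=-1, keepdims=True)
pos = np.maximum(x, 0.0)  # positive part
neg = np.maximum(-x, 0.0)  # absolute value of the negative part
# Concatenation doubles the feature dimension from 4 to 8; the layer then
# projects it back to 4 with a learned kernel.
concatenated = np.concatenate([pos, neg], axis=-1)
print(concatenated.shape)  # (1, 8)
```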
---
## Setup
```python
import keras
from keras import layers
from keras import ops
```
---
## The Antirectifier layer
To implement a custom layer:
- Create the state variables via `add_weight()` in `__init__` or `build()`.
Similarly, you can also create sublayers.
- Implement the `call()` method, taking the layer's input tensor(s) and
returning the output tensor(s).
- Optionally, you can also enable serialization by implementing `get_config()`,
which returns a configuration dictionary.
See also the guide
[Making new layers and models via subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/).
```python
class Antirectifier(layers.Layer):
def __init__(self, initializer="he_normal", **kwargs):
super().__init__(**kwargs)
self.initializer = keras.initializers.get(initializer)
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer=self.initializer,
name="kernel",
trainable=True,
)
def call(self, inputs):
inputs -= ops.mean(inputs, axis=-1, keepdims=True)
pos = ops.relu(inputs)
neg = ops.relu(-inputs)
concatenated = ops.concatenate([pos, neg], axis=-1)
mixed = ops.matmul(concatenated, self.kernel)
return mixed
def get_config(self):
# Implement get_config to enable serialization. This is optional.
base_config = super().get_config()
config = {"initializer": keras.initializers.serialize(self.initializer)}
return dict(list(base_config.items()) + list(config.items()))
```
---
## Let's test-drive it on MNIST
```python
# Training parameters
batch_size = 128
num_classes = 10
epochs = 20
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Build the model
model = keras.Sequential(
[
keras.Input(shape=(784,)),
layers.Dense(256),
Antirectifier(),
layers.Dense(256),
Antirectifier(),
layers.Dropout(0.5),
layers.Dense(10),
]
)
# Compile the model
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15)
# Test the model
model.evaluate(x_test, y_test)
```
<div class="k-default-codeblock">
```
60000 train samples
10000 test samples
Epoch 1/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - loss: 0.6226 - sparse_categorical_accuracy: 0.8146 - val_loss: 0.4256 - val_sparse_categorical_accuracy: 0.8808
Epoch 2/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.1887 - sparse_categorical_accuracy: 0.9455 - val_loss: 0.1556 - val_sparse_categorical_accuracy: 0.9588
Epoch 3/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.1406 - sparse_categorical_accuracy: 0.9608 - val_loss: 0.1531 - val_sparse_categorical_accuracy: 0.9611
Epoch 4/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.1084 - sparse_categorical_accuracy: 0.9691 - val_loss: 0.1178 - val_sparse_categorical_accuracy: 0.9731
Epoch 5/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0995 - sparse_categorical_accuracy: 0.9738 - val_loss: 0.2207 - val_sparse_categorical_accuracy: 0.9526
Epoch 6/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0831 - sparse_categorical_accuracy: 0.9769 - val_loss: 0.2092 - val_sparse_categorical_accuracy: 0.9533
Epoch 7/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0736 - sparse_categorical_accuracy: 0.9807 - val_loss: 0.1129 - val_sparse_categorical_accuracy: 0.9749
Epoch 8/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0653 - sparse_categorical_accuracy: 0.9827 - val_loss: 0.1000 - val_sparse_categorical_accuracy: 0.9791
Epoch 9/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0590 - sparse_categorical_accuracy: 0.9833 - val_loss: 0.1320 - val_sparse_categorical_accuracy: 0.9750
Epoch 10/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0587 - sparse_categorical_accuracy: 0.9854 - val_loss: 0.1439 - val_sparse_categorical_accuracy: 0.9747
Epoch 11/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0622 - sparse_categorical_accuracy: 0.9853 - val_loss: 0.1473 - val_sparse_categorical_accuracy: 0.9753
Epoch 12/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0554 - sparse_categorical_accuracy: 0.9869 - val_loss: 0.1529 - val_sparse_categorical_accuracy: 0.9757
Epoch 13/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0507 - sparse_categorical_accuracy: 0.9884 - val_loss: 0.1452 - val_sparse_categorical_accuracy: 0.9783
Epoch 14/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0468 - sparse_categorical_accuracy: 0.9889 - val_loss: 0.1435 - val_sparse_categorical_accuracy: 0.9796
Epoch 15/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0478 - sparse_categorical_accuracy: 0.9892 - val_loss: 0.1580 - val_sparse_categorical_accuracy: 0.9770
Epoch 16/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0492 - sparse_categorical_accuracy: 0.9888 - val_loss: 0.1957 - val_sparse_categorical_accuracy: 0.9753
Epoch 17/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0478 - sparse_categorical_accuracy: 0.9896 - val_loss: 0.1865 - val_sparse_categorical_accuracy: 0.9779
Epoch 18/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0478 - sparse_categorical_accuracy: 0.9893 - val_loss: 0.2107 - val_sparse_categorical_accuracy: 0.9747
Epoch 19/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0494 - sparse_categorical_accuracy: 0.9894 - val_loss: 0.2306 - val_sparse_categorical_accuracy: 0.9734
Epoch 20/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.0473 - sparse_categorical_accuracy: 0.9910 - val_loss: 0.2201 - val_sparse_categorical_accuracy: 0.9731
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 802us/step - loss: 0.2086 - sparse_categorical_accuracy: 0.9710
[0.19070196151733398, 0.9740999937057495]
```
</div> | keras-io/examples/keras_recipes/md/antirectifier.md/0 | {
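Since the layer implements `get_config()`, a model that contains `Antirectifier`
can be saved and reloaded. A usage sketch (not executed above; the file name is
arbitrary):
```python
model.save("antirectifier.keras")
reloaded = keras.saving.load_model(
    "antirectifier.keras", custom_objects={"Antirectifier": Antirectifier}
)
```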
"file_path": "keras-io/examples/keras_recipes/md/antirectifier.md",
"repo_id": "keras-io",
"token_count": 3278
} | 81 |
"""
Title: Text Classification using FNet
Author: [Abheesht Sharma](https://github.com/abheesht17/)
Date created: 2022/06/01
Last modified: 2022/12/21
Description: Text Classification on the IMDb Dataset using `keras_nlp.layers.FNetEncoder` layer.
Accelerator: GPU
"""
"""
## Introduction
In this example, we will demonstrate the ability of FNet to achieve comparable
results with a vanilla Transformer model on the text classification task.
We will be using the IMDb dataset, which is a
collection of movie reviews labelled either positive or negative (sentiment
analysis).
To build the tokenizer, model, etc., we will use components from
[KerasNLP](https://github.com/keras-team/keras-nlp). KerasNLP makes life easier
for people who want to build NLP pipelines! :)
### Model
Transformer-based language models (LMs) such as BERT, RoBERTa, XLNet, etc. have
demonstrated the effectiveness of the self-attention mechanism for computing
rich embeddings for input text. However, the self-attention mechanism is an
expensive operation, with a time complexity of `O(n^2)`, where `n` is the number
of tokens in the input. Hence, there has been an effort to reduce the time
complexity of the self-attention mechanism and improve performance without
sacrificing the quality of results.
In 2021, a paper titled
[FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824)
replaced the self-attention layer in BERT with a simple Fourier Transform layer
for "token mixing". This resulted in comparable accuracy and a speed-up during
training. In particular, a couple of points from the paper stand out:
* The authors claim that FNet is 80% faster than BERT on GPUs and 70% faster on
TPUs. The reason for this speed-up is two-fold: a) the Fourier Transform layer
is unparametrized (it has no trainable parameters), and b) the authors use the Fast
Fourier Transform (FFT); this reduces the time complexity from `O(n^2)`
(in the case of self-attention) to `O(n log n)`.
* FNet manages to achieve 92-97% of the accuracy of BERT on the GLUE benchmark.
"""
"""
## Setup
Before we start with the implementation, let's import all the necessary packages.
"""
"""shell
pip install -q --upgrade keras-nlp
pip install -q --upgrade keras # Upgrade to Keras 3.
"""
import keras_nlp
import keras
import tensorflow as tf
import os
keras.utils.set_random_seed(42)
"""
Let's also define our hyperparameters.
"""
BATCH_SIZE = 64
EPOCHS = 3
MAX_SEQUENCE_LENGTH = 512
VOCAB_SIZE = 15000
EMBED_DIM = 128
INTERMEDIATE_DIM = 512
"""
## Loading the dataset
First, let's download the IMDB dataset and extract it.
"""
"""shell
wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
tar -xzf aclImdb_v1.tar.gz
"""
"""
Samples are present in the form of text files. Let's inspect the structure of
the directory.
"""
print(os.listdir("./aclImdb"))
print(os.listdir("./aclImdb/train"))
print(os.listdir("./aclImdb/test"))
"""
The directory contains two sub-directories: `train` and `test`. Each subdirectory
in turn contains two folders: `pos` and `neg` for positive and negative reviews,
respectively. Before we load the dataset, let's delete the `./aclImdb/train/unsup`
folder since it has unlabelled samples.
"""
"""shell
rm -rf aclImdb/train/unsup
"""
"""
We'll use the `keras.utils.text_dataset_from_directory` utility to generate
our labelled `tf.data.Dataset` dataset from text files.
"""
train_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train",
batch_size=BATCH_SIZE,
validation_split=0.2,
subset="training",
seed=42,
)
val_ds = keras.utils.text_dataset_from_directory(
"aclImdb/train",
batch_size=BATCH_SIZE,
validation_split=0.2,
subset="validation",
seed=42,
)
test_ds = keras.utils.text_dataset_from_directory("aclImdb/test", batch_size=BATCH_SIZE)
"""
We will now convert the text to lowercase.
"""
train_ds = train_ds.map(lambda x, y: (tf.strings.lower(x), y))
val_ds = val_ds.map(lambda x, y: (tf.strings.lower(x), y))
test_ds = test_ds.map(lambda x, y: (tf.strings.lower(x), y))
"""
Let's print a few samples.
"""
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(text_batch.numpy()[i])
print(label_batch.numpy()[i])
"""
### Tokenizing the data
We'll be using the `keras_nlp.tokenizers.WordPieceTokenizer` layer to tokenize
the text. `keras_nlp.tokenizers.WordPieceTokenizer` takes a WordPiece vocabulary
and has functions for tokenizing the text, and detokenizing sequences of tokens.
Before we define the tokenizer, we first need to train it on the dataset
we have. The WordPiece tokenization algorithm is a subword tokenization algorithm;
training it on a corpus gives us a vocabulary of subwords. A subword tokenizer
is a compromise between word tokenizers (word tokenizers need very large
vocabularies for good coverage of input words), and character tokenizers
(characters don't really encode meaning like words do). Luckily, KerasNLP
makes it very simple to train WordPiece on a corpus with the
`keras_nlp.tokenizers.compute_word_piece_vocabulary` utility.
Note: The official implementation of FNet uses the SentencePiece Tokenizer.
"""
def train_word_piece(ds, vocab_size, reserved_tokens):
word_piece_ds = ds.unbatch().map(lambda x, y: x)
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
word_piece_ds.batch(1000).prefetch(2),
vocabulary_size=vocab_size,
reserved_tokens=reserved_tokens,
)
return vocab
"""
Every vocabulary has a few special, reserved tokens. We have two such tokens:
- `"[PAD]"` - Padding token. Padding tokens are appended to the input sequence
when its length is shorter than the maximum sequence length.
- `"[UNK]"` - Unknown token.
"""
reserved_tokens = ["[PAD]", "[UNK]"]
train_sentences = [element[0] for element in train_ds]
vocab = train_word_piece(train_ds, VOCAB_SIZE, reserved_tokens)
"""
Let's see some tokens!
"""
print("Tokens: ", vocab[100:110])
"""
Now, let's define the tokenizer. We will configure the tokenizer with the
vocabulary trained above. We will also define a maximum sequence length: sequences
shorter than this are padded to that length, and longer sequences are truncated.
"""
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab,
lowercase=False,
sequence_length=MAX_SEQUENCE_LENGTH,
)
"""
Let's try and tokenize a sample from our dataset! To verify whether the text has
been tokenized correctly, we can also detokenize the list of tokens back to the
original text.
"""
input_sentence_ex = train_ds.take(1).get_single_element()[0][0]
input_tokens_ex = tokenizer(input_sentence_ex)
print("Sentence: ", input_sentence_ex)
print("Tokens: ", input_tokens_ex)
print("Recovered text after detokenizing: ", tokenizer.detokenize(input_tokens_ex))
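"""
As a small illustrative aside, the subword behaviour is easy to see on a rare or
made-up word: instead of mapping the whole word to `[UNK]`, the tokenizer breaks it
into pieces from the vocabulary. The exact split depends on the trained vocabulary,
so the printed pieces are only indicative.
"""
rare_word_tokens = tokenizer(tf.constant(["marvelousness"]))[0]
print([vocab[i] for i in rare_word_tokens.numpy() if vocab[i] != "[PAD]"])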
"""
## Formatting the dataset
Next, we'll format our datasets in the form that will be fed to the models. We
need to tokenize the text.
"""
def format_dataset(sentence, label):
sentence = tokenizer(sentence)
return ({"input_ids": sentence}, label)
def make_dataset(dataset):
dataset = dataset.map(format_dataset, num_parallel_calls=tf.data.AUTOTUNE)
return dataset.shuffle(512).prefetch(16).cache()
train_ds = make_dataset(train_ds)
val_ds = make_dataset(val_ds)
test_ds = make_dataset(test_ds)
"""
## Building the model
Now, let's move on to the exciting part - defining our model!
We first need an embedding layer, i.e., a layer that maps every token in the input
sequence to a vector. This embedding layer can be initialised randomly. We also
need a positional embedding layer which encodes the word order in the sequence.
The convention is to add, i.e., sum, these two embeddings. KerasNLP has a
`keras_nlp.layers.TokenAndPositionEmbedding` layer which does all of the above
steps for us.
Our FNet classification model consists of three `keras_nlp.layers.FNetEncoder`
layers with a `keras.layers.Dense` layer on top.
Note: For FNet, masking the padding tokens has a minimal effect on results. In the
official implementation, the padding tokens are not masked.
"""
input_ids = keras.Input(shape=(None,), dtype="int64", name="input_ids")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
mask_zero=True,
)(input_ids)
x = keras_nlp.layers.FNetEncoder(intermediate_dim=INTERMEDIATE_DIM)(inputs=x)
x = keras_nlp.layers.FNetEncoder(intermediate_dim=INTERMEDIATE_DIM)(inputs=x)
x = keras_nlp.layers.FNetEncoder(intermediate_dim=INTERMEDIATE_DIM)(inputs=x)
x = keras.layers.GlobalAveragePooling1D()(x)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(1, activation="sigmoid")(x)
fnet_classifier = keras.Model(input_ids, outputs, name="fnet_classifier")
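"""
As an illustrative aside, the `keras_nlp.layers.FNetEncoder` blocks used above hide
how little code the core "token mixing" step needs: apply a 2D Discrete Fourier
Transform over the sequence and hidden dimensions and keep only the real part. The
layer below is our own sketch with no trainable parameters; it is not a KerasNLP API
and is not used in the model.
"""
class NaiveFourierMix(keras.layers.Layer):
    def call(self, inputs):
        # Cast to complex, apply an FFT over the last two axes (sequence and
        # hidden dimensions), and keep only the real part, as in the FNet paper.
        x = tf.cast(inputs, "complex64")
        return tf.math.real(tf.signal.fft2d(x))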
"""
## Training our model
We'll use accuracy to monitor training progress on the validation data. Let's
train our model for 3 epochs.
"""
fnet_classifier.summary()
fnet_classifier.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss="binary_crossentropy",
metrics=["accuracy"],
)
fnet_classifier.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)
"""
We obtain a train accuracy of around 92% and a validation accuracy of around
85%. Moreover, for 3 epochs, it takes around 86 seconds to train the model
(on Colab with a 16 GB Tesla T4 GPU).
Let's calculate the test accuracy.
"""
fnet_classifier.evaluate(test_ds, batch_size=BATCH_SIZE)
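"""
As a quick usage sketch (illustrative, not part of the benchmark), the trained FNet
classifier can score a single raw string once it goes through the same lowercasing
and tokenization steps used during training. The sample sentence is arbitrary.
"""
sample_text = tf.strings.lower(tf.constant(["this movie was a wonderful surprise"]))
sample_tokens = tokenizer(sample_text)
probability = fnet_classifier.predict({"input_ids": sample_tokens}, verbose=0)
print(probability)  # A sigmoid output close to 1 suggests a positive review.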
"""
## Comparison with Transformer model
Let's compare our FNet Classifier model with a Transformer Classifier model. We
keep all the parameters/hyperparameters the same. For example, we use three
`TransformerEncoder` layers.
We set the number of heads to 2.
"""
NUM_HEADS = 2
input_ids = keras.Input(shape=(None,), dtype="int64", name="input_ids")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
mask_zero=True,
)(input_ids)
x = keras_nlp.layers.TransformerEncoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(inputs=x)
x = keras_nlp.layers.TransformerEncoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(inputs=x)
x = keras_nlp.layers.TransformerEncoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(inputs=x)
x = keras.layers.GlobalAveragePooling1D()(x)
x = keras.layers.Dropout(0.1)(x)
outputs = keras.layers.Dense(1, activation="sigmoid")(x)
transformer_classifier = keras.Model(input_ids, outputs, name="transformer_classifier")
transformer_classifier.summary()
transformer_classifier.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.001),
loss="binary_crossentropy",
metrics=["accuracy"],
)
transformer_classifier.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)
"""
We obtain a train accuracy of around 94% and a validation accuracy of around
86.5%. It takes around 146 seconds to train the model (on Colab with a 16 GB Tesla
T4 GPU).
Let's calculate the test accuracy.
"""
transformer_classifier.evaluate(test_ds, batch_size=BATCH_SIZE)
"""
Let's make a table and compare the two models. We can see that FNet
significantly speeds up our run time (1.7x), with only a small sacrifice in
overall accuracy (drop of 0.75%).
| | **FNet Classifier** | **Transformer Classifier** |
|:-----------------------:|:-------------------:|:--------------------------:|
| **Training Time** | 86 seconds | 146 seconds |
| **Train Accuracy** | 92.34% | 93.85% |
| **Validation Accuracy** | 85.21% | 86.42% |
| **Test Accuracy** | 83.94% | 84.69% |
| **#Params** | 2,321,921 | 2,520,065 |
"""
| keras-io/examples/nlp/fnet_classification_with_keras_nlp.py/0 | {
"file_path": "keras-io/examples/nlp/fnet_classification_with_keras_nlp.py",
"repo_id": "keras-io",
"token_count": 4144
} | 82 |
<jupyter_start><jupyter_text>Abstractive Text Summarization with BART**Author:** [Abheesht Sharma](https://github.com/abheesht17/)**Date created:** 2023/07/08**Last modified:** 2023/07/08**Description:** Use KerasNLP to fine-tune BART on the abstractive summarization task. IntroductionIn the era of information overload, it has become crucial to extract the cruxof a long document or a conversation and express it in a few sentences. Owingto the fact that summarization has widespread applications in different domains,it has become a key, well-studied NLP task in recent years.[Bidirectional Autoregressive Transformer (BART)](https://arxiv.org/abs/1910.13461)is a Transformer-based encoder-decoder model, often used forsequence-to-sequence tasks like summarization and neural machine translation.BART is pre-trained in a self-supervised fashion on a large text corpus. Duringpre-training, the text is corrupted and BART is trained to reconstruct theoriginal text (hence called a "denoising autoencoder"). Some pre-training tasksinclude token masking, token deletion, sentence permutation (shuffle sentencesand train BART to fix the order), etc.In this example, we will demonstrate how to fine-tune BART on the abstractivesummarization task (on conversations!) using KerasNLP, and generate summariesusing the fine-tuned model. SetupBefore we start implementing the pipeline, let's install and import all thelibraries we need. We'll be using the KerasNLP library. We will also need acouple of utility libraries.<jupyter_code>!pip install git+https://github.com/keras-team/keras-nlp.git py7zr -q<jupyter_output><empty_output><jupyter_text>This examples uses [Keras Core](https://keras.io/keras_core/) to work in any of`"tensorflow"`, `"jax"` or `"torch"`. Support for Keras Core is baked intoKerasNLP, simply change the `"KERAS_BACKEND"` environment variable to selectthe backend of your choice. We select the JAX backend below.<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "jax"<jupyter_output><empty_output><jupyter_text>Import all necessary libraries.<jupyter_code>import py7zr
import time
import keras_nlp
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_core as keras<jupyter_output><empty_output><jupyter_text>Let's also define our hyperparameters.<jupyter_code>BATCH_SIZE = 8
NUM_BATCHES = 600
EPOCHS = 1 # Can be set to a higher value for better results
MAX_ENCODER_SEQUENCE_LENGTH = 512
MAX_DECODER_SEQUENCE_LENGTH = 128
MAX_GENERATION_LENGTH = 40<jupyter_output><empty_output><jupyter_text>DatasetLet's load the [SAMSum dataset](https://arxiv.org/abs/1911.12237). This datasetcontains around 15,000 pairs of conversations/dialogues and summaries.<jupyter_code># Download the dataset.
filename = keras.utils.get_file(
"corpus.7z",
origin="https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z",
)
# Extract the `.7z` file.
with py7zr.SevenZipFile(filename, mode="r") as z:
z.extractall(path="/root/tensorflow_datasets/downloads/manual")
# Load data using TFDS.
samsum_ds = tfds.load("samsum", split="train", as_supervised=True)<jupyter_output><empty_output><jupyter_text>The dataset has two fields: `dialogue` and `summary`. Let's see a sample.<jupyter_code>for dialogue, summary in samsum_ds:
print(dialogue.numpy())
print(summary.numpy())
break<jupyter_output><empty_output><jupyter_text>We'll now batch the dataset and retain only a subset of the dataset for thepurpose of this example. The dialogue is fed to the encoder, and thecorresponding summary serves as input to the decoder. We will, therefore, changethe format of the dataset to a dictionary having two keys: `"encoder_text"` and`"decoder_text"`.This is how `keras_nlp.models.BartSeq2SeqLMPreprocessor`expects the input format to be.<jupyter_code>train_ds = (
samsum_ds.map(
lambda dialogue, summary: {"encoder_text": dialogue, "decoder_text": summary}
)
.batch(BATCH_SIZE)
.cache()
)
train_ds = train_ds.take(NUM_BATCHES)<jupyter_output><empty_output><jupyter_text>Fine-tune BARTLet's load the model and preprocessor first. We use sequence lengths of 512and 128 for the encoder and decoder, respectively, instead of 1024 (which is thedefault sequence length). This will allow us to run this example quicklyon Colab.If you observe carefully, the preprocessor is attached to the model. What thismeans is that we don't have to worry about preprocessing the text inputs;everything will be done internally. The preprocessor tokenizes the encoder textand the decoder text, adds special tokens and pads them. To generate labelsfor auto-regressive training, the preprocessor shifts the decoder text oneposition to the right. This is done because at every timestep, the model istrained to predict the next token.<jupyter_code>preprocessor = keras_nlp.models.BartSeq2SeqLMPreprocessor.from_preset(
"bart_base_en",
encoder_sequence_length=MAX_ENCODER_SEQUENCE_LENGTH,
decoder_sequence_length=MAX_DECODER_SEQUENCE_LENGTH,
)
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
"bart_base_en", preprocessor=preprocessor
)
bart_lm.summary()<jupyter_output><empty_output><jupyter_text>Define the optimizer and loss. We use the Adam optimizer with a linearlydecaying learning rate. Compile the model.<jupyter_code>optimizer = keras.optimizers.AdamW(
learning_rate=5e-5,
weight_decay=0.01,
epsilon=1e-6,
global_clipnorm=1.0, # Gradient clipping.
)
# Exclude layernorm and bias terms from weight decay.
optimizer.exclude_from_weight_decay(var_names=["bias"])
optimizer.exclude_from_weight_decay(var_names=["gamma"])
optimizer.exclude_from_weight_decay(var_names=["beta"])
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
bart_lm.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)<jupyter_output><empty_output><jupyter_text>Let's train the model!<jupyter_code>bart_lm.fit(train_ds, epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>Generate summaries and evaluate them!Now that the model has been trained, let's get to the fun part - actuallygenerating summaries! Let's pick the first 100 samples from the validation setand generate summaries for them. We will use the default decoding strategy, i.e.,greedy search.Generation in KerasNLP is highly optimized. It is backed by the power of XLA.Secondly, key/value tensors in the self-attention layer and cross-attention layerin the decoder are cached to avoid recomputation at every timestep.<jupyter_code>def generate_text(model, input_text, max_length=200, print_time_taken=False):
start = time.time()
output = model.generate(input_text, max_length=max_length)
end = time.time()
print(f"Total Time Elapsed: {end - start:.2f}s")
return output
# Load the dataset.
val_ds = tfds.load("samsum", split="validation", as_supervised=True)
val_ds = val_ds.take(100)
dialogues = []
ground_truth_summaries = []
for dialogue, summary in val_ds:
dialogues.append(dialogue.numpy())
ground_truth_summaries.append(summary.numpy())
# Let's make a dummy call - the first call to XLA generally takes a bit longer.
_ = generate_text(bart_lm, "sample text", max_length=MAX_GENERATION_LENGTH)
# Generate summaries.
generated_summaries = generate_text(
bart_lm,
val_ds.map(lambda dialogue, _: dialogue).batch(8),
max_length=MAX_GENERATION_LENGTH,
print_time_taken=True,
)<jupyter_output><empty_output><jupyter_text>Let's see some of the summaries.<jupyter_code>for dialogue, generated_summary, ground_truth_summary in zip(
dialogues[:5], generated_summaries[:5], ground_truth_summaries[:5]
):
print("Dialogue:", dialogue)
print("Generated Summary:", generated_summary)
print("Ground Truth Summary:", ground_truth_summary)
print("=============================")<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/abstractive_summarization_with_bart.ipynb/0 | {
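<jupyter_text>To put a number on summary quality, a minimal evaluation sketch is shown below. It is not run as part of this example and assumes the `rouge_score` package is installed, since `keras_nlp.metrics.RougeL` relies on it.<jupyter_code># Illustrative evaluation sketch (assumes `generated_summaries` and
# `ground_truth_summaries` from above).
rouge_l = keras_nlp.metrics.RougeL()
rouge_l.update_state(
    [s.decode("utf-8") for s in ground_truth_summaries], generated_summaries
)
print(rouge_l.result())<jupyter_output><empty_output>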
"file_path": "keras-io/examples/nlp/ipynb/abstractive_summarization_with_bart.ipynb",
"repo_id": "keras-io",
"token_count": 2582
} | 83 |
<jupyter_start><jupyter_text>Using pre-trained word embeddings**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2020/05/05**Last modified:** 2020/05/05**Description:** Text classification on the Newsgroup20 dataset using pre-trained GloVe word embeddings. Setup<jupyter_code>import os
# Only the TensorFlow backend supports string inputs.
os.environ["KERAS_BACKEND"] = "tensorflow"
import pathlib
import numpy as np
import tensorflow.data as tf_data
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>IntroductionIn this example, we show how to train a text classification model that uses pre-trainedword embeddings.We'll work with the Newsgroup20 dataset, a set of 20,000 message board messagesbelonging to 20 different topic categories.For the pre-trained word embeddings, we'll use[GloVe embeddings](http://nlp.stanford.edu/projects/glove/). Download the Newsgroup20 data<jupyter_code>data_path = keras.utils.get_file(
"news20.tar.gz",
"http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.tar.gz",
untar=True,
)<jupyter_output><empty_output><jupyter_text>Let's take a look at the data<jupyter_code>data_dir = pathlib.Path(data_path).parent / "20_newsgroup"
dirnames = os.listdir(data_dir)
print("Number of directories:", len(dirnames))
print("Directory names:", dirnames)
fnames = os.listdir(data_dir / "comp.graphics")
print("Number of files in comp.graphics:", len(fnames))
print("Some example filenames:", fnames[:5])<jupyter_output><empty_output><jupyter_text>Here's an example of what one file contains:<jupyter_code>print(open(data_dir / "comp.graphics" / "38987").read())<jupyter_output><empty_output><jupyter_text>As you can see, there are header lines that are leaking the file's category, either explicitly (the first line is literally the category name), or implicitly, e.g. via the `Organization` field. Let's get rid of the headers:<jupyter_code>samples = []
labels = []
class_names = []
class_index = 0
for dirname in sorted(os.listdir(data_dir)):
class_names.append(dirname)
dirpath = data_dir / dirname
fnames = os.listdir(dirpath)
print("Processing %s, %d files found" % (dirname, len(fnames)))
for fname in fnames:
fpath = dirpath / fname
f = open(fpath, encoding="latin-1")
content = f.read()
lines = content.split("\n")
lines = lines[10:]
content = "\n".join(lines)
samples.append(content)
labels.append(class_index)
class_index += 1
print("Classes:", class_names)
print("Number of samples:", len(samples))<jupyter_output><empty_output><jupyter_text>There's actually one category that doesn't have the expected number of files, but thedifference is small enough that the problem remains a balanced classification problem. Shuffle and split the data into training & validation sets<jupyter_code># Shuffle the data
seed = 1337
rng = np.random.RandomState(seed)
rng.shuffle(samples)
rng = np.random.RandomState(seed)
rng.shuffle(labels)
# Extract a training & validation split
validation_split = 0.2
num_validation_samples = int(validation_split * len(samples))
train_samples = samples[:-num_validation_samples]
val_samples = samples[-num_validation_samples:]
train_labels = labels[:-num_validation_samples]
val_labels = labels[-num_validation_samples:]<jupyter_output><empty_output><jupyter_text>Create a vocabulary indexLet's use the `TextVectorization` to index the vocabulary found in the dataset.Later, we'll use the same layer instance to vectorize the samples.Our layer will only consider the top 20,000 words, and will truncate or pad sequences tobe actually 200 tokens long.<jupyter_code>vectorizer = layers.TextVectorization(max_tokens=20000, output_sequence_length=200)
text_ds = tf_data.Dataset.from_tensor_slices(train_samples).batch(128)
vectorizer.adapt(text_ds)<jupyter_output><empty_output><jupyter_text>You can retrieve the computed vocabulary used via `vectorizer.get_vocabulary()`. Let'sprint the top 5 words:<jupyter_code>vectorizer.get_vocabulary()[:5]<jupyter_output><empty_output><jupyter_text>Let's vectorize a test sentence:<jupyter_code>output = vectorizer([["the cat sat on the mat"]])
output.numpy()[0, :6]<jupyter_output><empty_output><jupyter_text>As you can see, "the" gets represented as "2". Why not 0, given that "the" was the firstword in the vocabulary? That's because index 0 is reserved for padding and index 1 isreserved for "out of vocabulary" tokens.Here's a dict mapping words to their indices:<jupyter_code>voc = vectorizer.get_vocabulary()
word_index = dict(zip(voc, range(len(voc))))<jupyter_output><empty_output><jupyter_text>As you can see, we obtain the same encoding as above for our test sentence:<jupyter_code>test = ["the", "cat", "sat", "on", "the", "mat"]
[word_index[w] for w in test]<jupyter_output><empty_output><jupyter_text>Load pre-trained word embeddings Let's download pre-trained GloVe embeddings (an 822MB zip file). You'll need to run the following commands:<jupyter_code>!wget https://downloads.cs.stanford.edu/nlp/data/glove.6B.zip
!unzip -q glove.6B.zip<jupyter_output><empty_output><jupyter_text>The archive contains text-encoded vectors of various sizes: 50-dimensional,100-dimensional, 200-dimensional, 300-dimensional. We'll use the 100D ones.Let's make a dict mapping words (strings) to their NumPy vector representation:<jupyter_code>path_to_glove_file = "glove.6B.100d.txt"
embeddings_index = {}
with open(path_to_glove_file) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))<jupyter_output><empty_output><jupyter_text>Now, let's prepare a corresponding embedding matrix that we can use in a Keras`Embedding` layer. It's a simple NumPy matrix where entry at index `i` is the pre-trainedvector for the word of index `i` in our `vectorizer`'s vocabulary.<jupyter_code>num_tokens = len(voc) + 2
embedding_dim = 100
hits = 0
misses = 0
# Prepare embedding matrix
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
# This includes the representation for "padding" and "OOV"
embedding_matrix[i] = embedding_vector
hits += 1
else:
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))<jupyter_output><empty_output><jupyter_text>Next, we load the pre-trained word embeddings matrix into an `Embedding` layer.Note that we set `trainable=False` so as to keep the embeddings fixed (we don't want toupdate them during training).<jupyter_code>from keras.layers import Embedding
embedding_layer = Embedding(
num_tokens,
embedding_dim,
trainable=False,
)
embedding_layer.build((1,))
embedding_layer.set_weights([embedding_matrix])<jupyter_output><empty_output><jupyter_text>Build the modelA simple 1D convnet with global max pooling and a classifier at the end.<jupyter_code>int_sequences_input = keras.Input(shape=(None,), dtype="int32")
embedded_sequences = embedding_layer(int_sequences_input)
x = layers.Conv1D(128, 5, activation="relu")(embedded_sequences)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(128, 5, activation="relu")(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dropout(0.5)(x)
preds = layers.Dense(len(class_names), activation="softmax")(x)
model = keras.Model(int_sequences_input, preds)
model.summary()<jupyter_output><empty_output><jupyter_text>Train the modelFirst, convert our list-of-strings data to NumPy arrays of integer indices. The arraysare right-padded.<jupyter_code>x_train = vectorizer(np.array([[s] for s in train_samples])).numpy()
x_val = vectorizer(np.array([[s] for s in val_samples])).numpy()
y_train = np.array(train_labels)
y_val = np.array(val_labels)<jupyter_output><empty_output><jupyter_text>We use categorical crossentropy as our loss since we're doing softmax classification.Moreover, we use `sparse_categorical_crossentropy` since our labels are integers.<jupyter_code>model.compile(
loss="sparse_categorical_crossentropy", optimizer="rmsprop", metrics=["acc"]
)
model.fit(x_train, y_train, batch_size=128, epochs=20, validation_data=(x_val, y_val))<jupyter_output><empty_output><jupyter_text>Export an end-to-end modelNow, we may want to export a `Model` object that takes as input a string of arbitrarylength, rather than a sequence of indices. It would make the model much more portable,since you wouldn't have to worry about the input preprocessing pipeline.Our `vectorizer` is actually a Keras layer, so it's simple:<jupyter_code>string_input = keras.Input(shape=(1,), dtype="string")
x = vectorizer(string_input)
preds = model(x)
end_to_end_model = keras.Model(string_input, preds)
probabilities = end_to_end_model(
keras.ops.convert_to_tensor(
[["this message is about computer graphics and 3D modeling"]]
)
)
print(class_names[np.argmax(probabilities[0])])<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/pretrained_word_embeddings.ipynb/0 | {
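<jupyter_text>As an aside (an illustrative sketch, not part of the original example), the raw GloVe vectors in `embeddings_index` can be compared with cosine similarity, a quick sanity check that the embeddings encode word relatedness.<jupyter_code>def cosine_similarity(a, b):
    # Cosine similarity between two 1D NumPy vectors.
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(cosine_similarity(embeddings_index["computer"], embeddings_index["keyboard"]))
print(cosine_similarity(embeddings_index["computer"], embeddings_index["banana"]))<jupyter_output><empty_output>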
"file_path": "keras-io/examples/nlp/ipynb/pretrained_word_embeddings.ipynb",
"repo_id": "keras-io",
"token_count": 3247
} | 84 |
# Sequence to sequence learning for performing number addition
**Author:** [Smerity](https://twitter.com/Smerity) and others<br>
**Date created:** 2015/08/17<br>
**Last modified:** 2024/02/13<br>
**Description:** A model that learns to add strings of numbers, e.g. "535+61" -> "596".
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/addition_rnn.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/addition_rnn.py)
---
## Introduction
In this example, we train a model to learn to add two numbers, provided as strings.
**Example:**
- Input: "535+61"
- Output: "596"
Input may optionally be reversed, which was shown to increase performance in many tasks
in: [Learning to Execute](http://arxiv.org/abs/1410.4615) and
[Sequence to Sequence Learning with Neural Networks](http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf).
Theoretically, sequence order inversion introduces shorter term dependencies between
source and target for this problem.
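For example, reversing a padded query simply flips the character order; the same
operation appears in the data generation code below:
```python
# Illustrative only: what REVERSE=True does to a padded query string.
query = "12+345 "   # padded to MAXLEN = 7 characters
print(query[::-1])  # ' 543+21'
```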
**Results:**
For two digits (reversed):
+ One layer LSTM (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
Three digits (reversed):
+ One layer LSTM (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
Four digits (reversed):
+ One layer LSTM (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
Five digits (reversed):
+ One layer LSTM (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
---
## Setup
```python
import keras
from keras import layers
import numpy as np
# Parameters for the model and dataset.
TRAINING_SIZE = 50000
DIGITS = 3
REVERSE = True
# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS
```
---
## Generate the data
```python
class CharacterTable:
"""Given a set of characters:
+ Encode them to a one-hot integer representation
+ Decode the one-hot or integer representation to their character output
+ Decode a vector of probabilities to their character output
"""
def __init__(self, chars):
"""Initialize character table.
# Arguments
chars: Characters that can appear in the input.
"""
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
def encode(self, C, num_rows):
"""One-hot encode given string C.
# Arguments
C: string, to be encoded.
num_rows: Number of rows in the returned one-hot encoding. This is
used to keep the # of rows for each data the same.
"""
x = np.zeros((num_rows, len(self.chars)))
for i, c in enumerate(C):
x[i, self.char_indices[c]] = 1
return x
def decode(self, x, calc_argmax=True):
"""Decode the given vector or 2D array to their character output.
# Arguments
x: A vector or a 2D array of probabilities or one-hot representations;
or a vector of character indices (used with `calc_argmax=False`).
calc_argmax: Whether to find the character index with maximum
probability, defaults to `True`.
"""
if calc_argmax:
x = x.argmax(axis=-1)
return "".join(self.indices_char[x] for x in x)
# All the numbers, plus sign and space for padding.
chars = "0123456789+ "
ctable = CharacterTable(chars)
questions = []
expected = []
seen = set()
print("Generating data...")
while len(questions) < TRAINING_SIZE:
f = lambda: int(
"".join(
np.random.choice(list("0123456789"))
for i in range(np.random.randint(1, DIGITS + 1))
)
)
a, b = f(), f()
# Skip any addition questions we've already seen
# Also skip any such that x+Y == Y+x (hence the sorting).
key = tuple(sorted((a, b)))
if key in seen:
continue
seen.add(key)
# Pad the data with spaces such that it is always MAXLEN.
q = "{}+{}".format(a, b)
query = q + " " * (MAXLEN - len(q))
ans = str(a + b)
# Answers can be of maximum size DIGITS + 1.
ans += " " * (DIGITS + 1 - len(ans))
if REVERSE:
# Reverse the query, e.g., '12+345 ' becomes ' 543+21'. (Note the
# space used for padding.)
query = query[::-1]
questions.append(query)
expected.append(ans)
print("Total questions:", len(questions))
```
<div class="k-default-codeblock">
```
Generating data...
Total questions: 50000
```
</div>
---
## Vectorize the data
```python
print("Vectorization...")
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=bool)
for i, sentence in enumerate(questions):
x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, DIGITS + 1)
# Shuffle (x, y) in unison as the later parts of x will almost all be larger
# digits.
indices = np.arange(len(y))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
# Explicitly set apart 10% for validation data that we never train over.
split_at = len(x) - len(x) // 10
(x_train, x_val) = x[:split_at], x[split_at:]
(y_train, y_val) = y[:split_at], y[split_at:]
print("Training Data:")
print(x_train.shape)
print(y_train.shape)
print("Validation Data:")
print(x_val.shape)
print(y_val.shape)
```
<div class="k-default-codeblock">
```
Vectorization...
Training Data:
(45000, 7, 12)
(45000, 4, 12)
Validation Data:
(5000, 7, 12)
(5000, 4, 12)
```
</div>
---
## Build the model
```python
print("Build model...")
num_layers = 1 # Try to add more LSTM layers!
model = keras.Sequential()
# "Encode" the input sequence using a LSTM, producing an output of size 128.
# Note: In a situation where your input sequences have a variable length,
# use input_shape=(None, num_feature).
model.add(layers.Input((MAXLEN, len(chars))))
model.add(layers.LSTM(128))
# As the decoder RNN's input, repeatedly provide the last output of the
# encoder RNN for each time step. Repeat 'DIGITS + 1' times as that's the maximum
# length of output, e.g., when DIGITS=3, max output is 999+999=1998.
model.add(layers.RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer.
for _ in range(num_layers):
# By setting return_sequences to True, return not only the last output but
# all the outputs so far in the form of (num_samples, timesteps,
# output_dim). This is necessary as TimeDistributed in the below expects
# the first dimension to be the timesteps.
model.add(layers.LSTM(128, return_sequences=True))
# Apply a dense layer to every temporal slice of the input. For each step
# of the output sequence, decide which character should be chosen.
model.add(layers.Dense(len(chars), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
```
<div class="k-default-codeblock">
```
Build model...
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ lstm (<span style="color: #0087ff; text-decoration-color: #0087ff">LSTM</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">72,192</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ repeat_vector (<span style="color: #0087ff; text-decoration-color: #0087ff">RepeatVector</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ lstm_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">LSTM</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">131,584</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">12</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,548</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">205,324</span> (802.05 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">205,324</span> (802.05 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Train the model
```python
# Training parameters.
epochs = 30
batch_size = 32
# Formatting characters for results display.
green_color = "\033[92m"
red_color = "\033[91m"
end_char = "\033[0m"
# Train the model each generation and show predictions against the validation
# dataset.
for epoch in range(1, epochs):
print()
print("Iteration", epoch)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=1,
validation_data=(x_val, y_val),
)
# Select 10 samples from the validation set at random so we can visualize
# errors.
for i in range(10):
ind = np.random.randint(0, len(x_val))
rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
preds = np.argmax(model.predict(rowx, verbose=0), axis=-1)
q = ctable.decode(rowx[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print("Q", q[::-1] if REVERSE else q, end=" ")
print("T", correct, end=" ")
if correct == guess:
print(f"{green_color}☑ {guess}{end_char}")
else:
print(f"{red_color}☒ {guess}{end_char}")
```
<div class="k-default-codeblock">
```
Iteration 1
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 10s 6ms/step - accuracy: 0.3258 - loss: 1.8801 - val_accuracy: 0.4268 - val_loss: 1.5506
Q 499+58 T 557 ☒ 511
Q 51+638 T 689 ☒ 662
Q 87+12 T 99 ☒ 11
Q 259+55 T 314 ☒ 561
Q 704+87 T 791 ☒ 811
Q 988+67 T 1055 ☒ 101
Q 94+116 T 210 ☒ 111
Q 724+4 T 728 ☒ 777
Q 8+673 T 681 ☒ 772
Q 8+991 T 999 ☒ 900
```
</div>
<div class="k-default-codeblock">
```
Iteration 2
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.4688 - loss: 1.4235 - val_accuracy: 0.5846 - val_loss: 1.1293
Q 379+6 T 385 ☒ 387
Q 15+504 T 519 ☒ 525
Q 552+299 T 851 ☒ 727
Q 664+0 T 664 ☒ 667
Q 500+257 T 757 ☒ 797
Q 50+818 T 868 ☒ 861
Q 310+691 T 1001 ☒ 900
Q 378+548 T 926 ☒ 827
Q 46+59 T 105 ☒ 122
Q 49+817 T 866 ☒ 871
```
</div>
<div class="k-default-codeblock">
```
Iteration 3
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.6053 - loss: 1.0648 - val_accuracy: 0.6665 - val_loss: 0.9070
Q 1+266 T 267 ☒ 260
Q 73+257 T 330 ☒ 324
Q 421+628 T 1049 ☒ 1022
Q 85+590 T 675 ☒ 660
Q 66+34 T 100 ☒ 90
Q 256+639 T 895 ☒ 890
Q 6+677 T 683 ☑ 683
Q 162+637 T 799 ☒ 792
Q 5+324 T 329 ☒ 337
Q 848+34 T 882 ☒ 889
```
</div>
<div class="k-default-codeblock">
```
Iteration 4
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.6781 - loss: 0.8751 - val_accuracy: 0.7037 - val_loss: 0.8092
Q 677+1 T 678 ☒ 676
Q 1+531 T 532 ☒ 535
Q 699+60 T 759 ☒ 756
Q 475+139 T 614 ☒ 616
Q 327+592 T 919 ☒ 915
Q 48+912 T 960 ☒ 956
Q 520+78 T 598 ☒ 505
Q 318+8 T 326 ☒ 327
Q 914+53 T 967 ☒ 966
Q 734+0 T 734 ☒ 733
```
</div>
<div class="k-default-codeblock">
```
Iteration 5
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.7142 - loss: 0.7807 - val_accuracy: 0.7164 - val_loss: 0.7622
Q 150+337 T 487 ☒ 489
Q 72+934 T 1006 ☒ 1005
Q 171+62 T 233 ☒ 231
Q 108+21 T 129 ☒ 135
Q 755+896 T 1651 ☒ 1754
Q 117+1 T 118 ☒ 119
Q 148+95 T 243 ☒ 241
Q 719+956 T 1675 ☒ 1684
Q 656+43 T 699 ☒ 695
Q 368+8 T 376 ☒ 372
```
</div>
<div class="k-default-codeblock">
```
Iteration 6
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.7377 - loss: 0.7157 - val_accuracy: 0.7541 - val_loss: 0.6684
Q 945+364 T 1309 ☒ 1305
Q 762+96 T 858 ☒ 855
Q 5+650 T 655 ☑ 655
Q 52+680 T 732 ☒ 735
Q 77+724 T 801 ☒ 800
Q 46+739 T 785 ☑ 785
Q 843+43 T 886 ☒ 885
Q 158+3 T 161 ☒ 160
Q 426+711 T 1137 ☒ 1138
Q 157+41 T 198 ☒ 190
```
</div>
<div class="k-default-codeblock">
```
Iteration 7
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.7642 - loss: 0.6462 - val_accuracy: 0.7955 - val_loss: 0.5433
Q 822+27 T 849 ☑ 849
Q 82+495 T 577 ☒ 563
Q 9+366 T 375 ☒ 373
Q 9+598 T 607 ☒ 696
Q 186+41 T 227 ☒ 226
Q 920+920 T 1840 ☒ 1846
Q 445+345 T 790 ☒ 797
Q 783+588 T 1371 ☒ 1360
Q 36+473 T 509 ☒ 502
Q 354+61 T 415 ☒ 416
```
</div>
<div class="k-default-codeblock">
```
Iteration 8
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.8326 - loss: 0.4626 - val_accuracy: 0.9069 - val_loss: 0.2744
Q 458+154 T 612 ☑ 612
Q 309+19 T 328 ☑ 328
Q 808+97 T 905 ☑ 905
Q 28+736 T 764 ☑ 764
Q 28+79 T 107 ☑ 107
Q 44+84 T 128 ☒ 129
Q 744+13 T 757 ☑ 757
Q 24+996 T 1020 ☒ 1011
Q 8+193 T 201 ☒ 101
Q 483+9 T 492 ☒ 491
```
</div>
<div class="k-default-codeblock">
```
Iteration 9
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9365 - loss: 0.2275 - val_accuracy: 0.9657 - val_loss: 0.1393
Q 330+61 T 391 ☑ 391
Q 207+82 T 289 ☒ 299
Q 23+234 T 257 ☑ 257
Q 690+567 T 1257 ☑ 1257
Q 293+97 T 390 ☒ 380
Q 312+868 T 1180 ☑ 1180
Q 956+40 T 996 ☑ 996
Q 97+105 T 202 ☒ 203
Q 365+44 T 409 ☑ 409
Q 76+639 T 715 ☑ 715
```
</div>
<div class="k-default-codeblock">
```
Iteration 10
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.9717 - loss: 0.1223 - val_accuracy: 0.9744 - val_loss: 0.0965
Q 123+143 T 266 ☑ 266
Q 599+1 T 600 ☑ 600
Q 729+237 T 966 ☑ 966
Q 51+120 T 171 ☑ 171
Q 97+672 T 769 ☑ 769
Q 840+5 T 845 ☑ 845
Q 86+494 T 580 ☒ 570
Q 278+51 T 329 ☑ 329
Q 8+832 T 840 ☑ 840
Q 383+9 T 392 ☑ 392
```
</div>
<div class="k-default-codeblock">
```
Iteration 11
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.9842 - loss: 0.0729 - val_accuracy: 0.9808 - val_loss: 0.0690
Q 181+923 T 1104 ☑ 1104
Q 747+24 T 771 ☑ 771
Q 6+65 T 71 ☑ 71
Q 75+994 T 1069 ☑ 1069
Q 712+587 T 1299 ☑ 1299
Q 977+10 T 987 ☑ 987
Q 742+24 T 766 ☑ 766
Q 215+44 T 259 ☑ 259
Q 817+683 T 1500 ☑ 1500
Q 102+48 T 150 ☒ 140
```
</div>
<div class="k-default-codeblock">
```
Iteration 12
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9820 - loss: 0.0695 - val_accuracy: 0.9823 - val_loss: 0.0596
Q 819+885 T 1704 ☒ 1604
Q 34+20 T 54 ☑ 54
Q 9+996 T 1005 ☑ 1005
Q 915+811 T 1726 ☑ 1726
Q 166+640 T 806 ☑ 806
Q 229+82 T 311 ☑ 311
Q 1+418 T 419 ☑ 419
Q 552+28 T 580 ☑ 580
Q 279+733 T 1012 ☑ 1012
Q 756+734 T 1490 ☑ 1490
```
</div>
<div class="k-default-codeblock">
```
Iteration 13
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9836 - loss: 0.0587 - val_accuracy: 0.9941 - val_loss: 0.0296
Q 793+0 T 793 ☑ 793
Q 79+48 T 127 ☑ 127
Q 484+92 T 576 ☑ 576
Q 39+655 T 694 ☑ 694
Q 64+708 T 772 ☑ 772
Q 568+341 T 909 ☑ 909
Q 9+918 T 927 ☑ 927
Q 48+912 T 960 ☑ 960
Q 31+289 T 320 ☑ 320
Q 378+548 T 926 ☑ 926
```
</div>
<div class="k-default-codeblock">
```
Iteration 14
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9915 - loss: 0.0353 - val_accuracy: 0.9901 - val_loss: 0.0358
Q 318+8 T 326 ☒ 325
Q 886+63 T 949 ☒ 959
Q 77+8 T 85 ☑ 85
Q 418+40 T 458 ☑ 458
Q 30+32 T 62 ☑ 62
Q 541+93 T 634 ☑ 634
Q 6+7 T 13 ☒ 14
Q 670+74 T 744 ☑ 744
Q 97+57 T 154 ☑ 154
Q 60+13 T 73 ☑ 73
```
</div>
<div class="k-default-codeblock">
```
Iteration 15
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9911 - loss: 0.0335 - val_accuracy: 0.9934 - val_loss: 0.0262
Q 24+533 T 557 ☑ 557
Q 324+44 T 368 ☑ 368
Q 63+505 T 568 ☑ 568
Q 670+74 T 744 ☑ 744
Q 58+359 T 417 ☑ 417
Q 16+428 T 444 ☑ 444
Q 17+99 T 116 ☑ 116
Q 779+903 T 1682 ☑ 1682
Q 40+576 T 616 ☑ 616
Q 947+773 T 1720 ☑ 1720
```
</div>
<div class="k-default-codeblock">
```
Iteration 16
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9968 - loss: 0.0175 - val_accuracy: 0.9901 - val_loss: 0.0360
Q 315+155 T 470 ☑ 470
Q 594+950 T 1544 ☑ 1544
Q 372+37 T 409 ☑ 409
Q 537+47 T 584 ☑ 584
Q 8+263 T 271 ☑ 271
Q 81+500 T 581 ☑ 581
Q 75+270 T 345 ☑ 345
Q 0+796 T 796 ☑ 796
Q 655+965 T 1620 ☑ 1620
Q 384+1 T 385 ☑ 385
```
</div>
<div class="k-default-codeblock">
```
Iteration 17
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9972 - loss: 0.0148 - val_accuracy: 0.9924 - val_loss: 0.0278
Q 168+83 T 251 ☑ 251
Q 951+53 T 1004 ☑ 1004
Q 400+37 T 437 ☑ 437
Q 996+473 T 1469 ☒ 1569
Q 996+847 T 1843 ☑ 1843
Q 842+550 T 1392 ☑ 1392
Q 479+72 T 551 ☑ 551
Q 753+782 T 1535 ☑ 1535
Q 99+188 T 287 ☑ 287
Q 2+974 T 976 ☑ 976
```
</div>
<div class="k-default-codeblock">
```
Iteration 18
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.9929 - loss: 0.0258 - val_accuracy: 0.9973 - val_loss: 0.0135
Q 380+62 T 442 ☑ 442
Q 774+305 T 1079 ☑ 1079
Q 248+272 T 520 ☑ 520
Q 479+736 T 1215 ☑ 1215
Q 859+743 T 1602 ☑ 1602
Q 667+20 T 687 ☑ 687
Q 932+56 T 988 ☑ 988
Q 740+31 T 771 ☑ 771
Q 588+88 T 676 ☑ 676
Q 109+57 T 166 ☑ 166
```
</div>
<div class="k-default-codeblock">
```
Iteration 19
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9977 - loss: 0.0116 - val_accuracy: 0.9571 - val_loss: 0.1416
Q 635+89 T 724 ☑ 724
Q 50+818 T 868 ☑ 868
Q 37+622 T 659 ☑ 659
Q 913+49 T 962 ☑ 962
Q 641+962 T 1603 ☒ 1503
Q 11+626 T 637 ☑ 637
Q 20+405 T 425 ☑ 425
Q 667+208 T 875 ☑ 875
Q 89+794 T 883 ☑ 883
Q 234+55 T 289 ☑ 289
```
</div>
<div class="k-default-codeblock">
```
Iteration 20
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9947 - loss: 0.0194 - val_accuracy: 0.9967 - val_loss: 0.0136
Q 5+777 T 782 ☑ 782
Q 1+266 T 267 ☑ 267
Q 579+1 T 580 ☑ 580
Q 665+6 T 671 ☑ 671
Q 210+546 T 756 ☑ 756
Q 660+86 T 746 ☑ 746
Q 75+349 T 424 ☑ 424
Q 984+36 T 1020 ☑ 1020
Q 4+367 T 371 ☑ 371
Q 249+213 T 462 ☑ 462
```
</div>
<div class="k-default-codeblock">
```
Iteration 21
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.9987 - loss: 0.0081 - val_accuracy: 0.9840 - val_loss: 0.0481
Q 228+95 T 323 ☑ 323
Q 72+18 T 90 ☑ 90
Q 34+687 T 721 ☑ 721
Q 932+0 T 932 ☑ 932
Q 933+54 T 987 ☑ 987
Q 735+455 T 1190 ☑ 1190
Q 790+70 T 860 ☑ 860
Q 416+36 T 452 ☒ 462
Q 194+110 T 304 ☑ 304
Q 349+70 T 419 ☑ 419
```
</div>
<div class="k-default-codeblock">
```
Iteration 22
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 40s 28ms/step - accuracy: 0.9902 - loss: 0.0326 - val_accuracy: 0.9947 - val_loss: 0.0190
Q 95+237 T 332 ☑ 332
Q 5+188 T 193 ☑ 193
Q 19+931 T 950 ☑ 950
Q 38+499 T 537 ☑ 537
Q 25+21 T 46 ☑ 46
Q 55+85 T 140 ☑ 140
Q 555+7 T 562 ☑ 562
Q 83+873 T 956 ☑ 956
Q 95+527 T 622 ☑ 622
Q 556+558 T 1114 ☑ 1114
```
</div>
<div class="k-default-codeblock">
```
Iteration 23
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9835 - loss: 0.0572 - val_accuracy: 0.9962 - val_loss: 0.0141
Q 48+413 T 461 ☑ 461
Q 71+431 T 502 ☑ 502
Q 892+534 T 1426 ☑ 1426
Q 934+201 T 1135 ☑ 1135
Q 898+967 T 1865 ☒ 1855
Q 958+0 T 958 ☑ 958
Q 23+179 T 202 ☑ 202
Q 138+60 T 198 ☑ 198
Q 718+5 T 723 ☑ 723
Q 816+514 T 1330 ☑ 1330
```
</div>
<div class="k-default-codeblock">
```
Iteration 24
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 20s 14ms/step - accuracy: 0.9932 - loss: 0.0255 - val_accuracy: 0.9932 - val_loss: 0.0243
Q 4+583 T 587 ☑ 587
Q 49+466 T 515 ☑ 515
Q 920+26 T 946 ☑ 946
Q 624+813 T 1437 ☑ 1437
Q 87+315 T 402 ☑ 402
Q 368+73 T 441 ☑ 441
Q 86+833 T 919 ☑ 919
Q 528+423 T 951 ☑ 951
Q 0+705 T 705 ☑ 705
Q 581+928 T 1509 ☑ 1509
```
</div>
<div class="k-default-codeblock">
```
Iteration 25
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9908 - loss: 0.0303 - val_accuracy: 0.9944 - val_loss: 0.0169
Q 107+34 T 141 ☑ 141
Q 998+90 T 1088 ☑ 1088
Q 71+520 T 591 ☑ 591
Q 91+996 T 1087 ☑ 1087
Q 94+69 T 163 ☑ 163
Q 108+21 T 129 ☑ 129
Q 785+60 T 845 ☑ 845
Q 71+628 T 699 ☑ 699
Q 294+9 T 303 ☑ 303
Q 399+34 T 433 ☑ 433
```
</div>
<div class="k-default-codeblock">
```
Iteration 26
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 5ms/step - accuracy: 0.9965 - loss: 0.0139 - val_accuracy: 0.9979 - val_loss: 0.0094
Q 19+133 T 152 ☑ 152
Q 841+3 T 844 ☑ 844
Q 698+6 T 704 ☑ 704
Q 942+28 T 970 ☑ 970
Q 81+735 T 816 ☑ 816
Q 325+14 T 339 ☑ 339
Q 790+64 T 854 ☑ 854
Q 4+839 T 843 ☑ 843
Q 505+96 T 601 ☑ 601
Q 917+42 T 959 ☑ 959
```
</div>
<div class="k-default-codeblock">
```
Iteration 27
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 72s 51ms/step - accuracy: 0.9952 - loss: 0.0173 - val_accuracy: 0.9992 - val_loss: 0.0036
Q 71+628 T 699 ☑ 699
Q 791+9 T 800 ☑ 800
Q 19+148 T 167 ☑ 167
Q 7+602 T 609 ☑ 609
Q 6+566 T 572 ☑ 572
Q 437+340 T 777 ☑ 777
Q 614+533 T 1147 ☑ 1147
Q 948+332 T 1280 ☑ 1280
Q 56+619 T 675 ☑ 675
Q 86+251 T 337 ☑ 337
```
</div>
<div class="k-default-codeblock">
```
Iteration 28
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 8s 6ms/step - accuracy: 0.9964 - loss: 0.0124 - val_accuracy: 0.9990 - val_loss: 0.0047
Q 2+572 T 574 ☑ 574
Q 437+96 T 533 ☑ 533
Q 15+224 T 239 ☑ 239
Q 16+655 T 671 ☑ 671
Q 714+5 T 719 ☑ 719
Q 645+417 T 1062 ☑ 1062
Q 25+919 T 944 ☑ 944
Q 89+329 T 418 ☑ 418
Q 22+513 T 535 ☑ 535
Q 497+983 T 1480 ☑ 1480
```
</div>
<div class="k-default-codeblock">
```
Iteration 29
1407/1407 ━━━━━━━━━━━━━━━━━━━━ 7s 5ms/step - accuracy: 0.9970 - loss: 0.0106 - val_accuracy: 0.9990 - val_loss: 0.0048
Q 2+962 T 964 ☑ 964
Q 6+76 T 82 ☑ 82
Q 986+20 T 1006 ☑ 1006
Q 727+49 T 776 ☑ 776
Q 948+332 T 1280 ☑ 1280
Q 921+463 T 1384 ☑ 1384
Q 77+556 T 633 ☑ 633
Q 133+849 T 982 ☑ 982
Q 301+478 T 779 ☑ 779
Q 3+243 T 246 ☑ 246
```
</div>
You'll get to 99+% validation accuracy after ~30 epochs.
| keras-io/examples/nlp/md/addition_rnn.md/0 | {
"file_path": "keras-io/examples/nlp/md/addition_rnn.md",
"repo_id": "keras-io",
"token_count": 11811
} | 85 |
<jupyter_start><jupyter_text>Classification with Neural Decision Forests**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/01/15**Last modified:** 2021/01/15**Description:** How to train differentiable decision trees for end-to-end learning in deep neural networks. IntroductionThis example provides an implementation of the[Deep Neural Decision Forest](https://ieeexplore.ieee.org/document/7410529)model introduced by P. Kontschieder et al. for structured data classification.It demonstrates how to build a stochastic and differentiable decision tree model,train it end-to-end, and unify decision trees with deep representation learning. The datasetThis example uses the[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/census+income)provided by the[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).The task is binary classificationto predict whether a person is likely to be making over USD 50,000 a year.The dataset includes 48,842 instances with 14 input features (such as age, work class, education, occupation, and so on): 5 numerical featuresand 9 categorical features. Setup<jupyter_code>import keras
from keras import layers
from keras.layers import StringLookup
from keras import ops
from tensorflow import data as tf_data
import numpy as np
import pandas as pd
import math<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code>CSV_HEADER = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
train_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
)
train_data = pd.read_csv(train_data_url, header=None, names=CSV_HEADER)
test_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
)
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f"Train dataset shape: {train_data.shape}")
print(f"Test dataset shape: {test_data.shape}")<jupyter_output><empty_output><jupyter_text>Remove the first record (because it is not a valid data example) and a trailing'dot' in the class labels.<jupyter_code>test_data = test_data[1:]
test_data.income_bracket = test_data.income_bracket.apply(
lambda value: value.replace(".", "")
)<jupyter_output><empty_output><jupyter_text>We store the training and test data splits locally as CSV files.<jupyter_code>train_data_file = "train_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)<jupyter_output><empty_output><jupyter_text>Define dataset metadataHere, we define the metadata of the dataset that will be useful for reading and parsingand encoding input features.<jupyter_code># A list of the numerical feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"education_num",
"capital_gain",
"capital_loss",
"hours_per_week",
]
# A dictionary of the categorical features and their vocabulary.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"workclass": sorted(list(train_data["workclass"].unique())),
"education": sorted(list(train_data["education"].unique())),
"marital_status": sorted(list(train_data["marital_status"].unique())),
"occupation": sorted(list(train_data["occupation"].unique())),
"relationship": sorted(list(train_data["relationship"].unique())),
"race": sorted(list(train_data["race"].unique())),
"gender": sorted(list(train_data["gender"].unique())),
"native_country": sorted(list(train_data["native_country"].unique())),
}
# A list of the columns to ignore from the dataset.
IGNORE_COLUMN_NAMES = ["fnlwgt"]
# A list of the categorical feature names.
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())
# A list of all the input features.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
# A list of column default values for each feature.
COLUMN_DEFAULTS = [
[0.0] if feature_name in NUMERIC_FEATURE_NAMES + IGNORE_COLUMN_NAMES else ["NA"]
for feature_name in CSV_HEADER
]
# The name of the target feature.
TARGET_FEATURE_NAME = "income_bracket"
# A list of the labels of the target features.
TARGET_LABELS = [" <=50K", " >50K"]<jupyter_output><empty_output><jupyter_text>Create `tf_data.Dataset` objects for training and validationWe create an input function to read and parse the file, and convert features and labelsinto a [`tf_data.Dataset`](https://www.tensorflow.org/guide/datasets)for training and validation. We also preprocess the input by mapping the target labelto an index.<jupyter_code>target_label_lookup = StringLookup(
vocabulary=TARGET_LABELS, mask_token=None, num_oov_indices=0
)
lookup_dict = {}
for feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
    # Create a lookup to convert string values to integer indices.
# Since we are not using a mask token, nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = StringLookup(vocabulary=vocabulary, mask_token=None, num_oov_indices=0)
lookup_dict[feature_name] = lookup
def encode_categorical(batch_x, batch_y):
for feature_name in CATEGORICAL_FEATURE_NAMES:
batch_x[feature_name] = lookup_dict[feature_name](batch_x[feature_name])
return batch_x, batch_y
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
dataset = (
tf_data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
na_value="?",
shuffle=shuffle,
)
.map(lambda features, target: (features, target_label_lookup(target)))
.map(encode_categorical)
)
return dataset.cache()<jupyter_output><empty_output><jupyter_text>Create model inputs<jupyter_code>def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="int32"
)
return inputs<jupyter_output><empty_output><jupyter_text>Encode input features<jupyter_code>def encode_inputs(inputs):
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # Create a lookup to convert string values to integer indices.
            # Since we are not using a mask token, nor expecting any out of vocabulary
            # (oov) token, we set mask_token to None and num_oov_indices to 0.
            # Reuse the lookup layer created for this feature earlier.
            lookup = lookup_dict[feature_name]
            value_index = inputs[feature_name]
            embedding_dims = int(math.sqrt(lookup.vocabulary_size()))
            # Create an embedding layer with the specified dimensions.
            embedding = layers.Embedding(
                input_dim=lookup.vocabulary_size(), output_dim=embedding_dims
)
# Convert the index values to embedding representations.
encoded_feature = embedding(value_index)
else:
# Use the numerical features as-is.
encoded_feature = inputs[feature_name]
if inputs[feature_name].shape[-1] is None:
encoded_feature = keras.ops.expand_dims(encoded_feature, -1)
encoded_features.append(encoded_feature)
encoded_features = layers.concatenate(encoded_features)
    return encoded_features<jupyter_output><empty_output><jupyter_text>Deep Neural Decision TreeA neural decision tree model has two sets of weights to learn. The first set is `pi`,which represents the probability distribution of the classes in the tree leaves.The second set is the weights of the routing layer `decision_fn`, which represents the probabilityof going to each leaf. The forward pass of the model works as follows:1. The model expects input `features` as a single vector encoding all the features of an instancein the batch. This vector can be generated from a Convolutional Neural Network (CNN) applied to imagesor dense transformations applied to structured data features.2. The model first applies a `used_features_mask` to randomly select a subset of input features to use.3. Then, the model computes the probabilities (`mu`) for the input instances to reach the tree leavesby iteratively performing a *stochastic* routing throughout the tree levels.4. Finally, the probabilities of reaching the leaves are combined by the class probabilities at theleaves to produce the final `outputs`.<jupyter_code>class NeuralDecisionTree(keras.Model):
def __init__(self, depth, num_features, used_features_rate, num_classes):
super().__init__()
self.depth = depth
self.num_leaves = 2**depth
self.num_classes = num_classes
# Create a mask for the randomly selected features.
num_used_features = int(num_features * used_features_rate)
one_hot = np.eye(num_features)
sampled_feature_indices = np.random.choice(
np.arange(num_features), num_used_features, replace=False
)
self.used_features_mask = ops.convert_to_tensor(
one_hot[sampled_feature_indices], dtype="float32"
)
# Initialize the weights of the classes in leaves.
self.pi = self.add_weight(
initializer="random_normal",
shape=[self.num_leaves, self.num_classes],
dtype="float32",
trainable=True,
)
# Initialize the stochastic routing layer.
self.decision_fn = layers.Dense(
units=self.num_leaves, activation="sigmoid", name="decision"
)
def call(self, features):
batch_size = ops.shape(features)[0]
# Apply the feature mask to the input features.
features = ops.matmul(
features, ops.transpose(self.used_features_mask)
) # [batch_size, num_used_features]
# Compute the routing probabilities.
decisions = ops.expand_dims(
self.decision_fn(features), axis=2
) # [batch_size, num_leaves, 1]
# Concatenate the routing probabilities with their complements.
decisions = layers.concatenate(
[decisions, 1 - decisions], axis=2
) # [batch_size, num_leaves, 2]
mu = ops.ones([batch_size, 1, 1])
begin_idx = 1
end_idx = 2
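        # Internal nodes are laid out level by level in `decisions`: level `l` occupies
        # indices [2**l, 2**(l + 1)), so the root sits at index 1 and index 0 is unused.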
# Traverse the tree in breadth-first order.
for level in range(self.depth):
mu = ops.reshape(mu, [batch_size, -1, 1]) # [batch_size, 2 ** level, 1]
mu = ops.tile(mu, (1, 1, 2)) # [batch_size, 2 ** level, 2]
level_decisions = decisions[
:, begin_idx:end_idx, :
] # [batch_size, 2 ** level, 2]
mu = mu * level_decisions # [batch_size, 2**level, 2]
begin_idx = end_idx
end_idx = begin_idx + 2 ** (level + 1)
mu = ops.reshape(mu, [batch_size, self.num_leaves]) # [batch_size, num_leaves]
probabilities = keras.activations.softmax(self.pi) # [num_leaves, num_classes]
outputs = ops.matmul(mu, probabilities) # [batch_size, num_classes]
return outputs<jupyter_output><empty_output><jupyter_text>Deep Neural Decision ForestThe neural decision forest model consists of a set of neural decision trees that aretrained simultaneously. The output of the forest model is the average outputs of its trees.<jupyter_code>class NeuralDecisionForest(keras.Model):
def __init__(self, num_trees, depth, num_features, used_features_rate, num_classes):
super().__init__()
self.ensemble = []
# Initialize the ensemble by adding NeuralDecisionTree instances.
# Each tree will have its own randomly selected input features to use.
for _ in range(num_trees):
self.ensemble.append(
NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
)
def call(self, inputs):
# Initialize the outputs: a [batch_size, num_classes] matrix of zeros.
batch_size = ops.shape(inputs)[0]
outputs = ops.zeros([batch_size, num_classes])
# Aggregate the outputs of trees in the ensemble.
for tree in self.ensemble:
outputs += tree(inputs)
# Divide the outputs by the ensemble size to get the average.
outputs /= len(self.ensemble)
return outputs<jupyter_output><empty_output><jupyter_text>Finally, let's set up the code that will train and evaluate the model.<jupyter_code>learning_rate = 0.01
batch_size = 265
num_epochs = 10
def run_experiment(model):
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
print("Start training the model...")
train_dataset = get_dataset_from_csv(
train_data_file, shuffle=True, batch_size=batch_size
)
model.fit(train_dataset, epochs=num_epochs)
print("Model training finished")
print("Evaluating the model on the test data...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Experiment 1: train a decision tree modelIn this experiment, we train a single neural decision tree modelwhere we use all input features.<jupyter_code>num_trees = 10
depth = 10
used_features_rate = 1.0
num_classes = len(TARGET_LABELS)
def create_tree_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
tree = NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
outputs = tree(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
tree_model = create_tree_model()
run_experiment(tree_model)<jupyter_output><empty_output><jupyter_text>Experiment 2: train a forest modelIn this experiment, we train a neural decision forest with `num_trees` treeswhere each tree uses randomly selected 50% of the input features. You can control the numberof features to be used in each tree by setting the `used_features_rate` variable.In addition, we set the depth to 5 instead of 10 compared to the previous experiment.<jupyter_code>num_trees = 25
depth = 5
used_features_rate = 0.5
def create_forest_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
forest_model = NeuralDecisionForest(
num_trees, depth, num_features, used_features_rate, num_classes
)
outputs = forest_model(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
forest_model = create_forest_model()
run_experiment(forest_model)<jupyter_output><empty_output> | keras-io/examples/structured_data/ipynb/deep_neural_decision_forests.ipynb/0 | {
"file_path": "keras-io/examples/structured_data/ipynb/deep_neural_decision_forests.ipynb",
"repo_id": "keras-io",
"token_count": 5833
} | 86 |
# Structured data classification with FeatureSpace
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2022/11/09<br>
**Last modified:** 2022/11/09<br>
**Description:** Classify tabular data in a few lines of code.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/structured_data_classification_with_feature_space.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/structured_data_classification_with_feature_space.py)
---
## Introduction
This example demonstrates how to do structured data classification
(also known as tabular data classification), starting from a raw
CSV file. Our data includes numerical features,
and integer categorical features, and string categorical features.
We will use the utility `keras.utils.FeatureSpace` to index,
preprocess, and encode our features.
The code is adapted from the example
[Structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/).
While the previous example managed its own low-level feature preprocessing and
encoding with Keras preprocessing layers, in this example we
delegate everything to `FeatureSpace`, making the workflow
extremely quick and easy.
### The dataset
[Our dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) is provided by the
Cleveland Clinic Foundation for Heart Disease.
It's a CSV file with 303 rows. Each row contains information about a patient (a
**sample**), and each column describes an attribute of the patient (a **feature**). We
use the features to predict whether a patient has a heart disease
(**binary classification**).
Here's the description of each feature:
Column| Description| Feature Type
------------|--------------------|----------------------
Age | Age in years | Numerical
Sex | (1 = male; 0 = female) | Categorical
CP | Chest pain type (0, 1, 2, 3, 4) | Categorical
Trestbpd | Resting blood pressure (in mm Hg on admission) | Numerical
Chol | Serum cholesterol in mg/dl | Numerical
FBS | Fasting blood sugar > 120 mg/dl (1 = true; 0 = false) | Categorical
RestECG | Resting electrocardiogram results (0, 1, 2) | Categorical
Thalach | Maximum heart rate achieved | Numerical
Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical
Oldpeak | ST depression induced by exercise relative to rest | Numerical
Slope | Slope of the peak exercise ST segment | Numerical
CA | Number of major vessels (0-3) colored by fluoroscopy | Both numerical & categorical
Thal | 3 = normal; 6 = fixed defect; 7 = reversible defect | Categorical
Target | Diagnosis of heart disease (1 = true; 0 = false) | Target
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import pandas as pd
import keras
from keras.utils import FeatureSpace
```
---
## Preparing the data
Let's download the data and load it into a Pandas dataframe:
```python
file_url = "http://storage.googleapis.com/download.tensorflow.org/data/heart.csv"
dataframe = pd.read_csv(file_url)
```
The dataset includes 303 samples with 14 columns per sample
(13 features, plus the target label):
```python
print(dataframe.shape)
```
<div class="k-default-codeblock">
```
(303, 14)
```
</div>
Here's a preview of a few samples:
```python
dataframe.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>age</th>
<th>sex</th>
<th>cp</th>
<th>trestbps</th>
<th>chol</th>
<th>fbs</th>
<th>restecg</th>
<th>thalach</th>
<th>exang</th>
<th>oldpeak</th>
<th>slope</th>
<th>ca</th>
<th>thal</th>
<th>target</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>63</td>
<td>1</td>
<td>1</td>
<td>145</td>
<td>233</td>
<td>1</td>
<td>2</td>
<td>150</td>
<td>0</td>
<td>2.3</td>
<td>3</td>
<td>0</td>
<td>fixed</td>
<td>0</td>
</tr>
<tr>
<th>1</th>
<td>67</td>
<td>1</td>
<td>4</td>
<td>160</td>
<td>286</td>
<td>0</td>
<td>2</td>
<td>108</td>
<td>1</td>
<td>1.5</td>
<td>2</td>
<td>3</td>
<td>normal</td>
<td>1</td>
</tr>
<tr>
<th>2</th>
<td>67</td>
<td>1</td>
<td>4</td>
<td>120</td>
<td>229</td>
<td>0</td>
<td>2</td>
<td>129</td>
<td>1</td>
<td>2.6</td>
<td>2</td>
<td>2</td>
<td>reversible</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>37</td>
<td>1</td>
<td>3</td>
<td>130</td>
<td>250</td>
<td>0</td>
<td>0</td>
<td>187</td>
<td>0</td>
<td>3.5</td>
<td>3</td>
<td>0</td>
<td>normal</td>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>41</td>
<td>0</td>
<td>2</td>
<td>130</td>
<td>204</td>
<td>0</td>
<td>2</td>
<td>172</td>
<td>0</td>
<td>1.4</td>
<td>1</td>
<td>0</td>
<td>normal</td>
<td>0</td>
</tr>
</tbody>
</table>
</div>
The last column, "target", indicates whether the patient
has a heart disease (1) or not (0).
Let's split the data into a training and validation set:
```python
val_dataframe = dataframe.sample(frac=0.2, random_state=1337)
train_dataframe = dataframe.drop(val_dataframe.index)
print(
"Using %d samples for training and %d for validation"
% (len(train_dataframe), len(val_dataframe))
)
```
<div class="k-default-codeblock">
```
Using 242 samples for training and 61 for validation
```
</div>
Let's generate `tf.data.Dataset` objects for each dataframe:
```python
def dataframe_to_dataset(dataframe):
dataframe = dataframe.copy()
labels = dataframe.pop("target")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.shuffle(buffer_size=len(dataframe))
return ds
train_ds = dataframe_to_dataset(train_dataframe)
val_ds = dataframe_to_dataset(val_dataframe)
```
Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of features
and `target` is the value `0` or `1`:
```python
for x, y in train_ds.take(1):
print("Input:", x)
print("Target:", y)
```
<div class="k-default-codeblock">
```
Input: {'age': <tf.Tensor: shape=(), dtype=int64, numpy=65>, 'sex': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'cp': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'trestbps': <tf.Tensor: shape=(), dtype=int64, numpy=138>, 'chol': <tf.Tensor: shape=(), dtype=int64, numpy=282>, 'fbs': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'restecg': <tf.Tensor: shape=(), dtype=int64, numpy=2>, 'thalach': <tf.Tensor: shape=(), dtype=int64, numpy=174>, 'exang': <tf.Tensor: shape=(), dtype=int64, numpy=0>, 'oldpeak': <tf.Tensor: shape=(), dtype=float64, numpy=1.4>, 'slope': <tf.Tensor: shape=(), dtype=int64, numpy=2>, 'ca': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'thal': <tf.Tensor: shape=(), dtype=string, numpy=b'normal'>}
Target: tf.Tensor(0, shape=(), dtype=int64)
```
</div>
Let's batch the datasets:
```python
train_ds = train_ds.batch(32)
val_ds = val_ds.batch(32)
```
---
## Configuring a `FeatureSpace`
To configure how each feature should be preprocessed,
we instantiate a `keras.utils.FeatureSpace`, and we
pass to it a dictionary that maps the name of our features
to a string that describes the feature type.
We have a few "integer categorical" features such as `"FBS"`,
one "string categorical" feature (`"thal"`),
and a few numerical features, which we'd like to normalize
-- except `"age"`, which we'd like to discretize into
a number of bins.
We also use the `crosses` argument
to capture *feature interactions* for some categorical
features, that is to say, create additional features
that represent value co-occurrences for these categorical features.
You can compute feature crosses like this for arbitrary sets of
categorical features -- not just tuples of two features.
Because the resulting co-occurrences are hashed
into a fixed-size vector, you don't need to worry about whether
the co-occurrence space is too large.
```python
feature_space = FeatureSpace(
features={
# Categorical features encoded as integers
"sex": "integer_categorical",
"cp": "integer_categorical",
"fbs": "integer_categorical",
"restecg": "integer_categorical",
"exang": "integer_categorical",
"ca": "integer_categorical",
# Categorical feature encoded as string
"thal": "string_categorical",
# Numerical features to discretize
"age": "float_discretized",
# Numerical features to normalize
"trestbps": "float_normalized",
"chol": "float_normalized",
"thalach": "float_normalized",
"oldpeak": "float_normalized",
"slope": "float_normalized",
},
# We create additional features by hashing
# value co-occurrences for the
# following groups of categorical features.
crosses=[("sex", "age"), ("thal", "ca")],
# The hashing space for these co-occurrences
    # will be 32-dimensional.
crossing_dim=32,
# Our utility will one-hot encode all categorical
# features and concat all features into a single
# vector (one vector per sample).
output_mode="concat",
)
```
---
## Further customizing a `FeatureSpace`
Specifying the feature type via a string name is quick and easy,
but sometimes you may want to further configure the preprocessing
of each feature. For instance, in our case, our categorical
features don't have a large set of possible values -- it's only
a handful of values per feature (e.g. `1` and `0` for the feature `"FBS"`),
and all possible values are represented in the training set.
As a result, we don't need to reserve an index to represent "out of vocabulary" values
for these features -- which would have been the default behavior.
Below, we just specify `num_oov_indices=0` in each of these features
to tell the feature preprocessor to skip "out of vocabulary" indexing.
Other customizations you have access to include specifying the number of
bins for discretizing features of type `"float_discretized"`,
or the dimensionality of the hashing space for feature crossing.
```python
feature_space = FeatureSpace(
features={
# Categorical features encoded as integers
"sex": FeatureSpace.integer_categorical(num_oov_indices=0),
"cp": FeatureSpace.integer_categorical(num_oov_indices=0),
"fbs": FeatureSpace.integer_categorical(num_oov_indices=0),
"restecg": FeatureSpace.integer_categorical(num_oov_indices=0),
"exang": FeatureSpace.integer_categorical(num_oov_indices=0),
"ca": FeatureSpace.integer_categorical(num_oov_indices=0),
# Categorical feature encoded as string
"thal": FeatureSpace.string_categorical(num_oov_indices=0),
# Numerical features to discretize
"age": FeatureSpace.float_discretized(num_bins=30),
# Numerical features to normalize
"trestbps": FeatureSpace.float_normalized(),
"chol": FeatureSpace.float_normalized(),
"thalach": FeatureSpace.float_normalized(),
"oldpeak": FeatureSpace.float_normalized(),
"slope": FeatureSpace.float_normalized(),
},
# Specify feature cross with a custom crossing dim.
crosses=[
FeatureSpace.cross(feature_names=("sex", "age"), crossing_dim=64),
FeatureSpace.cross(
feature_names=("thal", "ca"),
crossing_dim=16,
),
],
output_mode="concat",
)
```
---
## Adapt the `FeatureSpace` to the training data
Before we start using the `FeatureSpace` to build a model, we have
to adapt it to the training data. During `adapt()`, the `FeatureSpace` will:
- Index the set of possible values for categorical features.
- Compute the mean and variance for numerical features to normalize.
- Compute the value boundaries for the different bins for numerical features to discretize.
Note that `adapt()` should be called on a `tf.data.Dataset` which yields dicts
of feature values -- no labels.
```python
train_ds_with_no_labels = train_ds.map(lambda x, _: x)
feature_space.adapt(train_ds_with_no_labels)
```
At this point, the `FeatureSpace` can be called on a dict of raw feature values, and will return a
single concatenated vector for each sample, combining encoded features and feature crosses.
```python
for x, _ in train_ds.take(1):
preprocessed_x = feature_space(x)
print("preprocessed_x.shape:", preprocessed_x.shape)
print("preprocessed_x.dtype:", preprocessed_x.dtype)
```
<div class="k-default-codeblock">
```
preprocessed_x.shape: (32, 138)
preprocessed_x.dtype: <dtype: 'float32'>
```
</div>
---
## Two ways to manage preprocessing: as part of the `tf.data` pipeline, or in the model itself
There are two ways in which you can leverage your `FeatureSpace`:
### Asynchronous preprocessing in `tf.data`
You can make it part of your data pipeline, before the model. This enables asynchronous parallel
preprocessing of the data on CPU before it hits the model. Do this if you're training on GPU or TPU,
or if you want to speed up preprocessing. This is almost always the right thing to do during training.
### Synchronous preprocessing in the model
You can make it part of your model. This means that the model will expect dicts of raw feature
values, and preprocessing will be done synchronously (in a blocking manner) before the
rest of the forward pass. Do this if you want to have an end-to-end model that can process
raw feature values -- but keep in mind that your model will only be able to run on CPU,
since most types of feature preprocessing (e.g. string preprocessing) are not GPU or TPU compatible.
Do not do this on GPU / TPU or in performance-sensitive settings. In general, you want to do in-model
preprocessing when you do inference on CPU.
In our case, we will apply the `FeatureSpace` in the tf.data pipeline during training, but we will
do inference with an end-to-end model that includes the `FeatureSpace`.
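For reference, a minimal sketch of the in-model (synchronous) option looks like this -- it only
relies on the `feature_space` we adapted above, and the resulting model can be fit directly on
datasets that yield dicts of raw feature values:
```python
dict_inputs = feature_space.get_inputs()
encoded_features = feature_space.get_encoded_features()
outputs = keras.layers.Dense(1, activation="sigmoid")(encoded_features)
end_to_end_model = keras.Model(inputs=dict_inputs, outputs=outputs)
end_to_end_model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# No preprocessing map is needed: `train_ds` already yields (raw feature dict, label) pairs.
# end_to_end_model.fit(train_ds, epochs=1)
```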
Let's create a training and validation dataset of preprocessed batches:
```python
preprocessed_train_ds = train_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
preprocessed_train_ds = preprocessed_train_ds.prefetch(tf.data.AUTOTUNE)
preprocessed_val_ds = val_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
)
preprocessed_val_ds = preprocessed_val_ds.prefetch(tf.data.AUTOTUNE)
```
---
## Build a model
Time to build a model -- or rather two models:
- A training model that expects preprocessed features (one sample = one vector)
- An inference model that expects raw features (one sample = dict of raw feature values)
```python
dict_inputs = feature_space.get_inputs()
encoded_features = feature_space.get_encoded_features()
x = keras.layers.Dense(32, activation="relu")(encoded_features)
x = keras.layers.Dropout(0.5)(x)
predictions = keras.layers.Dense(1, activation="sigmoid")(x)
training_model = keras.Model(inputs=encoded_features, outputs=predictions)
training_model.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
inference_model = keras.Model(inputs=dict_inputs, outputs=predictions)
```
---
## Train the model
Let's train our model for 20 epochs. Note that feature preprocessing is happening
as part of the tf.data pipeline, not as part of the model.
```python
training_model.fit(
preprocessed_train_ds,
epochs=20,
validation_data=preprocessed_val_ds,
verbose=2,
)
```
<div class="k-default-codeblock">
```
Epoch 1/20
8/8 - 3s - 352ms/step - accuracy: 0.5200 - loss: 0.7407 - val_accuracy: 0.6196 - val_loss: 0.6663
Epoch 2/20
8/8 - 0s - 20ms/step - accuracy: 0.5881 - loss: 0.6874 - val_accuracy: 0.7732 - val_loss: 0.6015
Epoch 3/20
8/8 - 0s - 19ms/step - accuracy: 0.6580 - loss: 0.6192 - val_accuracy: 0.7839 - val_loss: 0.5577
Epoch 4/20
8/8 - 0s - 19ms/step - accuracy: 0.7096 - loss: 0.5721 - val_accuracy: 0.7856 - val_loss: 0.5200
Epoch 5/20
8/8 - 0s - 18ms/step - accuracy: 0.7292 - loss: 0.5553 - val_accuracy: 0.7764 - val_loss: 0.4853
Epoch 6/20
8/8 - 0s - 19ms/step - accuracy: 0.7561 - loss: 0.5103 - val_accuracy: 0.7732 - val_loss: 0.4627
Epoch 7/20
8/8 - 0s - 19ms/step - accuracy: 0.7231 - loss: 0.5374 - val_accuracy: 0.7764 - val_loss: 0.4413
Epoch 8/20
8/8 - 0s - 19ms/step - accuracy: 0.7769 - loss: 0.4564 - val_accuracy: 0.7683 - val_loss: 0.4320
Epoch 9/20
8/8 - 0s - 18ms/step - accuracy: 0.7769 - loss: 0.4324 - val_accuracy: 0.7856 - val_loss: 0.4191
Epoch 10/20
8/8 - 0s - 19ms/step - accuracy: 0.7778 - loss: 0.4340 - val_accuracy: 0.7888 - val_loss: 0.4084
Epoch 11/20
8/8 - 0s - 19ms/step - accuracy: 0.7760 - loss: 0.4124 - val_accuracy: 0.7716 - val_loss: 0.3977
Epoch 12/20
8/8 - 0s - 19ms/step - accuracy: 0.7964 - loss: 0.4125 - val_accuracy: 0.7667 - val_loss: 0.3959
Epoch 13/20
8/8 - 0s - 18ms/step - accuracy: 0.8051 - loss: 0.3979 - val_accuracy: 0.7856 - val_loss: 0.3891
Epoch 14/20
8/8 - 0s - 19ms/step - accuracy: 0.8043 - loss: 0.3891 - val_accuracy: 0.7856 - val_loss: 0.3840
Epoch 15/20
8/8 - 0s - 18ms/step - accuracy: 0.8633 - loss: 0.3571 - val_accuracy: 0.7872 - val_loss: 0.3764
Epoch 16/20
8/8 - 0s - 19ms/step - accuracy: 0.8728 - loss: 0.3548 - val_accuracy: 0.7888 - val_loss: 0.3699
Epoch 17/20
8/8 - 0s - 19ms/step - accuracy: 0.8698 - loss: 0.3171 - val_accuracy: 0.7872 - val_loss: 0.3727
Epoch 18/20
8/8 - 0s - 18ms/step - accuracy: 0.8529 - loss: 0.3454 - val_accuracy: 0.7904 - val_loss: 0.3669
Epoch 19/20
8/8 - 0s - 17ms/step - accuracy: 0.8589 - loss: 0.3359 - val_accuracy: 0.7980 - val_loss: 0.3770
Epoch 20/20
8/8 - 0s - 17ms/step - accuracy: 0.8455 - loss: 0.3113 - val_accuracy: 0.8044 - val_loss: 0.3684
<keras.src.callbacks.history.History at 0x7f139bb4ed10>
```
</div>
We quickly get to 80% validation accuracy.
---
## Inference on new data with the end-to-end model
Now, we can use our inference model (which includes the `FeatureSpace`)
to make predictions based on dicts of raw feature values, as follows:
```python
sample = {
"age": 60,
"sex": 1,
"cp": 1,
"trestbps": 145,
"chol": 233,
"fbs": 1,
"restecg": 2,
"thalach": 150,
"exang": 0,
"oldpeak": 2.3,
"slope": 3,
"ca": 0,
"thal": "fixed",
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = inference_model.predict(input_dict)
print(
f"This particular patient had a {100 * predictions[0][0]:.2f}% probability "
"of having a heart disease, as evaluated by our model."
)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 273ms/step
This particular patient had a 43.13% probability of having a heart disease, as evaluated by our model.
```
</div> | keras-io/examples/structured_data/md/structured_data_classification_with_feature_space.md/0 | {
"file_path": "keras-io/examples/structured_data/md/structured_data_classification_with_feature_space.md",
"repo_id": "keras-io",
"token_count": 7556
} | 87 |
"""
Title: Image classification via fine-tuning with EfficientNet
Author: [Yixing Fu](https://github.com/yixingfu)
Date created: 2020/06/30
Last modified: 2023/07/10
Description: Use EfficientNet with weights pre-trained on imagenet for Stanford Dogs classification.
Accelerator: GPU
"""
"""
## Introduction: what is EfficientNet
EfficientNet, first introduced in [Tan and Le, 2019](https://arxiv.org/abs/1905.11946),
is among the most efficient models (i.e. requiring the least FLOPS for inference)
that reach state-of-the-art accuracy on both
imagenet and common image classification transfer learning tasks.
The smallest base model is similar to [MnasNet](https://arxiv.org/abs/1807.11626), which
reached near-SOTA with a significantly smaller model. By introducing a heuristic way to
scale the model, EfficientNet provides a family of models (B0 to B7) that represents a
good combination of efficiency and accuracy on a variety of scales. Such a scaling
heuristic (compound scaling, see
[Tan and Le, 2019](https://arxiv.org/abs/1905.11946) for details) allows the
efficiency-oriented base model (B0) to surpass models at every scale, while avoiding
extensive grid-search of hyperparameters.
A summary of the latest updates on the model is available
[here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet), where various
augmentation schemes and semi-supervised learning approaches are applied to further
improve the imagenet performance of the models. These extensions of the model can be used
by updating weights without changing model architecture.
## B0 to B7 variants of EfficientNet
*(This section provides some details on "compound scaling", and can be skipped
if you're only interested in using the models)*
Based on the [original paper](https://arxiv.org/abs/1905.11946) people may have the
impression that EfficientNet is a continuous family of models created by arbitrarily
choosing the scaling factor as in Eq.(3) of the paper. However, the choice of resolution,
depth and width is also restricted by many factors:
- Resolution: Resolutions not divisible by 8, 16, etc. cause zero-padding near boundaries
of some layers which wastes computational resources. This especially applies to smaller
variants of the model, hence the input resolution for B0 and B1 are chosen as 224 and
240.
- Depth and width: The building blocks of EfficientNet demand channel sizes to be
multiples of 8.
- Resource limit: Memory limitation may bottleneck resolution when depth
and width can still increase. In such a situation, increasing depth and/or
width while keeping the resolution fixed can still improve performance.
As a result, the depth, width and resolution of each variant of the EfficientNet models
are hand-picked and proven to produce good results, though they may be significantly
off from the compound scaling formula.
Therefore, the Keras implementation (detailed below) only provides these 8 models, B0 to B7,
instead of allowing arbitrary choice of width / depth / resolution parameters.
## Keras implementation of EfficientNet
An implementation of EfficientNet B0 to B7 has been shipped with Keras since v2.3. To
use EfficientNetB0 for classifying 1000 classes of images from ImageNet, run:
```python
from tensorflow.keras.applications import EfficientNetB0
model = EfficientNetB0(weights='imagenet')
```
This model takes input images of shape `(224, 224, 3)`, and the input data should be in the
range `[0, 255]`. Normalization is included as part of the model.
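As a quick illustration (a sketch -- the image path below is hypothetical), inference can be
run directly on raw `[0, 255]` pixel values, with no manual normalization step:
```python
import numpy as np
from tensorflow.keras.applications.efficientnet import decode_predictions
from tensorflow.keras.utils import img_to_array, load_img
# Load an image at the resolution expected by EfficientNetB0 and keep raw pixel values.
image = img_to_array(load_img("example.jpg", target_size=(224, 224)))
predictions = model.predict(np.expand_dims(image, axis=0))
print(decode_predictions(predictions, top=3))
```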
Training EfficientNet on ImageNet takes a tremendous amount of resources and
several techniques that are not a part of the model architecture itself. Hence the Keras
implementation by default loads pre-trained weights obtained via training with
[AutoAugment](https://arxiv.org/abs/1805.09501).
For B0 to B7 base models, the input shapes are different. Here is a list of the input
shapes expected for each model:
| Base model | resolution|
|----------------|-----|
| EfficientNetB0 | 224 |
| EfficientNetB1 | 240 |
| EfficientNetB2 | 260 |
| EfficientNetB3 | 300 |
| EfficientNetB4 | 380 |
| EfficientNetB5 | 456 |
| EfficientNetB6 | 528 |
| EfficientNetB7 | 600 |
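For instance, here is a small sketch (the dictionary below is illustrative, not part of the
Keras API) of picking the input resolution that matches a chosen variant:
```python
# Values taken from the table above.
EFFICIENTNET_RESOLUTIONS = {
    "B0": 224, "B1": 240, "B2": 260, "B3": 300,
    "B4": 380, "B5": 456, "B6": 528, "B7": 600,
}
variant = "B0"
img_size = EFFICIENTNET_RESOLUTIONS[variant]  # 224
```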
When the model is intended for transfer learning, the Keras implementation
provides an option to remove the top layers:
```
model = EfficientNetB0(include_top=False, weights='imagenet')
```
This option excludes the final `Dense` layer that turns 1280 features on the penultimate
layer into prediction of the 1000 ImageNet classes. Replacing the top layer with custom
layers allows using EfficientNet as a feature extractor in a transfer learning workflow.
Another argument in the model constructor worth noticing is `drop_connect_rate` which controls
the dropout rate responsible for [stochastic depth](https://arxiv.org/abs/1603.09382).
This parameter serves as a toggle for extra regularization in finetuning, but does not
affect loaded weights. For example, when stronger regularization is desired, try:
```python
model = EfficientNetB0(weights='imagenet', drop_connect_rate=0.4)
```
The default value is 0.2.
## Example: EfficientNetB0 for Stanford Dogs.
EfficientNet is capable of a wide range of image classification tasks.
This makes it a good model for transfer learning.
As an end-to-end example, we will show using pre-trained EfficientNetB0 on
[Stanford Dogs](http://vision.stanford.edu/aditya86/ImageNetDogs/main.html) dataset.
"""
"""
## Setup and data loading
"""
import numpy as np
import tensorflow_datasets as tfds
import tensorflow as tf # For tf.data
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras.applications import EfficientNetB0
# IMG_SIZE is determined by EfficientNet model choice
IMG_SIZE = 224
BATCH_SIZE = 64
"""
### Loading data
Here we load data from [tensorflow_datasets](https://www.tensorflow.org/datasets)
(hereafter TFDS).
Stanford Dogs dataset is provided in
TFDS as [stanford_dogs](https://www.tensorflow.org/datasets/catalog/stanford_dogs).
It features 20,580 images that belong to 120 classes of dog breeds
(12,000 for training and 8,580 for testing).
By simply changing `dataset_name` below, you may also try this notebook for
other datasets in TFDS such as
[cifar10](https://www.tensorflow.org/datasets/catalog/cifar10),
[cifar100](https://www.tensorflow.org/datasets/catalog/cifar100),
[food101](https://www.tensorflow.org/datasets/catalog/food101),
etc. When the images are much smaller than the size of EfficientNet input,
we can simply upsample the input images. It has been shown in
[Tan and Le, 2019](https://arxiv.org/abs/1905.11946) that transfer learning
results are better for increased resolution even if input images remain small.
"""
dataset_name = "stanford_dogs"
(ds_train, ds_test), ds_info = tfds.load(
dataset_name, split=["train", "test"], with_info=True, as_supervised=True
)
NUM_CLASSES = ds_info.features["label"].num_classes
"""
When the dataset includes images of various sizes, we need to resize them into a
shared size. The Stanford Dogs dataset includes only images at least 200x200
pixels in size. Here we resize the images to the input size needed for EfficientNet.
"""
size = (IMG_SIZE, IMG_SIZE)
ds_train = ds_train.map(lambda image, label: (tf.image.resize(image, size), label))
ds_test = ds_test.map(lambda image, label: (tf.image.resize(image, size), label))
"""
### Visualizing the data
The following code shows the first 9 images with their labels.
"""
def format_label(label):
string_label = label_info.int2str(label)
return string_label.split("-")[1]
label_info = ds_info.features["label"]
for i, (image, label) in enumerate(ds_train.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image.numpy().astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
"""
### Data augmentation
We can use the preprocessing layers APIs for image augmentation.
"""
img_augmentation_layers = [
layers.RandomRotation(factor=0.15),
layers.RandomTranslation(height_factor=0.1, width_factor=0.1),
layers.RandomFlip(),
layers.RandomContrast(factor=0.1),
]
def img_augmentation(images):
for layer in img_augmentation_layers:
images = layer(images)
return images
"""
These augmentation layers can be used both as a part of
the model we later build, and as a function to preprocess
data before feeding it into the model. Using them as a function makes
it easy to visualize the augmented images. Here we plot 9 examples
of the augmentation results for a given image.
"""
for image, label in ds_train.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
aug_img = img_augmentation(np.expand_dims(image.numpy(), axis=0))
aug_img = np.array(aug_img)
plt.imshow(aug_img[0].astype("uint8"))
plt.title("{}".format(format_label(label)))
plt.axis("off")
"""
### Prepare inputs
Once we verify the input data and augmentation are working correctly,
we prepare the dataset for training. The input data are resized to a uniform
`IMG_SIZE`. The labels are put into one-hot
(a.k.a. categorical) encoding. The dataset is batched.
Note: `prefetch` and `AUTOTUNE` may in some situations improve
performance, but this depends on the environment and the specific dataset used.
See this [guide](https://www.tensorflow.org/guide/data_performance)
for more information on data pipeline performance.
"""
# One-hot / categorical encoding
def input_preprocess_train(image, label):
image = img_augmentation(image)
label = tf.one_hot(label, NUM_CLASSES)
return image, label
def input_preprocess_test(image, label):
label = tf.one_hot(label, NUM_CLASSES)
return image, label
ds_train = ds_train.map(input_preprocess_train, num_parallel_calls=tf.data.AUTOTUNE)
ds_train = ds_train.batch(batch_size=BATCH_SIZE, drop_remainder=True)
ds_train = ds_train.prefetch(tf.data.AUTOTUNE)
ds_test = ds_test.map(input_preprocess_test, num_parallel_calls=tf.data.AUTOTUNE)
ds_test = ds_test.batch(batch_size=BATCH_SIZE, drop_remainder=True)
"""
## Training a model from scratch
We build an EfficientNetB0 with 120 output classes, that is initialized from scratch:
Note: the accuracy will increase very slowly and may overfit.
"""
model = EfficientNetB0(
include_top=True,
weights=None,
classes=NUM_CLASSES,
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
epochs = 40 # @param {type: "slider", min:10, max:100}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test)
"""
Training the model is relatively fast. This might make it sound easy to simply train EfficientNet on any
dataset from scratch. However, training EfficientNet on smaller datasets,
especially those with lower resolution like CIFAR-100, faces the significant challenge of
overfitting.
Hence training from scratch requires a very careful choice of hyperparameters, and it is
difficult to find suitable regularization. It would also be much more demanding in resources.
Plotting the training and validation accuracy
makes it clear that validation accuracy stagnates at a low value.
"""
import matplotlib.pyplot as plt
def plot_hist(hist):
plt.plot(hist.history["accuracy"])
plt.plot(hist.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
plot_hist(hist)
"""
## Transfer learning from pre-trained weights
Here we initialize the model with pre-trained ImageNet weights,
and we fine-tune it on our own dataset.
"""
def build_model(num_classes):
inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
model = EfficientNetB0(include_top=False, input_tensor=inputs, weights="imagenet")
# Freeze the pretrained weights
model.trainable = False
# Rebuild top
x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
x = layers.BatchNormalization()(x)
top_dropout_rate = 0.2
x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
outputs = layers.Dense(num_classes, activation="softmax", name="pred")(x)
# Compile
model = keras.Model(inputs, outputs, name="EfficientNet")
optimizer = keras.optimizers.Adam(learning_rate=1e-2)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
return model
"""
The first step to transfer learning is to freeze all layers and train only the top
layers. For this step, a relatively large learning rate (1e-2) can be used.
Note that validation accuracy and loss will usually be better than training
accuracy and loss. This is because the regularization is strong, which only
suppresses training-time metrics.
Note that the convergence may take up to 50 epochs depending on choice of learning rate.
If image augmentation layers were not
applied, the validation accuracy may only reach ~60%.
"""
model = build_model(num_classes=NUM_CLASSES)
epochs = 25 # @param {type: "slider", min:8, max:80}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test)
plot_hist(hist)
"""
The second step is to unfreeze a number of layers and fit the model using a smaller
learning rate. In this example we show unfreezing all layers, but depending on the
specific dataset it may be desirable to only unfreeze a fraction of all layers.
When the feature extraction with the
pretrained model works well enough, this step would give a very limited gain on
validation accuracy. In our case we only see a small improvement,
as ImageNet pretraining already exposed the model to a good amount of dogs.
On the other hand, when we use pretrained weights on a dataset that is more different
from ImageNet, this fine-tuning step can be crucial as the feature extractor also
needs to be adjusted by a considerable amount. Such a situation can be demonstrated
if choosing CIFAR-100 dataset instead, where fine-tuning boosts validation accuracy
by about 10% to pass 80% on `EfficientNetB0`.
A side note on freezing/unfreezing models: setting `trainable` of a `Model` will
simultaneously set all layers belonging to the `Model` to the same `trainable`
attribute. Each layer is trainable only if both the layer itself and the model
containing it are trainable. Hence when we need to partially freeze/unfreeze
a model, we need to make sure the `trainable` attribute of the model is set
to `True`.
"""
def unfreeze_model(model):
# We unfreeze the top 20 layers while leaving BatchNorm layers frozen
for layer in model.layers[-20:]:
if not isinstance(layer, layers.BatchNormalization):
layer.trainable = True
optimizer = keras.optimizers.Adam(learning_rate=1e-5)
model.compile(
optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"]
)
unfreeze_model(model)
epochs = 4 # @param {type: "slider", min:4, max:10}
hist = model.fit(ds_train, epochs=epochs, validation_data=ds_test)
plot_hist(hist)
"""
### Tips for fine tuning EfficientNet
On unfreezing layers:
- The `BatchNormalization` layers need to be kept frozen
([more details](https://keras.io/guides/transfer_learning/)).
If they are also turned to trainable, the
first epoch after unfreezing will significantly reduce accuracy.
- In some cases it may be beneficial to open up only a portion of layers instead of
unfreezing all. This will make fine tuning much faster when going to larger models like
B7.
- Each block needs to be all turned on or off. This is because the architecture includes
a shortcut from the first layer to the last layer for each block. Not respecting blocks
also significantly harms the final performance.
Some other tips for utilizing EfficientNet:
- Larger variants of EfficientNet do not guarantee improved performance, especially for
tasks with less data or fewer classes. In such a case, the larger the variant of EfficientNet
chosen, the harder it is to tune hyperparameters.
- EMA (Exponential Moving Average) is very helpful in training EfficientNet from scratch,
but not so much for transfer learning.
- Do not use the RMSprop setup as in the original paper for transfer learning. The
momentum and learning rate are too high for transfer learning. It will easily corrupt the
pretrained weight and blow up the loss. A quick check is to see if loss (as categorical
cross entropy) is getting significantly larger than log(NUM_CLASSES) after the same
epoch. If so, the initial learning rate/momentum is too high.
- Smaller batch sizes benefit validation accuracy, possibly due to effectively providing
regularization.
"""
| keras-io/examples/vision/image_classification_efficientnet_fine_tuning.py/0 | {
"file_path": "keras-io/examples/vision/image_classification_efficientnet_fine_tuning.py",
"repo_id": "keras-io",
"token_count": 4949
} | 88 |
<jupyter_start><jupyter_text>Image Classification using BigTransfer (BiT)**Author:** [Sayan Nath](https://twitter.com/sayannath2350)**Date created:** 2021/09/24**Last modified:** 2023/12/22**Description:** BigTransfer (BiT) State-of-the-art transfer learning for image classification. IntroductionBigTransfer (also known as BiT) is a state-of-the-art transfer learning method for imageclassification. Transfer of pre-trained representations improves sample efficiency andsimplifies hyperparameter tuning when training deep neural networks for vision. BiTrevisit the paradigm of pre-training on large supervised datasets and fine-tuning themodel on a target task. The importance of appropriately choosing normalization layers andscaling the architecture capacity as the amount of pre-training data increases.BigTransfer(BiT) is trained on public datasets, along with code in[TF2, Jax and Pytorch](https://github.com/google-research/big_transfer). This will help anyone to reachstate of the art performance on their task of interest, even with just a handful oflabeled images per class.You can find BiT models pre-trained on[ImageNet](https://image-net.org/challenges/LSVRC/2012/index) and ImageNet-21k in[TFHub](https://tfhub.dev/google/collections/bit/1) as TensorFlow2 SavedModels that youcan use easily as Keras Layers. There are a variety of sizes ranging from a standardResNet50 to a ResNet152x4 (152 layers deep, 4x wider than a typical ResNet50) for userswith larger computational and memory budgets but higher accuracy requirements.Figure: The x-axis shows the number of images used per class, ranging from 1 to the fulldataset. On the plots on the left, the curve in blue above is our BiT-L model, whereasthe curve below is a ResNet-50 pre-trained on ImageNet (ILSVRC-2012). Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import ops
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
SEEDS = 42
keras.utils.set_random_seed(SEEDS)<jupyter_output><empty_output><jupyter_text>Gather Flower Dataset<jupyter_code>train_ds, validation_ds = tfds.load(
"tf_flowers",
split=["train[:85%]", "train[85%:]"],
as_supervised=True,
)<jupyter_output><empty_output><jupyter_text>Visualise the dataset<jupyter_code>plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>RESIZE_TO = 384
CROP_TO = 224
BATCH_SIZE = 64
STEPS_PER_EPOCH = 10
AUTO = tf.data.AUTOTUNE # optimise the pipeline performance
NUM_CLASSES = 5 # number of classes
SCHEDULE_LENGTH = (
500 # we will train on lower resolution images and will still attain good results
)
SCHEDULE_BOUNDARIES = [
200,
300,
400,
] # the larger the dataset, the longer the schedule<jupyter_output><empty_output><jupyter_text>The hyperparameters like `SCHEDULE_LENGTH` and `SCHEDULE_BOUNDARIES` are determined basedon empirical results. The method has been explained in the [originalpaper](https://arxiv.org/abs/1912.11370) and in their [Google AI BlogPost](https://ai.googleblog.com/2020/05/open-sourcing-bit-exploring-large-scale.html).The `SCHEDULE_LENGTH` also determines whether to use [MixUpAugmentation](https://arxiv.org/abs/1710.09412) or not. You can also find an easy MixUpImplementation in [Keras Coding Examples](https://keras.io/examples/vision/mixup/). Define preprocessing helper functions<jupyter_code>SCHEDULE_LENGTH = SCHEDULE_LENGTH * 512 / BATCH_SIZE
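# The BiT hyper-rule is defined for a reference batch size of 512: the schedule length above
# is rescaled by 512 / BATCH_SIZE, and the learning rate (further below) by BATCH_SIZE / 512.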
random_flip = keras.layers.RandomFlip("horizontal")
random_crop = keras.layers.RandomCrop(CROP_TO, CROP_TO)
def preprocess_train(image, label):
image = random_flip(image)
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = random_crop(image)
image = image / 255.0
return (image, label)
def preprocess_test(image, label):
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = ops.cast(image, dtype="float32")
image = image / 255.0
return (image, label)
DATASET_NUM_TRAIN_EXAMPLES = train_ds.cardinality().numpy()
repeat_count = int(
SCHEDULE_LENGTH * BATCH_SIZE / DATASET_NUM_TRAIN_EXAMPLES * STEPS_PER_EPOCH
)
repeat_count += 50 + 1  # To ensure there are at least 50 epochs of training<jupyter_output><empty_output><jupyter_text>Define the data pipeline<jupyter_code># Training pipeline
pipeline_train = (
train_ds.shuffle(10000)
.repeat(repeat_count) # Repeat dataset_size / num_steps
.map(preprocess_train, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Validation pipeline
pipeline_validation = (
validation_ds.map(preprocess_test, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)<jupyter_output><empty_output><jupyter_text>Visualise the training samples<jupyter_code>image_batch, label_batch = next(iter(pipeline_train))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n])
plt.title(label_batch[n].numpy())
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Load pretrained TF-Hub model into a `KerasLayer`<jupyter_code>bit_model_url = "https://tfhub.dev/google/bit/m-r50x1/1"
bit_module = hub.load(bit_model_url)<jupyter_output><empty_output><jupyter_text>Create BigTransfer (BiT) modelTo create the new model, we:1. Cut off the BiT model’s original head. This leaves us with the “pre-logits” output.We do not have to do this if we use the ‘feature extractor’ models (i.e. all those insubdirectories titled `feature_vectors`), since for those models the head has alreadybeen cut off.2. Add a new head with the number of outputs equal to the number of classes of our newtask. Note that it is important that we initialise the head to all zeroes.<jupyter_code>class MyBiTModel(keras.Model):
def __init__(self, num_classes, module, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.head = keras.layers.Dense(num_classes, kernel_initializer="zeros")
self.bit_model = module
def call(self, images):
bit_embedding = self.bit_model(images)
return self.head(bit_embedding)
model = MyBiTModel(num_classes=NUM_CLASSES, module=bit_module)<jupyter_output><empty_output><jupyter_text>Define optimizer and loss<jupyter_code>learning_rate = 0.003 * BATCH_SIZE / 512
# Decay learning rate by a factor of 10 at SCHEDULE_BOUNDARIES.
lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=SCHEDULE_BOUNDARIES,
values=[
learning_rate,
learning_rate * 0.1,
learning_rate * 0.01,
learning_rate * 0.001,
],
)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)<jupyter_output><empty_output><jupyter_text>Compile the model<jupyter_code>model.compile(optimizer=optimizer, loss=loss_fn, metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>Set up callbacks<jupyter_code>train_callbacks = [
keras.callbacks.EarlyStopping(
monitor="val_accuracy", patience=2, restore_best_weights=True
)
]<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>history = model.fit(
pipeline_train,
batch_size=BATCH_SIZE,
epochs=int(SCHEDULE_LENGTH / STEPS_PER_EPOCH),
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=pipeline_validation,
callbacks=train_callbacks,
)<jupyter_output><empty_output><jupyter_text>Plot the training and validation metrics<jupyter_code>def plot_hist(hist):
plt.plot(hist.history["accuracy"])
plt.plot(hist.history["val_accuracy"])
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.title("Training Progress")
plt.ylabel("Accuracy/Loss")
plt.xlabel("Epochs")
plt.legend(["train_acc", "val_acc", "train_loss", "val_loss"], loc="upper left")
plt.show()
plot_hist(history)<jupyter_output><empty_output><jupyter_text>Evaluate the model<jupyter_code>accuracy = model.evaluate(pipeline_validation)[1] * 100
print("Accuracy: {:.2f}%".format(accuracy))<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/bit.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/bit.ipynb",
"repo_id": "keras-io",
"token_count": 3057
} | 89 |
<jupyter_start><jupyter_text>Image Segmentation using Composable Fully-Convolutional Networks**Author:** [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)**Date created:** 2023/06/16**Last modified:** 2023/12/25**Description:** Using the Fully-Convolutional Network for Image Segmentation. IntroductionThe following example walks through the steps to implement Fully-Convolutional Networks for Image Segmentation on the Oxford-IIIT Pets dataset. The model was proposed in the paper, [Fully Convolutional Networks for Semantic Segmentation by Long et al. (2014)](https://arxiv.org/abs/1411.4038). Image segmentation is one of the most common and introductory tasks when it comes to Computer Vision, where we extend the problem of Image Classification from one-label-per-image to a pixel-wise classification problem. In this example, we will assemble the aforementioned Fully-Convolutional Segmentation architecture, capable of performing Image Segmentation. The network extends the pooling layer outputs from the VGG in order to perform upsampling and get a final result. The intermediate outputs coming from the 3rd, 4th and 5th Max-Pooling layers from VGG19 are extracted out and upsampled at different levels and factors to get a final output with the same shape as that of the input, but with the class of each pixel present at each location instead of pixel intensity values. Different intermediate pool layers are extracted and processed for different versions of the network. The FCN architecture has 3 versions of differing quality:- FCN-32S- FCN-16S- FCN-8SAll versions of the model derive their outputs through an iterative processing of successive intermediate pool layers of the main backbone used. A better idea can be gained from the figure below.| || :--: || **Diagram 1**: Combined Architecture Versions (Source: Paper) |To get a better idea on Image Segmentation or find more pre-trained models, feel free to navigate to the [Hugging Face Image Segmentation Models](https://huggingface.co/models?pipeline_tag=image-segmentation) page, or a [PyImageSearch Blog on Semantic Segmentation](https://pyimagesearch.com/2018/09/03/semantic-segmentation-with-opencv-and-deep-learning/) Setup Imports<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import ops
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import numpy as np
AUTOTUNE = tf.data.AUTOTUNE<jupyter_output><empty_output><jupyter_text>Set configurations for notebook variablesWe set the required parameters for the experiment.The chosen dataset has a total of 4 classes per image, with regards to the segmentation mask.We also set our hyperparameters in this cell.Mixed Precision as an option is also available in systems which support it, to reduceload.This would make most tensors use `16-bit float` values instead of `32-bit float`values, in places where it will not adversely affect computation.This means, during computation, TensorFlow will use `16-bit float` Tensors to increase speed at the cost of precision,while storing the values in their original default `32-bit float` form.<jupyter_code>NUM_CLASSES = 4
INPUT_HEIGHT = 224
INPUT_WIDTH = 224
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
EPOCHS = 20
BATCH_SIZE = 32
MIXED_PRECISION = True
SHUFFLE = True
# Mixed-precision setting
if MIXED_PRECISION:
policy = keras.mixed_precision.Policy("mixed_float16")
keras.mixed_precision.set_global_policy(policy)<jupyter_output><empty_output><jupyter_text>Load datasetWe make use of the [Oxford-IIIT Pets dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/)which contains a total of 7,349 samples and their segmentation masks.We have 37 classes, with roughly 200 samples per class.Our training and validation dataset has 3,128 and 552 samples respectively.Aside from this, our test split has a total of 3,669 samples.We set a `batch_size` parameter that will batch our samples together, use a `shuffle`parameter to mix our samples together.<jupyter_code>(train_ds, valid_ds, test_ds) = tfds.load(
"oxford_iiit_pet",
split=["train[:85%]", "train[85%:]", "test"],
batch_size=BATCH_SIZE,
shuffle_files=SHUFFLE,
)<jupyter_output><empty_output><jupyter_text>Unpack and preprocess datasetWe define a simple function that performs resizing over our training, validation and test datasets. We do the same process on the masks as well, to make sure both are aligned in terms of shape and size.<jupyter_code># Image and Mask Pre-processing
def unpack_resize_data(section):
image = section["image"]
segmentation_mask = section["segmentation_mask"]
resize_layer = keras.layers.Resizing(INPUT_HEIGHT, INPUT_WIDTH)
image = resize_layer(image)
segmentation_mask = resize_layer(segmentation_mask)
return image, segmentation_mask
train_ds = train_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
valid_ds = valid_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Visualize one random sample from the pre-processed datasetWe visualize what a random sample in our test split of the dataset looks like, and plotthe segmentation mask on top to see the effective mask areas.Note that we have performed pre-processing on this dataset too,which makes the image and mask size same.<jupyter_code># Select random image and mask. Cast to NumPy array
# for Matplotlib visualization.
images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
# Overlay segmentation mask on top of image.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].set_title("Image")
ax[0].imshow(test_image / 255.0)
ax[1].set_title("Image with segmentation mask overlay")
ax[1].imshow(test_image / 255.0)
ax[1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
plt.show()<jupyter_output><empty_output><jupyter_text>Perform VGG-specific pre-processing`keras.applications.VGG19` requires the use of a `preprocess_input` function that proactively applies the ImageNet-style preprocessing the backbone was trained with.<jupyter_code>def preprocess_data(image, segmentation_mask):
image = keras.applications.vgg19.preprocess_input(image)
return image, segmentation_mask
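# Note (aside): `vgg19.preprocess_input` converts images from RGB to BGR and
# zero-centers each channel with the ImageNet means, matching the statistics the
# pretrained backbone expects; the segmentation mask is left untouched.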
train_ds = (
train_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
valid_ds = (
valid_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
test_ds = (
test_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)<jupyter_output><empty_output><jupyter_text>Model DefinitionThe Fully-Convolutional Network boasts a simple architecture composed of only`keras.layers.Conv2D` Layers, `keras.layers.Dense` layers and `keras.layers.Dropout`layers.| || :--: || **Diagram 2**: Generic FCN Forward Pass (Source: Paper)|Pixel-wise prediction is performed by having a Softmax Convolutional layer with the samesize of the image, such that we can perform direct comparisonWe can find several important metrics such as Accuracy and Mean-Intersection-over-Union on the network. Backbone (VGG-19)We use the [VGG-19 network](https://keras.io/api/applications/vgg/) as the backbone, asthe paper suggests it to be one of the most effective backbones for this network.We extract different outputs from the network by making use of `keras.models.Model`.Following this, we add layers on top to make a network perfectly simulating that ofDiagram 1.The backbone's `keras.layers.Dense` layers will be converted to `keras.layers.Conv2D`layers based on the [original Caffe code present here.](https://github.com/linxi159/FCN-caffe/blob/master/pascalcontext-fcn16s/net.py)All 3 networks will share the same backbone weights, but will have differing resultsbased on their extensions.We make the backbone non-trainable to improve training time requirements.It is also noted in the paper that making the network trainable does not yield major benefits.<jupyter_code>input_layer = keras.Input(shape=(INPUT_HEIGHT, INPUT_WIDTH, 3))
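# Conceptual summary (an aside, not from the original notebook) of how the three
# heads built below combine the VGG pool outputs:
#   FCN-32S: 1x1 conv head on pool5, a softmax 1x1 conv, then a single 32x bilinear upsample.
#   FCN-16S: zero-initialised 1x1 conv on pool4 + 2x upsampled pool5 head, then a 16x upsample.
#   FCN-8S:  zero-initialised 1x1 conv on pool3 + 2x upsampled FCN-16S sum, then an 8x upsample.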
# VGG Model backbone with pre-trained ImageNet weights.
vgg_model = keras.applications.vgg19.VGG19(include_top=True, weights="imagenet")
# Extracting different outputs from same model
fcn_backbone = keras.models.Model(
inputs=vgg_model.layers[1].input,
outputs=[
vgg_model.get_layer(block_name).output
for block_name in ["block3_pool", "block4_pool", "block5_pool"]
],
)
# Setting backbone to be non-trainable
fcn_backbone.trainable = False
x = fcn_backbone(input_layer)
# Converting Dense layers to Conv2D layers
units = [4096, 4096]
dense_convs = []
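# A Dense layer acting on the flattened 7x7x512 pool5 output is equivalent to a
# 7x7 convolution over it, and the second 4096-unit Dense layer becomes a 1x1
# convolution; this is why the kernel sizes below are (7, 7) and then (1, 1).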
for filter_idx in range(len(units)):
dense_conv = keras.layers.Conv2D(
filters=units[filter_idx],
kernel_size=(7, 7) if filter_idx == 0 else (1, 1),
strides=(1, 1),
activation="relu",
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.Constant(1.0),
)
dense_convs.append(dense_conv)
dropout_layer = keras.layers.Dropout(0.5)
dense_convs.append(dropout_layer)
dense_convs = keras.Sequential(dense_convs)
dense_convs.trainable = False
x[-1] = dense_convs(x[-1])
pool3_output, pool4_output, pool5_output = x<jupyter_output><empty_output><jupyter_text>FCN-32SWe extend the last output, perform a `1x1 Convolution` and perform 2D Bilinear Upsamplingby a factor of 32 to get an image of the same size as that of our input.We use a simple `keras.layers.UpSampling2D` layer over a `keras.layers.Conv2DTranspose`since it yields performance benefits from being a deterministic mathematical operationover a Convolutional operationIt is also noted in the paper that making the Up-sampling parameters trainable does not yield benefits.Original experiments of the paper used Upsampling as well.<jupyter_code># 1x1 convolution to set channels = number of classes
pool5 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="relu",
)
# Get Softmax outputs for all classes
fcn32s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn32s_upsampling = keras.layers.UpSampling2D(
size=(32, 32),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
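# For a 224x224 input, block5_pool (and the converted dense head, which uses
# "same" padding) produces a 7x7 feature map, so a 32x bilinear upsample restores
# the full 224x224 resolution.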
final_fcn32s_pool = pool5(pool5_output)
final_fcn32s_output = fcn32s_conv_layer(final_fcn32s_pool)
final_fcn32s_output = fcn32s_upsampling(final_fcn32s_output)
fcn32s_model = keras.Model(inputs=input_layer, outputs=final_fcn32s_output)<jupyter_output><empty_output><jupyter_text>FCN-16SThe pooling output from the FCN-32S is extended and added to the 4th-level Pooling outputof our backbone.Following this, we upsample by a factor of 16 to get image of the samesize as that of our input.<jupyter_code># 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool4 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool4_output)
# Intermediate up-sample
pool5 = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn32s_pool)
# Get Softmax outputs for all classes
fcn16s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn16s_upsample_layer = keras.layers.UpSampling2D(
size=(16, 16),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn16s_pool = keras.layers.Add()([pool4, pool5])
final_fcn16s_output = fcn16s_conv_layer(final_fcn16s_pool)
final_fcn16s_output = fcn16s_upsample_layer(final_fcn16s_output)
fcn16s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn16s_output)<jupyter_output><empty_output><jupyter_text>FCN-8SThe pooling output from the FCN-16S is extended once more, and added from the 3rd-levelPooling output of our backbone.This result is upsampled by a factor of 8 to get an image of the same size as that of our input.<jupyter_code># 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool3 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool3_output)
# Intermediate up-sample
intermediate_pool_output = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn16s_pool)
# Get Softmax outputs for all classes
fcn8s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn8s_upsample_layer = keras.layers.UpSampling2D(
size=(8, 8),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn8s_pool = keras.layers.Add()([pool3, intermediate_pool_output])
final_fcn8s_output = fcn8s_conv_layer(final_fcn8s_pool)
final_fcn8s_output = fcn8s_upsample_layer(final_fcn8s_output)
fcn8s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn8s_output)<jupyter_output><empty_output><jupyter_text>Load weights into backboneIt was noted in the paper, as well as through experimentation that extracting the weightsof the last 2 Fully-connected Dense layers from the backbone, reshaping the weights tofit that of the `keras.layers.Dense` layers we had previously converted into`keras.layers.Conv2D`, and setting them to it yields far better results and a significantincrease in mIOU performance.<jupyter_code># VGG's last 2 layers
weights1 = vgg_model.get_layer("fc1").get_weights()[0]
weights2 = vgg_model.get_layer("fc2").get_weights()[0]
weights1 = weights1.reshape(7, 7, 512, 4096)
weights2 = weights2.reshape(1, 1, 4096, 4096)
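# fc1 has a (25088, 4096) kernel, i.e. (7 * 7 * 512, 4096) flattened, so reshaping
# it to (7, 7, 512, 4096) yields an equivalent 7x7 convolution kernel; fc2's
# (4096, 4096) kernel likewise becomes a 1x1 convolution kernel.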
dense_convs.layers[0].set_weights([weights1])
dense_convs.layers[2].set_weights([weights2])<jupyter_output><empty_output><jupyter_text>TrainingThe original paper talks about making use of [SGD with Momentum](https://keras.io/api/optimizers/sgd/) as the optimizer of choice.But it was noticed during experimentation that[AdamW](https://keras.io/api/optimizers/adamw/)yielded better results in terms of mIOU and Pixel-wise Accuracy. FCN-32S<jupyter_code>fcn32s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn32s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn32s_model.compile(
optimizer=fcn32s_optimizer,
loss=fcn32s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn32s_history = fcn32s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)<jupyter_output><empty_output><jupyter_text>FCN-16S<jupyter_code>fcn16s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn16s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn16s_model.compile(
optimizer=fcn16s_optimizer,
loss=fcn16s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn16s_history = fcn16s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)<jupyter_output><empty_output><jupyter_text>FCN-8S<jupyter_code>fcn8s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn8s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn8s_model.compile(
optimizer=fcn8s_optimizer,
loss=fcn8s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn8s_history = fcn8s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)<jupyter_output><empty_output><jupyter_text>Visualizations Plotting metrics for training runWe perform a comparative study between all 3 versions of the model by tracking trainingand validation metrics of Accuracy, Loss and Mean IoU.<jupyter_code>total_plots = len(fcn32s_history.history)
cols = total_plots // 2
rows = total_plots // cols
if total_plots % cols != 0:
rows += 1
# Set all history dictionary objects
fcn32s_dict = fcn32s_history.history
fcn16s_dict = fcn16s_history.history
fcn8s_dict = fcn8s_history.history
pos = range(1, total_plots + 1)
plt.figure(figsize=(15, 10))
for i, ((key_32s, value_32s), (key_16s, value_16s), (key_8s, value_8s)) in enumerate(
zip(fcn32s_dict.items(), fcn16s_dict.items(), fcn8s_dict.items())
):
plt.subplot(rows, cols, pos[i])
plt.plot(range(len(value_32s)), value_32s)
plt.plot(range(len(value_16s)), value_16s)
plt.plot(range(len(value_8s)), value_8s)
plt.title(str(key_32s) + " (combined)")
plt.legend(["FCN-32S", "FCN-16S", "FCN-8S"])
plt.show()<jupyter_output><empty_output><jupyter_text>Visualizing predicted segmentation masksTo understand the results and see them better, we pick a random image from the testdataset and perform inference on it to see the masks generated by each model.Note: For better results, the model must be trained for a higher number of epochs.<jupyter_code>images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
# Get random test image and mask
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
pred_image = ops.expand_dims(test_image, axis=0)
pred_image = keras.applications.vgg19.preprocess_input(pred_image)
# Perform inference on FCN-32S
pred_mask_32s = fcn32s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_32s = np.argmax(pred_mask_32s, axis=-1)
pred_mask_32s = pred_mask_32s[0, ...]
# Perform inference on FCN-16S
pred_mask_16s = fcn16s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_16s = np.argmax(pred_mask_16s, axis=-1)
pred_mask_16s = pred_mask_16s[0, ...]
# Perform inference on FCN-8S
pred_mask_8s = fcn8s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_8s = np.argmax(pred_mask_8s, axis=-1)
pred_mask_8s = pred_mask_8s[0, ...]
# Plot all results
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15, 8))
fig.delaxes(ax[0, 2])
ax[0, 0].set_title("Image")
ax[0, 0].imshow(test_image / 255.0)
ax[0, 1].set_title("Image with ground truth overlay")
ax[0, 1].imshow(test_image / 255.0)
ax[0, 1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
ax[1, 0].set_title("Image with FCN-32S mask overlay")
ax[1, 0].imshow(test_image / 255.0)
ax[1, 0].imshow(pred_mask_32s, cmap="inferno", alpha=0.6)
ax[1, 1].set_title("Image with FCN-16S mask overlay")
ax[1, 1].imshow(test_image / 255.0)
ax[1, 1].imshow(pred_mask_16s, cmap="inferno", alpha=0.6)
ax[1, 2].set_title("Image with FCN-8S mask overlay")
ax[1, 2].imshow(test_image / 255.0)
ax[1, 2].imshow(pred_mask_8s, cmap="inferno", alpha=0.6)
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/fully_convolutional_network.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/fully_convolutional_network.ipynb",
"repo_id": "keras-io",
"token_count": 7096
} | 90 |
<jupyter_start><jupyter_text>Metric learning for image similarity search using TensorFlow Similarity**Author:** [Owen Vallis](https://twitter.com/owenvallis)**Date created:** 2021/09/30**Last modified:** 2022/02/29**Description:** Example of using similarity metric learning on CIFAR-10 images. OverviewThis example is based on the["Metric learning for image similarity search" example](https://keras.io/examples/vision/metric_learning/).We aim to use the same data set but implement the model using[TensorFlow Similarity](https://github.com/tensorflow/similarity).Metric learning aims to train models that can embed inputs into ahigh-dimensional space such that "similar" inputs are pulled closer to eachother and "dissimilar" inputs are pushed farther apart. Once trained, thesemodels can produce embeddings for downstream systems where such similarity isuseful, for instance as a ranking signal for search or as a form of pretrainedembedding model for another supervised problem.For a more detailed overview of metric learning, see:* [What is metric learning?](http://contrib.scikit-learn.org/metric-learn/introduction.html)* ["Using crossentropy for metric learning" tutorial](https://www.youtube.com/watch?v=Jb4Ewl5RzkI) SetupThis tutorial will use the [TensorFlow Similarity](https://github.com/tensorflow/similarity) libraryto learn and evaluate the similarity embedding.TensorFlow Similarity provides components that:* Make training contrastive models simple and fast.* Make it easier to ensure that batches contain pairs of examples.* Enable the evaluation of the quality of the embedding.TensorFlow Similarity can be installed easily via pip, as follows:```pip -q install tensorflow_similarity```<jupyter_code>import random
from matplotlib import pyplot as plt
from mpl_toolkits import axes_grid1
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_similarity as tfsim
tfsim.utils.tf_cap_memory()
print("TensorFlow:", tf.__version__)
print("TensorFlow Similarity:", tfsim.__version__)<jupyter_output>TensorFlow: 2.7.0
TensorFlow Similarity: 0.15.5<jupyter_text>Dataset samplersWe will be using the [CIFAR-10](https://www.tensorflow.org/datasets/catalog/cifar10) dataset for this tutorial. For a similarity model to learn efficiently, each batch must contain at least 2 examples of each class. To make this easy, tf_similarity offers `Sampler` objects that enable you to set both the number of classes and the minimum number of examples of each class per batch. The train and validation datasets will be created using the `TFDatasetMultiShotMemorySampler` object. This creates a sampler that loads datasets from [TensorFlow Datasets](https://www.tensorflow.org/datasets) and yields batches containing a target number of classes and a target number of examples per class. Additionally, we can restrict the sampler to only yield the subset of classes defined in `class_list`, enabling us to train on a subset of the classes and then test how the embedding generalizes to the unseen classes. This can be useful when working on few-shot learning problems.The following cell creates a train_ds sampler that:* Loads the CIFAR-10 dataset from TFDS and then takes the `examples_per_class_per_batch`.* Ensures the sampler restricts the classes to those defined in `class_list`.* Ensures each batch contains 10 different classes with 8 examples each.We also create a validation dataset in the same way, but we limit the total number of examples per class to 100 and the examples per class per batch is set to the default of 2.<jupyter_code># This determines the number of classes used during training.
# Here we are using all the classes.
num_known_classes = 10
class_list = random.sample(population=range(10), k=num_known_classes)
classes_per_batch = 10
# Passing multiple examples per class per batch ensures that each example has
# multiple positive pairs. This can be useful when performing triplet mining or
# when using losses like `MultiSimilarityLoss` or `CircleLoss` as these can
# take a weighted mix of all the positive pairs. In general, more examples per
# class will lead to more information for the positive pairs, while more classes
# per batch will provide more varied information in the negative pairs. However,
# the losses compute the pairwise distance between the examples in a batch so
# the upper limit of the batch size is restricted by the memory.
examples_per_class_per_batch = 8
print(
"Batch size is: "
f"{min(classes_per_batch, num_known_classes) * examples_per_class_per_batch}"
)
print(" Create Training Data ".center(34, "#"))
train_ds = tfsim.samplers.TFDatasetMultiShotMemorySampler(
"cifar10",
classes_per_batch=min(classes_per_batch, num_known_classes),
splits="train",
steps_per_epoch=4000,
examples_per_class_per_batch=examples_per_class_per_batch,
class_list=class_list,
)
print("\n" + " Create Validation Data ".center(34, "#"))
val_ds = tfsim.samplers.TFDatasetMultiShotMemorySampler(
"cifar10",
classes_per_batch=classes_per_batch,
splits="test",
total_examples_per_class=100,
)<jupyter_output>Batch size is: 80
###### Create Training Data ######<jupyter_text>Visualize the datasetThe samplers will shuffle the dataset, so we can get a sense of the dataset byplotting the first 25 images.The samplers provide a `get_slice(begin, size)` method that allows us to easilyselect a block of samples.Alternatively, we can use the `generate_batch()` method to yield a batch. Thiscan allow us to check that a batch contains the expected number of classes andexamples per class.<jupyter_code>num_cols = num_rows = 5
# Get the first 25 examples.
x_slice, y_slice = train_ds.get_slice(begin=0, size=num_cols * num_rows)
fig = plt.figure(figsize=(6.0, 6.0))
grid = axes_grid1.ImageGrid(fig, 111, nrows_ncols=(num_cols, num_rows), axes_pad=0.1)
for ax, im, label in zip(grid, x_slice, y_slice):
ax.imshow(im)
ax.axis("off")<jupyter_output><empty_output><jupyter_text>Embedding modelNext we define a `SimilarityModel` using the Keras Functional API. The modelis a standard convnet with the addition of a `MetricEmbedding` layer thatapplies L2 normalization. The metric embedding layer is helpful when using`Cosine` distance as we only care about the angle between the vectors.Additionally, the `SimilarityModel` provides a number of helper methods for:* Indexing embedded examples* Performing example lookups* Evaluating the classification* Evaluating the quality of the embedding spaceSee the [TensorFlow Similarity documentation](https://github.com/tensorflow/similarity)for more details.<jupyter_code>embedding_size = 256
inputs = keras.layers.Input((32, 32, 3))
x = keras.layers.Rescaling(scale=1.0 / 255)(inputs)
x = keras.layers.Conv2D(64, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Conv2D(128, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.MaxPool2D((4, 4))(x)
x = keras.layers.Conv2D(256, 3, activation="relu")(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Conv2D(256, 3, activation="relu")(x)
x = keras.layers.GlobalMaxPool2D()(x)
outputs = tfsim.layers.MetricEmbedding(embedding_size)(x)
# building model
model = tfsim.models.SimilarityModel(inputs, outputs)
model.summary()<jupyter_output>Model: "similarity_model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 32, 32, 3)] 0
rescaling_1 (Rescaling) (None, 32, 32, 3) 0
conv2d_4 (Conv2D) (None, 30, 30, 64) 1792
batch_normalization_3 (Batc (None, 30, 30, 64) 256
hNormalization)
conv2d_5 (Conv2D) (None, 28, 28, 128) 73856
batch_normalization_4 (Batc (None, 28, 28, 128[...]<jupyter_text>Similarity lossThe similarity loss expects batches containing at least 2 examples of each class, from which it computes the loss over the pairwise positive and negative distances. Here we are using `MultiSimilarityLoss()` ([paper](https://arxiv.org/abs/1904.06627)), one of several losses in [TensorFlow Similarity](https://github.com/tensorflow/similarity). This loss attempts to use all informative pairs in the batch, taking into account the self-similarity, positive-similarity, and the negative-similarity.<jupyter_code>epochs = 3
learning_rate = 0.002
val_steps = 50
# init similarity loss
loss = tfsim.losses.MultiSimilarityLoss()
# compiling and training
model.compile(
optimizer=keras.optimizers.Adam(learning_rate), loss=loss, steps_per_execution=10,
)
history = model.fit(
train_ds, epochs=epochs, validation_data=val_ds, validation_steps=val_steps
)<jupyter_output>Distance metric automatically set to cosine use the distance arg to override.
Epoch 1/3
4000/4000 [==============================] - ETA: 0s - loss: 2.2139Warmup complete
4000/4000 [==============================] - 36s 9ms/step - loss: 2.2139 - val_loss: 0.8510
Warmup complete
Epoch 2/3
4000/4000 [==============================] - 34s 9ms/step - loss: 1.8664 - val_loss: 0.8311
Epoch 3/3
4000/4000 [==============================] - 35s 9ms/step - loss: 1.6039 - val_loss: 0.8076<jupyter_text>IndexingNow that we have trained our model, we can create an index of examples. Here webatch index the first 200 validation examples by passing the x and y to the indexalong with storing the image in the data parameter. The `x_index` values areembedded and then added to the index to make them searchable. The `y_index` anddata parameters are optional but allow the user to associate metadata with theembedded example.<jupyter_code>x_index, y_index = val_ds.get_slice(begin=0, size=200)
model.reset_index()
model.index(x_index, y_index, data=x_index)<jupyter_output>[Indexing 200 points]
|-Computing embeddings
|-Storing data points in key value store
|-Adding embeddings to index.
|-Building index.<jupyter_text>CalibrationOnce the index is built, we can calibrate a distance threshold using a matchingstrategy and a calibration metric.Here we are searching for the optimal F1 score while using K=1 as ourclassifier. All matches at or below the calibrated threshold distance will belabeled as a Positive match between the query example and the label associatedwith the match result, while all matches above the threshold distance will belabeled as a Negative match.Additionally, we pass in extra metrics to compute as well. All values in theoutput are computed at the calibrated threshold.Finally, `model.calibrate()` returns a `CalibrationResults` object containing:* `"cutpoints"`: A Python dict mapping the cutpoint name to a dict containing the`ClassificationMetric` values associated with a particular distance threshold,e.g., `"optimal" : {"acc": 0.90, "f1": 0.92}`.* `"thresholds"`: A Python dict mapping `ClassificationMetric` names to a listcontaining the metric's value computed at each of the distance thresholds, e.g.,`{"f1": [0.99, 0.80], "distance": [0.0, 1.0]}`.<jupyter_code>x_train, y_train = train_ds.get_slice(begin=0, size=1000)
calibration = model.calibrate(
x_train,
y_train,
calibration_metric="f1",
matcher="match_nearest",
extra_metrics=["precision", "recall", "binary_accuracy"],
verbose=1,
)<jupyter_output>Performing NN search<jupyter_text>VisualizationIt may be difficult to get a sense of the model quality from the metrics alone.A complementary approach is to manually inspect a set of query results to get afeel for the match quality.Here we take 10 validation examples and plot them with their 5 nearestneighbors and the distances to the query example. Looking at the results, we seethat while they are imperfect they still represent meaningfully similar images,and that the model is able to find similar images irrespective of their pose orimage illumination.We can also see that the model is very confident with certain images, resultingin very small distances between the query and the neighbors. Conversely, we seemore mistakes in the class labels as the distances become larger. This is one ofthe reasons why calibration is critical for matching applications.<jupyter_code>num_neighbors = 5
labels = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
"Unknown",
]
class_mapping = {c_id: c_lbl for c_id, c_lbl in zip(range(11), labels)}
x_display, y_display = val_ds.get_slice(begin=200, size=10)
# lookup nearest neighbors in the index
nns = model.lookup(x_display, k=num_neighbors)
# display
for idx in np.argsort(y_display):
tfsim.visualization.viz_neigbors_imgs(
x_display[idx],
y_display[idx],
nns[idx],
class_mapping=class_mapping,
fig_size=(16, 2),
)<jupyter_output>Performing NN search<jupyter_text>MetricsWe can also plot the extra metrics contained in the `CalibrationResults` to geta sense of the matching performance as the distance threshold increases.The following plots show the Precision, Recall, and F1 Score. We can see thatthe matching precision degrades as the distance increases, but that thepercentage of the queries that we accept as positive matches (recall) growsfaster up to the calibrated distance threshold.<jupyter_code>fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
x = calibration.thresholds["distance"]
ax1.plot(x, calibration.thresholds["precision"], label="precision")
ax1.plot(x, calibration.thresholds["recall"], label="recall")
ax1.plot(x, calibration.thresholds["f1"], label="f1 score")
ax1.legend()
ax1.set_title("Metric evolution as distance increase")
ax1.set_xlabel("Distance")
ax1.set_ylim((-0.05, 1.05))
ax2.plot(calibration.thresholds["recall"], calibration.thresholds["precision"])
ax2.set_title("Precision recall curve")
ax2.set_xlabel("Recall")
ax2.set_ylabel("Precision")
ax2.set_ylim((-0.05, 1.05))
plt.show()<jupyter_output><empty_output><jupyter_text>We can also take 100 examples for each class and plot the confusion matrix foreach example and their nearest match. We also add an "extra" 10th class torepresent the matches above the calibrated distance threshold.We can see that most of the errors are between the animal classes with aninteresting number of confusions between Airplane and Bird. Additionally, we seethat only a few of the 100 examples for each class returned matches outside ofthe calibrated distance threshold.<jupyter_code>cutpoint = "optimal"
# This yields 100 examples for each class.
# We defined this when we created the val_ds sampler.
x_confusion, y_confusion = val_ds.get_slice(0, -1)
matches = model.match(x_confusion, cutpoint=cutpoint, no_match_label=10)
cm = tfsim.visualization.confusion_matrix(
matches,
y_confusion,
labels=labels,
title="Confusion matrix for cutpoint:%s" % cutpoint,
normalize=False,
)<jupyter_output><empty_output><jupyter_text>No MatchWe can plot the examples outside of the calibrated threshold to see which imagesare not matching any indexed examples.This may provide insight into what other examples may need to be indexed orsurface anomalous examples within the class.<jupyter_code>idx_no_match = np.where(np.array(matches) == 10)
no_match_queries = x_confusion[idx_no_match]
if len(no_match_queries):
plt.imshow(no_match_queries[0])
else:
print("All queries have a match below the distance threshold.")<jupyter_output>All queries have a match below the distance threshold.<jupyter_text>Visualize clustersOne of the best ways to quickly get a sense of the quality of how the model isdoing and understand it's short comings is to project the embedding into a 2Dspace.This allows us to inspect clusters of images and understand which classes areentangled.<jupyter_code># Each class in val_ds was restricted to 100 examples.
num_examples_to_clusters = 1000
thumb_size = 96
plot_size = 800
vx, vy = val_ds.get_slice(0, num_examples_to_clusters)
# Uncomment to run the interactive projector.
# tfsim.visualization.projector(
# model.predict(vx),
# labels=vy,
# images=vx,
# class_mapping=class_mapping,
# image_size=thumb_size,
# plot_size=plot_size,
# )<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/metric_learning_tf_similarity.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/metric_learning_tf_similarity.ipynb",
"repo_id": "keras-io",
"token_count": 5437
} | 91 |
<jupyter_start><jupyter_text>Investigating Vision Transformer representations**Authors:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Sayak Paul](https://twitter.com/RisingSayak) (equal contribution)**Date created:** 2022/04/12**Last modified:** 2023/11/20**Description:** Looking into the representations learned by different Vision Transformers variants. IntroductionIn this example, we look into the representations learned by different VisionTransformer (ViT) models. Our main goal with this example is to provide insights intowhat empowers ViTs to learn from image data. In particular, the example discussesimplementations of a few different ViT analysis tools.**Note:** when we say "Vision Transformer", we refer to a computer vision architecture thatinvolves Transformer blocks ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) and notnecessarily the original Vision Transformer model([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929)). Models consideredSince the inception of the original Vision Transformer, the computer vision community hasseen a number of different ViT variants improving upon the original in various ways:training improvements, architecture improvements, and so on.In this example, we consider the following ViT model families:* ViTs trained using supervised pretraining with the ImageNet-1k and ImageNet-21kdatasets ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929))* ViTs trained using supervised pretraining but only with the ImageNet-1k dataset withmore regularization and distillation ([Touvron et al.](https://arxiv.org/abs/2012.12877))(DeiT).* ViTs trained using self-supervised pretraining ([Caron et al.](https://arxiv.org/abs/2104.14294))(DINO).Since the pretrained models are not implemented in Keras, we first implemented them asfaithfully as possible. We then populated them with the official pretrained parameters.Finally, we evaluated our implementations on the ImageNet-1k validation set to ensure theevaluation numbers were matching with the original implementations. The details of our implementationsare available in [this repository](https://github.com/sayakpaul/probing-vits).To keep the example concise, we won't exhaustively pair each model with the analysismethods. We'll provide notes in the respective sections so that you can pick up thepieces.To run this example on Google Colab, we need to update the `gdown` library like so:```shellpip install -U gdown -q``` Imports<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import zipfile
from io import BytesIO
import cv2
import matplotlib.pyplot as plt
import numpy as np
import requests
from PIL import Image
from sklearn.preprocessing import MinMaxScaler
import keras
from keras import ops<jupyter_output><empty_output><jupyter_text>Constants<jupyter_code>RESOLUTION = 224
PATCH_SIZE = 16
GITHUB_RELEASE = "https://github.com/sayakpaul/probing-vits/releases/download/v1.0.0/probing_vits.zip"
FNAME = "probing_vits.zip"
MODELS_ZIP = {
"vit_dino_base16": "Probing_ViTs/vit_dino_base16.zip",
"vit_b16_patch16_224": "Probing_ViTs/vit_b16_patch16_224.zip",
"vit_b16_patch16_224-i1k_pretrained": "Probing_ViTs/vit_b16_patch16_224-i1k_pretrained.zip",
}<jupyter_output><empty_output><jupyter_text>Data utilitiesFor the original ViT models, the input images need to be scaled to the range `[-1, 1]`. Forthe other model families mentioned at the beginning, we need to normalize the images withchannel-wise mean and standard deviation of the ImageNet-1k training set.<jupyter_code>crop_layer = keras.layers.CenterCrop(RESOLUTION, RESOLUTION)
norm_layer = keras.layers.Normalization(
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2],
)
rescale_layer = keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1)
def preprocess_image(image, model_type, size=RESOLUTION):
# Turn the image into a numpy array and add batch dim.
image = np.array(image)
image = ops.expand_dims(image, 0)
# If model type is vit rescale the image to [-1, 1].
if model_type == "original_vit":
image = rescale_layer(image)
# Resize the image using bicubic interpolation.
resize_size = int((256 / 224) * size)
image = ops.image.resize(image, (resize_size, resize_size), interpolation="bicubic")
# Crop the image.
image = crop_layer(image)
# If model type is DeiT or DINO normalize the image.
if model_type != "original_vit":
image = norm_layer(image)
return ops.convert_to_numpy(image)
def load_image_from_url(url, model_type):
# Credit: Willi Gierke
response = requests.get(url)
image = Image.open(BytesIO(response.content))
preprocessed_image = preprocess_image(image, model_type)
return image, preprocessed_image<jupyter_output><empty_output><jupyter_text>Load a test image and display it<jupyter_code># Download the ImageNet-1k label mapping file and load it.
mapping_file = keras.utils.get_file(
origin="https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt"
)
with open(mapping_file, "r") as f:
lines = f.readlines()
imagenet_int_to_str = [line.rstrip() for line in lines]
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="original_vit")
plt.imshow(image)
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Load a model<jupyter_code>zip_path = keras.utils.get_file(
fname=FNAME,
origin=GITHUB_RELEASE,
)
with zipfile.ZipFile(zip_path, "r") as zip_ref:
zip_ref.extractall("./")
os.rename("Probing ViTs", "Probing_ViTs")
def load_model(model_path: str) -> keras.Model:
with zipfile.ZipFile(model_path, "r") as zip_ref:
zip_ref.extractall("Probing_ViTs/")
model_name = model_path.split(".")[0]
inputs = keras.Input((RESOLUTION, RESOLUTION, 3))
model = keras.layers.TFSMLayer(model_name, call_endpoint="serving_default")
outputs = model(inputs, training=False)
return keras.Model(inputs, outputs=outputs)
vit_base_i21k_patch16_224 = load_model(MODELS_ZIP["vit_b16_patch16_224-i1k_pretrained"])
print("Model loaded.")<jupyter_output><empty_output><jupyter_text>**More about the model**:This model was pretrained on the ImageNet-21k dataset and was then fine-tuned on theImageNet-1k dataset. To learn more about how we developed this model in TensorFlow(with pretrained weights from[this source](https://github.com/google-research/vision_transformer/)) refer to[this notebook](https://github.com/sayakpaul/probing-vits/blob/main/notebooks/load-jax-weights-vitb16.ipynb). Running regular inference with the modelWe now run inference with the loaded model on our test image.<jupyter_code>def split_prediction_and_attention_scores(outputs):
predictions = outputs["output_1"]
attention_score_dict = {}
for key, value in outputs.items():
if key.startswith("output_2_"):
attention_score_dict[key[len("output_2_") :]] = value
return predictions, attention_score_dict
predictions, attention_score_dict = split_prediction_and_attention_scores(
vit_base_i21k_patch16_224.predict(preprocessed_image)
)
predicted_label = imagenet_int_to_str[int(np.argmax(predictions))]
print(predicted_label)<jupyter_output><empty_output><jupyter_text>`attention_score_dict` contains the attention scores (softmaxed outputs) from eachattention head of each Transformer block. Method I: Mean attention distance[Dosovitskiy et al.](https://arxiv.org/abs/2010.11929) and[Raghu et al.](https://arxiv.org/abs/2108.08810) use a measure called"mean attention distance" from each attention head of differentTransformer blocks to understand how local and global information flowsinto Vision Transformers.Mean attention distance is defined as the distance between query tokens and the othertokens times attention weights. So, for a single image* we take individual patches (tokens) extracted from the image,* calculate their geometric distance, and* multiply that with the attention scores.Attention scores are calculated here after forward passing the image in inference modethrough the network. The following figure may help you understand the process alittle bit better.This animation is created by [Ritwik Raha](https://twitter.com/ritwik_raha).<jupyter_code>def compute_distance_matrix(patch_size, num_patches, length):
distance_matrix = np.zeros((num_patches, num_patches))
for i in range(num_patches):
for j in range(num_patches):
if i == j: # zero distance
continue
xi, yi = (int(i / length)), (i % length)
xj, yj = (int(j / length)), (j % length)
distance_matrix[i, j] = patch_size * np.linalg.norm([xi - xj, yi - yj])
return distance_matrix
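# distance_matrix[i, j] holds the Euclidean distance, in pixels, between the
# centres of patches i and j on the 2D patch grid.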
def compute_mean_attention_dist(patch_size, attention_weights, model_type):
num_cls_tokens = 2 if "distilled" in model_type else 1
# The attention_weights shape = (batch, num_heads, num_patches, num_patches)
attention_weights = attention_weights[
..., num_cls_tokens:, num_cls_tokens:
] # Removing the CLS token
num_patches = attention_weights.shape[-1]
length = int(np.sqrt(num_patches))
assert length**2 == num_patches, "Num patches is not perfect square"
distance_matrix = compute_distance_matrix(patch_size, num_patches, length)
h, w = distance_matrix.shape
distance_matrix = distance_matrix.reshape((1, 1, h, w))
# The attention_weights along the last axis adds to 1
# this is due to the fact that they are softmax of the raw logits
# summation of the (attention_weights * distance_matrix)
# should result in an average distance per token.
mean_distances = attention_weights * distance_matrix
mean_distances = np.sum(
mean_distances, axis=-1
) # Sum along last axis to get average distance per token
mean_distances = np.mean(
mean_distances, axis=-1
) # Now average across all the tokens
return mean_distances<jupyter_output><empty_output><jupyter_text>Thanks to [Simon Kornblith](https://scholar.google.com/citations?user=1O3RPmsAAAAJ&hl=en)from Google who helped us with this code snippet. It can be found[here](https://gist.github.com/simonster/155894d48aef2bd36bd2dd8267e62391). Let's now usethese utilities to generate a plot of attention distances with our loaded model and testimage.<jupyter_code># Build the mean distances for every Transformer block.
mean_distances = {
f"{name}_mean_dist": compute_mean_attention_dist(
patch_size=PATCH_SIZE,
attention_weights=attention_weight,
model_type="original_vit",
)
for name, attention_weight in attention_score_dict.items()
}
# Get the number of heads from the mean distance output.
num_heads = mean_distances["transformer_block_0_att_mean_dist"].shape[-1]
# Print the shapes
print(f"Num Heads: {num_heads}.")
plt.figure(figsize=(9, 9))
for idx in range(len(mean_distances)):
mean_distance = mean_distances[f"transformer_block_{idx}_att_mean_dist"]
x = [idx] * num_heads
y = mean_distance[0, :]
plt.scatter(x=x, y=y, label=f"transformer_block_{idx}")
plt.legend(loc="lower right")
plt.xlabel("Attention Head", fontsize=14)
plt.ylabel("Attention Distance", fontsize=14)
plt.title("vit_base_i21k_patch16_224", fontsize=14)
plt.grid()
plt.show()<jupyter_output><empty_output><jupyter_text>Inspecting the plots**How does self-attention span across the input space? Does it attend to input regions locally or globally?**The promise of self-attention is to enable the learning of contextual dependencies so that a model can attend to the regions of inputs which are the most salient w.r.t. the objective. From the above plots we can notice that different attention heads yield different attention distances, suggesting they use both local and global information from an image. But as we go deeper in the Transformer blocks, the heads tend to focus more on global aggregate information.Inspired by [Raghu et al.](https://arxiv.org/abs/2108.08810) we computed mean attention distances over 1000 images randomly taken from the ImageNet-1k validation set and we repeated the process for all the models mentioned at the beginning. Interestingly, we notice the following:* Pretraining with a larger dataset helps with more global attention spans:| Pretrained on ImageNet-21k, Fine-tuned on ImageNet-1k | Pretrained on ImageNet-1k || :--: | :--: || | |* When distilled from a CNN, ViTs tend to have less global attention spans:| No distillation (ViT B-16 from DeiT) | Distilled ViT B-16 from DeiT || :--: | :--: || | |To reproduce these plots, please refer to [this notebook](https://github.com/sayakpaul/probing-vits/blob/main/notebooks/mean-attention-distance-1k.ipynb). Method II: Attention Rollout[Abnar et al.](https://arxiv.org/abs/2005.00928) introduce "Attention rollout" for quantifying how information flows through the self-attention layers of Transformer blocks. The original ViT authors use this method to investigate the learned representations, stating:> Briefly, we averaged attention weights of ViT-L/16 across all heads and then recursively multiplied the weight matrices of all layers. This accounts for the mixing of attention across tokens through all layers.We used [this notebook](https://colab.research.google.com/github/jeonsworld/ViT-pytorch/blob/main/visualize_attention_map.ipynb) and modified the attention rollout code from it for compatibility with our models.<jupyter_code>def attention_rollout_map(image, attention_score_dict, model_type):
num_cls_tokens = 2 if "distilled" in model_type else 1
# Stack the individual attention matrices from individual Transformer blocks.
attn_mat = ops.stack([attention_score_dict[k] for k in attention_score_dict.keys()])
attn_mat = ops.squeeze(attn_mat, axis=1)
# Average the attention weights across all heads.
attn_mat = ops.mean(attn_mat, axis=1)
# To account for residual connections, we add an identity matrix to the
# attention matrix and re-normalize the weights.
residual_attn = ops.eye(attn_mat.shape[1])
aug_attn_mat = attn_mat + residual_attn
aug_attn_mat = aug_attn_mat / ops.sum(aug_attn_mat, axis=-1)[..., None]
aug_attn_mat = ops.convert_to_numpy(aug_attn_mat)
# Recursively multiply the weight matrices.
joint_attentions = np.zeros(aug_attn_mat.shape)
joint_attentions[0] = aug_attn_mat[0]
for n in range(1, aug_attn_mat.shape[0]):
joint_attentions[n] = np.matmul(aug_attn_mat[n], joint_attentions[n - 1])
# Attention from the output token to the input space.
v = joint_attentions[-1]
grid_size = int(np.sqrt(aug_attn_mat.shape[-1]))
mask = v[0, num_cls_tokens:].reshape(grid_size, grid_size)
mask = cv2.resize(mask / mask.max(), image.size)[..., np.newaxis]
result = (mask * image).astype("uint8")
return result<jupyter_output><empty_output><jupyter_text>Let's now use these utilities to generate an attention plot based on our previous resultsfrom the "Running regular inference with the model" section. Following are the links todownload each individual model:* [Original ViT model (pretrained on ImageNet-21k)](https://drive.google.com/file/d/1mbtnliT3jRb3yJUHhbItWw8unfYZw8KJ/view?usp=sharing)* [Original ViT model (pretrained on ImageNet-1k)](https://drive.google.com/file/d/1ApOdYe4NXxhPhJABefgZ3KVvqsQzhCL7/view?usp=sharing)* [DINO model (pretrained on ImageNet-1k)](https://drive.google.com/file/d/16_1oDm0PeCGJ_KGBG5UKVN7TsAtiRNrN/view?usp=sharing)* [DeiT models (pretrained on ImageNet-1k including distilled and non-distilled ones)](https://tfhub.dev/sayakpaul/collections/deit/1)<jupyter_code>attn_rollout_result = attention_rollout_map(
image, attention_score_dict, model_type="original_vit"
)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 10))
fig.suptitle(f"Predicted label: {predicted_label}.", fontsize=20)
_ = ax1.imshow(image)
_ = ax2.imshow(attn_rollout_result)
ax1.set_title("Input Image", fontsize=16)
ax2.set_title("Attention Map", fontsize=16)
ax1.axis("off")
ax2.axis("off")
fig.tight_layout()
fig.subplots_adjust(top=1.35)
fig.show()<jupyter_output><empty_output><jupyter_text>Inspecting the plots**How can we quantify the information flow that propagates through the attention layers?**We notice that the model is able to focus its attention on the salient parts of the input image. We encourage you to apply this method to the other models we mentioned and compare the results. The attention rollout plots will differ according to the tasks and augmentation the model was trained with. We observe that DeiT has the best rollout plot, likely due to its augmentation regime. Method III: Attention heatmapsA simple yet useful way to probe into the representation of a Vision Transformer is to visualise the attention maps overlaid on the input images. This helps form an intuition about what the model attends to. We use the DINO model for this purpose, because it yields better attention heatmaps.<jupyter_code># Load the model.
vit_dino_base16 = load_model(MODELS_ZIP["vit_dino_base16"])
print("Model loaded.")
# Preprocess the same image but with normlization.
img_url = "https://dl.fbaipublicfiles.com/dino/img.png"
image, preprocessed_image = load_image_from_url(img_url, model_type="dino")
# Grab the predictions.
predictions, attention_score_dict = split_prediction_and_attention_scores(
vit_dino_base16.predict(preprocessed_image)
)<jupyter_output><empty_output><jupyter_text>A Transformer block consists of multiple heads. Each head in a Transformer block projects the input data to different sub-spaces. This helps each individual head to attend to different parts of the image. Therefore, it makes sense to visualize each attention head map separately, to make sense of what each head looks at.**Notes**:* The following code has been copy-modified from the [original DINO codebase](https://github.com/facebookresearch/dino/blob/main/visualize_attention.py).* Here we grab the attention maps of the last Transformer block.* [DINO](https://arxiv.org/abs/2104.14294) was pretrained using a self-supervised objective.<jupyter_code>def attention_heatmap(attention_score_dict, image, model_type="dino"):
num_tokens = 2 if "distilled" in model_type else 1
# Sort the Transformer blocks in order of their depth.
attention_score_list = list(attention_score_dict.keys())
attention_score_list.sort(key=lambda x: int(x.split("_")[-2]), reverse=True)
# Process the attention maps for overlay.
w_featmap = image.shape[2] // PATCH_SIZE
h_featmap = image.shape[1] // PATCH_SIZE
attention_scores = attention_score_dict[attention_score_list[0]]
# Taking the representations from CLS token.
attentions = attention_scores[0, :, 0, num_tokens:].reshape(num_heads, -1)
# Reshape the attention scores to resemble mini patches.
attentions = attentions.reshape(num_heads, w_featmap, h_featmap)
attentions = attentions.transpose((1, 2, 0))
# Resize the attention patches to 224x224 (224: 14x16).
attentions = ops.image.resize(
attentions, size=(h_featmap * PATCH_SIZE, w_featmap * PATCH_SIZE)
)
return attentions<jupyter_output><empty_output><jupyter_text>We can use the same image we used for inference with DINO and the `attention_score_dict`we extracted from the results.<jupyter_code># De-normalize the image for visual clarity.
in1k_mean = np.array([0.485 * 255, 0.456 * 255, 0.406 * 255])
in1k_std = np.array([0.229 * 255, 0.224 * 255, 0.225 * 255])
preprocessed_img_orig = (preprocessed_image * in1k_std) + in1k_mean
preprocessed_img_orig = preprocessed_img_orig / 255.0
preprocessed_img_orig = ops.convert_to_numpy(ops.clip(preprocessed_img_orig, 0.0, 1.0))
# Generate the attention heatmaps.
attentions = attention_heatmap(attention_score_dict, preprocessed_img_orig)
# Plot the maps.
fig, axes = plt.subplots(nrows=3, ncols=4, figsize=(13, 13))
img_count = 0
for i in range(3):
for j in range(4):
if img_count < len(attentions):
axes[i, j].imshow(preprocessed_img_orig[0])
axes[i, j].imshow(attentions[..., img_count], cmap="inferno", alpha=0.6)
axes[i, j].title.set_text(f"Attention head: {img_count}")
axes[i, j].axis("off")
img_count += 1<jupyter_output><empty_output><jupyter_text>Inspecting the plots**How can we qualitatively evaluate the attention weights?**The attention weights of a Transformer block are computed between the key and the query. The weights quantify how important the key is to the query. In ViTs the key and the query come from the same image, hence the weights determine which part of the image is important. Plotting the attention weights overlaid on the image gives us a great intuition about the parts of the image that are important to the Transformer. This plot qualitatively evaluates the purpose of the attention weights. Method IV: Visualizing the learned projection filtersAfter extracting non-overlapping patches, ViTs flatten those patches across their spatial dimensions, and then linearly project them. One might wonder, what do these projections look like? Below, we take the ViT B-16 model and visualize its learned projections.<jupyter_code>def extract_weights(model, name):
for variable in model.weights:
if variable.name.startswith(name):
return variable.numpy()
# Extract the projections.
projections = extract_weights(vit_base_i21k_patch16_224, "conv_projection/kernel")
projection_dim = projections.shape[-1]
patch_h, patch_w, patch_channels = projections.shape[:-1]
# Scale the projections.
scaled_projections = MinMaxScaler().fit_transform(
projections.reshape(-1, projection_dim)
)
# Reshape the scaled projections so that the leading
# three dimensions resemble an image.
scaled_projections = scaled_projections.reshape(patch_h, patch_w, patch_channels, -1)
# Visualize the first 128 filters of the learned
# projections.
fig, axes = plt.subplots(nrows=8, ncols=16, figsize=(13, 8))
img_count = 0
limit = 128
for i in range(8):
for j in range(16):
if img_count < limit:
axes[i, j].imshow(scaled_projections[..., img_count])
axes[i, j].axis("off")
img_count += 1
fig.tight_layout()<jupyter_output><empty_output><jupyter_text>Inspecting the plots**What do the projection filters learn?**[When visualized](https://distill.pub/2017/feature-visualization/), the kernels of a convolutional neural network show the pattern that they look for in an image. This could be circles, sometimes lines -- when combined together (in the later stages of a ConvNet), the filters transform into more complex shapes. We have found a stark similarity between such ConvNet kernels and the projection filters of a ViT. Method V: Visualizing the positional embeddings Transformers are permutation-invariant. This means that they do not take into account the spatial position of the input tokens. To overcome this limitation, we add positional information to the input tokens. The positional information can be in the form of learned positional embeddings or handcrafted constant embeddings. In our case, all three variants of ViTs feature learned positional embeddings. In this section, we visualize the pairwise similarities between the learned positional embeddings. Below, we take the ViT B-16 model and visualize the similarity of the positional embeddings by taking their dot-product.<jupyter_code>position_embeddings = extract_weights(vit_base_i21k_patch16_224, "pos_embedding")
# Discard the batch dimension and the position embeddings of the
# cls token.
position_embeddings = position_embeddings.squeeze()[1:, ...]
similarity = position_embeddings @ position_embeddings.T
plt.imshow(similarity, cmap="inferno")
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/probing_vits.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/probing_vits.ipynb",
"repo_id": "keras-io",
"token_count": 7922
} | 92 |
# Convolutional autoencoder for image denoising
**Author:** [Santiago L. Valdarrama](https://twitter.com/svpino)<br>
**Date created:** 2021/03/01<br>
**Last modified:** 2021/03/01<br>
**Description:** How to train a deep convolutional autoencoder for image denoising.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/autoencoder.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/autoencoder.py)
---
## Introduction
This example demonstrates how to implement a deep convolutional autoencoder
for image denoising, mapping noisy digit images from the MNIST dataset to
clean digit images. This implementation is based on an original blog post
titled [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
by [François Chollet](https://twitter.com/fchollet).
---
## Setup
```python
import numpy as np
import matplotlib.pyplot as plt
from keras import layers
from keras.datasets import mnist
from keras.models import Model
def preprocess(array):
"""Normalizes the supplied array and reshapes it."""
array = array.astype("float32") / 255.0
array = np.reshape(array, (len(array), 28, 28, 1))
return array
def noise(array):
"""Adds random noise to each image in the supplied array."""
noise_factor = 0.4
noisy_array = array + noise_factor * np.random.normal(
loc=0.0, scale=1.0, size=array.shape
)
return np.clip(noisy_array, 0.0, 1.0)
def display(array1, array2):
"""Displays ten random images from each array."""
n = 10
indices = np.random.randint(len(array1), size=n)
images1 = array1[indices, :]
images2 = array2[indices, :]
plt.figure(figsize=(20, 4))
for i, (image1, image2) in enumerate(zip(images1, images2)):
ax = plt.subplot(2, n, i + 1)
plt.imshow(image1.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(image2.reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
```
---
## Prepare the data
```python
# Since we only need images from the dataset to encode and decode, we
# won't use the labels.
(train_data, _), (test_data, _) = mnist.load_data()
# Normalize and reshape the data
train_data = preprocess(train_data)
test_data = preprocess(test_data)
# Create a copy of the data with added noise
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)
# Display the train data and a version of it with added noise
display(train_data, noisy_train_data)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>

---
## Build the autoencoder
We are going to use the Functional API to build our convolutional autoencoder.
```python
input = layers.Input(shape=(28, 28, 1))
# Encoder
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(input)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
# Decoder
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)
# Autoencoder
autoencoder = Model(input, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">289</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,353</span> (110.75 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,353</span> (110.75 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
Now we can train our autoencoder using `train_data` as both our input data
and target. Notice we are setting up the validation data using the same
format.
```python
autoencoder.fit(
x=train_data,
y=train_data,
epochs=50,
batch_size=128,
shuffle=True,
validation_data=(test_data, test_data),
)
```
<div class="k-default-codeblock">
```
Epoch 1/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 8s 9ms/step - loss: 0.2537 - val_loss: 0.0723
Epoch 2/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0718 - val_loss: 0.0691
Epoch 3/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0695 - val_loss: 0.0677
Epoch 4/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0682 - val_loss: 0.0669
Epoch 5/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0673 - val_loss: 0.0664
Epoch 6/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0668 - val_loss: 0.0660
Epoch 7/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0664 - val_loss: 0.0657
Epoch 8/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0661 - val_loss: 0.0654
Epoch 9/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0657 - val_loss: 0.0651
Epoch 10/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0655 - val_loss: 0.0648
Epoch 11/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0653 - val_loss: 0.0646
Epoch 12/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0651 - val_loss: 0.0644
Epoch 13/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0649 - val_loss: 0.0643
Epoch 14/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0647 - val_loss: 0.0641
Epoch 15/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0646 - val_loss: 0.0640
Epoch 16/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0645 - val_loss: 0.0639
Epoch 17/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0642 - val_loss: 0.0638
Epoch 18/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0641 - val_loss: 0.0638
Epoch 19/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0640 - val_loss: 0.0636
Epoch 20/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0639 - val_loss: 0.0637
Epoch 21/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0639 - val_loss: 0.0634
Epoch 22/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0637 - val_loss: 0.0634
Epoch 23/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0636 - val_loss: 0.0633
Epoch 24/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0637 - val_loss: 0.0632
Epoch 25/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0635 - val_loss: 0.0632
Epoch 26/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0635 - val_loss: 0.0631
Epoch 27/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0635 - val_loss: 0.0630
Epoch 28/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0635 - val_loss: 0.0629
Epoch 29/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0634 - val_loss: 0.0630
Epoch 30/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0633 - val_loss: 0.0629
Epoch 31/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0633 - val_loss: 0.0628
Epoch 32/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0632 - val_loss: 0.0628
Epoch 33/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0631 - val_loss: 0.0627
Epoch 34/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0631 - val_loss: 0.0627
Epoch 35/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0630 - val_loss: 0.0627
Epoch 36/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0631 - val_loss: 0.0626
Epoch 37/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0629 - val_loss: 0.0626
Epoch 38/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0630 - val_loss: 0.0627
Epoch 39/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0630 - val_loss: 0.0625
Epoch 40/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0629 - val_loss: 0.0625
Epoch 41/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0628 - val_loss: 0.0625
Epoch 42/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0629 - val_loss: 0.0625
Epoch 43/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0629 - val_loss: 0.0624
Epoch 44/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0628 - val_loss: 0.0624
Epoch 45/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0628 - val_loss: 0.0624
Epoch 46/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0627 - val_loss: 0.0625
Epoch 47/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0628 - val_loss: 0.0623
Epoch 48/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0627 - val_loss: 0.0623
Epoch 49/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0626 - val_loss: 0.0623
Epoch 50/50
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0626 - val_loss: 0.0622
<keras.src.callbacks.history.History at 0x7ff5889d9930>
```
</div>
Let's predict on our test dataset and display the original image together with
the prediction from our autoencoder.
Notice how the predictions are pretty close to the original images, although
not quite the same.
```python
predictions = autoencoder.predict(test_data)
display(test_data, predictions)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 1ms/step
```
</div>

Now that we know that our autoencoder works, let's retrain it using the noisy
data as our input and the clean data as our target. We want our autoencoder to
learn how to denoise the images.
```python
autoencoder.fit(
x=noisy_train_data,
y=train_data,
epochs=100,
batch_size=128,
shuffle=True,
validation_data=(noisy_test_data, test_data),
)
```
<div class="k-default-codeblock">
```
Epoch 1/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.1110 - val_loss: 0.0922
Epoch 2/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0925 - val_loss: 0.0904
Epoch 3/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0910 - val_loss: 0.0895
Epoch 4/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0900 - val_loss: 0.0888
Epoch 5/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0892 - val_loss: 0.0882
Epoch 6/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0887 - val_loss: 0.0878
Epoch 7/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0884 - val_loss: 0.0874
Epoch 8/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0880 - val_loss: 0.0871
Epoch 9/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0876 - val_loss: 0.0869
Epoch 10/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0875 - val_loss: 0.0868
Epoch 11/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0872 - val_loss: 0.0864
Epoch 12/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0870 - val_loss: 0.0863
Epoch 13/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0869 - val_loss: 0.0860
Epoch 14/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0868 - val_loss: 0.0859
Epoch 15/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0865 - val_loss: 0.0857
Epoch 16/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0863 - val_loss: 0.0857
Epoch 17/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0863 - val_loss: 0.0858
Epoch 18/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0862 - val_loss: 0.0854
Epoch 19/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0859 - val_loss: 0.0856
Epoch 20/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0859 - val_loss: 0.0853
Epoch 21/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0858 - val_loss: 0.0851
Epoch 22/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0857 - val_loss: 0.0851
Epoch 23/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0856 - val_loss: 0.0850
Epoch 24/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0855 - val_loss: 0.0850
Epoch 25/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0853 - val_loss: 0.0849
Epoch 26/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0855 - val_loss: 0.0849
Epoch 27/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0853 - val_loss: 0.0849
Epoch 28/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0853 - val_loss: 0.0848
Epoch 29/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0853 - val_loss: 0.0850
Epoch 30/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0854 - val_loss: 0.0847
Epoch 31/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0851 - val_loss: 0.0846
Epoch 32/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0851 - val_loss: 0.0846
Epoch 33/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0846
Epoch 34/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0851 - val_loss: 0.0847
Epoch 35/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0846
Epoch 36/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0844
Epoch 37/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0845
Epoch 38/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0848 - val_loss: 0.0844
Epoch 39/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0843
Epoch 40/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0844
Epoch 41/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0848 - val_loss: 0.0844
Epoch 42/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0848 - val_loss: 0.0844
Epoch 43/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0849 - val_loss: 0.0846
Epoch 44/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0843
Epoch 45/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0847 - val_loss: 0.0845
Epoch 46/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0843
Epoch 47/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0845 - val_loss: 0.0842
Epoch 48/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0842
Epoch 49/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0847 - val_loss: 0.0846
Epoch 50/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0847 - val_loss: 0.0843
Epoch 51/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0842
Epoch 52/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0844
Epoch 53/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0844 - val_loss: 0.0842
Epoch 54/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0845 - val_loss: 0.0842
Epoch 55/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0845 - val_loss: 0.0841
Epoch 56/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0844
Epoch 57/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0845 - val_loss: 0.0841
Epoch 58/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0843
Epoch 59/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0842
Epoch 60/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0844 - val_loss: 0.0847
Epoch 61/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0846 - val_loss: 0.0840
Epoch 62/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0840
Epoch 63/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 64/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0844 - val_loss: 0.0841
Epoch 65/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 66/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 67/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0840
Epoch 68/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 69/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0840
Epoch 70/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 71/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0844 - val_loss: 0.0841
Epoch 72/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0840
Epoch 73/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 74/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0844 - val_loss: 0.0840
Epoch 75/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0840
Epoch 76/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0842
Epoch 77/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0841
Epoch 78/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0841
Epoch 79/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0840
Epoch 80/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0843 - val_loss: 0.0839
Epoch 81/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0842
Epoch 82/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0839
Epoch 83/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0840
Epoch 84/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0839
Epoch 85/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0839
Epoch 86/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0840 - val_loss: 0.0838
Epoch 87/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0839
Epoch 88/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0838
Epoch 89/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0838
Epoch 90/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0840
Epoch 91/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0839
Epoch 92/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0842 - val_loss: 0.0838
Epoch 93/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0838
Epoch 94/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0838
Epoch 95/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0840 - val_loss: 0.0837
Epoch 96/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0838
Epoch 97/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0838
Epoch 98/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0837
Epoch 99/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0841 - val_loss: 0.0838
Epoch 100/100
469/469 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 0.0839 - val_loss: 0.0839
<keras.src.callbacks.history.History at 0x7ff5889da230>
```
</div>
Let's now predict on the noisy data and display the results of our autoencoder.
Notice how the autoencoder does an amazing job at removing the noise from the
input images.
```python
predictions = autoencoder.predict(noisy_test_data)
display(noisy_test_data, predictions)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 0s 523us/step
```
</div>

| keras-io/examples/vision/md/autoencoder.md/0 | {
"file_path": "keras-io/examples/vision/md/autoencoder.md",
"repo_id": "keras-io",
"token_count": 12992
} | 93 |
# FixRes: Fixing train-test resolution discrepancy
**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/10/08<br>
**Last modified:** 2021/10/10<br>
**Description:** Mitigating resolution discrepancy between training and test sets.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/fixres.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/fixres.py)
---
## Introduction
It is a common practice to use the same input image resolution while training and testing
vision models. However, as investigated in
[Fixing the train-test resolution discrepancy](https://arxiv.org/abs/1906.06423)
(Touvron et al.), this practice leads to suboptimal performance. Data augmentation
is an indispensable part of the training process of deep neural networks. For vision models, we
typically use random resized crops during training and center crops during inference.
This introduces a discrepancy in the object sizes seen during training and inference.
As shown by Touvron et al., if we can fix this discrepancy, we can significantly
boost model performance.
In this example, we implement the **FixRes** techniques introduced by Touvron et al.
to fix this discrepancy.
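To make the discrepancy concrete, here is a small standalone sketch (the dummy image and
the exact crop settings are illustrative assumptions, not part of the recipe below)
contrasting the two preprocessing paths: a random resized crop as used during training
versus resize-then-center-crop as used during inference. Objects in the training view
appear larger on average than in the test view, which is exactly the mismatch FixRes
addresses.

```python
import tensorflow as tf

dummy_image = tf.cast(
    tf.random.uniform((512, 512, 3), maxval=256, dtype=tf.int32), tf.uint8
)

# Training path: sample a random crop covering 5%-100% of the image, then resize.
begin, size, _ = tf.image.sample_distorted_bounding_box(
    tf.shape(dummy_image),
    tf.zeros([0, 0, 4], tf.float32),
    area_range=(0.05, 1.0),
    min_object_covered=0,
    use_image_if_no_bounding_boxes=True,
)
train_view = tf.image.resize(tf.slice(dummy_image, begin, size), (224, 224))

# Test path: resize the full image, then take a center crop.
test_view = tf.image.central_crop(
    tf.image.resize(dummy_image, (256, 256)), central_fraction=224 / 256
)

print(train_view.shape, test_view.shape)
```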
---
## Imports
```python
import keras
from keras import layers
import tensorflow as tf # just for image processing and pipeline
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import matplotlib.pyplot as plt
```
---
## Load the `tf_flowers` dataset
```python
train_dataset, val_dataset = tfds.load(
"tf_flowers", split=["train[:90%]", "train[90%:]"], as_supervised=True
)
num_train = train_dataset.cardinality()
num_val = val_dataset.cardinality()
print(f"Number of training examples: {num_train}")
print(f"Number of validation examples: {num_val}")
```
<div class="k-default-codeblock">
```
Number of training examples: 3303
Number of validation examples: 367
```
</div>
---
## Data preprocessing utilities
We create three datasets:
1. A dataset with a smaller resolution - 128x128.
2. Two datasets with a larger resolution - 224x224.
We will apply different augmentation transforms to the larger-resolution datasets.
The idea of FixRes is to first train a model on a smaller resolution dataset and then fine-tune
it on a larger resolution dataset. This simple yet effective recipe leads to non-trivial performance
improvements. Please refer to the [original paper](https://arxiv.org/abs/1906.06423) for
results.
```python
# Reference: https://github.com/facebookresearch/FixRes/blob/main/transforms_v2.py.
batch_size = 32
auto = tf.data.AUTOTUNE
smaller_size = 128
bigger_size = 224
size_for_resizing = int((bigger_size / smaller_size) * bigger_size)
central_crop_layer = layers.CenterCrop(bigger_size, bigger_size)
def preprocess_initial(train, image_size):
"""Initial preprocessing function for training on smaller resolution.
For training, do random_horizontal_flip -> random_crop.
For validation, just resize.
No color-jittering has been used.
"""
def _pp(image, label, train):
if train:
channels = image.shape[-1]
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.zeros([0, 0, 4], tf.float32),
area_range=(0.05, 1.0),
min_object_covered=0,
use_image_if_no_bounding_boxes=True,
)
image = tf.slice(image, begin, size)
image.set_shape([None, None, channels])
image = tf.image.resize(image, [image_size, image_size])
image = tf.image.random_flip_left_right(image)
else:
image = tf.image.resize(image, [image_size, image_size])
return image, label
return _pp
def preprocess_finetune(image, label, train):
"""Preprocessing function for fine-tuning on a higher resolution.
For training, resize to a bigger resolution to maintain the ratio ->
random_horizontal_flip -> center_crop.
For validation, do the same without any horizontal flipping.
No color-jittering has been used.
"""
image = tf.image.resize(image, [size_for_resizing, size_for_resizing])
if train:
image = tf.image.random_flip_left_right(image)
image = central_crop_layer(image[None, ...])[0]
return image, label
def make_dataset(
dataset: tf.data.Dataset,
train: bool,
image_size: int = smaller_size,
fixres: bool = True,
num_parallel_calls=auto,
):
if image_size not in [smaller_size, bigger_size]:
raise ValueError(f"{image_size} resolution is not supported.")
# Determine which preprocessing function we are using.
if image_size == smaller_size:
preprocess_func = preprocess_initial(train, image_size)
elif not fixres and image_size == bigger_size:
preprocess_func = preprocess_initial(train, image_size)
else:
preprocess_func = preprocess_finetune
dataset = dataset.map(
lambda x, y: preprocess_func(x, y, train),
num_parallel_calls=num_parallel_calls,
)
dataset = dataset.batch(batch_size)
if train:
dataset = dataset.shuffle(batch_size * 10)
return dataset.prefetch(num_parallel_calls)
```
Notice how the augmentation transforms vary for the kind of dataset we are preparing.
---
## Prepare datasets
```python
initial_train_dataset = make_dataset(train_dataset, train=True, image_size=smaller_size)
initial_val_dataset = make_dataset(val_dataset, train=False, image_size=smaller_size)
finetune_train_dataset = make_dataset(train_dataset, train=True, image_size=bigger_size)
finetune_val_dataset = make_dataset(val_dataset, train=False, image_size=bigger_size)
vanilla_train_dataset = make_dataset(
train_dataset, train=True, image_size=bigger_size, fixres=False
)
vanilla_val_dataset = make_dataset(
val_dataset, train=False, image_size=bigger_size, fixres=False
)
```
---
## Visualize the datasets
```python
def visualize_dataset(batch_images):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(batch_images[n].numpy().astype("int"))
plt.axis("off")
plt.show()
print(f"Batch shape: {batch_images.shape}.")
# Smaller resolution.
initial_sample_images, _ = next(iter(initial_train_dataset))
visualize_dataset(initial_sample_images)
# Bigger resolution, only for fine-tuning.
finetune_sample_images, _ = next(iter(finetune_train_dataset))
visualize_dataset(finetune_sample_images)
# Bigger resolution, with the same augmentation transforms as
# the smaller resolution dataset.
vanilla_sample_images, _ = next(iter(vanilla_train_dataset))
visualize_dataset(vanilla_sample_images)
```

<div class="k-default-codeblock">
```
Batch shape: (32, 128, 128, 3).
```
</div>

<div class="k-default-codeblock">
```
Batch shape: (32, 224, 224, 3).
```
</div>

<div class="k-default-codeblock">
```
Batch shape: (32, 224, 224, 3).
```
</div>
---
## Model training utilities
We train multiple variants of ResNet50V2
([He et al.](https://arxiv.org/abs/1603.05027)):
1. On the smaller resolution dataset (128x128). It will be trained from scratch.
2. Then fine-tune the model from 1 on the larger resolution (224x224) dataset.
3. Train another ResNet50V2 from scratch on the larger resolution dataset.
As a reminder, the larger resolution datasets differ in terms of their augmentation
transforms.
```python
def get_training_model(num_classes=5):
inputs = layers.Input((None, None, 3))
resnet_base = keras.applications.ResNet50V2(
include_top=False, weights=None, pooling="avg"
)
resnet_base.trainable = True
x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)
x = resnet_base(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
return keras.Model(inputs, outputs)
def train_and_evaluate(
model,
train_ds,
val_ds,
epochs,
learning_rate=1e-3,
use_early_stopping=False,
):
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
if use_early_stopping:
es_callback = keras.callbacks.EarlyStopping(patience=5)
callbacks = [es_callback]
else:
callbacks = None
model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs,
callbacks=callbacks,
)
_, accuracy = model.evaluate(val_ds)
print(f"Top-1 accuracy on the validation set: {accuracy*100:.2f}%.")
return model
```
---
## Experiment 1: Train on 128x128 and then fine-tune on 224x224
```python
epochs = 30
smaller_res_model = get_training_model()
smaller_res_model = train_and_evaluate(
smaller_res_model, initial_train_dataset, initial_val_dataset, epochs
)
```
<div class="k-default-codeblock">
```
Epoch 1/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 56s 299ms/step - accuracy: 0.4146 - loss: 1.7349 - val_accuracy: 0.2234 - val_loss: 2.0703
Epoch 2/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.5062 - loss: 1.2458 - val_accuracy: 0.3896 - val_loss: 1.5800
Epoch 3/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.5262 - loss: 1.1733 - val_accuracy: 0.5940 - val_loss: 1.0160
Epoch 4/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 37ms/step - accuracy: 0.5740 - loss: 1.1021 - val_accuracy: 0.5967 - val_loss: 1.6164
Epoch 5/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6160 - loss: 1.0289 - val_accuracy: 0.5313 - val_loss: 1.2465
Epoch 6/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6137 - loss: 1.0286 - val_accuracy: 0.6431 - val_loss: 0.8564
Epoch 7/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6237 - loss: 0.9760 - val_accuracy: 0.6240 - val_loss: 1.0114
Epoch 8/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6029 - loss: 0.9994 - val_accuracy: 0.5804 - val_loss: 1.0331
Epoch 9/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6419 - loss: 0.9555 - val_accuracy: 0.6403 - val_loss: 0.8417
Epoch 10/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6513 - loss: 0.9333 - val_accuracy: 0.6376 - val_loss: 1.0658
Epoch 11/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6316 - loss: 0.9637 - val_accuracy: 0.5913 - val_loss: 1.5650
Epoch 12/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6542 - loss: 0.9047 - val_accuracy: 0.6458 - val_loss: 0.9613
Epoch 13/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6551 - loss: 0.8946 - val_accuracy: 0.6866 - val_loss: 0.8427
Epoch 14/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6617 - loss: 0.8848 - val_accuracy: 0.7003 - val_loss: 0.9339
Epoch 15/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6455 - loss: 0.9293 - val_accuracy: 0.6757 - val_loss: 0.9453
Epoch 16/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6821 - loss: 0.8481 - val_accuracy: 0.7466 - val_loss: 0.7237
Epoch 17/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6750 - loss: 0.8449 - val_accuracy: 0.5967 - val_loss: 1.5579
Epoch 18/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 37ms/step - accuracy: 0.6765 - loss: 0.8605 - val_accuracy: 0.6921 - val_loss: 0.8136
Epoch 19/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6969 - loss: 0.8140 - val_accuracy: 0.6131 - val_loss: 1.0785
Epoch 20/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6831 - loss: 0.8257 - val_accuracy: 0.7221 - val_loss: 0.7480
Epoch 21/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6988 - loss: 0.8008 - val_accuracy: 0.7193 - val_loss: 0.7953
Epoch 22/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7172 - loss: 0.7578 - val_accuracy: 0.6730 - val_loss: 1.1628
Epoch 23/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.6935 - loss: 0.8126 - val_accuracy: 0.7357 - val_loss: 0.6565
Epoch 24/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7149 - loss: 0.7568 - val_accuracy: 0.7439 - val_loss: 0.8830
Epoch 25/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7151 - loss: 0.7510 - val_accuracy: 0.7248 - val_loss: 0.7459
Epoch 26/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7133 - loss: 0.7838 - val_accuracy: 0.7084 - val_loss: 0.7140
Epoch 27/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7314 - loss: 0.7386 - val_accuracy: 0.6730 - val_loss: 1.5988
Epoch 28/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7259 - loss: 0.7417 - val_accuracy: 0.7275 - val_loss: 0.7255
Epoch 29/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7006 - loss: 0.7863 - val_accuracy: 0.6621 - val_loss: 1.5714
Epoch 30/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 4s 36ms/step - accuracy: 0.7115 - loss: 0.7498 - val_accuracy: 0.7548 - val_loss: 0.7067
12/12 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - accuracy: 0.7207 - loss: 0.8735
Top-1 accuracy on the validation set: 75.48%.
```
</div>
### Freeze all the layers except for the final Batch Normalization layer
For fine-tuning, we train only two layers:
* The final Batch Normalization ([Ioffe et al.](https://arxiv.org/abs/1502.03167)) layer.
* The classification layer.
We are unfreezing the final Batch Normalization layer to compensate for the change in
activation statistics before the global average pooling layer. As shown in
[the paper](https://arxiv.org/abs/1906.06423), unfreezing the final Batch
Normalization layer is enough.
For a comprehensive guide on fine-tuning models in Keras, refer to
[this tutorial](https://keras.io/guides/transfer_learning/).
```python
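# Note: `smaller_res_model.layers[2]` is the ResNet50V2 backbone (index 0 is the
# input layer, index 1 the rescaling layer), and `post_bn` is the name of its
# final Batch Normalization layer.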
for layer in smaller_res_model.layers[2].layers:
layer.trainable = False
smaller_res_model.layers[2].get_layer("post_bn").trainable = True
epochs = 10
# Use a lower learning rate during fine-tuning.
bigger_res_model = train_and_evaluate(
smaller_res_model,
finetune_train_dataset,
finetune_val_dataset,
epochs,
learning_rate=1e-4,
)
```
<div class="k-default-codeblock">
```
Epoch 1/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 26s 158ms/step - accuracy: 0.6890 - loss: 0.8791 - val_accuracy: 0.7548 - val_loss: 0.7801
Epoch 2/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7372 - loss: 0.8209 - val_accuracy: 0.7466 - val_loss: 0.7866
Epoch 3/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7532 - loss: 0.7925 - val_accuracy: 0.7520 - val_loss: 0.7779
Epoch 4/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7417 - loss: 0.7833 - val_accuracy: 0.7439 - val_loss: 0.7625
Epoch 5/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7508 - loss: 0.7624 - val_accuracy: 0.7439 - val_loss: 0.7449
Epoch 6/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7542 - loss: 0.7406 - val_accuracy: 0.7493 - val_loss: 0.7220
Epoch 7/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7471 - loss: 0.7716 - val_accuracy: 0.7520 - val_loss: 0.7111
Epoch 8/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 35ms/step - accuracy: 0.7580 - loss: 0.7082 - val_accuracy: 0.7548 - val_loss: 0.6939
Epoch 9/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7571 - loss: 0.7121 - val_accuracy: 0.7520 - val_loss: 0.6915
Epoch 10/10
104/104 ━━━━━━━━━━━━━━━━━━━━ 6s 34ms/step - accuracy: 0.7482 - loss: 0.7285 - val_accuracy: 0.7520 - val_loss: 0.6830
12/12 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step - accuracy: 0.7296 - loss: 0.7253
Top-1 accuracy on the validation set: 75.20%.
```
</div>
---
## Experiment 2: Train a model on 224x224 resolution from scratch
Now, we train another model from scratch on the larger resolution dataset. Recall that
the augmentation transforms used in this dataset are different from before.
```python
epochs = 30
vanilla_bigger_res_model = get_training_model()
vanilla_bigger_res_model = train_and_evaluate(
vanilla_bigger_res_model, vanilla_train_dataset, vanilla_val_dataset, epochs
)
```
<div class="k-default-codeblock">
```
Epoch 1/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 58s 318ms/step - accuracy: 0.4148 - loss: 1.6685 - val_accuracy: 0.2807 - val_loss: 1.5614
Epoch 2/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.5137 - loss: 1.2569 - val_accuracy: 0.3324 - val_loss: 1.4950
Epoch 3/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.5582 - loss: 1.1617 - val_accuracy: 0.5395 - val_loss: 1.0945
Epoch 4/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.5559 - loss: 1.1420 - val_accuracy: 0.5123 - val_loss: 1.5154
Epoch 5/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6036 - loss: 1.0731 - val_accuracy: 0.4823 - val_loss: 1.2676
Epoch 6/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.5376 - loss: 1.1810 - val_accuracy: 0.4496 - val_loss: 3.5370
Epoch 7/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6216 - loss: 0.9956 - val_accuracy: 0.5804 - val_loss: 1.0637
Epoch 8/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6209 - loss: 0.9915 - val_accuracy: 0.5613 - val_loss: 1.1856
Epoch 9/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6229 - loss: 0.9657 - val_accuracy: 0.6076 - val_loss: 1.0131
Epoch 10/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6322 - loss: 0.9654 - val_accuracy: 0.6022 - val_loss: 1.1179
Epoch 11/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6223 - loss: 0.9634 - val_accuracy: 0.6458 - val_loss: 0.8731
Epoch 12/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6414 - loss: 0.9838 - val_accuracy: 0.6975 - val_loss: 0.8202
Epoch 13/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6635 - loss: 0.8912 - val_accuracy: 0.6730 - val_loss: 0.8018
Epoch 14/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6571 - loss: 0.8915 - val_accuracy: 0.5640 - val_loss: 1.2489
Epoch 15/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6725 - loss: 0.8788 - val_accuracy: 0.6240 - val_loss: 1.0039
Epoch 16/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6776 - loss: 0.8630 - val_accuracy: 0.6322 - val_loss: 1.0803
Epoch 17/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6728 - loss: 0.8673 - val_accuracy: 0.7330 - val_loss: 0.7256
Epoch 18/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 85ms/step - accuracy: 0.6969 - loss: 0.8069 - val_accuracy: 0.7275 - val_loss: 0.8264
Epoch 19/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 85ms/step - accuracy: 0.6891 - loss: 0.8271 - val_accuracy: 0.6594 - val_loss: 0.9932
Epoch 20/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 85ms/step - accuracy: 0.6678 - loss: 0.8630 - val_accuracy: 0.7221 - val_loss: 0.7238
Epoch 21/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6980 - loss: 0.7991 - val_accuracy: 0.6267 - val_loss: 0.8916
Epoch 22/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 85ms/step - accuracy: 0.7187 - loss: 0.7546 - val_accuracy: 0.7466 - val_loss: 0.6844
Epoch 23/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 85ms/step - accuracy: 0.7210 - loss: 0.7491 - val_accuracy: 0.6676 - val_loss: 1.1051
Epoch 24/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.6930 - loss: 0.7762 - val_accuracy: 0.7493 - val_loss: 0.6720
Epoch 25/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7192 - loss: 0.7706 - val_accuracy: 0.7357 - val_loss: 0.7281
Epoch 26/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7227 - loss: 0.7339 - val_accuracy: 0.7602 - val_loss: 0.6618
Epoch 27/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7108 - loss: 0.7641 - val_accuracy: 0.7057 - val_loss: 0.8372
Epoch 28/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7186 - loss: 0.7644 - val_accuracy: 0.7657 - val_loss: 0.5906
Epoch 29/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7166 - loss: 0.7394 - val_accuracy: 0.7820 - val_loss: 0.6294
Epoch 30/30
104/104 ━━━━━━━━━━━━━━━━━━━━ 10s 84ms/step - accuracy: 0.7122 - loss: 0.7655 - val_accuracy: 0.7139 - val_loss: 0.8012
12/12 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step - accuracy: 0.6797 - loss: 0.8819
Top-1 accuracy on the validation set: 71.39%.
```
</div>
As we can see from the above cells, FixRes leads to better performance. Another
advantage of FixRes is the reduced total training time and GPU memory usage, since most
of the training happens at the lower resolution.
FixRes is model-agnostic: you can use it on any image classification model
to potentially boost performance.
You can find more results
[here](https://tensorboard.dev/experiment/BQOg28w0TlmvuJYeqsVntw)
that were gathered by running the same code with different random seeds.
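To illustrate that point, here is a minimal sketch of how the same two-stage recipe could
be reused with a different backbone (DenseNet121 is an arbitrary choice for this sketch);
the datasets and the `train_and_evaluate()` utility stay the same:

```python
def get_densenet_training_model(num_classes=5):
    # Same structure as `get_training_model()`, with a different backbone.
    inputs = layers.Input((None, None, 3))
    backbone = keras.applications.DenseNet121(
        include_top=False, weights=None, pooling="avg"
    )
    x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)
    x = backbone(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    return keras.Model(inputs, outputs)


# Stage 1: train from scratch on the 128x128 dataset.
# Stage 2: freeze everything except the backbone's final normalization layer and
# the classification head, then fine-tune on the 224x224 dataset with a lower
# learning rate, exactly as done for ResNet50V2 above.
```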
| keras-io/examples/vision/md/fixres.md/0 | {
"file_path": "keras-io/examples/vision/md/fixres.md",
"repo_id": "keras-io",
"token_count": 9244
} | 94 |
# Supervised Contrastive Learning
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2020/11/30<br>
**Last modified:** 2020/11/30<br>
**Description:** Using supervised contrastive learning for image classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/supervised-contrastive-learning.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/supervised-contrastive-learning.py)
## Introduction
[Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362)
(Prannay Khosla et al.) is a training methodology that outperforms
supervised training with crossentropy on classification tasks.
Essentially, training an image classification model with Supervised Contrastive
Learning is performed in two phases:
1. Training an encoder to learn to produce vector representations of input images such
that representations of images in the same class will be more similar compared to
representations of images in different classes.
2. Training a classifier on top of the frozen encoder.
Note that this example requires [TensorFlow Addons](https://www.tensorflow.org/addons), which you can install using
the following command:
```python
pip install tensorflow-addons
```
## Setup
```python
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
```
## Prepare the data
```python
num_classes = 10
input_shape = (32, 32, 3)
# Load the train and test data splits
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Display shapes of train and test datasets
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
```
<div class="k-default-codeblock">
```
x_train shape: (50000, 32, 32, 3) - y_train shape: (50000, 1)
x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 1)
```
</div>
---
## Using image data augmentation
```python
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.02),
]
)
# Setting the state of the normalization layer.
data_augmentation.layers[0].adapt(x_train)
```
---
## Build the encoder model
The encoder model takes the image as input and turns it into a 2048-dimensional
feature vector.
```python
def create_encoder():
resnet = keras.applications.ResNet50V2(
include_top=False, weights=None, input_shape=input_shape, pooling="avg"
)
inputs = keras.Input(shape=input_shape)
augmented = data_augmentation(inputs)
outputs = resnet(augmented)
model = keras.Model(inputs=inputs, outputs=outputs, name="cifar10-encoder")
return model
encoder = create_encoder()
encoder.summary()
learning_rate = 0.001
batch_size = 265
hidden_units = 512
projection_units = 128
num_epochs = 50
dropout_rate = 0.5
temperature = 0.05
```
<div class="k-default-codeblock">
```
Model: "cifar10-encoder"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 32, 32, 3)] 0
_________________________________________________________________
sequential (Sequential) (None, None, None, 3) 7
_________________________________________________________________
resnet50v2 (Functional) (None, 2048) 23564800
=================================================================
Total params: 23,564,807
Trainable params: 23,519,360
Non-trainable params: 45,447
_________________________________________________________________
```
</div>
---
## Build the classification model
The classification model adds a fully-connected layer on top of the encoder,
plus a softmax layer with the target classes.
```python
def create_classifier(encoder, trainable=True):
for layer in encoder.layers:
layer.trainable = trainable
inputs = keras.Input(shape=input_shape)
features = encoder(inputs)
features = layers.Dropout(dropout_rate)(features)
features = layers.Dense(hidden_units, activation="relu")(features)
features = layers.Dropout(dropout_rate)(features)
outputs = layers.Dense(num_classes, activation="softmax")(features)
model = keras.Model(inputs=inputs, outputs=outputs, name="cifar10-classifier")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
return model
```
---
## Experiment 1: Train the baseline classification model
In this experiment, a baseline classifier is trained as usual, i.e., the
encoder and the classifier parts are trained together as a single model
to minimize the crossentropy loss.
```python
encoder = create_encoder()
classifier = create_classifier(encoder)
classifier.summary()
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
Model: "cifar10-classifier"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 32, 32, 3)] 0
_________________________________________________________________
cifar10-encoder (Functional) (None, 2048) 23564807
_________________________________________________________________
dropout (Dropout) (None, 2048) 0
_________________________________________________________________
dense (Dense) (None, 512) 1049088
_________________________________________________________________
dropout_1 (Dropout) (None, 512) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 24,619,025
Trainable params: 24,573,578
Non-trainable params: 45,447
_________________________________________________________________
Epoch 1/50
189/189 [==============================] - 15s 77ms/step - loss: 1.9369 - sparse_categorical_accuracy: 0.2874
Epoch 2/50
189/189 [==============================] - 11s 57ms/step - loss: 1.5133 - sparse_categorical_accuracy: 0.4505
Epoch 3/50
189/189 [==============================] - 11s 57ms/step - loss: 1.3468 - sparse_categorical_accuracy: 0.5204
Epoch 4/50
189/189 [==============================] - 11s 60ms/step - loss: 1.2159 - sparse_categorical_accuracy: 0.5733
Epoch 5/50
189/189 [==============================] - 11s 56ms/step - loss: 1.1516 - sparse_categorical_accuracy: 0.6032
Epoch 6/50
189/189 [==============================] - 11s 58ms/step - loss: 1.0769 - sparse_categorical_accuracy: 0.6254
Epoch 7/50
189/189 [==============================] - 11s 58ms/step - loss: 0.9964 - sparse_categorical_accuracy: 0.6547
Epoch 8/50
189/189 [==============================] - 10s 55ms/step - loss: 0.9563 - sparse_categorical_accuracy: 0.6703
Epoch 9/50
189/189 [==============================] - 10s 55ms/step - loss: 0.8952 - sparse_categorical_accuracy: 0.6925
Epoch 10/50
189/189 [==============================] - 11s 56ms/step - loss: 0.8986 - sparse_categorical_accuracy: 0.6922
Epoch 11/50
189/189 [==============================] - 10s 55ms/step - loss: 0.8381 - sparse_categorical_accuracy: 0.7145
Epoch 12/50
189/189 [==============================] - 10s 55ms/step - loss: 0.8513 - sparse_categorical_accuracy: 0.7086
Epoch 13/50
189/189 [==============================] - 11s 56ms/step - loss: 0.7557 - sparse_categorical_accuracy: 0.7448
Epoch 14/50
189/189 [==============================] - 11s 56ms/step - loss: 0.7168 - sparse_categorical_accuracy: 0.7548
Epoch 15/50
189/189 [==============================] - 10s 55ms/step - loss: 0.6772 - sparse_categorical_accuracy: 0.7690
Epoch 16/50
189/189 [==============================] - 11s 56ms/step - loss: 0.7587 - sparse_categorical_accuracy: 0.7416
Epoch 17/50
189/189 [==============================] - 10s 55ms/step - loss: 0.6873 - sparse_categorical_accuracy: 0.7665
Epoch 18/50
189/189 [==============================] - 11s 56ms/step - loss: 0.6418 - sparse_categorical_accuracy: 0.7804
Epoch 19/50
189/189 [==============================] - 11s 56ms/step - loss: 0.6086 - sparse_categorical_accuracy: 0.7927
Epoch 20/50
189/189 [==============================] - 10s 55ms/step - loss: 0.5903 - sparse_categorical_accuracy: 0.7978
Epoch 21/50
189/189 [==============================] - 11s 56ms/step - loss: 0.5636 - sparse_categorical_accuracy: 0.8083
Epoch 22/50
189/189 [==============================] - 11s 56ms/step - loss: 0.5527 - sparse_categorical_accuracy: 0.8123
Epoch 23/50
189/189 [==============================] - 11s 56ms/step - loss: 0.5308 - sparse_categorical_accuracy: 0.8191
Epoch 24/50
189/189 [==============================] - 10s 55ms/step - loss: 0.5282 - sparse_categorical_accuracy: 0.8223
Epoch 25/50
189/189 [==============================] - 10s 55ms/step - loss: 0.5090 - sparse_categorical_accuracy: 0.8263
Epoch 26/50
189/189 [==============================] - 10s 55ms/step - loss: 0.5497 - sparse_categorical_accuracy: 0.8181
Epoch 27/50
189/189 [==============================] - 10s 55ms/step - loss: 0.4950 - sparse_categorical_accuracy: 0.8332
Epoch 28/50
189/189 [==============================] - 11s 56ms/step - loss: 0.4727 - sparse_categorical_accuracy: 0.8391
Epoch 29/50
167/189 [=========================>....] - ETA: 1s - loss: 0.4594 - sparse_categorical_accuracy: 0.8444
```
</div>
---
## Experiment 2: Use supervised contrastive learning
In this experiment, the model is trained in two phases. In the first phase,
the encoder is pretrained to optimize the supervised contrastive loss
described in [Prannay Khosla et al.](https://arxiv.org/abs/2004.11362).
In the second phase, the classifier is trained using the trained encoder with
its weights frozen; only the weights of the fully-connected layers with the
softmax output are optimized.
### 1. Supervised contrastive learning loss function
```python
class SupervisedContrastiveLoss(keras.losses.Loss):
    def __init__(self, temperature=1, name=None):
        super().__init__(name=name)
        self.temperature = temperature

    def __call__(self, labels, feature_vectors, sample_weight=None):
        # Normalize feature vectors
        feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)
        # Compute logits
        logits = tf.divide(
            tf.matmul(
                feature_vectors_normalized, tf.transpose(feature_vectors_normalized)
            ),
            self.temperature,
        )
        return tfa.losses.npairs_loss(tf.squeeze(labels), logits)


def add_projection_head(encoder):
    inputs = keras.Input(shape=input_shape)
    features = encoder(inputs)
    outputs = layers.Dense(projection_units, activation="relu")(features)
    model = keras.Model(
        inputs=inputs, outputs=outputs, name="cifar-encoder_with_projection-head"
    )
    return model
```
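To make the expected shapes concrete, here is a minimal, illustrative check of the loss on a toy batch (not part of the original example; it assumes `tensorflow_addons` is installed, since the loss above relies on `tfa.losses.npairs_loss`): `labels` are integer class IDs of shape `(batch_size, 1)`, and `feature_vectors` are projection-head outputs of shape `(batch_size, projection_units)`.
```python
# Illustrative only: check that the loss accepts a toy batch and returns a scalar.
dummy_labels = tf.constant([[0], [1], [0], [2]])  # (batch_size, 1) integer class IDs
dummy_features = tf.random.normal((4, 128))  # (batch_size, projection_units)
loss_value = SupervisedContrastiveLoss(temperature=0.05)(dummy_labels, dummy_features)
print(float(loss_value))  # a positive scalar
```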
### 2. Pretrain the encoder
```python
encoder = create_encoder()
encoder_with_projection_head = add_projection_head(encoder)
encoder_with_projection_head.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=SupervisedContrastiveLoss(temperature),
)
encoder_with_projection_head.summary()
history = encoder_with_projection_head.fit(
x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs
)
```
<div class="k-default-codeblock">
```
Model: "cifar-encoder_with_projection-head"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_8 (InputLayer) [(None, 32, 32, 3)] 0
_________________________________________________________________
cifar10-encoder (Functional) (None, 2048) 23564807
_________________________________________________________________
dense_2 (Dense) (None, 128) 262272
=================================================================
Total params: 23,827,079
Trainable params: 23,781,632
Non-trainable params: 45,447
_________________________________________________________________
Epoch 1/50
189/189 [==============================] - 11s 56ms/step - loss: 5.3730
Epoch 2/50
189/189 [==============================] - 11s 56ms/step - loss: 5.1583
Epoch 3/50
189/189 [==============================] - 10s 55ms/step - loss: 5.0368
Epoch 4/50
189/189 [==============================] - 11s 56ms/step - loss: 4.9349
Epoch 5/50
189/189 [==============================] - 10s 55ms/step - loss: 4.8262
Epoch 6/50
189/189 [==============================] - 11s 56ms/step - loss: 4.7470
Epoch 7/50
189/189 [==============================] - 11s 56ms/step - loss: 4.6835
Epoch 8/50
189/189 [==============================] - 11s 56ms/step - loss: 4.6120
Epoch 9/50
189/189 [==============================] - 11s 56ms/step - loss: 4.5608
Epoch 10/50
189/189 [==============================] - 10s 55ms/step - loss: 4.5075
Epoch 11/50
189/189 [==============================] - 11s 56ms/step - loss: 4.4674
Epoch 12/50
189/189 [==============================] - 10s 56ms/step - loss: 4.4362
Epoch 13/50
189/189 [==============================] - 11s 56ms/step - loss: 4.3899
Epoch 14/50
189/189 [==============================] - 10s 55ms/step - loss: 4.3664
Epoch 15/50
189/189 [==============================] - 11s 56ms/step - loss: 4.3188
Epoch 16/50
189/189 [==============================] - 10s 56ms/step - loss: 4.3030
Epoch 17/50
189/189 [==============================] - 11s 57ms/step - loss: 4.2725
Epoch 18/50
189/189 [==============================] - 10s 55ms/step - loss: 4.2523
Epoch 19/50
189/189 [==============================] - 11s 56ms/step - loss: 4.2100
Epoch 20/50
189/189 [==============================] - 10s 55ms/step - loss: 4.2033
Epoch 21/50
189/189 [==============================] - 11s 56ms/step - loss: 4.1741
Epoch 22/50
189/189 [==============================] - 11s 56ms/step - loss: 4.1443
Epoch 23/50
189/189 [==============================] - 11s 56ms/step - loss: 4.1350
Epoch 24/50
189/189 [==============================] - 11s 57ms/step - loss: 4.1192
Epoch 25/50
189/189 [==============================] - 11s 56ms/step - loss: 4.1002
Epoch 26/50
189/189 [==============================] - 11s 57ms/step - loss: 4.0797
Epoch 27/50
189/189 [==============================] - 11s 56ms/step - loss: 4.0547
Epoch 28/50
189/189 [==============================] - 11s 56ms/step - loss: 4.0336
Epoch 29/50
189/189 [==============================] - 11s 56ms/step - loss: 4.0299
Epoch 30/50
189/189 [==============================] - 11s 56ms/step - loss: 4.0031
Epoch 31/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9979
Epoch 32/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9777
Epoch 33/50
189/189 [==============================] - 10s 55ms/step - loss: 3.9800
Epoch 34/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9538
Epoch 35/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9298
Epoch 36/50
189/189 [==============================] - 11s 57ms/step - loss: 3.9241
Epoch 37/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9102
Epoch 38/50
189/189 [==============================] - 11s 56ms/step - loss: 3.9075
Epoch 39/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8897
Epoch 40/50
189/189 [==============================] - 11s 57ms/step - loss: 3.8871
Epoch 41/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8596
Epoch 42/50
189/189 [==============================] - 10s 56ms/step - loss: 3.8526
Epoch 43/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8417
Epoch 44/50
189/189 [==============================] - 10s 55ms/step - loss: 3.8239
Epoch 45/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8178
Epoch 46/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8065
Epoch 47/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8185
Epoch 48/50
189/189 [==============================] - 11s 56ms/step - loss: 3.8022
Epoch 49/50
189/189 [==============================] - 11s 56ms/step - loss: 3.7815
Epoch 50/50
189/189 [==============================] - 11s 56ms/step - loss: 3.7601
```
</div>
### 3. Train the classifier with the frozen encoder
```python
classifier = create_classifier(encoder, trainable=False)
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
<div class="k-default-codeblock">
```
Epoch 1/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3979 - sparse_categorical_accuracy: 0.8869
Epoch 2/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3422 - sparse_categorical_accuracy: 0.8959
Epoch 3/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3251 - sparse_categorical_accuracy: 0.9004
Epoch 4/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3313 - sparse_categorical_accuracy: 0.8963
Epoch 5/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3213 - sparse_categorical_accuracy: 0.9006
Epoch 6/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3221 - sparse_categorical_accuracy: 0.9001
Epoch 7/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3134 - sparse_categorical_accuracy: 0.9001
Epoch 8/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3245 - sparse_categorical_accuracy: 0.8978
Epoch 9/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3144 - sparse_categorical_accuracy: 0.9001
Epoch 10/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3191 - sparse_categorical_accuracy: 0.8984
Epoch 11/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3104 - sparse_categorical_accuracy: 0.9025
Epoch 12/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3261 - sparse_categorical_accuracy: 0.8958
Epoch 13/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3130 - sparse_categorical_accuracy: 0.9001
Epoch 14/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3147 - sparse_categorical_accuracy: 0.9003
Epoch 15/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3113 - sparse_categorical_accuracy: 0.9016
Epoch 16/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3114 - sparse_categorical_accuracy: 0.9008
Epoch 17/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3044 - sparse_categorical_accuracy: 0.9026
Epoch 18/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3142 - sparse_categorical_accuracy: 0.8987
Epoch 19/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3139 - sparse_categorical_accuracy: 0.9018
Epoch 20/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3199 - sparse_categorical_accuracy: 0.8987
Epoch 21/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3125 - sparse_categorical_accuracy: 0.8994
Epoch 22/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3291 - sparse_categorical_accuracy: 0.8967
Epoch 23/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3208 - sparse_categorical_accuracy: 0.8963
Epoch 24/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3065 - sparse_categorical_accuracy: 0.9041
Epoch 25/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3099 - sparse_categorical_accuracy: 0.9006
Epoch 26/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3181 - sparse_categorical_accuracy: 0.8986
Epoch 27/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3112 - sparse_categorical_accuracy: 0.9013
Epoch 28/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3136 - sparse_categorical_accuracy: 0.8996
Epoch 29/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3217 - sparse_categorical_accuracy: 0.8969
Epoch 30/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3161 - sparse_categorical_accuracy: 0.8998
Epoch 31/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3151 - sparse_categorical_accuracy: 0.8999
Epoch 32/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3092 - sparse_categorical_accuracy: 0.9009
Epoch 33/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3246 - sparse_categorical_accuracy: 0.8961
Epoch 34/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3143 - sparse_categorical_accuracy: 0.8995
Epoch 35/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3106 - sparse_categorical_accuracy: 0.9002
Epoch 36/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3210 - sparse_categorical_accuracy: 0.8980
Epoch 37/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3178 - sparse_categorical_accuracy: 0.9009
Epoch 38/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3064 - sparse_categorical_accuracy: 0.9032
Epoch 39/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3196 - sparse_categorical_accuracy: 0.8981
Epoch 40/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3177 - sparse_categorical_accuracy: 0.8988
Epoch 41/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3167 - sparse_categorical_accuracy: 0.8987
Epoch 42/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3110 - sparse_categorical_accuracy: 0.9014
Epoch 43/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3124 - sparse_categorical_accuracy: 0.9002
Epoch 44/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3128 - sparse_categorical_accuracy: 0.8999
Epoch 45/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3131 - sparse_categorical_accuracy: 0.8991
Epoch 46/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3149 - sparse_categorical_accuracy: 0.8992
Epoch 47/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3082 - sparse_categorical_accuracy: 0.9021
Epoch 48/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3223 - sparse_categorical_accuracy: 0.8959
Epoch 49/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3195 - sparse_categorical_accuracy: 0.8981
Epoch 50/50
189/189 [==============================] - 3s 16ms/step - loss: 0.3240 - sparse_categorical_accuracy: 0.8962
313/313 [==============================] - 2s 7ms/step - loss: 0.7332 - sparse_categorical_accuracy: 0.8162
Test accuracy: 81.62%
```
</div>
We get an improved test accuracy compared to Experiment 1.
---
## Conclusion
As shown in the experiments, the supervised contrastive learning technique
outperformed the conventional technique in terms of test accuracy. Note that
the same training budget (i.e., number of epochs) was given to each technique.
Supervised contrastive learning pays off when the encoder has a complex
architecture, like ResNet, and for multi-class problems with many labels.
In addition, large batch sizes and multi-layer projection heads
improve its effectiveness. See the [Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362)
paper for more details.
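As a concrete illustration of the multi-layer projection heads mentioned above (a sketch only, not part of the original code; it reuses `input_shape`, `projection_units`, `keras`, and `layers` from the example), the single-`Dense` projection head could be replaced with a small MLP:
```python
def add_mlp_projection_head(encoder):
    # Same idea as `add_projection_head`, but with a two-layer (MLP) projection.
    inputs = keras.Input(shape=input_shape)
    features = encoder(inputs)
    x = layers.Dense(projection_units, activation="relu")(features)
    outputs = layers.Dense(projection_units)(x)
    return keras.Model(
        inputs=inputs, outputs=outputs, name="cifar-encoder_with_mlp-projection-head"
    )
```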
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/supervised-contrastive-learning-cifar10) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/supervised-contrastive-learning).
| keras-io/examples/vision/md/supervised-contrastive-learning.md/0 | {
"file_path": "keras-io/examples/vision/md/supervised-contrastive-learning.md",
"repo_id": "keras-io",
"token_count": 8488
} | 95 |
<jupyter_start><jupyter_text>Distributed hyperparameter tuning **Authors:** Tom O'Malley, Haifeng Jin **Date created:** 2019/10/24 **Last modified:** 2021/06/02 **Description:** Tuning the hyperparameters of the models with multiple GPUs and multiple machines.<jupyter_code>!pip install keras-tuner -q<jupyter_output><empty_output><jupyter_text>Introduction KerasTuner makes it easy to perform distributed hyperparameter search. No changes to your code are needed to scale up from running single-threaded locally to running on dozens or hundreds of workers in parallel. Distributed KerasTuner uses a chief-worker model. The chief runs a service to which the workers report results and query for the hyperparameters to try next. The chief should be run on a single-threaded CPU instance (or alternatively as a separate process on one of the workers). Configuring distributed mode Configuring distributed mode for KerasTuner only requires setting three environment variables: **KERASTUNER_TUNER_ID**: This should be set to "chief" for the chief process. Other workers should be passed a unique ID (by convention, "tuner0", "tuner1", etc). **KERASTUNER_ORACLE_IP**: The IP address or hostname that the chief service should run on. All workers should be able to resolve and access this address. **KERASTUNER_ORACLE_PORT**: The port that the chief service should run on. This can be freely chosen, but must be a port that is accessible to the other workers. Instances communicate via the [gRPC](https://www.grpc.io) protocol. The same code can be run on all workers. Additional considerations for distributed mode are: - All workers should have access to a centralized file system to which they can write their results. - All workers should be able to access the necessary training and validation data needed for tuning. - To support fault-tolerance, `overwrite` should be kept as `False` in `Tuner.__init__` (`False` is the default). Example bash script for chief service (sample code for `run_tuning.py` at bottom of page): ```export KERASTUNER_TUNER_ID="chief" export KERASTUNER_ORACLE_IP="127.0.0.1" export KERASTUNER_ORACLE_PORT="8000" python run_tuning.py``` Example bash script for worker: ```export KERASTUNER_TUNER_ID="tuner0" export KERASTUNER_ORACLE_IP="127.0.0.1" export KERASTUNER_ORACLE_PORT="8000" python run_tuning.py``` Data parallelism with `tf.distribute` KerasTuner also supports data parallelism via [tf.distribute](https://www.tensorflow.org/tutorials/distribute/keras). Data parallelism and distributed tuning can be combined. For example, if you have 10 workers with 4 GPUs on each worker, you can run 10 parallel trials with each trial training on 4 GPUs by using [tf.distribute.MirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy). You can also run each trial on TPUs via [tf.distribute.TPUStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy). Currently [tf.distribute.MultiWorkerMirroredStrategy](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy) is not supported, but support for this is on the roadmap. Example code When the environment variables described above are set, the example below will run distributed tuning and use data parallelism within each trial via `tf.distribute`. The example loads MNIST from `keras.datasets` and uses [Hyperband](https://arxiv.org/abs/1603.06560) for the hyperparameter search.<jupyter_code>import keras
import keras_tuner
import tensorflow as tf
import numpy as np
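# Note (added for clarity; not in the original notebook): in distributed mode, each
# process would have the three environment variables described above set before this
# point, either via the bash scripts shown earlier or in Python, e.g. for a worker:
#
#   import os
#   os.environ["KERASTUNER_TUNER_ID"] = "tuner0"  # "chief" for the chief process
#   os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
#   os.environ["KERASTUNER_ORACLE_PORT"] = "8000"
#
# This notebook runs as a single process, so none of them are set here.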
def build_model(hp):
"""Builds a convolutional model."""
inputs = keras.Input(shape=(28, 28, 1))
x = inputs
for i in range(hp.Int("conv_layers", 1, 3, default=3)):
x = keras.layers.Conv2D(
filters=hp.Int("filters_" + str(i), 4, 32, step=4, default=8),
kernel_size=hp.Int("kernel_size_" + str(i), 3, 5),
activation="relu",
padding="same",
)(x)
if hp.Choice("pooling" + str(i), ["max", "avg"]) == "max":
x = keras.layers.MaxPooling2D()(x)
else:
x = keras.layers.AveragePooling2D()(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
if hp.Choice("global_pooling", ["max", "avg"]) == "max":
x = keras.layers.GlobalMaxPooling2D()(x)
else:
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
optimizer = hp.Choice("optimizer", ["adam", "sgd"])
model.compile(
optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
return model
tuner = keras_tuner.Hyperband(
hypermodel=build_model,
objective="val_accuracy",
max_epochs=2,
factor=3,
hyperband_iterations=1,
distribution_strategy=tf.distribute.MirroredStrategy(),
directory="results_dir",
project_name="mnist",
overwrite=True,
)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Reshape the images to have the channel dimension.
x_train = (x_train.reshape(x_train.shape + (1,)) / 255.0)[:1000]
y_train = y_train.astype(np.int64)[:1000]
x_test = (x_test.reshape(x_test.shape + (1,)) / 255.0)[:100]
y_test = y_test.astype(np.int64)[:100]
tuner.search(
x_train,
y_train,
steps_per_epoch=600,
validation_data=(x_test, y_test),
validation_steps=100,
callbacks=[keras.callbacks.EarlyStopping("val_accuracy")],
)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_tuner/distributed_tuning.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_tuner/distributed_tuning.ipynb",
"repo_id": "keras-io",
"token_count": 1913
} | 96 |
<jupyter_start><jupyter_text>Writing a training loop from scratch in TensorFlow **Author:** [fchollet](https://twitter.com/fchollet) **Date created:** 2019/03/01 **Last modified:** 2023/06/25 **Description:** Writing low-level training & evaluation loops in TensorFlow.<jupyter_code>!pip install keras==3.0.0 --upgrade --quiet<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import time
import os
# This guide can only be run with the TensorFlow backend.
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
import numpy as np<jupyter_output><empty_output><jupyter_text>Introduction Keras provides default training and evaluation loops, `fit()` and `evaluate()`. Their usage is covered in the guide [Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/). If you want to customize the learning algorithm of your model while still leveraging the convenience of `fit()` (for instance, to train a GAN using `fit()`), you can subclass the `Model` class and implement your own `train_step()` method, which is called repeatedly during `fit()`. Now, if you want very low-level control over training & evaluation, you should write your own training & evaluation loops from scratch. This is what this guide is about. A first end-to-end example Let's consider a simple MNIST model:<jupyter_code>def get_model():
inputs = keras.Input(shape=(784,), name="digits")
x1 = keras.layers.Dense(64, activation="relu")(inputs)
x2 = keras.layers.Dense(64, activation="relu")(x1)
outputs = keras.layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
model = get_model()<jupyter_output><empty_output><jupyter_text>Let's train it using mini-batch gradient descent with a custom training loop. First, we're going to need an optimizer, a loss function, and a dataset:<jupyter_code># Instantiate an optimizer.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the training dataset.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)<jupyter_output><empty_output><jupyter_text>Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of the trainable weights of the layer with respect to a loss value. Using an optimizer instance, you can use these gradients to update these variables (which you can retrieve using `model.trainable_weights`). Here's our training loop, step by step: - We open a `for` loop that iterates over epochs - For each epoch, we open a `for` loop that iterates over the dataset, in batches - For each batch, we open a `GradientTape()` scope - Inside this scope, we call the model (forward pass) and compute the loss - Outside the scope, we retrieve the gradients of the weights of the model with regard to the loss - Finally, we use the optimizer to update the weights of the model based on the gradients<jupyter_code>epochs = 3
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables auto-differentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train, training=True) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply(grads, model.trainable_weights)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")<jupyter_output><empty_output><jupyter_text>Low-level handling of metricsLet's add metrics monitoring to this basic loop.You can readily reuse the built-in metrics (or custom ones you wrote) in such trainingloops written from scratch. Here's the flow:- Instantiate the metric at the start of the loop- Call `metric.update_state()` after each batch- Call `metric.result()` when you need to display the current value of the metric- Call `metric.reset_state()` when you need to clear the state of the metric(typically at the end of an epoch)Let's use this knowledge to compute `SparseCategoricalAccuracy` on training andvalidation data at the end of each epoch:<jupyter_code># Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()<jupyter_output><empty_output><jupyter_text>Here's our training & evaluation loop:<jupyter_code>epochs = 2
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train, training=True)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply(grads, model.trainable_weights)
# Update training metric.
train_acc_metric.update_state(y_batch_train, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
print(f"Time taken: {time.time() - start_time:.2f}s")<jupyter_output><empty_output><jupyter_text>Speeding-up your training step with `tf.function`The default runtime in TensorFlow is eager execution.As such, our training loop above executes eagerly.This is great for debugging, but graph compilation has a definite performanceadvantage. Describing your computation as a static graph enables the frameworkto apply global performance optimizations. This is impossible whenthe framework is constrained to greedily execute one operation after another,with no knowledge of what comes next.You can compile into a static graph any function that takes tensors as input.Just add a `@tf.function` decorator on it, like this:<jupyter_code>@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply(grads, model.trainable_weights)
train_acc_metric.update_state(y, logits)
return loss_value<jupyter_output><empty_output><jupyter_text>Let's do the same with the evaluation step:<jupyter_code>@tf.function
def test_step(x, y):
val_logits = model(x, training=False)
val_acc_metric.update_state(y, val_logits)<jupyter_output><empty_output><jupyter_text>Now, let's re-run our training loop with this compiled training step:<jupyter_code>epochs = 2
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
print(f"Time taken: {time.time() - start_time:.2f}s")<jupyter_output><empty_output><jupyter_text>Much faster, isn't it? Low-level handling of losses tracked by the modelLayers & models recursively track any losses created during the forward passby layers that call `self.add_loss(value)`. The resulting list of scalar lossvalues are available via the property `model.losses`at the end of the forward pass.If you want to be using these loss components, you should sum themand add them to the main loss in your training step.Consider this layer, that creates an activity regularization loss:<jupyter_code>class ActivityRegularizationLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs<jupyter_output><empty_output><jupyter_text>Let's build a really simple model that uses it:<jupyter_code>inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)<jupyter_output><empty_output><jupyter_text>Here's what our training step should look like now:<jupyter_code>@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply(grads, model.trainable_weights)
train_acc_metric.update_state(y, logits)
    return loss_value<jupyter_output><empty_output><jupyter_text>Summary Now you know everything there is to know about using built-in training loops and writing your own from scratch. To conclude, here's a simple end-to-end example that ties together everything you've learned in this guide: a DCGAN trained on MNIST digits. End-to-end example: a GAN training loop from scratch You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new images that look almost real, by learning the latent distribution of a training dataset of images (the "latent space" of the images). A GAN is made of two parts: a "generator" model that maps points in the latent space to points in image space, and a "discriminator" model, a classifier that can tell the difference between real images (from the training dataset) and fake images (the output of the generator network). A GAN training loop looks like this: 1) Train the discriminator. - Sample a batch of random points in the latent space. - Turn the points into fake images via the "generator" model. - Get a batch of real images and combine them with the generated images. - Train the "discriminator" model to classify generated vs. real images. 2) Train the generator. - Sample random points in the latent space. - Turn the points into fake images via the "generator" network. - Get a batch of real images and combine them with the generated images. - Train the "generator" model to "fool" the discriminator and classify the fake images as real. For a much more detailed overview of how GANs work, see [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). Let's implement this training loop. First, create the discriminator meant to classify fake vs real digits:<jupyter_code>discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.GlobalMaxPooling2D(),
keras.layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()<jupyter_output><empty_output><jupyter_text>Then let's create a generator network that turns latent vectors into outputs of shape `(28, 28, 1)` (representing MNIST digits):<jupyter_code>latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
keras.layers.Dense(7 * 7 * 128),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Reshape((7, 7, 128)),
keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
keras.layers.LeakyReLU(negative_slope=0.2),
keras.layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)<jupyter_output><empty_output><jupyter_text>Here's the key bit: the training loop. As you can see it is quite straightforward. The training step function only takes 17 lines.<jupyter_code># Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply(grads, discriminator.trainable_weights)
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply(grads, generator.trainable_weights)
    return d_loss, g_loss, generated_images<jupyter_output><empty_output><jupyter_text>Let's train our GAN, by repeatedly calling `train_step` on batches of images. Since our discriminator and generator are convnets, you're going to want to run this code on a GPU.<jupyter_code># Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print(f"\nStart epoch {epoch}")
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 100 == 0:
# Print metrics
print(f"discriminator loss at step {step}: {d_loss:.2f}")
print(f"adversarial loss at step {step}: {g_loss:.2f}")
# Save one generated image
img = keras.utils.array_to_img(generated_images[0] * 255.0, scale=False)
img.save(os.path.join(save_dir, f"generated_img_{step}.png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break<jupyter_output><empty_output> | keras-io/guides/ipynb/writing_a_custom_training_loop_in_tensorflow.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/writing_a_custom_training_loop_in_tensorflow.ipynb",
"repo_id": "keras-io",
"token_count": 6570
} | 97 |
# The Functional API
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/03/01<br>
**Last modified:** 2023/06/25<br>
**Description:** Complete guide to the functional API.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/functional_api.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/functional_api.py)
---
## Setup
```python
import numpy as np
import keras
from keras import layers
from keras import ops
```
---
## Introduction
The Keras *functional API* is a way to create models that are more flexible
than the `keras.Sequential` API. The functional API can handle models
with non-linear topology, shared layers, and even multiple inputs or outputs.
The main idea is that a deep learning model is usually
a directed acyclic graph (DAG) of layers.
So the functional API is a way to build *graphs of layers*.
Consider the following model:
<div class="k-default-codeblock">
```
(input: 784-dimensional vectors)
↧
[Dense (64 units, relu activation)]
↧
[Dense (64 units, relu activation)]
↧
[Dense (10 units, softmax activation)]
↧
(output: logits of a probability distribution over 10 classes)
```
</div>
This is a basic graph with three layers.
To build this model using the functional API, start by creating an input node:
```python
inputs = keras.Input(shape=(784,))
```
The shape of the data is set as a 784-dimensional vector.
The batch size is always omitted since only the shape of each sample is specified.
If, for example, you have an image input with a shape of `(32, 32, 3)`,
you would use:
```python
# Just for demonstration purposes.
img_inputs = keras.Input(shape=(32, 32, 3))
```
The `inputs` that is returned contains information about the shape and `dtype`
of the input data that you feed to your model.
Here's the shape:
```python
inputs.shape
```
<div class="k-default-codeblock">
```
(None, 784)
```
</div>
Here's the dtype:
```python
inputs.dtype
```
<div class="k-default-codeblock">
```
'float32'
```
</div>
You create a new node in the graph of layers by calling a layer on this `inputs`
object:
```python
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
```
The "layer call" action is like drawing an arrow from "inputs" to this layer
you created.
You're "passing" the inputs to the `dense` layer, and you get `x` as the output.
Let's add a few more layers to the graph of layers:
```python
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
```
At this point, you can create a `Model` by specifying its inputs and outputs
in the graph of layers:
```python
model = keras.Model(inputs=inputs, outputs=outputs, name="mnist_model")
```
Let's check out what the model summary looks like:
```python
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "mnist_model"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">784</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">50,240</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,160</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">650</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">55,050</span> (215.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">55,050</span> (215.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
You can also plot the model as a graph:
```python
keras.utils.plot_model(model, "my_first_model.png")
```

And, optionally, display the input and output shapes of each layer
in the plotted graph:
```python
keras.utils.plot_model(model, "my_first_model_with_shape_info.png", show_shapes=True)
```

This figure and the code are almost identical. In the code version,
the connection arrows are replaced by the call operation.
A "graph of layers" is an intuitive mental image for a deep learning model,
and the functional API is a way to create models that closely mirrors this.
---
## Training, evaluation, and inference
Training, evaluation, and inference work exactly in the same way for models
built using the functional API as for `Sequential` models.
The `Model` class offers a built-in training loop (the `fit()` method)
and a built-in evaluation loop (the `evaluate()` method). Note
that you can easily customize these loops to implement your own training routines.
See also the guides on customizing what happens in `fit()`:
- [Writing a custom train step with TensorFlow](/guides/custom_train_step_in_tensorflow/)
- [Writing a custom train step with JAX](/guides/custom_train_step_in_jax/)
- [Writing a custom train step with PyTorch](/guides/custom_train_step_in_torch/)
Here, load the MNIST image data, reshape it into vectors,
fit the model on the data (while monitoring performance on a validation split),
then evaluate the model on the test data:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=["accuracy"],
)
history = model.fit(x_train, y_train, batch_size=64, epochs=2, validation_split=0.2)
test_scores = model.evaluate(x_test, y_test, verbose=2)
print("Test loss:", test_scores[0])
print("Test accuracy:", test_scores[1])
```
<div class="k-default-codeblock">
```
Epoch 1/2
750/750 ━━━━━━━━━━━━━━━━━━━━ 1s 543us/step - accuracy: 0.8343 - loss: 0.6052 - val_accuracy: 0.9473 - val_loss: 0.1853
Epoch 2/2
750/750 ━━━━━━━━━━━━━━━━━━━━ 0s 373us/step - accuracy: 0.9462 - loss: 0.1814 - val_accuracy: 0.9553 - val_loss: 0.1507
313/313 - 0s - 292us/step - accuracy: 0.9535 - loss: 0.1525
Test loss: 0.15254925191402435
Test accuracy: 0.953499972820282
```
</div>
For further reading, see the
[training and evaluation](/guides/training_with_built_in_methods/) guide.
---
## Save and serialize
Saving the model and serialization work the same way for models built using
the functional API as they do for `Sequential` models. The standard way
to save a functional model is to call `model.save()`
to save the entire model as a single file. You can later recreate the same model
from this file, even if the code that built the model is no longer available.
This saved file includes the:
- model architecture
- model weight values (that were learned during training)
- model training config, if any (as passed to `compile()`)
- optimizer and its state, if any (to restart training where you left off)
```python
model.save("my_model.keras")
del model
# Recreate the exact same model purely from the file:
model = keras.models.load_model("my_model.keras")
```
For details, read the model [serialization & saving](/guides/serialization_and_saving/) guide.
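As a quick sanity check (a sketch, not part of the original guide), you can confirm that the reloaded model reproduces the original model's predictions by keeping a reference output around before deleting it:
```python
# Hypothetical verification that saving/loading preserves behavior.
reference_predictions = model.predict(x_test[:4])
model.save("my_model.keras")
del model
model = keras.models.load_model("my_model.keras")
np.testing.assert_allclose(model.predict(x_test[:4]), reference_predictions, atol=1e-6)
```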
---
## Use the same graph of layers to define multiple models
In the functional API, models are created by specifying their inputs
and outputs in a graph of layers. That means that a single
graph of layers can be used to generate multiple models.
In the example below, you use the same stack of layers to instantiate two models:
an `encoder` model that turns image inputs into 16-dimensional vectors,
and an end-to-end `autoencoder` model for training.
```python
encoder_input = keras.Input(shape=(28, 28, 1), name="img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
x = layers.Reshape((4, 4, 1))(encoder_output)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
autoencoder = keras.Model(encoder_input, decoder_output, name="autoencoder")
autoencoder.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "encoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">160</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,640</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,624</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ global_max_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalMaxPooling2D</span>) │ │ │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">18,672</span> (72.94 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">18,672</span> (72.94 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "autoencoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">160</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,640</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,624</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ global_max_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalMaxPooling2D</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ reshape (<span style="color: #0087ff; text-decoration-color: #0087ff">Reshape</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">160</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,640</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ up_sampling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,624</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">145</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,241</span> (110.32 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,241</span> (110.32 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
Here, the decoding architecture is strictly symmetrical
to the encoding architecture, so the output shape is the same as
the input shape `(28, 28, 1)`.
The reverse of a `Conv2D` layer is a `Conv2DTranspose` layer,
and the reverse of a `MaxPooling2D` layer is an `UpSampling2D` layer.
---
## All models are callable, just like layers
You can treat any model as if it were a layer by invoking it on an `Input` or
on the output of another layer. By calling a model you aren't just reusing
the architecture of the model, you're also reusing its weights.
To see this in action, here's a different take on the autoencoder example that
creates an encoder model, a decoder model, and chains them in two calls
to obtain the autoencoder model:
```python
encoder_input = keras.Input(shape=(28, 28, 1), name="original_img")
x = layers.Conv2D(16, 3, activation="relu")(encoder_input)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(32, 3, activation="relu")(x)
x = layers.Conv2D(16, 3, activation="relu")(x)
encoder_output = layers.GlobalMaxPooling2D()(x)
encoder = keras.Model(encoder_input, encoder_output, name="encoder")
encoder.summary()
decoder_input = keras.Input(shape=(16,), name="encoded_img")
x = layers.Reshape((4, 4, 1))(decoder_input)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu")(x)
x = layers.UpSampling2D(3)(x)
x = layers.Conv2DTranspose(16, 3, activation="relu")(x)
decoder_output = layers.Conv2DTranspose(1, 3, activation="relu")(x)
decoder = keras.Model(decoder_input, decoder_output, name="decoder")
decoder.summary()
autoencoder_input = keras.Input(shape=(28, 28, 1), name="img")
encoded_img = encoder(autoencoder_input)
decoded_img = decoder(encoded_img)
autoencoder = keras.Model(autoencoder_input, decoded_img, name="autoencoder")
autoencoder.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "encoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ original_img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">160</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,640</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,624</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ global_max_pooling2d_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalMaxPooling2D</span>) │ │ │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">18,672</span> (72.94 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">18,672</span> (72.94 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "decoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ encoded_img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ reshape_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Reshape</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">6</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">160</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,640</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ up_sampling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_6 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4,624</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_7 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">145</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">9,569</span> (37.38 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">9,569</span> (37.38 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "autoencoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ encoder (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,672</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ decoder (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,569</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,241</span> (110.32 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">28,241</span> (110.32 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
As you can see, the model can be nested: a model can contain sub-models
(since a model is just like a layer).
A common use case for model nesting is *ensembling*.
For example, here's how to ensemble a set of models into a single model
that averages their predictions:
```python
def get_model():
inputs = keras.Input(shape=(128,))
outputs = layers.Dense(1)(inputs)
return keras.Model(inputs, outputs)
model1 = get_model()
model2 = get_model()
model3 = get_model()
inputs = keras.Input(shape=(128,))
y1 = model1(inputs)
y2 = model2(inputs)
y3 = model3(inputs)
outputs = layers.average([y1, y2, y3])
ensemble_model = keras.Model(inputs=inputs, outputs=outputs)
```
---
## Manipulate complex graph topologies
### Models with multiple inputs and outputs
The functional API makes it easy to manipulate multiple inputs and outputs,
which is something the `Sequential` API cannot handle.
For example, if you're building a system for ranking customer issue tickets by
priority and routing them to the correct department,
then the model will have three inputs:
- the title of the ticket (text input),
- the text body of the ticket (text input), and
- any tags added by the user (categorical input)
This model will have two outputs:
- the priority score between 0 and 1 (scalar sigmoid output), and
- the department that should handle the ticket (softmax output
over the set of departments).
You can build this model in a few lines with the functional API:
```python
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs={"priority": priority_pred, "department": department_pred},
)
```
Now plot the model:
```python
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
```

When compiling this model, you can assign different losses to each output.
You can even assign different weights to each loss -- to modulate
their contribution to the total training loss.
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.BinaryCrossentropy(from_logits=True),
keras.losses.CategoricalCrossentropy(from_logits=True),
],
loss_weights=[1.0, 0.2],
)
```
Since the output layers have different names, you could also specify
the losses and loss weights with the corresponding layer names:
```python
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"priority": keras.losses.BinaryCrossentropy(from_logits=True),
"department": keras.losses.CategoricalCrossentropy(from_logits=True),
},
loss_weights={"priority": 1.0, "department": 0.2},
)
```
Train the model by passing dictionaries of NumPy arrays that map input and output names to data:
```python
# Dummy input data
title_data = np.random.randint(num_words, size=(1280, 10))
body_data = np.random.randint(num_words, size=(1280, 100))
tags_data = np.random.randint(2, size=(1280, num_tags)).astype("float32")
# Dummy target data
priority_targets = np.random.random(size=(1280, 1))
dept_targets = np.random.randint(2, size=(1280, num_departments))
model.fit(
{"title": title_data, "body": body_data, "tags": tags_data},
{"priority": priority_targets, "department": dept_targets},
epochs=2,
batch_size=32,
)
```
<div class="k-default-codeblock">
```
Epoch 1/2
40/40 ━━━━━━━━━━━━━━━━━━━━ 1s 12ms/step - loss: 1.2673
Epoch 2/2
40/40 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 1.2440
<keras.src.callbacks.history.History at 0x2bd054970>
```
</div>
When calling fit with a `Dataset` object, it should yield either a
tuple of lists like `([title_data, body_data, tags_data], [priority_targets, dept_targets])`
or a tuple of dictionaries like
`({'title': title_data, 'body': body_data, 'tags': tags_data}, {'priority': priority_targets, 'department': dept_targets})`.
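For instance, here is a minimal sketch using `tf.data` (this assumes TensorFlow is installed; the dictionary keys must match the input and output layer names used above):
```python
import tensorflow as tf

# Build a dataset that yields (inputs_dict, targets_dict) tuples.
train_ds = tf.data.Dataset.from_tensor_slices(
    (
        {"title": title_data, "body": body_data, "tags": tags_data},
        {"priority": priority_targets, "department": dept_targets},
    )
).batch(32)

model.fit(train_ds, epochs=2)
```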
For a more detailed explanation, refer to the
[training and evaluation](/guides/training_with_built_in_methods/) guide.
### A toy ResNet model
In addition to models with multiple inputs and outputs,
the functional API makes it easy to manipulate non-linear connectivity
topologies -- these are models with layers that are not connected sequentially,
which the `Sequential` API cannot handle.
A common use case for this is residual connections.
Let's build a toy ResNet model for CIFAR10 to demonstrate this:
```python
inputs = keras.Input(shape=(32, 32, 3), name="img")
x = layers.Conv2D(32, 3, activation="relu")(inputs)
x = layers.Conv2D(64, 3, activation="relu")(x)
block_1_output = layers.MaxPooling2D(3)(x)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_1_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_2_output = layers.add([x, block_1_output])
x = layers.Conv2D(64, 3, activation="relu", padding="same")(block_2_output)
x = layers.Conv2D(64, 3, activation="relu", padding="same")(x)
block_3_output = layers.add([x, block_2_output])
x = layers.Conv2D(64, 3, activation="relu")(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10)(x)
model = keras.Model(inputs, outputs, name="toy_resnet")
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "toy_resnet"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ img (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_8 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">30</span>, <span style="color: #00af00; text-decoration-color: #00af00">30</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">896</span> │ img[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_9 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">18,496</span> │ conv2d_8[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ max_pooling2d_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv2d_9[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_10 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ max_pooling2d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_11 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ conv2d_10[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv2d_11[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ max_pooling2d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_12 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_13 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ conv2d_12[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ add_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv2d_13[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ conv2d_14 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ global_average_poo… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv2d_14[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePool…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dense_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,640</span> │ global_average_pool… │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ dense_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dense_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,570</span> │ dropout[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">223,242</span> (872.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">223,242</span> (872.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
Plot the model:
```python
keras.utils.plot_model(model, "mini_resnet.png", show_shapes=True)
```

Now train the model:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=["acc"],
)
# We restrict the data to the first 1000 samples so as to limit execution time
# on Colab. Try to train on the entire dataset until convergence!
model.fit(
x_train[:1000],
y_train[:1000],
batch_size=64,
epochs=1,
validation_split=0.2,
)
```
<div class="k-default-codeblock">
```
13/13 ━━━━━━━━━━━━━━━━━━━━ 2s 135ms/step - acc: 0.0976 - loss: 2.3050 - val_acc: 0.1350 - val_loss: 2.3056
<keras.src.callbacks.history.History at 0x2a35d1b70>
```
</div>
---
## Shared layers
Another good use for the functional API is models that use *shared layers*.
Shared layers are layer instances that are reused multiple times in the same model --
they learn features that correspond to multiple paths in the graph-of-layers.
Shared layers are often used to encode inputs from similar spaces
(say, two different pieces of text that feature similar vocabulary).
They enable sharing of information across these different inputs,
and they make it possible to train such a model on less data.
If a given word is seen in one of the inputs,
that will benefit the processing of all inputs that pass through the shared layer.
To share a layer in the functional API, call the same layer instance multiple times.
For instance, here's an `Embedding` layer shared across two different text inputs:
```python
# Embedding for 1000 unique words mapped to 128-dimensional vectors
shared_embedding = layers.Embedding(1000, 128)
# Variable-length sequence of integers
text_input_a = keras.Input(shape=(None,), dtype="int32")
# Variable-length sequence of integers
text_input_b = keras.Input(shape=(None,), dtype="int32")
# Reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
```
---
## Extract and reuse nodes in the graph of layers
Because the graph of layers you are manipulating is a static data structure,
it can be accessed and inspected. And this is how you are able to plot
functional models as images.
This also means that you can access the activations of intermediate layers
("nodes" in the graph) and reuse them elsewhere --
which is very useful for something like feature extraction.
Let's look at an example. This is a VGG19 model with weights pretrained on ImageNet:
```python
vgg19 = keras.applications.VGG19()
```
And these are the intermediate activations of the model,
obtained by querying the graph data structure:
```python
features_list = [layer.output for layer in vgg19.layers]
```
Use these features to create a new feature-extraction model that returns
the values of the intermediate layer activations:
```python
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
img = np.random.random((1, 224, 224, 3)).astype("float32")
extracted_features = feat_extraction_model(img)
```
This comes in handy for tasks like
[neural style transfer](https://keras.io/examples/generative/neural_style_transfer/),
among other things.
---
## Extend the API using custom layers
`keras` includes a wide range of built-in layers, for example:
- Convolutional layers: `Conv1D`, `Conv2D`, `Conv3D`, `Conv2DTranspose`
- Pooling layers: `MaxPooling1D`, `MaxPooling2D`, `MaxPooling3D`, `AveragePooling1D`
- RNN layers: `GRU`, `LSTM`, `ConvLSTM2D`
- `BatchNormalization`, `Dropout`, `Embedding`, etc.
But if you don't find what you need, it's easy to extend the API by creating
your own layers. All layers subclass the `Layer` class and implement:
- a `call` method, which specifies the computation done by the layer.
- a `build` method, which creates the weights of the layer (this is just a style
convention, since you can also create weights in `__init__`).
To learn more about creating layers from scratch, read the
[custom layers and models](/guides/making_new_layers_and_models_via_subclassing) guide.
The following is a basic implementation of `keras.layers.Dense`:
```python
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
```
For serialization support in your custom layer, define a `get_config()`
method that returns the constructor arguments of the layer instance:
```python
class CustomDense(layers.Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="random_normal",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,), initializer="random_normal", trainable=True
)
def call(self, inputs):
return ops.matmul(inputs, self.w) + self.b
def get_config(self):
return {"units": self.units}
inputs = keras.Input((4,))
outputs = CustomDense(10)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config, custom_objects={"CustomDense": CustomDense})
```
Optionally, implement the class method `from_config(cls, config)` which is used
when recreating a layer instance given its config dictionary.
The default implementation of `from_config` is:
```python
def from_config(cls, config):
return cls(**config)
```
---
## When to use the functional API
Should you use the Keras functional API to create a new model,
or just subclass the `Model` class directly? In general, the functional API
is higher-level, easier and safer, and has a number of
features that subclassed models do not support.
However, model subclassing provides greater flexibility when building models
that are not easily expressible as directed acyclic graphs of layers.
For example, you could not implement a Tree-RNN with the functional API
and would have to subclass `Model` directly.
For an in-depth look at the differences between the functional API and
model subclassing, read
[What are Symbolic and Imperative APIs in TensorFlow 2.0?](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
### Functional API strengths:
The following properties are also true for Sequential models
(which are also data structures), but are not true for subclassed models
(which are Python bytecode, not data structures).
#### Less verbose
There is no `super().__init__(...)`, no `def call(self, ...):`, etc.
Compare:
```python
inputs = keras.Input(shape=(32,))
x = layers.Dense(64, activation='relu')(inputs)
outputs = layers.Dense(10)(x)
mlp = keras.Model(inputs, outputs)
```
With the subclassed version:
```python
class MLP(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense_1 = layers.Dense(64, activation='relu')
self.dense_2 = layers.Dense(10)
def call(self, inputs):
x = self.dense_1(inputs)
return self.dense_2(x)
# Instantiate the model.
mlp = MLP()
# Necessary to create the model's state.
# The model doesn't have a state until it's called at least once.
_ = mlp(ops.zeros((1, 32)))
```
#### Model validation while defining its connectivity graph
In the functional API, the input specification (shape and dtype) is created
in advance (using `Input`). Every time you call a layer,
the layer checks that the specification passed to it matches its assumptions,
and it will raise a helpful error message if not.
This guarantees that any model you can build with the functional API will run.
All debugging -- other than convergence-related debugging --
happens statically during the model construction and not at execution time.
This is similar to type checking in a compiler.
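For example, here is a quick sketch of the kind of error you get at construction time when a layer receives an input of the wrong rank (the exact error message may vary across Keras versions):
```python
inputs = keras.Input(shape=(32,))

try:
    # Conv2D expects 4D inputs of shape (batch, height, width, channels),
    # so calling it on a 2D tensor fails right away, during graph construction.
    outputs = layers.Conv2D(16, 3)(inputs)
except ValueError as e:
    print("Caught error during model construction:", e)
```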
#### A functional model is plottable and inspectable
You can plot the model as a graph, and you can easily access intermediate nodes
in this graph. For example, to extract and reuse the activations of intermediate
layers (as seen in a previous example):
```python
features_list = [layer.output for layer in vgg19.layers]
feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)
```
#### A functional model can be serialized or cloned
Because a functional model is a data structure rather than a piece of code,
it is safely serializable and can be saved as a single file
that allows you to recreate the exact same model
without having access to any of the original code.
See the [serialization & saving guide](/guides/serialization_and_saving/).
To serialize a subclassed model, it is necessary for the implementer
to specify a `get_config()`
and `from_config()` method at the model level.
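For instance, here is a minimal sketch of what serialization and cloning look like for a small functional model (the model and file name below are placeholders):
```python
# Build a tiny functional model just for this sketch.
sketch_inputs = keras.Input(shape=(4,))
sketch_outputs = layers.Dense(2)(sketch_inputs)
sketch_model = keras.Model(sketch_inputs, sketch_outputs)

# Save to a single `.keras` file, then recreate the exact same model from it.
sketch_model.save("sketch_model.keras")
restored_model = keras.models.load_model("sketch_model.keras")

# A functional model can also be cloned: same architecture, fresh weights.
cloned_model = keras.models.clone_model(sketch_model)
```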
### Functional API weakness:
#### It does not support dynamic architectures
The functional API treats models as DAGs of layers.
This is true for most deep learning architectures, but not all -- for example,
recursive networks or Tree RNNs do not follow this assumption and cannot
be implemented in the functional API.
---
## Mix-and-match API styles
Choosing between the functional API and `Model` subclassing isn't a
binary decision that restricts you to one category of models.
All models in the `keras` API can interact with each other, whether they're
`Sequential` models, functional models, or subclassed models that are written
from scratch.
You can always use a functional model or `Sequential` model
as part of a subclassed model or layer:
```python
units = 32
timesteps = 10
input_dim = 5
# Define a Functional model
inputs = keras.Input((None, units))
x = layers.GlobalAveragePooling1D()(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
# Our previously-defined Functional model
self.classifier = model
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
print(features.shape)
return self.classifier(features)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, timesteps, input_dim)))
```
<div class="k-default-codeblock">
```
(1, 10, 32)
(1, 10, 32)
```
</div>
You can use any subclassed layer or model in the functional API
as long as it implements a `call` method that follows one of the following patterns:
- `call(self, inputs, **kwargs)` --
Where `inputs` is a tensor or a nested structure of tensors (e.g. a list of tensors),
and where `**kwargs` are non-tensor arguments (non-inputs).
- `call(self, inputs, training=None, **kwargs)` --
Where `training` is a boolean indicating whether the layer should behave
in training mode or in inference mode.
- `call(self, inputs, mask=None, **kwargs)` --
Where `mask` is a boolean mask tensor (useful for RNNs, for instance).
- `call(self, inputs, training=None, mask=None, **kwargs)` --
Of course, you can have both masking and training-specific behavior at the same time.
Additionally, if you implement the `get_config` method on your custom Layer or model,
the functional models you create will still be serializable and cloneable.
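For instance, here is a minimal sketch of the `training=None` call pattern from the list above (`TrainingScaler` is a made-up layer for illustration, not a built-in Keras layer):
```python
class TrainingScaler(layers.Layer):
    """Scales its inputs at training time and passes them through at inference."""

    def __init__(self, rate=0.5, **kwargs):
        super().__init__(**kwargs)
        self.rate = rate

    def call(self, inputs, training=None):
        if training:
            return inputs * self.rate
        return inputs


inputs = keras.Input(shape=(8,))
x = layers.Dense(8, activation="relu")(inputs)
outputs = TrainingScaler(rate=0.5)(x)
scaler_model = keras.Model(inputs, outputs)
```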
Here's a quick example of a custom RNN, written from scratch,
being used in a functional model:
```python
units = 32
timesteps = 10
input_dim = 5
batch_size = 16
class CustomRNN(layers.Layer):
def __init__(self):
super().__init__()
self.units = units
self.projection_1 = layers.Dense(units=units, activation="tanh")
self.projection_2 = layers.Dense(units=units, activation="tanh")
self.classifier = layers.Dense(1)
def call(self, inputs):
outputs = []
state = ops.zeros(shape=(inputs.shape[0], self.units))
for t in range(inputs.shape[1]):
x = inputs[:, t, :]
h = self.projection_1(x)
y = h + self.projection_2(state)
state = y
outputs.append(y)
features = ops.stack(outputs, axis=1)
return self.classifier(features)
# Note that you specify a static batch size for the inputs with the `batch_shape`
# arg, because the inner computation of `CustomRNN` requires a static batch size
# (when you create the `state` zeros tensor).
inputs = keras.Input(batch_shape=(batch_size, timesteps, input_dim))
x = layers.Conv1D(32, 3)(inputs)
outputs = CustomRNN()(x)
model = keras.Model(inputs, outputs)
rnn_model = CustomRNN()
_ = rnn_model(ops.zeros((1, 10, 5)))
```
# Handling failed trials in KerasTuner
**Author:** Haifeng Jin<br>
**Date created:** 2023/02/28<br>
**Last modified:** 2023/02/28<br>
**Description:** The basics of fault tolerance configurations in KerasTuner.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_tuner/failed_trials.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_tuner/failed_trials.py)
---
## Introduction
A KerasTuner program may take a long time to run since each model may take a
long time to train. We do not want the program to fail just because some trials
failed randomly.
In this guide, we will show how to handle the failed trials in KerasTuner,
including:
* How to tolerate the failed trials during the search
* How to mark a trial as failed during building and evaluating the model
* How to terminate the search by raising a `FatalError`
---
## Setup
```python
!pip install keras-tuner -q
```
```python
import keras
from keras import layers
import keras_tuner
import numpy as np
```
---
## Tolerate failed trials
We will use the `max_retries_per_trial` and `max_consecutive_failed_trials`
arguments when initializing the tuners.
`max_retries_per_trial` controls the maximum number of retries to run if a trial
keeps failing. For example, if it is set to 3, the trial may run 4 times (1
failed run + 3 failed retries) before it is finally marked as failed. The
default value of `max_retries_per_trial` is 0.
`max_consecutive_failed_trials` controls how many consecutive failed trials
(failed trial here refers to a trial that failed all of its retries) occur
before terminating the search. For example, if it is set to 3 and Trial 2, Trial
3, and Trial 4 all failed, the search would be terminated. However, if it is set
to 3 and only Trial 2, Trial 3, Trial 5, and Trial 6 fail, the search would not
be terminated since the failed trials are not consecutive. The default value of
`max_consecutive_failed_trials` is 3.
The following code shows how these two arguments work in action.
* We define a search space with 2 hyperparameters for the number of units in the
2 dense layers.
* When the resulting model has more than 1,200 parameters, we raise a `ValueError`
because the model is too large.
```python
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
raise ValueError(f"Model too large! It contains {num_params} params.")
return model
```
We set up the tuner as follows.
* We set `max_retries_per_trial=3`.
* We set `max_consecutive_failed_trials=8`.
* We use `GridSearch` to enumerate all hyperparameter value combinations.
```python
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 12 Complete [00h 00m 00s]
```
</div>
<div class="k-default-codeblock">
```
Best val_loss So Far: 0.12375041842460632
Total elapsed time: 00h 00m 08s
Results summary
Results in ./untitled_project
Showing 10 best trials
Objective(name="val_loss", direction="min")
```
</div>
<div class="k-default-codeblock">
```
Trial 0003 summary
Hyperparameters:
units_1: 20
units_2: 10
Score: 0.12375041842460632
```
</div>
<div class="k-default-codeblock">
```
Trial 0001 summary
Hyperparameters:
units_1: 10
units_2: 20
Score: 0.12741881608963013
```
</div>
<div class="k-default-codeblock">
```
Trial 0002 summary
Hyperparameters:
units_1: 10
units_2: 30
Score: 0.13982832431793213
```
</div>
<div class="k-default-codeblock">
```
Trial 0000 summary
Hyperparameters:
units_1: 10
units_2: 10
Score: 0.1433391124010086
```
</div>
<div class="k-default-codeblock">
```
Trial 0005 summary
Hyperparameters:
units_1: 20
units_2: 30
Score: 0.14747518301010132
```
</div>
<div class="k-default-codeblock">
```
Trial 0006 summary
Hyperparameters:
units_1: 30
units_2: 10
Score: 0.15092280507087708
```
</div>
<div class="k-default-codeblock">
```
Trial 0004 summary
Hyperparameters:
units_1: 20
units_2: 20
Score: 0.21962997317314148
```
</div>
<div class="k-default-codeblock">
```
Trial 0007 summary
Hyperparameters:
units_1: 30
units_2: 20
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/966577796.py", line 19, in build_model
raise ValueError(f"Model too large! It contains {num_params} params.")
ValueError: Model too large! It contains 1271 params.
```
</div>
<div class="k-default-codeblock">
```
Trial 0008 summary
Hyperparameters:
units_1: 30
units_2: 30
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/966577796.py", line 19, in build_model
raise ValueError(f"Model too large! It contains {num_params} params.")
ValueError: Model too large! It contains 1591 params.
```
</div>
<div class="k-default-codeblock">
```
Trial 0009 summary
Hyperparameters:
units_1: 40
units_2: 10
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/966577796.py", line 19, in build_model
raise ValueError(f"Model too large! It contains {num_params} params.")
ValueError: Model too large! It contains 1261 params.
```
</div>
---
## Mark a trial as failed
When the model is too large, there is no point in retrying it: no matter how many
times we retry with the same hyperparameters, the model will always be too large.
We could set `max_retries_per_trial=0` to disable retries altogether. However, that
would skip retries for *all* errors, while we may still want to retry when other,
unexpected errors occur. Is there a better way to handle this situation?
We can raise a `FailedTrialError` to skip the retries. Whenever this error is
raised, the trial is not retried, but retries still happen when other errors
occur. An example is shown below.
```python
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, it skips the retries.
raise keras_tuner.errors.FailedTrialError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
# Print the results.
tuner.results_summary()
```
<div class="k-default-codeblock">
```
Trial 12 Complete [00h 00m 00s]
```
</div>
<div class="k-default-codeblock">
```
Best val_loss So Far: 0.08265472948551178
Total elapsed time: 00h 00m 05s
Results summary
Results in ./untitled_project
Showing 10 best trials
Objective(name="val_loss", direction="min")
```
</div>
<div class="k-default-codeblock">
```
Trial 0002 summary
Hyperparameters:
units_1: 10
units_2: 30
Score: 0.08265472948551178
```
</div>
<div class="k-default-codeblock">
```
Trial 0005 summary
Hyperparameters:
units_1: 20
units_2: 30
Score: 0.11731438338756561
```
</div>
<div class="k-default-codeblock">
```
Trial 0006 summary
Hyperparameters:
units_1: 30
units_2: 10
Score: 0.13600358366966248
```
</div>
<div class="k-default-codeblock">
```
Trial 0004 summary
Hyperparameters:
units_1: 20
units_2: 20
Score: 0.1465979516506195
```
</div>
<div class="k-default-codeblock">
```
Trial 0000 summary
Hyperparameters:
units_1: 10
units_2: 10
Score: 0.15967626869678497
```
</div>
<div class="k-default-codeblock">
```
Trial 0001 summary
Hyperparameters:
units_1: 10
units_2: 20
Score: 0.1646396517753601
```
</div>
<div class="k-default-codeblock">
```
Trial 0003 summary
Hyperparameters:
units_1: 20
units_2: 10
Score: 0.1696309596300125
```
</div>
<div class="k-default-codeblock">
```
Trial 0007 summary
Hyperparameters:
units_1: 30
units_2: 20
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/2463037569.py", line 20, in build_model
raise keras_tuner.errors.FailedTrialError(
keras_tuner.src.errors.FailedTrialError: Model too large! It contains 1271 params.
```
</div>
<div class="k-default-codeblock">
```
Trial 0008 summary
Hyperparameters:
units_1: 30
units_2: 30
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/2463037569.py", line 20, in build_model
raise keras_tuner.errors.FailedTrialError(
keras_tuner.src.errors.FailedTrialError: Model too large! It contains 1591 params.
```
</div>
<div class="k-default-codeblock">
```
Trial 0009 summary
Hyperparameters:
units_1: 40
units_2: 10
Traceback (most recent call last):
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 273, in _try_run_and_update_trial
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/base_tuner.py", line 238, in _run_and_update_trial
results = self.run_trial(trial, *fit_args, **fit_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 314, in run_trial
obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 232, in _build_and_fit_model
model = self._try_build(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 164, in _try_build
model = self._build_hypermodel(hp)
File "/home/codespace/.local/lib/python3.10/site-packages/keras_tuner/src/engine/tuner.py", line 155, in _build_hypermodel
model = self.hypermodel.build(hp)
File "/tmp/ipykernel_21713/2463037569.py", line 20, in build_model
raise keras_tuner.errors.FailedTrialError(
keras_tuner.src.errors.FailedTrialError: Model too large! It contains 1261 params.
```
</div>
---
## Terminate the search programmatically
When there is a bug in the code, we should terminate the search immediately and
fix the bug. You can terminate the search programmatically whenever a condition you
define is met. Raising a `FatalError` (or one of its subclasses `FatalValueError`,
`FatalTypeError`, or `FatalRuntimeError`) terminates the search regardless of the
`max_consecutive_failed_trials` argument.
The following example terminates the search when the model is too large.
```python
def build_model(hp):
# Define the 2 hyperparameters for the units in dense layers
units_1 = hp.Int("units_1", 10, 40, step=10)
units_2 = hp.Int("units_2", 10, 30, step=10)
# Define the model
model = keras.Sequential(
[
layers.Dense(units=units_1, input_shape=(20,)),
layers.Dense(units=units_2),
layers.Dense(units=1),
]
)
model.compile(loss="mse")
# Raise an error when the model is too large
num_params = model.count_params()
if num_params > 1200:
# When this error is raised, the search is terminated.
raise keras_tuner.errors.FatalError(
f"Model too large! It contains {num_params} params."
)
return model
tuner = keras_tuner.GridSearch(
hypermodel=build_model,
objective="val_loss",
overwrite=True,
max_retries_per_trial=3,
max_consecutive_failed_trials=8,
)
try:
# Use random data to train the model.
tuner.search(
x=np.random.rand(100, 20),
y=np.random.rand(100, 1),
validation_data=(
np.random.rand(100, 20),
np.random.rand(100, 1),
),
epochs=10,
)
except keras_tuner.errors.FatalError:
print("The search is terminated.")
```
<div class="k-default-codeblock">
```
Trial 7 Complete [00h 00m 01s]
val_loss: 0.14219732582569122
```
</div>
<div class="k-default-codeblock">
```
Best val_loss So Far: 0.09755773097276688
Total elapsed time: 00h 00m 04s
```
</div>
<div class="k-default-codeblock">
```
Search: Running Trial #8
```
</div>
<div class="k-default-codeblock">
```
Value |Best Value So Far |Hyperparameter
30 |10 |units_1
20 |20 |units_2
```
</div>
<div class="k-default-codeblock">
```
The search is terminated.
```
</div>
---
## Takeaways
In this guide, you learned how to handle failed trials in KerasTuner:
* Use `max_retries_per_trial` to specify the number of retries for a failed
trial.
* Use `max_consecutive_failed_trials` to specify the maximum number of consecutive
failed trials to tolerate.
* Raise `FailedTrialError` to directly mark a trial as failed and skip the
retries.
* Raise `FatalError`, `FatalValueError`, `FatalTypeError`, or `FatalRuntimeError`
to terminate the search immediately.
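As a recap, here is a minimal sketch (reusing the imports from the top of this guide)
that combines these options in a single `build_model`. The parameter-count threshold
and layer sizes are illustrative values, not recommendations.
```python
def build_model(hp):
    units = hp.Int("units", 10, 40, step=10)
    model = keras.Sequential(
        [
            layers.Dense(units=units, input_shape=(20,)),
            layers.Dense(units=1),
        ]
    )
    model.compile(loss="mse")
    if model.count_params() > 800:
        # Known-bad configuration: fail the trial immediately, without retries.
        raise keras_tuner.errors.FailedTrialError("Model too large.")
    # Any other exception raised here would be retried up to
    # `max_retries_per_trial` times before the trial is marked as failed.
    return model

tuner = keras_tuner.GridSearch(
    hypermodel=build_model,
    objective="val_loss",
    overwrite=True,
    max_retries_per_trial=3,  # retries for unexpected errors
    max_consecutive_failed_trials=8,  # tolerance before stopping the search
)
```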
| keras-io/guides/md/keras_tuner/failed_trials.md/0 | {
"file_path": "keras-io/guides/md/keras_tuner/failed_trials.md",
"repo_id": "keras-io",
"token_count": 7559
} | 99 |
# Writing a training loop from scratch
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/03/01<br>
**Last modified:** 2023/07/10<br>
**Description:** Complete guide to writing low-level training & evaluation loops.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/writing_a_training_loop_from_scratch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/writing_a_training_loop_from_scratch.py)
---
## Setup
```python
import tensorflow as tf
import keras
from keras import layers
import numpy as np
```
---
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](https://keras.io/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`. This is covered in the guide
[Customizing what happens in `fit()`](https://keras.io/guides/customizing_what_happens_in_fit/).
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
---
## Using the `GradientTape`: a first end-to-end example
Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of
the trainable weights of the layer with respect to a loss value. Using an optimizer
instance, you can use these gradients to update these variables (which you can
retrieve using `model.trainable_weights`).
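Before scaling this up to a full model, here is a minimal sketch of the mechanics on a
single scalar variable (a toy illustration, not part of the MNIST example below):
```python
w = tf.Variable(2.0)
with tf.GradientTape() as tape:
    loss = w * w  # a toy "loss": loss = w ** 2
grad = tape.gradient(loss, w)  # d(loss)/dw = 2 * w = 4.0
optimizer = keras.optimizers.SGD(learning_rate=0.1)
optimizer.apply_gradients([(grad, w)])  # w becomes 2.0 - 0.1 * 4.0 = 1.6
```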
Let's consider a simple MNIST model:
```python
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Let's train it using mini-batch gradient descent with a custom training loop.
First, we're going to need an optimizer, a loss function, and a dataset:
```python
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
```
Here's our training loop:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we open a `GradientTape()` scope
- Inside this scope, we call the model (forward pass) and compute the loss
- Outside the scope, we retrieve the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
```python
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables auto-differentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
logits = model(x_batch_train, training=True) # Logits for this minibatch
# Compute the loss value for this minibatch.
loss_value = loss_fn(y_batch_train, logits)
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, model.trainable_weights)
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %s samples" % ((step + 1) * batch_size))
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for one batch) at step 0: 120.0656
Seen so far: 64 samples
Training loss (for one batch) at step 200: 1.4296
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 1.0072
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.8556
Seen so far: 38464 samples
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for one batch) at step 0: 0.6670
Seen so far: 64 samples
Training loss (for one batch) at step 200: 0.3697
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 0.3445
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.4279
Seen so far: 38464 samples
```
</div>
---
## Low-level handling of metrics
Let's add metrics monitoring to this basic loop.
You can readily reuse the built-in metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_states()` when you need to clear the state of the metric
(typically at the end of an epoch)
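In isolation, that lifecycle looks like the following toy sketch (the values are
illustrative and not part of the MNIST example):
```python
acc = keras.metrics.SparseCategoricalAccuracy()
acc.update_state([1], [[0.1, 0.9]])  # one correct prediction
acc.update_state([0], [[0.2, 0.8]])  # one incorrect prediction
print(float(acc.result()))  # 0.5
acc.reset_states()  # clear the state, e.g. at the end of an epoch
```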
Let's use this knowledge to compute `SparseCategoricalAccuracy` on validation data at
the end of each epoch:
```python
# Get model
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
```
Here's our training & evaluation loop:
```python
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = model(x_batch_train, training=True)
loss_value = loss_fn(y_batch_train, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
# Update training metric.
train_acc_metric.update_state(y_batch_train, logits)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for one batch) at step 0: 154.5849
Seen so far: 64 samples
Training loss (for one batch) at step 200: 1.2994
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 1.0750
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 1.1264
Seen so far: 38464 samples
Training acc over epoch: 0.7203
Validation acc: 0.8233
Time taken: 7.95s
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for one batch) at step 0: 1.0552
Seen so far: 64 samples
Training loss (for one batch) at step 200: 0.8037
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 0.2875
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.5536
Seen so far: 38464 samples
Training acc over epoch: 0.8370
Validation acc: 0.8622
Time taken: 7.97s
```
</div>
---
## Speeding-up your training step with `tf.function`
The default runtime in TensorFlow 2 is
[eager execution](https://www.tensorflow.org/guide/eager).
As such, our training loop above executes eagerly.
This is great for debugging, but graph compilation has a definite performance
advantage. Describing your computation as a static graph enables the framework
to apply global performance optimizations. This is impossible when
the framework is constrained to greedily execute one operation after another,
with no knowledge of what comes next.
You can compile into a static graph any function that takes tensors as input.
Just add a `@tf.function` decorator on it, like this:
```python
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
```
Let's do the same with the evaluation step:
```python
@tf.function
def test_step(x, y):
val_logits = model(x, training=False)
val_acc_metric.update_state(y, val_logits)
```
Now, let's re-run our training loop with this compiled training step:
```python
import time
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * batch_size))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for one batch) at step 0: 0.4807
Seen so far: 64 samples
Training loss (for one batch) at step 200: 0.4289
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 0.6062
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.5791
Seen so far: 38464 samples
Training acc over epoch: 0.8666
Validation acc: 0.8798
Time taken: 1.45s
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for one batch) at step 0: 0.5122
Seen so far: 64 samples
Training loss (for one batch) at step 200: 0.4184
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 0.2736
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.5048
Seen so far: 38464 samples
Training acc over epoch: 0.8823
Validation acc: 0.8872
Time taken: 1.11s
```
</div>
Much faster, isn't it?
---
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values is available via the property `model.losses`
at the end of the forward pass.
If you want to use these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, which creates an activity regularization loss:
```python
@keras.saving.register_keras_serializable()
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * tf.reduce_sum(inputs))
return inputs
```
Let's build a really simple model that uses it:
```python
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Here's what our training step should look like now:
```python
@tf.function
def train_step(x, y):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
# Add any extra losses created during the forward pass.
loss_value += sum(model.losses)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_acc_metric.update_state(y, logits)
return loss_value
```
---
## Summary
Now you know everything there is to know about using built-in training loops and
writing your own from scratch.
To conclude, here's a simple end-to-end example that ties together everything
you've learned in this guide: a DCGAN trained on MNIST digits.
---
## End-to-end example: a GAN training loop from scratch
You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new
images that look almost real, by learning the latent distribution of a training
dataset of images (the "latent space" of the images).
A GAN is made of two parts: a "generator" model that maps points in the latent
space to points in image space, and a "discriminator" model, a classifier
that can tell the difference between real images (from the training dataset)
and fake images (the output of the generator network).
A GAN training loop looks like this:
1) Train the discriminator.
- Sample a batch of random points in the latent space.
- Turn the points into fake images via the "generator" model.
- Get a batch of real images and combine them with the generated images.
- Train the "discriminator" model to classify generated vs. real images.
2) Train the generator.
- Sample random points in the latent space.
- Turn the points into fake images via the "generator" network.
- Train the "generator" model to "fool" the discriminator and classify the fake images
as real.
For a much more detailed overview of how GANs work, see
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python).
Let's implement this training loop. First, create the discriminator meant to classify
fake vs real digits:
```python
discriminator = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
discriminator.summary()
```
<div class="k-default-codeblock">
```
Model: "discriminator"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 14, 14, 64) 640
leaky_re_lu (LeakyReLU) (None, 14, 14, 64) 0
conv2d_1 (Conv2D) (None, 7, 7, 128) 73856
leaky_re_lu_1 (LeakyReLU) (None, 7, 7, 128) 0
global_max_pooling2d (Glob (None, 128) 0
alMaxPooling2D)
dense_4 (Dense) (None, 1) 129
=================================================================
Total params: 74625 (291.50 KB)
Trainable params: 74625 (291.50 KB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
```
</div>
Then let's create a generator network
that turns latent vectors into outputs of shape `(28, 28, 1)` (representing
MNIST digits):
```python
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
# We want to generate 128 coefficients to reshape into a 7x7x128 map
layers.Dense(7 * 7 * 128),
layers.LeakyReLU(alpha=0.2),
layers.Reshape((7, 7, 128)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(alpha=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
```
Here's the key bit: the training loop. As you can see, it is quite straightforward. The
training step function only takes 17 lines.
```python
# Instantiate one optimizer for the discriminator and another for the generator.
d_optimizer = keras.optimizers.Adam(learning_rate=0.0003)
g_optimizer = keras.optimizers.Adam(learning_rate=0.0004)
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
@tf.function
def train_step(real_images):
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator(random_latent_vectors)
# Combine them with real images
combined_images = tf.concat([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = tf.concat(
[tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(labels.shape)
# Train the discriminator
with tf.GradientTape() as tape:
predictions = discriminator(combined_images)
d_loss = loss_fn(labels, predictions)
grads = tape.gradient(d_loss, discriminator.trainable_weights)
d_optimizer.apply_gradients(zip(grads, discriminator.trainable_weights))
# Sample random points in the latent space
random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_labels = tf.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = discriminator(generator(random_latent_vectors))
g_loss = loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, generator.trainable_weights)
g_optimizer.apply_gradients(zip(grads, generator.trainable_weights))
return d_loss, g_loss, generated_images
```
Let's train our GAN by repeatedly calling `train_step` on batches of images.
Since our discriminator and generator are convnets, you're going to want to
run this code on a GPU.
```python
import os
# Prepare the dataset. We use both the training & test MNIST digits.
batch_size = 64
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
dataset = tf.data.Dataset.from_tensor_slices(all_digits)
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
epochs = 1 # In practice you need at least 20 epochs to generate nice digits.
save_dir = "./"
for epoch in range(epochs):
print("\nStart epoch", epoch)
for step, real_images in enumerate(dataset):
# Train the discriminator & generator on one batch of real images.
d_loss, g_loss, generated_images = train_step(real_images)
# Logging.
if step % 200 == 0:
# Print metrics
print("discriminator loss at step %d: %.2f" % (step, d_loss))
print("adversarial loss at step %d: %.2f" % (step, g_loss))
# Save one generated image
img = keras.utils.array_to_img(generated_images[0] * 255.0, scale=False)
img.save(os.path.join(save_dir, "generated_img" + str(step) + ".png"))
# To limit execution time we stop after 10 steps.
# Remove the lines below to actually train the model!
if step > 10:
break
```
<div class="k-default-codeblock">
```
Start epoch 0
discriminator loss at step 0: 0.68
adversarial loss at step 0: 0.69
```
</div>
That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the
Colab GPU.
| keras-io/guides/md/writing_a_training_loop_from_scratch.md/0 | {
"file_path": "keras-io/guides/md/writing_a_training_loop_from_scratch.md",
"repo_id": "keras-io",
"token_count": 8557
} | 100 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/preprocessing_layers/multi_segment_packer/'" />
| keras-io/redirects/api/keras_nlp/layers/multi_segment_packer/index.html/0 | {
"file_path": "keras-io/redirects/api/keras_nlp/layers/multi_segment_packer/index.html",
"repo_id": "keras-io",
"token_count": 49
} | 101 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/datasets/'" />
| keras-io/redirects/datasets/index.html/0 | {
"file_path": "keras-io/redirects/datasets/index.html",
"repo_id": "keras-io",
"token_count": 33
} | 102 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/locally_connected_layers/'" />
| keras-io/redirects/layers/local/index.html/0 | {
"file_path": "keras-io/redirects/layers/local/index.html",
"repo_id": "keras-io",
"token_count": 40
} | 103 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/preprocessing/timeseries/'" />
| keras-io/redirects/preprocessing/sequence/index.html/0 | {
"file_path": "keras-io/redirects/preprocessing/sequence/index.html",
"repo_id": "keras-io",
"token_count": 35
} | 104 |
try:
import tf_keras
except Exception as e:
print(f"Could not import tf_keras. Exception: {e}")
tf_keras = None
if tf_keras:
parts = tf_keras.__version__.split(".")
tf_keras_version = parts[0] + "." + parts[1]
else:
tf_keras_version = "None"
# In order to refresh the pages for an old version (e.g. 2.14),
# you will need to re-run `python autogen.py make` after updating
# the tf_keras version in autogen.py and making sure to install
# the targeted tf_keras version locally. When you do this,
# `/2.14/api/` (for instance) will be regenerated. You can then
# just reupload, which won't affect the directories for any other
# version number.
KERAS2_API_MASTER = {
"path": tf_keras_version + "/api/",
"title": "Keras 2 API documentation",
"toc": True,
"children": [
{
"path": "models/",
"title": "Models API",
"toc": True,
"children": [
{
"path": "model",
"title": "The Model class",
"generate": [
"tf_keras.Model",
"tf_keras.Model.summary",
"tf_keras.Model.get_layer",
],
},
{
"path": "sequential",
"title": "The Sequential class",
"generate": [
"tf_keras.Sequential",
"tf_keras.Sequential.add",
"tf_keras.Sequential.pop",
],
},
{
"path": "model_training_apis",
"title": "Model training APIs",
"generate": [
"tf_keras.Model.compile",
"tf_keras.Model.fit",
"tf_keras.Model.evaluate",
"tf_keras.Model.predict",
"tf_keras.Model.train_on_batch",
"tf_keras.Model.test_on_batch",
"tf_keras.Model.predict_on_batch",
"tf_keras.Model.run_eagerly",
],
},
{
"path": "model_saving_apis/",
"title": "Saving & serialization",
"toc": True,
"children": [
{
"path": "model_saving_and_loading",
"title": "Whole model saving & loading",
"generate": [
"tf_keras.Model.save",
"tf_keras.saving.save_model",
"tf_keras.saving.load_model",
],
},
{
"path": "weights_saving_and_loading",
"title": "Weights-only saving & loading",
"generate": [
"tf_keras.Model.get_weights",
"tf_keras.Model.set_weights",
"tf_keras.Model.save_weights",
"tf_keras.Model.load_weights",
],
},
{
"path": "model_config_serialization",
"title": "Model config serialization",
"generate": [
"tf_keras.Model.get_config",
"tf_keras.Model.from_config",
"tf_keras.models.clone_model",
],
},
{
"path": "export",
"title": "Model export for inference",
"generate": [
"tf_keras.export.ExportArchive",
],
},
{
"path": "serialization_utils",
"title": "Serialization utilities",
"generate": [
"tf_keras.utils.serialize_keras_object", # TODO: move to saving
"tf_keras.utils.deserialize_keras_object", # TODO: move to saving
"tf_keras.saving.custom_object_scope",
"tf_keras.saving.get_custom_objects",
"tf_keras.saving.register_keras_serializable",
],
},
],
},
],
},
{
"path": "layers/",
"title": "Layers API",
"toc": True,
"children": [
{
"path": "base_layer",
"title": "The base Layer class",
"generate": [
"tf_keras.layers.Layer",
"tf_keras.layers.Layer.weights",
"tf_keras.layers.Layer.trainable_weights",
"tf_keras.layers.Layer.non_trainable_weights",
"tf_keras.layers.Layer.add_weight",
"tf_keras.layers.Layer.trainable",
"tf_keras.layers.Layer.get_weights",
"tf_keras.layers.Layer.set_weights",
"tf_keras.Model.get_config",
"tf_keras.layers.Layer.add_loss",
"tf_keras.layers.Layer.losses",
],
},
{
"path": "activations",
"title": "Layer activations",
"generate": [
"tf_keras.activations.relu",
"tf_keras.activations.sigmoid",
"tf_keras.activations.softmax",
"tf_keras.activations.softplus",
"tf_keras.activations.softsign",
"tf_keras.activations.tanh",
"tf_keras.activations.selu",
"tf_keras.activations.elu",
"tf_keras.activations.exponential",
],
},
{
"path": "initializers",
"title": "Layer weight initializers",
"generate": [
"tf_keras.initializers.RandomNormal",
"tf_keras.initializers.RandomUniform",
"tf_keras.initializers.TruncatedNormal",
"tf_keras.initializers.Zeros",
"tf_keras.initializers.Ones",
"tf_keras.initializers.GlorotNormal",
"tf_keras.initializers.GlorotUniform",
"tf_keras.initializers.HeNormal",
"tf_keras.initializers.HeUniform",
"tf_keras.initializers.Identity",
"tf_keras.initializers.Orthogonal",
"tf_keras.initializers.Constant",
"tf_keras.initializers.VarianceScaling",
],
},
{
"path": "regularizers",
"title": "Layer weight regularizers",
"generate": [
"tf_keras.regularizers.L1",
"tf_keras.regularizers.L2",
"tf_keras.regularizers.L1L2",
"tf_keras.regularizers.OrthogonalRegularizer",
],
},
{
"path": "constraints",
"title": "Layer weight constraints",
"generate": [
"tf_keras.constraints.MaxNorm",
"tf_keras.constraints.MinMaxNorm",
"tf_keras.constraints.NonNeg",
"tf_keras.constraints.UnitNorm",
"tf_keras.constraints.RadialConstraint",
],
},
{
"path": "core_layers/",
"title": "Core layers",
"toc": True,
"children": [
{
"path": "input",
"title": "Input object",
"generate": ["tf_keras.Input"],
},
{
"path": "dense",
"title": "Dense layer",
"generate": ["tf_keras.layers.Dense"],
},
{
"path": "activation",
"title": "Activation layer",
"generate": ["tf_keras.layers.Activation"],
},
{
"path": "embedding",
"title": "Embedding layer",
"generate": ["tf_keras.layers.Embedding"],
},
{
"path": "masking",
"title": "Masking layer",
"generate": ["tf_keras.layers.Masking"],
},
{
"path": "lambda",
"title": "Lambda layer",
"generate": ["tf_keras.layers.Lambda"],
},
],
},
{
"path": "convolution_layers/",
"title": "Convolution layers",
"toc": True,
"children": [
{
"path": "convolution1d",
"title": "Conv1D layer",
"generate": ["tf_keras.layers.Conv1D"],
},
{
"path": "convolution2d",
"title": "Conv2D layer",
"generate": ["tf_keras.layers.Conv2D"],
},
{
"path": "convolution3d",
"title": "Conv3D layer",
"generate": ["tf_keras.layers.Conv3D"],
},
{
"path": "separable_convolution1d",
"title": "SeparableConv1D layer",
"generate": ["tf_keras.layers.SeparableConv1D"],
},
{
"path": "separable_convolution2d",
"title": "SeparableConv2D layer",
"generate": ["tf_keras.layers.SeparableConv2D"],
},
{
"path": "depthwise_convolution2d",
"title": "DepthwiseConv2D layer",
"generate": ["tf_keras.layers.DepthwiseConv2D"],
},
{
"path": "convolution1d_transpose",
"title": "Conv1DTranspose layer",
"generate": ["tf_keras.layers.Conv1DTranspose"],
},
{
"path": "convolution2d_transpose",
"title": "Conv2DTranspose layer",
"generate": ["tf_keras.layers.Conv2DTranspose"],
},
{
"path": "convolution3d_transpose",
"title": "Conv3DTranspose layer",
"generate": ["tf_keras.layers.Conv3DTranspose"],
},
],
},
{
"path": "pooling_layers/",
"title": "Pooling layers",
"toc": True,
"children": [
{
"path": "max_pooling1d",
"title": "MaxPooling1D layer",
"generate": ["tf_keras.layers.MaxPooling1D"],
},
{
"path": "max_pooling2d",
"title": "MaxPooling2D layer",
"generate": ["tf_keras.layers.MaxPooling2D"],
},
{
"path": "max_pooling3d",
"title": "MaxPooling3D layer",
"generate": ["tf_keras.layers.MaxPooling3D"],
},
{
"path": "average_pooling1d",
"title": "AveragePooling1D layer",
"generate": ["tf_keras.layers.AveragePooling1D"],
},
{
"path": "average_pooling2d",
"title": "AveragePooling2D layer",
"generate": ["tf_keras.layers.AveragePooling2D"],
},
{
"path": "average_pooling3d",
"title": "AveragePooling3D layer",
"generate": ["tf_keras.layers.AveragePooling3D"],
},
{
"path": "global_max_pooling1d",
"title": "GlobalMaxPooling1D layer",
"generate": ["tf_keras.layers.GlobalMaxPooling1D"],
},
{
"path": "global_max_pooling2d",
"title": "GlobalMaxPooling2D layer",
"generate": ["tf_keras.layers.GlobalMaxPooling2D"],
},
{
"path": "global_max_pooling3d",
"title": "GlobalMaxPooling3D layer",
"generate": ["tf_keras.layers.GlobalMaxPooling3D"],
},
{
"path": "global_average_pooling1d",
"title": "GlobalAveragePooling1D layer",
"generate": ["tf_keras.layers.GlobalAveragePooling1D"],
},
{
"path": "global_average_pooling2d",
"title": "GlobalAveragePooling2D layer",
"generate": ["tf_keras.layers.GlobalAveragePooling2D"],
},
{
"path": "global_average_pooling3d",
"title": "GlobalAveragePooling3D layer",
"generate": ["tf_keras.layers.GlobalAveragePooling3D"],
},
],
},
{
"path": "recurrent_layers/",
"title": "Recurrent layers",
"toc": True,
"children": [
{
"path": "lstm",
"title": "LSTM layer",
"generate": ["tf_keras.layers.LSTM"],
},
{
"path": "gru",
"title": "GRU layer",
"generate": ["tf_keras.layers.GRU"],
},
{
"path": "simple_rnn",
"title": "SimpleRNN layer",
"generate": ["tf_keras.layers.SimpleRNN"],
},
{
"path": "time_distributed",
"title": "TimeDistributed layer",
"generate": ["tf_keras.layers.TimeDistributed"],
},
{
"path": "bidirectional",
"title": "Bidirectional layer",
"generate": ["tf_keras.layers.Bidirectional"],
},
{
"path": "conv_lstm1d",
"title": "ConvLSTM1D layer",
"generate": ["tf_keras.layers.ConvLSTM1D"],
},
{
"path": "conv_lstm2d",
"title": "ConvLSTM2D layer",
"generate": ["tf_keras.layers.ConvLSTM2D"],
},
{
"path": "conv_lstm3d",
"title": "ConvLSTM3D layer",
"generate": ["tf_keras.layers.ConvLSTM3D"],
},
{
"path": "rnn",
"title": "Base RNN layer",
"generate": ["tf_keras.layers.RNN"],
},
],
},
{
"path": "preprocessing_layers/",
"title": "Preprocessing layers",
"toc": True,
"children": [
{
"path": "text/",
"title": "Text preprocessing",
"toc": True,
"children": [
{
"path": "text_vectorization",
"title": "TextVectorization layer",
"generate": ["tf_keras.layers.TextVectorization"],
},
],
},
{
"path": "numerical/",
"title": "Numerical features preprocessing layers",
"toc": True,
"children": [
{
"path": "normalization",
"title": "Normalization layer",
"generate": ["tf_keras.layers.Normalization"],
},
{
"path": "discretization",
"title": "Discretization layer",
"generate": ["tf_keras.layers.Discretization"],
},
],
},
{
"path": "categorical/",
"title": "Categorical features preprocessing layers",
"toc": True,
"children": [
{
"path": "category_encoding",
"title": "CategoryEncoding layer",
"generate": ["tf_keras.layers.CategoryEncoding"],
},
{
"path": "hashing",
"title": "Hashing layer",
"generate": ["tf_keras.layers.Hashing"],
},
{
"path": "hashed_crossing",
"title": "HashedCrossing layer",
"generate": ["tf_keras.layers.HashedCrossing"],
},
{
"path": "string_lookup",
"title": "StringLookup layer",
"generate": ["tf_keras.layers.StringLookup"],
},
{
"path": "integer_lookup",
"title": "IntegerLookup layer",
"generate": ["tf_keras.layers.IntegerLookup"],
},
],
},
{
"path": "image_preprocessing/",
"title": "Image preprocessing layers",
"toc": True,
"children": [
{
"path": "resizing",
"title": "Resizing layer",
"generate": ["tf_keras.layers.Resizing"],
},
{
"path": "rescaling",
"title": "Rescaling layer",
"generate": ["tf_keras.layers.Rescaling"],
},
{
"path": "center_crop",
"title": "CenterCrop layer",
"generate": ["tf_keras.layers.CenterCrop"],
},
],
},
{
"path": "image_augmentation/",
"title": "Image augmentation layers",
"toc": True,
"children": [
{
"path": "random_crop",
"title": "RandomCrop layer",
"generate": ["tf_keras.layers.RandomCrop"],
},
{
"path": "random_flip",
"title": "RandomFlip layer",
"generate": ["tf_keras.layers.RandomFlip"],
},
{
"path": "random_translation",
"title": "RandomTranslation layer",
"generate": ["tf_keras.layers.RandomTranslation"],
},
{
"path": "random_rotation",
"title": "RandomRotation layer",
"generate": ["tf_keras.layers.RandomRotation"],
},
{
"path": "random_zoom",
"title": "RandomZoom layer",
"generate": ["tf_keras.layers.RandomZoom"],
},
{
"path": "random_contrast",
"title": "RandomContrast layer",
"generate": ["tf_keras.layers.RandomContrast"],
},
{
"path": "random_brightness",
"title": "RandomBrightness layer",
"generate": ["tf_keras.layers.RandomBrightness"],
},
],
},
],
},
{
"path": "normalization_layers/",
"title": "Normalization layers",
"toc": True,
"children": [
{
"path": "batch_normalization",
"title": "BatchNormalization layer",
"generate": ["tf_keras.layers.BatchNormalization"],
},
{
"path": "layer_normalization",
"title": "LayerNormalization layer",
"generate": ["tf_keras.layers.LayerNormalization"],
},
{
"path": "unit_normalization",
"title": "UnitNormalization layer",
"generate": ["tf_keras.layers.UnitNormalization"],
},
{
"path": "group_normalization",
"title": "GroupNormalization layer",
"generate": ["tf_keras.layers.GroupNormalization"],
},
],
},
{
"path": "regularization_layers/",
"title": "Regularization layers",
"toc": True,
"children": [
{
"path": "dropout",
"title": "Dropout layer",
"generate": ["tf_keras.layers.Dropout"],
},
{
"path": "spatial_dropout1d",
"title": "SpatialDropout1D layer",
"generate": ["tf_keras.layers.SpatialDropout1D"],
},
{
"path": "spatial_dropout2d",
"title": "SpatialDropout2D layer",
"generate": ["tf_keras.layers.SpatialDropout2D"],
},
{
"path": "spatial_dropout3d",
"title": "SpatialDropout3D layer",
"generate": ["tf_keras.layers.SpatialDropout3D"],
},
{
"path": "gaussian_dropout",
"title": "GaussianDropout layer",
"generate": ["tf_keras.layers.GaussianDropout"],
},
{
"path": "gaussian_noise",
"title": "GaussianNoise layer",
"generate": ["tf_keras.layers.GaussianNoise"],
},
{
"path": "activity_regularization",
"title": "ActivityRegularization layer",
"generate": ["tf_keras.layers.ActivityRegularization"],
},
],
},
{
"path": "attention_layers/",
"title": "Attention layers",
"toc": True,
"children": [
{
"path": "multi_head_attention",
"title": "MultiHeadAttention layer",
"generate": ["tf_keras.layers.MultiHeadAttention"],
},
{
"path": "attention",
"title": "Attention layer",
"generate": ["tf_keras.layers.Attention"],
},
{
"path": "additive_attention",
"title": "AdditiveAttention layer",
"generate": ["tf_keras.layers.AdditiveAttention"],
},
],
},
{
"path": "reshaping_layers/",
"title": "Reshaping layers",
"toc": True,
"children": [
{
"path": "reshape",
"title": "Reshape layer",
"generate": ["tf_keras.layers.Reshape"],
},
{
"path": "flatten",
"title": "Flatten layer",
"generate": ["tf_keras.layers.Flatten"],
},
{
"path": "repeat_vector",
"title": "RepeatVector layer",
"generate": ["tf_keras.layers.RepeatVector"],
},
{
"path": "permute",
"title": "Permute layer",
"generate": ["tf_keras.layers.Permute"],
},
{
"path": "cropping1d",
"title": "Cropping1D layer",
"generate": ["tf_keras.layers.Cropping1D"],
},
{
"path": "cropping2d",
"title": "Cropping2D layer",
"generate": ["tf_keras.layers.Cropping2D"],
},
{
"path": "cropping3d",
"title": "Cropping3D layer",
"generate": ["tf_keras.layers.Cropping3D"],
},
{
"path": "up_sampling1d",
"title": "UpSampling1D layer",
"generate": ["tf_keras.layers.UpSampling1D"],
},
{
"path": "up_sampling2d",
"title": "UpSampling2D layer",
"generate": ["tf_keras.layers.UpSampling2D"],
},
{
"path": "up_sampling3d",
"title": "UpSampling3D layer",
"generate": ["tf_keras.layers.UpSampling3D"],
},
{
"path": "zero_padding1d",
"title": "ZeroPadding1D layer",
"generate": ["tf_keras.layers.ZeroPadding1D"],
},
{
"path": "zero_padding2d",
"title": "ZeroPadding2D layer",
"generate": ["tf_keras.layers.ZeroPadding2D"],
},
{
"path": "zero_padding3d",
"title": "ZeroPadding3D layer",
"generate": ["tf_keras.layers.ZeroPadding3D"],
},
],
},
{
"path": "merging_layers/",
"title": "Merging layers",
"toc": True,
"children": [
{
"path": "concatenate",
"title": "Concatenate layer",
"generate": ["tf_keras.layers.Concatenate"],
},
{
"path": "average",
"title": "Average layer",
"generate": ["tf_keras.layers.Average"],
},
{
"path": "maximum",
"title": "Maximum layer",
"generate": ["tf_keras.layers.Maximum"],
},
{
"path": "minimum",
"title": "Minimum layer",
"generate": ["tf_keras.layers.Minimum"],
},
{
"path": "add",
"title": "Add layer",
"generate": ["tf_keras.layers.Add"],
},
{
"path": "subtract",
"title": "Subtract layer",
"generate": ["tf_keras.layers.Subtract"],
},
{
"path": "multiply",
"title": "Multiply layer",
"generate": ["tf_keras.layers.Multiply"],
},
{
"path": "dot",
"title": "Dot layer",
"generate": ["tf_keras.layers.Dot"],
},
],
},
{
"path": "activation_layers/",
"title": "Activation layers",
"toc": True,
"children": [
{
"path": "relu",
"title": "ReLU layer",
"generate": ["tf_keras.layers.ReLU"],
},
{
"path": "softmax",
"title": "Softmax layer",
"generate": ["tf_keras.layers.Softmax"],
},
{
"path": "leaky_relu",
"title": "LeakyReLU layer",
"generate": ["tf_keras.layers.LeakyReLU"],
},
{
"path": "prelu",
"title": "PReLU layer",
"generate": ["tf_keras.layers.PReLU"],
},
{
"path": "elu",
"title": "ELU layer",
"generate": ["tf_keras.layers.ELU"],
},
{
"path": "threshold_relu",
"title": "ThresholdedReLU layer",
"generate": ["tf_keras.layers.ThresholdedReLU"],
},
],
},
],
},
{
"path": "callbacks/",
"title": "Callbacks API",
"toc": True,
"children": [
{
"path": "base_callback",
"title": "Base Callback class",
"generate": ["tf_keras.callbacks.Callback"],
},
{
"path": "model_checkpoint",
"title": "ModelCheckpoint",
"generate": ["tf_keras.callbacks.ModelCheckpoint"],
},
{
"path": "backup_and_restore",
"title": "BackupAndRestore",
"generate": ["tf_keras.callbacks.BackupAndRestore"],
},
{
"path": "tensorboard",
"title": "TensorBoard",
"generate": ["tf_keras.callbacks.TensorBoard"],
},
{
"path": "early_stopping",
"title": "EarlyStopping",
"generate": ["tf_keras.callbacks.EarlyStopping"],
},
{ # LEGACY
"path": "learning_rate_scheduler",
"title": "LearningRateScheduler",
"generate": ["tf_keras.callbacks.LearningRateScheduler"],
},
{
"path": "reduce_lr_on_plateau",
"title": "ReduceLROnPlateau",
"generate": ["tf_keras.callbacks.ReduceLROnPlateau"],
},
{
"path": "remote_monitor",
"title": "RemoteMonitor",
"generate": ["tf_keras.callbacks.RemoteMonitor"],
},
{
"path": "lambda_callback",
"title": "LambdaCallback",
"generate": ["tf_keras.callbacks.LambdaCallback"],
},
{
"path": "terminate_on_nan",
"title": "TerminateOnNaN",
"generate": ["tf_keras.callbacks.TerminateOnNaN"],
},
{
"path": "csv_logger",
"title": "CSVLogger",
"generate": ["tf_keras.callbacks.CSVLogger"],
},
{
"path": "progbar_logger",
"title": "ProgbarLogger",
"generate": ["tf_keras.callbacks.ProgbarLogger"],
},
],
},
{
"path": "optimizers/",
"title": "Optimizers",
"toc": True,
"generate": [
"tf_keras.optimizers.Optimizer.apply_gradients",
"tf_keras.optimizers.Optimizer.variables",
],
"children": [
{
"path": "sgd",
"title": "SGD",
"generate": ["tf_keras.optimizers.SGD"],
},
{
"path": "rmsprop",
"title": "RMSprop",
"generate": ["tf_keras.optimizers.RMSprop"],
},
{
"path": "adam",
"title": "Adam",
"generate": ["tf_keras.optimizers.Adam"],
},
{
"path": "adamw",
"title": "AdamW",
"generate": ["tf_keras.optimizers.AdamW"],
},
{
"path": "adadelta",
"title": "Adadelta",
"generate": ["tf_keras.optimizers.Adadelta"],
},
{
"path": "adagrad",
"title": "Adagrad",
"generate": ["tf_keras.optimizers.Adagrad"],
},
{
"path": "adamax",
"title": "Adamax",
"generate": ["tf_keras.optimizers.Adamax"],
},
{
"path": "adafactor",
"title": "Adafactor",
"generate": ["tf_keras.optimizers.Adafactor"],
},
{
"path": "Nadam",
"title": "Nadam",
"generate": ["tf_keras.optimizers.Nadam"],
},
{
"path": "ftrl",
"title": "Ftrl",
"generate": ["tf_keras.optimizers.Ftrl"],
},
{
"path": "learning_rate_schedules/",
"title": "Learning rate schedules API",
"toc": True,
"skip_from_toc": True,
"children": [
{
"path": "exponential_decay",
"title": "ExponentialDecay",
"generate": [
"tf_keras.optimizers.schedules.ExponentialDecay"
],
},
{
"path": "piecewise_constant_decay",
"title": "PiecewiseConstantDecay",
"generate": [
"tf_keras.optimizers.schedules.PiecewiseConstantDecay"
],
},
{
"path": "polynomial_decay",
"title": "PolynomialDecay",
"generate": [
"tf_keras.optimizers.schedules.PolynomialDecay"
],
},
{
"path": "inverse_time_decay",
"title": "InverseTimeDecay",
"generate": [
"tf_keras.optimizers.schedules.InverseTimeDecay"
],
},
{
"path": "cosine_decay",
"title": "CosineDecay",
"generate": ["tf_keras.optimizers.schedules.CosineDecay"],
},
{
"path": "cosine_decay_restarts",
"title": "CosineDecayRestarts",
"generate": [
"tf_keras.optimizers.schedules.CosineDecayRestarts"
],
},
],
},
],
},
{
"path": "metrics/",
"title": "Metrics",
"toc": True,
"children": [
{
"path": "accuracy_metrics",
"title": "Accuracy metrics",
"generate": [
"tf_keras.metrics.Accuracy",
"tf_keras.metrics.BinaryAccuracy",
"tf_keras.metrics.CategoricalAccuracy",
"tf_keras.metrics.SparseCategoricalAccuracy",
"tf_keras.metrics.TopKCategoricalAccuracy",
"tf_keras.metrics.SparseTopKCategoricalAccuracy",
],
},
{
"path": "probabilistic_metrics",
"title": "Probabilistic metrics",
"generate": [
"tf_keras.metrics.BinaryCrossentropy",
"tf_keras.metrics.CategoricalCrossentropy",
"tf_keras.metrics.SparseCategoricalCrossentropy",
"tf_keras.metrics.KLDivergence",
"tf_keras.metrics.Poisson",
],
},
{
"path": "regression_metrics",
"title": "Regression metrics",
"generate": [
"tf_keras.metrics.MeanSquaredError",
"tf_keras.metrics.RootMeanSquaredError",
"tf_keras.metrics.MeanAbsoluteError",
"tf_keras.metrics.MeanAbsolutePercentageError",
"tf_keras.metrics.MeanSquaredLogarithmicError",
"tf_keras.metrics.CosineSimilarity",
"tf_keras.metrics.LogCoshError",
],
},
{
"path": "classification_metrics",
"title": "Classification metrics based on True/False positives & negatives",
"generate": [
"tf_keras.metrics.AUC",
"tf_keras.metrics.Precision",
"tf_keras.metrics.Recall",
"tf_keras.metrics.TruePositives",
"tf_keras.metrics.TrueNegatives",
"tf_keras.metrics.FalsePositives",
"tf_keras.metrics.FalseNegatives",
"tf_keras.metrics.PrecisionAtRecall",
"tf_keras.metrics.SensitivityAtSpecificity",
"tf_keras.metrics.SpecificityAtSensitivity",
],
},
{
"path": "segmentation_metrics",
"title": "Image segmentation metrics",
"generate": ["tf_keras.metrics.MeanIoU"],
},
{
"path": "hinge_metrics",
"title": 'Hinge metrics for "maximum-margin" classification',
"generate": [
"tf_keras.metrics.Hinge",
"tf_keras.metrics.SquaredHinge",
"tf_keras.metrics.CategoricalHinge",
],
},
],
},
{
"path": "losses/",
"title": "Losses",
"toc": True,
"children": [
{
"path": "probabilistic_losses",
"title": "Probabilistic losses",
"generate": [
"tf_keras.losses.BinaryCrossentropy",
"tf_keras.losses.CategoricalCrossentropy",
"tf_keras.losses.SparseCategoricalCrossentropy",
"tf_keras.losses.Poisson",
"tf_keras.losses.binary_crossentropy",
"tf_keras.losses.categorical_crossentropy",
"tf_keras.losses.sparse_categorical_crossentropy",
"tf_keras.losses.poisson",
"tf_keras.losses.KLDivergence",
"tf_keras.losses.kl_divergence",
],
},
{
"path": "regression_losses",
"title": "Regression losses",
"generate": [
"tf_keras.losses.MeanSquaredError",
"tf_keras.losses.MeanAbsoluteError",
"tf_keras.losses.MeanAbsolutePercentageError",
"tf_keras.losses.MeanSquaredLogarithmicError",
"tf_keras.losses.CosineSimilarity",
"tf_keras.losses.mean_squared_error",
"tf_keras.losses.mean_absolute_error",
"tf_keras.losses.mean_absolute_percentage_error",
"tf_keras.losses.mean_squared_logarithmic_error",
"tf_keras.losses.cosine_similarity",
"tf_keras.losses.Huber",
"tf_keras.losses.huber",
"tf_keras.losses.LogCosh",
"tf_keras.losses.log_cosh",
],
},
{
"path": "hinge_losses",
"title": 'Hinge losses for "maximum-margin" classification',
"generate": [
"tf_keras.losses.Hinge",
"tf_keras.losses.SquaredHinge",
"tf_keras.losses.CategoricalHinge",
"tf_keras.losses.hinge",
"tf_keras.losses.squared_hinge",
"tf_keras.losses.categorical_hinge",
],
},
],
},
{
"path": "data_loading/",
"title": "Data loading",
"toc": True,
"children": [
{
"path": "image",
"title": "Image data loading",
"generate": [
"tf_keras.utils.image_dataset_from_directory",
"tf_keras.utils.load_img",
"tf_keras.utils.img_to_array",
"tf_keras.utils.save_img",
# 'tf_keras.preprocessing.image.ImageDataGenerator', # LEGACY
# 'tf_keras.preprocessing.image.ImageDataGenerator.flow', # LEGACY
# 'tf_keras.preprocessing.image.ImageDataGenerator.flow_from_dataframe', # LEGACY
# 'tf_keras.preprocessing.image.ImageDataGenerator.flow_from_directory', # LEGACY
],
},
{
"path": "timeseries",
"title": "Timeseries data loading",
"generate": [
"tf_keras.utils.timeseries_dataset_from_array",
# 'tf_keras.preprocessing.sequence.pad_sequences', # LEGACY
# 'tf_keras.preprocessing.sequence.TimeseriesGenerator', # LEGACY
],
},
{
"path": "text",
"title": "Text data loading",
"generate": [
"tf_keras.utils.text_dataset_from_directory",
# 'tf_keras.preprocessing.text.Tokenizer', # LEGACY
],
},
{
"path": "audio",
"title": "Audio data loading",
"generate": [
"tf_keras.utils.audio_dataset_from_directory",
],
},
],
},
{
"path": "datasets/",
"title": "Built-in small datasets",
"toc": True,
"children": [
{
"path": "mnist",
"title": "MNIST digits classification dataset",
"generate": ["tf_keras.datasets.mnist.load_data"],
},
{
"path": "cifar10",
"title": "CIFAR10 small images classification dataset",
"generate": ["tf_keras.datasets.cifar10.load_data"],
},
{
"path": "cifar100",
"title": "CIFAR100 small images classification dataset",
"generate": ["tf_keras.datasets.cifar100.load_data"],
},
{
"path": "imdb",
"title": "IMDB movie review sentiment classification dataset",
"generate": [
"tf_keras.datasets.imdb.load_data",
"tf_keras.datasets.imdb.get_word_index",
],
},
{
"path": "reuters",
"title": "Reuters newswire classification dataset",
"generate": [
"tf_keras.datasets.reuters.load_data",
"tf_keras.datasets.reuters.get_word_index",
],
},
{
"path": "fashion_mnist",
"title": "Fashion MNIST dataset, an alternative to MNIST",
"generate": ["tf_keras.datasets.fashion_mnist.load_data"],
},
{
"path": "boston_housing",
"title": "Boston Housing price regression dataset",
"generate": ["tf_keras.datasets.boston_housing.load_data"],
},
],
},
{
"path": "applications/",
"title": "Keras Applications",
"children": [
{
"path": "xception",
"title": "Xception",
"generate": ["tf_keras.applications.Xception"],
},
{
"path": "efficientnet",
"title": "EfficientNet B0 to B7",
"generate": [
"tf_keras.applications.EfficientNetB0",
"tf_keras.applications.EfficientNetB1",
"tf_keras.applications.EfficientNetB2",
"tf_keras.applications.EfficientNetB3",
"tf_keras.applications.EfficientNetB4",
"tf_keras.applications.EfficientNetB5",
"tf_keras.applications.EfficientNetB6",
"tf_keras.applications.EfficientNetB7",
],
},
{
"path": "efficientnet_v2",
"title": "EfficientNetV2 B0 to B3 and S, M, L",
"generate": [
"tf_keras.applications.EfficientNetV2B0",
"tf_keras.applications.EfficientNetV2B1",
"tf_keras.applications.EfficientNetV2B2",
"tf_keras.applications.EfficientNetV2B3",
"tf_keras.applications.EfficientNetV2S",
"tf_keras.applications.EfficientNetV2M",
"tf_keras.applications.EfficientNetV2L",
],
},
{
"path": "convnext",
"title": "ConvNeXt Tiny, Small, Base, Large, XLarge",
"generate": [
"tf_keras.applications.ConvNeXtTiny",
"tf_keras.applications.ConvNeXtSmall",
"tf_keras.applications.ConvNeXtBase",
"tf_keras.applications.ConvNeXtLarge",
"tf_keras.applications.ConvNeXtXLarge",
],
},
{
"path": "vgg",
"title": "VGG16 and VGG19",
"generate": [
"tf_keras.applications.VGG16",
"tf_keras.applications.VGG19",
],
},
{
"path": "resnet",
"title": "ResNet and ResNetV2",
"generate": [
"tf_keras.applications.ResNet50",
"tf_keras.applications.ResNet101",
"tf_keras.applications.ResNet152",
"tf_keras.applications.ResNet50V2",
"tf_keras.applications.ResNet101V2",
"tf_keras.applications.ResNet152V2",
],
},
{
"path": "mobilenet",
"title": "MobileNet, MobileNetV2, and MobileNetV3",
"generate": [
"tf_keras.applications.MobileNet",
"tf_keras.applications.MobileNetV2",
"tf_keras.applications.MobileNetV3Small",
"tf_keras.applications.MobileNetV3Large",
],
},
{
"path": "densenet",
"title": "DenseNet",
"generate": [
"tf_keras.applications.DenseNet121",
"tf_keras.applications.DenseNet169",
"tf_keras.applications.DenseNet201",
],
},
{
"path": "nasnet",
"title": "NasNetLarge and NasNetMobile",
"generate": [
"tf_keras.applications.NASNetLarge",
"tf_keras.applications.NASNetMobile",
],
},
{
"path": "inceptionv3",
"title": "InceptionV3",
"generate": [
"tf_keras.applications.InceptionV3",
],
},
{
"path": "inceptionresnetv2",
"title": "InceptionResNetV2",
"generate": [
"tf_keras.applications.InceptionResNetV2",
],
},
],
},
{
"path": "mixed_precision/",
"title": "Mixed precision",
"toc": True,
"children": [
{
"path": "policy",
"title": "Mixed precision policy API",
"generate": [
"tf_keras.mixed_precision.Policy",
"tf_keras.mixed_precision.global_policy",
"tf_keras.mixed_precision.set_global_policy",
],
},
{
"path": "loss_scale_optimizer",
"title": "LossScaleOptimizer",
"generate": [
"tf_keras.mixed_precision.LossScaleOptimizer",
],
},
],
},
{
"path": "utils/",
"title": "Utilities",
"toc": True,
"children": [
{
"path": "model_plotting_utils",
"title": "Model plotting utilities",
"generate": [
"tf_keras.utils.plot_model",
"tf_keras.utils.model_to_dot",
],
},
{
"path": "feature_space",
"title": "Structured data preprocessing utilities",
"generate": [
"tf_keras.utils.FeatureSpace",
],
},
{
"path": "python_utils",
"title": "Python & NumPy utilities",
"generate": [
"tf_keras.utils.set_random_seed",
"tf_keras.utils.split_dataset",
"tf_keras.utils.get_file",
"tf_keras.utils.Progbar",
"tf_keras.utils.Sequence",
"tf_keras.utils.to_categorical",
"tf_keras.utils.to_ordinal",
"tf_keras.utils.normalize",
],
},
{
"path": "backend_utils",
"title": "Backend utilities",
"generate": [
"tf_keras.backend.clear_session",
"tf_keras.backend.floatx",
"tf_keras.backend.set_floatx",
"tf_keras.backend.image_data_format",
"tf_keras.backend.set_image_data_format",
"tf_keras.backend.epsilon",
"tf_keras.backend.set_epsilon",
"tf_keras.backend.is_keras_tensor",
"tf_keras.backend.get_uid",
"tf_keras.backend.rnn",
],
},
],
},
],
}
| keras-io/scripts/keras2_api_master.py/0 | {
"file_path": "keras-io/scripts/keras2_api_master.py",
"repo_id": "keras-io",
"token_count": 41548
} | 105 |
# KerasCV Preprocessing Layers
KerasCV preprocessing layers allow you to easily augment your image data using
standard augmentation techniques such as `CutMix`, `MixUp` and `RandAugment`.
See also this [guide on assembling an image data augmentation pipeline](/guides/keras_cv/cut_mix_mix_up_and_rand_augment/).
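For example, a single preprocessing layer can be applied directly to a batch of images. Below is a minimal sketch using `RandAugment`; the random image batch is purely illustrative.
```python
import numpy as np
import keras_cv

# Build a RandAugment layer for images with pixel values in [0, 255].
augmenter = keras_cv.layers.RandAugment(value_range=(0, 255))
# A random batch of images, used only for illustration.
images = np.random.uniform(0, 255, size=(4, 224, 224, 3)).astype("float32")
augmented = augmenter(images)  # same shape as `images`
```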
{{toc}}
| keras-io/templates/api/keras_cv/layers/preprocessing/index.md/0 | {
"file_path": "keras-io/templates/api/keras_cv/layers/preprocessing/index.md",
"repo_id": "keras-io",
"token_count": 93
} | 106 |
# KerasTuner Oracles
The `Oracle` class is the base class for all the search algorithms in KerasTuner.
An `Oracle` object receives evaluation results for a model (from a `Tuner` class)
and generates new hyperparameter values.
The built-in `Oracle` classes are
`RandomSearchOracle`, `BayesianOptimizationOracle`, and `HyperbandOracle`.
You can also write your own tuning algorithm by subclassing the `Oracle` class.
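For reference, here is a minimal sketch of instantiating a built-in `Oracle` and pairing it with the base `Tuner` class; the toy `build_model` hypermodel is illustrative, and the exact wiring may differ in your setup.
```python
import keras_tuner
from tensorflow import keras

def build_model(hp):
    # A toy hypermodel: tune the width of a single hidden layer.
    model = keras.Sequential([
        keras.layers.Dense(hp.Int("units", 32, 128, step=32), activation="relu"),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer="adam", loss="mse")
    return model

# A built-in Oracle that proposes hyperparameter values via Bayesian optimization.
oracle = keras_tuner.oracles.BayesianOptimizationOracle(
    objective=keras_tuner.Objective("val_loss", direction="min"),
    max_trials=10,
)
# The Tuner evaluates models and reports results back to the Oracle.
tuner = keras_tuner.Tuner(oracle=oracle, hypermodel=build_model)
```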
{{toc}}
| keras-io/templates/api/keras_tuner/oracles/index.md/0 | {
"file_path": "keras-io/templates/api/keras_tuner/oracles/index.md",
"repo_id": "keras-io",
"token_count": 115
} | 107 |
# KerasCV Bounding Boxes
All KerasCV components that process bounding boxes require a `bounding_box_format`
argument. This argument allows you to seamlessly integrate KerasCV components into
your own workflows while preserving proper behavior of the components themselves.
Bounding boxes are represented by dictionaries with two keys: `'boxes'` and `'classes'`:
```
{
'boxes': [batch, num_boxes, 4],
'classes': [batch, num_boxes]
}
```
To ensure your bounding boxes comply with the KerasCV specification, you can use [`keras_cv.bounding_box.validate_format(boxes)`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/validate_format.py).
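As a minimal sketch, the snippet below builds a boxes dictionary (with made-up values), validates it, and converts the coordinates to another format:
```python
import numpy as np
import keras_cv

boxes = {
    "boxes": np.array([[[10.0, 20.0, 50.0, 60.0]]], dtype="float32"),  # [batch, num_boxes, 4]
    "classes": np.array([[0.0]], dtype="float32"),                     # [batch, num_boxes]
}
# Raises an informative error if the dictionary does not follow the spec.
keras_cv.bounding_box.validate_format(boxes)
# Convert the box coordinates from "xywh" to "xyxy".
converted = keras_cv.bounding_box.convert_format(
    boxes["boxes"], source="xywh", target="xyxy"
)
```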
The bounding box formats supported in KerasCV
[are listed in the API docs](/api/keras_cv/bounding_box/formats).
If a format you would like to use is missing,
[feel free to open a GitHub issue on KerasCV](https://github.com/keras-team/keras-cv/issues)!
| keras-io/templates/keras_cv/bounding_box/index.md/0 | {
"file_path": "keras-io/templates/keras_cv/bounding_box/index.md",
"repo_id": "keras-io",
"token_count": 283
} | 108 |
# GLUE Benchmark Score on KerasNLP Pretrained Models
We use `glue.py` to test out KerasNLP pretrained models, and report scores in
this doc. Our goal is to quickly verify our model's performance instead of
searching for the best hyperparameters, so the reported scores can be a little
worse than those reported in the original papers.
Unless specifically noted, hyperparameter settings are the same across all GLUE
tasks.
## BERT
Test target is `keras_nlp.models.BertClassifier()`. WNLI is skipped because it
was not evaluated in the original paper.
### Hyperparameter Settings
- Learning Rate:
We use a `PolynomialDecay` learning rate, with `initial_learning_rate=5e-5`.
```python
lr = tf.keras.optimizers.schedules.PolynomialDecay(
5e-5,
decay_steps={total_training_steps},
end_learning_rate=0.0,
)
```
- Optimizer:
We use `AdamW` optimizer, and exclude `bias` and variables in
`LayerNormalization` from weight decay.
```python
optimizer = tf.keras.optimizers.experimental.AdamW(
lr, weight_decay=0.01, global_clipnorm=1.0
)
optimizer.exclude_from_weight_decay(
var_names=["LayerNorm", "layer_norm", "bias"]
)
```
- Others:
| Hyperparameter Name | Value |
|---------------------|-------|
| batch_size | 32 |
| epochs | 3 |
| dropout | 0.1 |
### Benchmark Score
| Task Name | Metrics | Score |
|-----------|-----------------------|-----------|
| CoLA | Matthew's Corr | 52.2 |
| SST-2 | Accuracy | 93.5 |
| MRPC | F1 / Accuracy | 88.2/83.9 |
| STSB | Pearson-Spearman Corr | 84.5/83.1 |
| QQP | F1 / Accuracy | 71.3/89.3 |
| MNLI_M | Accuracy | 84.3 |
| MNLI_Mis | Accuracy | 83.3 |
| QNLI | Accuracy | 90.4 |
| RTE | Accuracy | 66.7 |
| AX | Matthew's Corr | 34.8 |
See the actual submission in this [link](https://gluebenchmark.com/submission/gnG9xUQGkjfVq6loRQYKTcM1YjG3/-NIe3Owl8pjHLXpistkI).
## RoBERTa
Test target is `keras_nlp.models.RobertaClassifier()`.
### Hyperparameter Settings
#### WNLI
We use a different setting for WNLI than for the other tasks.
- Learning Rate:
We use a `PolynomialDecay` learning rate, with `initial_learning_rate=2e-5`.
```python
lr = tf.keras.optimizers.schedules.PolynomialDecay(
2e-5,
decay_steps={total_training_steps},
end_learning_rate=0.0,
)
```
- Optimizer:
We use `Adam` optimizer.
```python
optimizer = tf.keras.optimizers.Adam(lr)
```
- Others:
| Hyperparameter Name | Value |
|---------------------|-------|
| batch_size | 32 |
| epochs | 10 |
| dropout | 0.1 |
#### Other GLUE Tasks
- Learning Rate:
We use a `PolynomialDecay` learning rate, with `initial_learning_rate=2e-5`.
```python
lr = tf.keras.optimizers.schedules.PolynomialDecay(
2e-5,
decay_steps={total_training_steps},
end_learning_rate=0.0,
)
```
- Optimizer:
We use `AdamW` optimizer, and exclude `bias` and variables in
`LayerNormalization` from weight decay.
```python
optimizer = tf.keras.optimizers.experimental.AdamW(
lr, weight_decay=0.01, global_clipnorm=1.0
)
optimizer.exclude_from_weight_decay(
var_names=["LayerNorm", "layer_norm", "bias"]
)
```
- Others:
| Hyperparameter Name | Value |
|---------------------|-------|
| batch_size | 32 |
| epochs | 3 |
| dropout | 0.1 |
### Benchmark Score
| Task Name | Metrics | Score |
|-----------|-----------------------|-----------|
| CoLA | Matthew's Corr | 56.3 |
| SST-2 | Accuracy | 96.1 |
| MRPC | F1 / Accuracy | 89.8/86.3 |
| STSB | Pearson-Spearman Corr | 88.4/87.7 |
| QQP | F1 / Accuracy | 72.3/89.0 |
| MNLI_M | Accuracy | 87.7 |
| MNLI_Mis | Accuracy | 87.1 |
| QNLI | Accuracy | 92.8 |
| RTE | Accuracy | 69.2 |
| WNLI | Accuracy | 65.1 |
| AX | Matthew's Corr | 40.6 |
See the actual submission in this [link](https://gluebenchmark.com/submission/gnG9xUQGkjfVq6loRQYKTcM1YjG3/-NJS0XAX1o9p8DJst3wM). | keras-nlp/examples/glue_benchmark/scores.md/0 | {
"file_path": "keras-nlp/examples/glue_benchmark/scores.md",
"repo_id": "keras-nlp",
"token_count": 2164
} | 109 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.tests.test_case import TestCase
class TransformerEncoderTest(TestCase):
@parameterized.named_parameters(
("without_norm_first", False),
("with_norm_first", True),
)
def test_layer_behaviors(self, normalize_first):
self.run_layer_test(
cls=TransformerEncoder,
init_kwargs={
"intermediate_dim": 4,
"num_heads": 2,
"normalize_first": normalize_first,
"activation": "relu",
"layer_norm_epsilon": 1e-05,
"kernel_initializer": "HeNormal",
"bias_initializer": "Zeros",
"dropout": 0.1,
},
input_data=random.uniform(shape=(2, 4, 6)),
expected_output_shape=(2, 4, 6),
expected_num_trainable_weights=16,
expected_num_non_trainable_variables=3, # dropout rng seeds
)
@parameterized.named_parameters(
("without_norm_first", False),
("with_norm_first", True),
)
def test_valid_call(self, normalize_first):
encoder = TransformerEncoder(
intermediate_dim=4,
num_heads=2,
normalize_first=normalize_first,
)
model = keras.Sequential(
[
keras.Input(shape=(4, 6)),
encoder,
]
)
input = random.uniform(shape=[2, 4, 6])
model(input)
def test_valid_call_with_mask(self):
encoder = TransformerEncoder(
intermediate_dim=4,
num_heads=2,
)
encoder.build([2, 4, 6])
input = random.uniform(shape=[2, 4, 6])
mask = input[:, :, 0] < 0.5
encoder(input, mask)
def test_value_error_when_invalid_kernel_inititalizer(self):
with self.assertRaises(ValueError):
TransformerEncoder(
intermediate_dim=4,
num_heads=2,
dropout=0.5,
kernel_initializer="Invalid",
)
def test_mask_propagation(self):
encoder = TransformerEncoder(
intermediate_dim=4,
num_heads=2,
)
inputs = random.uniform(shape=[1, 4, 6])
mask = ops.array([[True, True, False, False]])
inputs._keras_mask = mask
outputs = encoder(inputs)
self.assertAllEqual(outputs._keras_mask, mask)
| keras-nlp/keras_nlp/layers/modeling/transformer_encoder_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/transformer_encoder_test.py",
"repo_id": "keras-nlp",
"token_count": 1498
} | 110 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.utils.tensor_utils import is_float_dtype
from keras_nlp.utils.tensor_utils import tensor_to_list
REPLACE_SUBSTRINGS = [
("<skipped>", ""),
("-\n", ""),
("\n", " "),
    ("&quot;", '"'),
    ("&amp;", "&"),
    ("&lt;", "<"),
    ("&gt;", ">"),
]
REGEX_PATTERNS = [
# language-dependent part (assuming Western languages)
(r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", r" \1 "),
# tokenize period and comma unless preceded by a digit
(r"([^0-9])([\.,])", r"\1 \2 "),
# tokenize period and comma unless followed by a digit
(r"([\.,])([^0-9])", r" \1 \2"),
# tokenize dash when preceded by a digit
(r"([0-9])(-)", r"\1 \2 "),
# If last character is "." or ",", add space.
(r"[\.,]$", r" \0 \1"),
# one space only between words
(r"\s+", r" "),
]
@keras_nlp_export("keras_nlp.metrics.Bleu")
class Bleu(keras.metrics.Metric):
"""BLEU metric.
This class implements the BLEU metric. BLEU is generally used to evaluate
machine translation systems. By default, this implementation replicates
SacreBLEU, but user-defined tokenizers can be passed to deal with other
languages.
For BLEU score, we count the number of matching n-grams in the candidate
translation and the reference text. We find the "clipped count" of matching
n-grams so as to not give a high score to a (reference, prediction) pair
with redundant, repeated tokens. Secondly, BLEU score tends to reward
shorter predictions more, which is why a brevity penalty is applied to
penalise short predictions. For more details, see the following article:
https://cloud.google.com/translate/automl/docs/evaluate#bleu.
Note on input shapes:
For unbatched inputs, `y_pred` should be a tensor of shape `()`, and
`y_true` should be a tensor of shape `(num_references,)`. For batched
inputs, `y_pred` should be a tensor of shape `(batch_size,)`,
and `y_true` should be a tensor of shape `(batch_size, num_references)`. In
case of batched inputs, `y_true` can also be a ragged tensor of shape
`(batch_size, None)` if different samples have different number of
references.
Args:
tokenizer: callable. A function that takes a string `tf.RaggedTensor`
(of any shape), and tokenizes the strings in the tensor. If the
tokenizer is not specified, the default tokenizer is used. The
default tokenizer replicates the behaviour of SacreBLEU's
`"tokenizer_13a"` tokenizer
(https://github.com/mjpost/sacrebleu/blob/v2.1.0/sacrebleu/tokenizers/tokenizer_13a.py).
max_order: int. The maximum n-gram order to use. For example, if
`max_order` is set to 3, unigrams, bigrams, and trigrams will be
considered. Defaults to `4`.
smooth: bool. Whether to apply Lin et al. 2004 smoothing to the BLEU
score. Adds 1 to the matched n-gram count (i.e., numerator) and 1
to the total n-gram count (i.e., denominator) for every order while
calculating precision. Defaults to `False`.
dtype: string or tf.dtypes.Dtype. Precision of metric computation. If
not specified, it defaults to `"float32"`.
name: string. Name of the metric instance.
**kwargs: Other keyword arguments.
References:
- [Papineni et al., 2002](https://aclanthology.org/P02-1040/)
- [SacreBLEU](https://github.com/mjpost/sacrebleu)
- [Lin et al., 2004](https://aclanthology.org/P04-1077/)
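    Example usage (a minimal sketch; the sample sentences are illustrative):
    ```python
    bleu = keras_nlp.metrics.Bleu()
    references = [["He eats a sweet apple."]]      # shape: (batch, num_references)
    translations = ["He eats a sweet red apple."]  # shape: (batch,)
    bleu.update_state(references, translations)
    bleu.result()
    ```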
"""
def __init__(
self,
tokenizer=None,
max_order=4,
smooth=False,
dtype="float32",
name="bleu",
**kwargs,
):
super().__init__(name=name, dtype=dtype, **kwargs)
if not is_float_dtype(dtype):
raise ValueError(
"`dtype` must be a floating point type. "
f"Received: dtype={dtype}"
)
self.tokenizer = tokenizer
self.max_order = max_order
self.smooth = smooth
self._matches = self.add_weight(
shape=(self.max_order,),
initializer="zeros",
dtype=self.dtype,
name="bleu_matches",
)
self._possible_matches = self.add_weight(
shape=(self.max_order,),
initializer="zeros",
dtype=self.dtype,
name="bleu_possible_matches",
)
self._translation_length = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="bleu_translation_length",
)
self._reference_length = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="bleu_reference_length",
)
self._bleu = self.add_weight(
shape=(),
initializer="zeros",
dtype=self.dtype,
name="bleu",
)
def _tokenizer(self, inputs):
"""
Tokenizes the input strings. By default, replicates the behaviour of
SacreBLEU's default tokenizer, namely, `tokenizer_13a`.
"""
if self.tokenizer:
return self.tokenizer(inputs)
for pattern, replacement in REPLACE_SUBSTRINGS + REGEX_PATTERNS:
inputs = tf.strings.regex_replace(
input=inputs,
pattern=pattern,
rewrite=replacement,
replace_global=True,
name=None,
)
inputs = tf.strings.split(inputs)
return inputs
def _get_ngrams(self, segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Uses Python ops. Inspired from
https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py.
Args:
segment: list. Text segment from which n-grams will be
extracted.
max_order: int. Maximum length in tokens of the n-grams returned
by this method.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i : i + order])
ngram_counts[ngram] += 1
return ngram_counts
def _corpus_bleu(
self,
reference_corpus,
translation_corpus,
matches_by_order,
possible_matches_by_order,
translation_length,
reference_length,
max_order=4,
smooth=False,
):
"""Corpus BLEU implementation using Python ops.
Computes BLEU score of translated segments against one or more
references. Inspired from
https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py.
Args:
reference_corpus: list of lists of references for each
translation. Each reference should be tokenized into a list
of tokens.
translation_corpus: list of translations to score. Each
translation should be tokenized into a list of tokens.
matches_by_order: list of floats containing the initial number
of matches for each order.
possible_matches_by_order: list of floats containing the initial
number of possible matches for each order.
translation_length: float. Initial number of tokens in all the
translations.
reference_length: float. Initial number of tokens in all the
references.
max_order: int. Maximum n-gram order to use when computing
BLEU score.
smooth: boolean. Whether or not to apply Lin et al. 2004
smoothing.
"""
for references, translation in zip(
reference_corpus, translation_corpus
):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= self._get_ngrams(
reference, max_order
)
translation_ngram_counts = self._get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = (matches_by_order[i] + 1.0) / (
possible_matches_by_order[i] + 1.0
)
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (
float(matches_by_order[i])
/ possible_matches_by_order[i]
)
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1.0 / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
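        # Brevity penalty: 1.0 if the translation is at least as long as the
        # reference; otherwise exp(1 - reference_length / translation_length).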
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.0
else:
bp = math.exp(1 - 1.0 / ratio)
bleu = geo_mean * bp
return (
bleu,
matches_by_order,
possible_matches_by_order,
translation_length,
reference_length,
)
def _calculate_bleu_score(self, references, translation):
if isinstance(references, (tf.Tensor, tf.RaggedTensor)):
references = tensor_to_list(references)
if isinstance(translation, (tf.Tensor, tf.RaggedTensor)):
translation = tensor_to_list(translation)
matches = self._matches.numpy()
possible_matches = self._possible_matches.numpy()
translation_length = self._translation_length.numpy()
reference_length = self._reference_length.numpy()
(
bleu_score,
matches,
possible_matches,
translation_length,
reference_length,
) = self._corpus_bleu(
reference_corpus=references,
translation_corpus=translation,
matches_by_order=matches,
possible_matches_by_order=possible_matches,
translation_length=translation_length,
reference_length=reference_length,
max_order=self.max_order,
smooth=self.smooth,
)
return (
bleu_score,
matches,
possible_matches,
translation_length,
reference_length,
)
def update_state(self, y_true, y_pred, sample_weight=None):
def validate_and_fix_rank(inputs, tensor_name, base_rank=0):
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
if inputs.shape.rank == base_rank:
return inputs[tf.newaxis]
elif inputs.shape.rank == base_rank + 1:
return inputs
elif inputs.shape.rank == base_rank + 2:
if tf.shape(inputs)[-1] != 1:
raise ValueError(
                        f"{tensor_name} is of rank {inputs.shape.rank}. The "
f"last dimension must be of size 1."
)
return tf.squeeze(inputs, axis=-1)
else:
raise ValueError(
f"{tensor_name} must be of rank {base_rank}, {base_rank+1} "
f"or {base_rank+2}. Found rank: {inputs.shape.rank}"
)
y_true = validate_and_fix_rank(y_true, "y_true", 1)
y_pred = validate_and_fix_rank(y_pred, "y_pred", 0)
# Tokenize the inputs.
y_true = self._tokenizer(y_true)
y_pred = self._tokenizer(y_pred)
(
bleu_score,
matches,
possible_matches,
translation_length,
reference_length,
) = self._calculate_bleu_score(y_true, y_pred)
self._matches.assign(matches)
self._possible_matches.assign(possible_matches)
self._translation_length.assign(translation_length)
self._reference_length.assign(reference_length)
self._bleu.assign(bleu_score)
def result(self):
return self._bleu
def reset_state(self):
self._matches.assign(
ops.zeros(shape=(self.max_order,), dtype=self.dtype)
)
self._possible_matches.assign(
ops.zeros(shape=(self.max_order,), dtype=self.dtype)
)
self._translation_length.assign(0.0)
self._reference_length.assign(0.0)
self._bleu.assign(0.0)
def get_config(self):
config = super().get_config()
config.update(
{
"tokenizer": self.tokenizer,
"max_order": self.max_order,
"smooth": self.smooth,
}
)
return config
| keras-nlp/keras_nlp/metrics/bleu.py/0 | {
"file_path": "keras-nlp/keras_nlp/metrics/bleu.py",
"repo_id": "keras-nlp",
"token_count": 6711
} | 111 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BART model preset configurations."""
backbone_presets = {
"bart_base_en": {
"metadata": {
"description": (
"6-layer BART model where case is maintained. "
"Trained on BookCorpus, English Wikipedia and CommonCrawl."
),
"params": 139417344,
"official_name": "BART",
"path": "bart",
"model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md",
},
"kaggle_handle": "kaggle://keras/bart/keras/bart_base_en/2",
},
"bart_large_en": {
"metadata": {
"description": (
"12-layer BART model where case is maintained. "
"Trained on BookCorpus, English Wikipedia and CommonCrawl."
),
"params": 406287360,
"official_name": "BART",
"path": "bart",
"model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md",
},
"config": {
"vocabulary_size": 50265,
"num_layers": 12,
"num_heads": 16,
"hidden_dim": 1024,
"intermediate_dim": 4096,
"dropout": 0.1,
"max_sequence_length": 1024,
},
"kaggle_handle": "kaggle://keras/bart/keras/bart_large_en/2",
},
"bart_large_en_cnn": {
"metadata": {
"description": (
"The `bart_large_en` backbone model fine-tuned on the CNN+DM "
"summarization dataset."
),
"params": 406287360,
"official_name": "BART",
"path": "bart",
"model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md",
},
"config": {
"vocabulary_size": 50264,
"num_layers": 12,
"num_heads": 16,
"hidden_dim": 1024,
"intermediate_dim": 4096,
"dropout": 0.1,
"max_sequence_length": 1024,
},
"kaggle_handle": "kaggle://keras/bart/keras/bart_large_en_cnn/2",
},
}
| keras-nlp/keras_nlp/models/bart/bart_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bart/bart_presets.py",
"repo_id": "keras-nlp",
"token_count": 1299
} | 112 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.bloom.bloom_tokenizer import BloomTokenizer
from keras_nlp.tests.test_case import TestCase
class BloomTokenizerTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<s>", "</s>", "<pad>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges}
self.input_data = [
"<s>airplane at airport<pad>",
"<s> airplane airport<pad>",
]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=BloomTokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=[[6, 1, 3, 4, 2, 5, 8], [6, 2, 3, 2, 5, 8]],
)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
BloomTokenizer(vocabulary=["a", "b", "c"], merges=[])
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=BloomTokenizer,
preset="bloom_560m_multi",
input_data=["The quick brown fox."],
expected_output=[[2175, 23714, 73173, 144252, 17]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in BloomTokenizer.presets:
self.run_preset_test(
cls=BloomTokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/bloom/bloom_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bloom/bloom_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1054
} | 113 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.utils.keras_utils import clone_initializer
class DisentangledSelfAttention(keras.layers.Layer):
"""DisentangledSelfAttention layer.
This is an implementation of disentangled self-attention as described in the
paper ["DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing"](https://arxiv.org/abs/2111.09543).
Effectively, this layer implements Multi-Head Self Attention with relative
attention, i.e., to get the final attention score, we compute the
content-to-position and position-to-content attention scores, and add these
scores to the vanilla multi-head self-attention scores.
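    Schematically, the fused score per attention head is roughly
    `(Q_c K_c^T + Q_c K_r^T + K_c Q_r^T) / sqrt(3 * head_dim)`, where the `_c`
    projections come from the hidden states and the `_r` projections come from
    the relative position embeddings. This is an informal paraphrase of the
    paper's formulation, mirroring the `scale_factor` and the c2p/p2c terms
    computed below.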
Args:
num_heads: int. Number of attention heads.
hidden_dim: int. Hidden dimension of the input, i.e., `hidden_states`.
max_position_embeddings: int. The maximum input
sequence length. Defaults to `512`.
bucket_size: int. The size of the relative position
buckets. Generally equal to `max_sequence_length // 2`.
Defaults to `256`.
dropout: float. Dropout probability. Defaults to `0.1`.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense layers.
Defaults to `"glorot_uniform"`.
bias_initializer: string or `keras.initializers` initializer.
The bias initializer for the dense layers.
Defaults to `"zeros"`.
"""
def __init__(
self,
num_heads,
hidden_dim,
max_position_embeddings=512,
bucket_size=256,
dropout=0.1,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs,
):
super().__init__(**kwargs)
# Passed args.
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.max_position_embeddings = max_position_embeddings
self.bucket_size = bucket_size
self.dropout = dropout
# Initializers.
self._kernel_initializer = keras.initializers.get(kernel_initializer)
self._bias_initializer = keras.initializers.get(bias_initializer)
# Derived args.
self.attn_head_size = hidden_dim // num_heads
# We have three types of attention - MHA, p2c and c2p.
num_type_attn = 3
self.scale_factor = 1.0 / math.sqrt(
float(num_type_attn * self.attn_head_size)
)
def build(self, inputs_shape, rel_embeddings_shape=None):
# Q, K, V linear layers.
self._query_dense = keras.layers.EinsumDense(
equation="abc,cde->abde",
output_shape=(None, self.num_heads, self.attn_head_size),
bias_axes="de",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="query",
)
self._query_dense.build(inputs_shape)
self._key_dense = keras.layers.EinsumDense(
equation="abc,cde->abde",
output_shape=(None, self.num_heads, self.attn_head_size),
bias_axes="de",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="key",
)
self._key_dense.build(inputs_shape)
self._value_dense = keras.layers.EinsumDense(
equation="abc,cde->abde",
output_shape=(None, self.num_heads, self.attn_head_size),
bias_axes="de",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="value",
)
self._value_dense.build(inputs_shape)
# Relative attention.
self._position_dropout_layer = keras.layers.Dropout(
self.dropout,
dtype=self.dtype_policy,
)
self._attn_dropout_layer = keras.layers.Dropout(
self.dropout,
dtype=self.dtype_policy,
name="attention_dropout",
)
self._softmax = keras.layers.Softmax(
axis=-1,
dtype="float32",
name="attention_softmax",
)
# Output.
self._output_dense = keras.layers.EinsumDense(
equation="abc,cd->abd",
output_shape=(None, self.hidden_dim),
bias_axes="d",
**self._get_common_kwargs_for_sublayer(use_bias=True),
dtype=self.dtype_policy,
name="attention_output",
)
self._output_dense.build(inputs_shape)
self.built = True
def _get_common_kwargs_for_sublayer(self, use_bias=True):
common_kwargs = {}
kernel_initializer = clone_initializer(self._kernel_initializer)
bias_initializer = clone_initializer(self._bias_initializer)
common_kwargs["kernel_initializer"] = kernel_initializer
if use_bias:
common_kwargs["bias_initializer"] = bias_initializer
return common_kwargs
def _masked_softmax(self, attention_scores, attention_mask=None):
"""Normalizes the attention scores to probabilities using softmax.
        This implementation is similar to the one present in
`keras.layers.MultiHeadAttention`.
"""
if attention_mask is not None:
mask_expansion_axis = -3
for _ in range(
len(attention_scores.shape) - len(attention_mask.shape)
):
attention_mask = ops.expand_dims(
attention_mask, axis=mask_expansion_axis
)
return self._softmax(attention_scores, attention_mask)
def _compute_attention(
self,
query,
key,
value,
rel_embeddings,
attention_mask=None,
training=None,
):
"""Computes the attention score and returns the attended outputs.
This function computes vanilla MHA score, and relative attention scores
(p2c and c2p). It then sums them up to get the final attention score,
which is used to compute the attended outputs.
"""
attention_scores = ops.einsum(
"aecd,abcd->acbe",
key,
query,
)
attention_scores = ops.multiply(attention_scores, self.scale_factor)
rel_embeddings = self._position_dropout_layer(
rel_embeddings,
training=training,
)
rel_attn_scores = self._compute_disentangled_attention(
query=query,
key=key,
rel_embeddings=rel_embeddings,
)
if rel_attn_scores is not None:
attention_scores += rel_attn_scores
attention_scores = self._masked_softmax(
attention_scores, attention_mask
)
attention_scores = self._attn_dropout_layer(
attention_scores, training=training
)
attention_output = ops.einsum(
"acbe,aecd->abcd", attention_scores, value
)
return attention_output, attention_scores
def _make_log_bucket_position(self, rel_pos):
dtype = rel_pos.dtype
sign = ops.sign(rel_pos)
mid = self.bucket_size // 2
mid = ops.cast(mid, dtype=dtype)
        # In-bucket positions keep their raw relative position (`abs_pos` is
        # clamped to `mid - 1` so the check below selects `rel_pos`); positions
        # outside the bucket use `abs(rel_pos)` and are log-bucketed below.
abs_pos = ops.where(
condition=(rel_pos < mid) & (rel_pos > -mid),
x1=mid - 1,
x2=ops.abs(rel_pos),
)
def _get_log_pos(abs_pos, mid):
numerator = ops.log(abs_pos / mid)
numerator = numerator * ops.cast(mid - 1, dtype=numerator.dtype)
denominator = ops.log((self.max_position_embeddings - 1) / mid)
val = ops.ceil(numerator / denominator)
val = ops.cast(val, dtype=mid.dtype)
val = val + mid
return val
log_pos = _get_log_pos(abs_pos, mid)
bucket_pos = ops.where(
condition=abs_pos <= mid,
x1=rel_pos,
x2=log_pos * sign,
)
bucket_pos = ops.cast(bucket_pos, dtype="int")
return bucket_pos
def _get_rel_pos(self, num_positions):
ids = ops.arange(num_positions)
ids = ops.cast(ids, dtype="int")
query_ids = ops.expand_dims(ids, axis=-1)
key_ids = ops.expand_dims(ids, axis=0)
key_ids = ops.repeat(key_ids, repeats=num_positions, axis=0)
rel_pos = query_ids - key_ids
rel_pos = self._make_log_bucket_position(rel_pos)
rel_pos = ops.expand_dims(ops.expand_dims(rel_pos, axis=0), axis=0)
return rel_pos
def _compute_disentangled_attention(
self,
query,
key,
rel_embeddings,
):
"""Computes relative attention scores (p2c and c2p)."""
batch_size = ops.shape(query)[0]
num_positions = ops.shape(query)[1]
rel_pos = self._get_rel_pos(num_positions)
rel_attn_span = self.bucket_size
score = 0
pos_query = self._query_dense(rel_embeddings)
pos_key = self._key_dense(rel_embeddings)
# c2p
c2p_attn_scores = ops.einsum(
"aecd,abcd->acbe",
pos_key,
query,
)
c2p_pos = ops.clip(rel_pos + rel_attn_span, 0, rel_attn_span * 2 - 1)
c2p_pos = ops.broadcast_to(
c2p_pos,
shape=(
batch_size,
self.num_heads,
num_positions,
num_positions,
),
)
c2p_attn_scores = ops.take_along_axis(
c2p_attn_scores,
indices=c2p_pos,
axis=3,
)
c2p_attn_scores = ops.multiply(c2p_attn_scores, self.scale_factor)
score += c2p_attn_scores
# p2c
p2c_attn_scores = ops.einsum(
"aecd,abcd->acbe",
pos_query,
key,
)
p2c_pos = ops.clip(-rel_pos + rel_attn_span, 0, rel_attn_span * 2 - 1)
p2c_pos = ops.broadcast_to(
p2c_pos,
shape=(
batch_size,
self.num_heads,
num_positions,
num_positions,
),
)
p2c_attn_scores = ops.take_along_axis(
p2c_attn_scores,
indices=p2c_pos,
axis=3,
)
p2c_attn_scores = ops.transpose(p2c_attn_scores, [0, 1, 3, 2])
p2c_attn_scores = ops.multiply(p2c_attn_scores, self.scale_factor)
score += p2c_attn_scores
return score
def call(
self,
inputs,
rel_embeddings,
attention_mask=None,
return_attention_scores=False,
training=None,
):
# `query`, `key`, `value` shape:
# `(batch_size, sequence_length, num_heads, attn_head_size)`.
query = self._query_dense(inputs)
key = self._key_dense(inputs)
value = self._value_dense(inputs)
attention_output, attention_scores = self._compute_attention(
query=query,
key=key,
value=value,
rel_embeddings=rel_embeddings,
attention_mask=attention_mask,
training=training,
)
# Reshape `attention_output` to `(batch_size, sequence_length, hidden_dim)`.
attention_output = ops.reshape(
attention_output,
[
ops.shape(attention_output)[0],
ops.shape(attention_output)[1],
self.hidden_dim,
],
)
attention_output = self._output_dense(attention_output)
if return_attention_scores:
return attention_output, attention_scores
return attention_output
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"hidden_dim": self.hidden_dim,
"max_position_embeddings": self.max_position_embeddings,
"bucket_size": self.bucket_size,
"dropout": self.dropout,
"kernel_initializer": keras.initializers.serialize(
self._kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self._bias_initializer
),
}
)
return config
| keras-nlp/keras_nlp/models/deberta_v3/disentangled_self_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/deberta_v3/disentangled_self_attention.py",
"repo_id": "keras-nlp",
"token_count": 6450
} | 114 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FNet model preset configurations."""
backbone_presets = {
"f_net_base_en": {
"metadata": {
"description": (
"12-layer FNet model where case is maintained. "
"Trained on the C4 dataset."
),
"params": 82861056,
"official_name": "FNet",
"path": "f_net",
"model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md",
},
"kaggle_handle": "kaggle://keras/f_net/keras/f_net_base_en/2",
},
"f_net_large_en": {
"metadata": {
"description": (
"24-layer FNet model where case is maintained. "
"Trained on the C4 dataset."
),
"params": 236945408,
"official_name": "FNet",
"path": "f_net",
"model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md",
},
"kaggle_handle": "kaggle://keras/f_net/keras/f_net_large_en/2",
},
}
| keras-nlp/keras_nlp/models/f_net/f_net_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/f_net/f_net_presets.py",
"repo_id": "keras-nlp",
"token_count": 713
} | 115 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mistral model preset configurations."""
# Metadata for loading pretrained model weights.
backbone_presets = {
"mistral_7b_en": {
"metadata": {
"description": "Mistral 7B base model",
"params": 7241732096,
"official_name": "Mistral",
"path": "mistral",
"model_card": "https://github.com/mistralai/mistral-src/blob/main/README.md",
},
"kaggle_handle": "kaggle://keras/mistral/keras/mistral_7b_en/3",
},
"mistral_instruct_7b_en": {
"metadata": {
"description": "Mistral 7B instruct model",
"params": 7241732096,
"official_name": "Mistral",
"path": "mistral",
"model_card": "https://github.com/mistralai/mistral-src/blob/main/README.md",
},
"kaggle_handle": "kaggle://keras/mistral/keras/mistral_instruct_7b_en/3",
},
}
| keras-nlp/keras_nlp/models/mistral/mistral_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/mistral/mistral_presets.py",
"repo_id": "keras-nlp",
"token_count": 602
} | 116 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.layers.preprocessing.preprocessing_layer import (
PreprocessingLayer,
)
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring
@keras.saving.register_keras_serializable(package="keras_nlp")
class Preprocessor(PreprocessingLayer):
"""Base class for model preprocessors."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tokenizer = None
def __setattr__(self, name, value):
# Work around torch setattr for properties.
if name in ["tokenizer"]:
return object.__setattr__(self, name, value)
return super().__setattr__(name, value)
@property
def tokenizer(self):
"""The tokenizer used to tokenize strings."""
return self._tokenizer
@tokenizer.setter
def tokenizer(self, value):
self._tokenizer = value
def get_config(self):
config = super().get_config()
config["tokenizer"] = keras.layers.serialize(self.tokenizer)
return config
@classmethod
def from_config(cls, config):
if "tokenizer" in config and isinstance(config["tokenizer"], dict):
config["tokenizer"] = keras.layers.deserialize(config["tokenizer"])
return cls(**config)
@classproperty
def tokenizer_cls(cls):
return None
@classproperty
def presets(cls):
return {}
@classmethod
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate {{preprocessor_name}} from preset architecture.
Args:
preset: string. Must be one of "{{preset_names}}".
Examples:
```python
# Load a preprocessor layer from a preset.
preprocessor = keras_nlp.models.{{preprocessor_name}}.from_preset(
"{{example_preset_name}}",
)
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
config_file = "tokenizer.json"
check_preset_class(preset, cls.tokenizer_cls, config_file=config_file)
tokenizer = load_from_preset(
preset,
config_file=config_file,
)
return cls(tokenizer=tokenizer, **kwargs)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Preprocessor.from_preset.__doc__
format_docstring(
preprocessor_name=cls.__name__,
example_preset_name=next(iter(cls.presets), ""),
preset_names='", "'.join(cls.presets),
)(cls.from_preset.__func__)
| keras-nlp/keras_nlp/models/preprocessor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/preprocessor.py",
"repo_id": "keras-nlp",
"token_count": 1690
} | 117 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.t5.t5_layer_norm import T5LayerNorm
from keras_nlp.models.t5.t5_presets import backbone_presets
from keras_nlp.models.t5.t5_transformer_layer import T5TransformerLayer
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.T5Backbone")
class T5Backbone(Backbone):
"""T5 encoder-decoder backbone model.
    T5 is an LLM pretrained on a mix of unsupervised and supervised tasks,
where each task is converted to a sequence-to-sequence format.
T5 works well on a variety of tasks out-of-the-box by prepending
    various prefixes to the input sequence, e.g., for translation:
`"translate English to German: ..."`, for summarization:
`"summarize: ..."`.
T5 was introduced in
[Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683)
The default constructor gives a fully customizable, randomly initialized T5
model with any number of layers, heads, and embedding dimensions. To load
preset architectures and weights, use the `from_preset` constructor.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind.
Args:
vocabulary_size: int. The size of the token vocabulary.
num_layers: int. The number of Transformer layers.
num_heads: int. The number of attention heads for each Transformer.
The hidden size must be divisible by the number of attention heads.
hidden_dim: int. The hidden size of the Transformer layers.
intermediate_dim: int. The output dimension of the first Dense layer in
a two-layer feedforward network for each Transformer layer.
key_value_dim: int. The dimension of each head of the key/value
projections in the multi-head attention layers. Defaults to
hidden_dim / num_heads.
dropout: float. Dropout probability for the Transformer layers.
activation: activation function (or activation string name). The
activation to be used in the inner dense blocks of the
Transformer layers. Defaults to `"relu"`.
use_gated_activation: boolean. Whether to use activation gating in
the inner dense blocks of the Transformer layers.
The original T5 architecture didn't use gating, but more
recent versions do. Defaults to `True`.
layer_norm_epsilon: float. Epsilon factor to be used in the
layer normalization layers in the Transformer layers.
        tie_embedding_weights: boolean. If `True`, the token embedding
            weights are shared with the weights that project language model
            outputs from `hidden_dim` back to the vocabulary.
dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
for model computations and weights. Note that some computations,
such as softmax and layer normalization, will always be done at
float32 precision regardless of dtype.
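    Example usage (a small, randomly initialized configuration; the input
    values below are illustrative):
    ```python
    import numpy as np
    import keras_nlp
    input_data = {
        "encoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
        "encoder_padding_mask": np.ones(shape=(1, 12), dtype="int32"),
        "decoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
        "decoder_padding_mask": np.ones(shape=(1, 12), dtype="int32"),
    }
    backbone = keras_nlp.models.T5Backbone(
        vocabulary_size=32128,
        num_layers=2,
        num_heads=4,
        hidden_dim=256,
        intermediate_dim=512,
    )
    # Returns a dict with "encoder_sequence_output" and "decoder_sequence_output".
    outputs = backbone(input_data)
    ```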
"""
def __init__(
self,
vocabulary_size,
num_layers,
num_heads,
hidden_dim,
intermediate_dim,
key_value_dim=None,
dropout=0.1,
activation="relu",
use_gated_activation=True,
layer_norm_epsilon=1e-06,
tie_embedding_weights=True,
dtype=None,
**kwargs,
):
# Token embedding layer. This layer is shared by encoder and decoder.
self.token_embedding = ReversibleEmbedding(
input_dim=vocabulary_size,
output_dim=hidden_dim,
tie_weights=tie_embedding_weights,
embeddings_initializer=keras.initializers.TruncatedNormal(1.0),
dtype=dtype,
name="token_embedding",
)
self.encoder_embedding_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="encoder_embedding_dropout",
)
self.encoder_transformer_layers = []
for i in range(num_layers):
layer = T5TransformerLayer(
is_decoder=False,
hidden_dim=hidden_dim,
intermediate_dim=intermediate_dim,
key_value_dim=key_value_dim or hidden_dim // num_heads,
dropout=dropout,
activation=activation,
layer_norm_epsilon=layer_norm_epsilon,
num_heads=num_heads,
use_gated_activation=use_gated_activation,
use_relative_attention_bias=bool(i == 0),
dtype=dtype,
name=f"transformer_encoder_layer_{i}",
)
self.encoder_transformer_layers.append(layer)
self.encoder_layer_norm = T5LayerNorm(
epsilon=layer_norm_epsilon,
dtype=dtype,
name="encoder_output_layer_norm",
)
self.encoder_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="encoder_output_dropout",
)
self.decoder_embedding_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="decoder_embedding_dropout",
)
self.decoder_transformer_layers = []
for i in range(num_layers):
layer = T5TransformerLayer(
is_decoder=True,
hidden_dim=hidden_dim,
intermediate_dim=intermediate_dim,
key_value_dim=key_value_dim or hidden_dim // num_heads,
dropout=dropout,
activation=activation,
layer_norm_epsilon=layer_norm_epsilon,
num_heads=num_heads,
use_gated_activation=use_gated_activation,
use_relative_attention_bias=bool(i == 0),
dtype=dtype,
name=f"transformer_decoder_layer_{i}",
)
self.decoder_transformer_layers.append(layer)
self.decoder_layer_norm = T5LayerNorm(
epsilon=layer_norm_epsilon,
dtype=dtype,
name="decoder_output_layer_norm",
)
self.decoder_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="decoder_output_dropout",
)
# === Functional Model ===
encoder_token_id_input = keras.Input(
shape=(None,), dtype="int32", name="encoder_token_ids"
)
encoder_padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="encoder_padding_mask"
)
decoder_token_id_input = keras.Input(
shape=(None,), dtype="int32", name="decoder_token_ids"
)
decoder_padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="decoder_padding_mask"
)
# Encoder.
x = self.token_embedding(encoder_token_id_input)
x = self.encoder_embedding_dropout(x)
encoder_attention_mask = encoder_padding_mask_input[:, None, :]
position_bias = None
for transformer_layer in self.encoder_transformer_layers:
output = transformer_layer(
x,
attention_mask=encoder_attention_mask,
position_bias=position_bias,
use_causal_mask=False,
)
if isinstance(output, tuple):
x, position_bias = output
x = self.encoder_layer_norm(x)
x = self.encoder_dropout(x)
encoder_output = x
# Decoder.
x = self.token_embedding(decoder_token_id_input)
x = self.decoder_embedding_dropout(x)
decoder_attention_mask = decoder_padding_mask_input[:, None, :]
position_bias = None
for transformer_layer in self.decoder_transformer_layers:
output = transformer_layer(
x,
attention_mask=decoder_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_attention_mask,
use_causal_mask=True,
)
if isinstance(output, tuple):
x, position_bias = output
x = self.decoder_layer_norm(x)
x = self.decoder_dropout(x)
decoder_output = x
super().__init__(
{
"encoder_token_ids": encoder_token_id_input,
"encoder_padding_mask": encoder_padding_mask_input,
"decoder_token_ids": decoder_token_id_input,
"decoder_padding_mask": decoder_padding_mask_input,
},
outputs={
"encoder_sequence_output": encoder_output,
"decoder_sequence_output": decoder_output,
},
**kwargs,
)
# === Config ===
self.vocabulary_size = vocabulary_size
self.hidden_dim = hidden_dim
self.intermediate_dim = intermediate_dim
self.num_layers = num_layers
self.num_heads = num_heads
self.activation = keras.activations.get(activation)
self.key_value_dim = key_value_dim
self.dropout = dropout
self.use_gated_activation = use_gated_activation
self.layer_norm_epsilon = layer_norm_epsilon
self.tie_embedding_weights = tie_embedding_weights
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"num_layers": self.num_layers,
"num_heads": self.num_heads,
"activation": keras.activations.serialize(self.activation),
"key_value_dim": self.key_value_dim,
"dropout": self.dropout,
"use_gated_activation": self.use_gated_activation,
"layer_norm_epsilon": self.layer_norm_epsilon,
"tie_embedding_weights": self.tie_embedding_weights,
}
)
return config
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/t5/t5_backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/t5/t5_backbone.py",
"repo_id": "keras-nlp",
"token_count": 5053
} | 118 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Whisper decoder block."""
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder
from keras_nlp.models.whisper.whisper_cached_multi_head_attention import (
WhisperCachedMultiHeadAttention,
)
from keras_nlp.utils.keras_utils import clone_initializer
@keras.saving.register_keras_serializable(package="keras_nlp")
class WhisperDecoder(TransformerDecoder):
"""Whisper decoder.
    Inherits from `keras_nlp.layers.TransformerDecoder`, and overrides the
    `build` method so that both the self-attention and the cross-attention
    layers are
    `keras_nlp.models.whisper.whisper_cached_multi_head_attention.WhisperCachedMultiHeadAttention`
    layers instead of
    `keras_nlp.layers.cached_multi_head_attention.CachedMultiHeadAttention`.
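    A minimal construction sketch (illustrative only; the dimensions below are
    assumptions rather than values from a particular Whisper preset):
    ```python
    decoder_block = WhisperDecoder(intermediate_dim=1536, num_heads=6)
    # Build with (batch, sequence, hidden) shapes for decoder and encoder.
    decoder_block.build(
        decoder_sequence_shape=(None, 448, 384),
        encoder_sequence_shape=(None, 1500, 384),
    )
    ```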
"""
def build(
self,
decoder_sequence_shape,
encoder_sequence_shape,
):
self._decoder_sequence_shape = decoder_sequence_shape
self._encoder_sequence_shape = encoder_sequence_shape
# Infer the dimension of our hidden feature size from the build shape.
hidden_dim = decoder_sequence_shape[-1]
# Attention head size is `hidden_dim` over the number of heads.
head_dim = int(hidden_dim // self.num_heads)
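        # For example (illustrative), hidden_dim=384 and num_heads=6 give
        # head_dim=64.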
if head_dim == 0:
            raise ValueError(
                "The computed attention `head_dim` cannot be zero. "
                f"The `hidden_dim` value of {hidden_dim} has to be equal to "
                f"or greater than the `num_heads` value of {self.num_heads}."
)
# Self attention layers.
self._self_attention_layer = WhisperCachedMultiHeadAttention(
num_heads=self.num_heads,
key_dim=head_dim,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="self_attention",
)
self._self_attention_layer.build(
query_shape=decoder_sequence_shape,
value_shape=decoder_sequence_shape,
)
self._self_attention_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="self_attention_layer_norm",
)
self._self_attention_layer_norm.build(decoder_sequence_shape)
self._self_attention_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="self_attention_dropout",
)
self._cross_attention_layer = WhisperCachedMultiHeadAttention(
num_heads=self.num_heads,
key_dim=head_dim,
value_dim=head_dim,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="cross_attention",
)
self._cross_attention_layer.build(
query_shape=decoder_sequence_shape,
value_shape=encoder_sequence_shape,
)
self._cross_attention_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="cross_attention_layer_norm",
)
self._cross_attention_layer_norm.build(decoder_sequence_shape)
self._cross_attention_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="cross_attention_dropout",
)
# Feedforward layers.
self._feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_intermediate_dense",
)
self._feedforward_intermediate_dense.build(decoder_sequence_shape)
self._feedforward_output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_output_dense",
)
intermediate_shape = list(decoder_sequence_shape)
intermediate_shape[-1] = self.intermediate_dim
self._feedforward_output_dense.build(tuple(intermediate_shape))
self._feedforward_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="feedforward_layer_norm",
)
self._feedforward_layer_norm.build(decoder_sequence_shape)
self._feedforward_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="feedforward_dropout",
)
# Create layers based on input shape.
self.built = True
| keras-nlp/keras_nlp/models/whisper/whisper_decoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_decoder.py",
"repo_id": "keras-nlp",
"token_count": 2568
} | 119 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.multi_segment_packer import (
MultiSegmentPacker,
)
from keras_nlp.models.preprocessor import Preprocessor
from keras_nlp.models.xlm_roberta.xlm_roberta_presets import backbone_presets
from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import (
XLMRobertaTokenizer,
)
from keras_nlp.utils.keras_utils import (
convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.XLMRobertaPreprocessor")
class XLMRobertaPreprocessor(Preprocessor):
"""An XLM-RoBERTa preprocessing layer which tokenizes and packs inputs.
This preprocessing layer will do three things:
1. Tokenize any number of input segments using the `tokenizer`.
     2. Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`
        with the appropriate `"<s>"`, `"</s>"` and `"<pad>"` tokens, i.e., adding
        a single `"<s>"` at the start of the entire sequence, `"</s></s>"` at the
        end of each segment save the last, and a `"</s>"` at the end of the
        entire sequence (see the layout sketch below).
3. Construct a dictionary with keys `"token_ids"` and `"padding_mask"`,
that can be passed directly to an XLM-RoBERTa model.
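    For example, packing the two segments `"a b"` and `"c d"` produces the
    layout `"<s> a b </s> </s> c d </s> <pad> ..."` (illustrative; real inputs
    are tokenized into integer ids before packing).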
This layer can be used directly with `tf.data.Dataset.map` to preprocess
string data in the `(x, y, sample_weight)` format used by
`keras.Model.fit`.
Args:
tokenizer: A `keras_nlp.tokenizers.XLMRobertaTokenizer` instance.
sequence_length: The length of the packed inputs.
truncate: The algorithm to truncate a list of batched segments to fit
within `sequence_length`. The value can be either `round_robin` or
`waterfall`:
- `"round_robin"`: Available space is assigned one token at a
time in a round-robin fashion to the inputs that still need
some, until the limit is reached.
- `"waterfall"`: The allocation of the budget is done using a
"waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run
out of budget. It supports an arbitrary number of segments.
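            For example (illustrative, ignoring the special tokens), with a
            budget of 5 tokens and two segments of 4 tokens each,
            `"round_robin"` keeps 3 and 2 tokens while `"waterfall"` keeps
            4 and 1.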
Call arguments:
x: A tensor of single string sequences, or a tuple of multiple
tensor sequences to be packed together. Inputs may be batched or
unbatched. For single sequences, raw python inputs will be converted
to tensors. For multiple sequences, pass tensors directly.
y: Any label data. Will be passed through unaltered.
sample_weight: Any label weight data. Will be passed through unaltered.
Examples:
Directly calling the layer on data.
```python
preprocessor = keras_nlp.models.XLMRobertaPreprocessor.from_preset(
"xlm_roberta_base_multi"
)
# Tokenize and pack a single sentence.
preprocessor("The quick brown fox jumped.")
# Tokenize a batch of single sentences.
preprocessor(["The quick brown fox jumped.", "اسمي اسماعيل"])
# Preprocess a batch of sentence pairs.
# When handling multiple sequences, always convert to tensors first!
first = tf.constant(["The quick brown fox jumped.", "اسمي اسماعيل"])
second = tf.constant(["The fox tripped.", "الأسد ملك الغابة"])
preprocessor((first, second))
# Custom vocabulary.
def train_sentencepiece(ds, vocab_size):
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
sentence_iterator=ds.as_numpy_iterator(),
model_writer=bytes_io,
vocab_size=vocab_size,
model_type="WORD",
unk_id=0,
bos_id=1,
eos_id=2,
)
return bytes_io.getvalue()
ds = tf.data.Dataset.from_tensor_slices(
["the quick brown fox", "the earth is round"]
)
proto = train_sentencepiece(ds, vocab_size=10)
tokenizer = keras_nlp.models.XLMRobertaTokenizer(proto=proto)
preprocessor = keras_nlp.models.XLMRobertaPreprocessor(tokenizer)
preprocessor("The quick brown fox jumped.")
```
Mapping with `tf.data.Dataset`.
```python
preprocessor = keras_nlp.models.XLMRobertaPreprocessor.from_preset(
"xlm_roberta_base_multi"
)
first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
second = tf.constant(["The fox tripped.", "Oh look, a whale."])
label = tf.constant([1, 1])
# Map labeled single sentences.
ds = tf.data.Dataset.from_tensor_slices((first, label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled single sentences.
ds = tf.data.Dataset.from_tensor_slices(first)
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map labeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices((first, second))
# Watch out for tf.data's default unpacking of tuples here!
# Best to invoke the `preprocessor` directly in this case.
ds = ds.map(
lambda first, second: preprocessor(x=(first, second)),
num_parallel_calls=tf.data.AUTOTUNE,
)
```
"""
def __init__(
self,
tokenizer,
sequence_length=512,
truncate="round_robin",
**kwargs,
):
super().__init__(**kwargs)
self.tokenizer = tokenizer
self.packer = None
self.truncate = truncate
self.sequence_length = sequence_length
def build(self, input_shape):
# Defer packer creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.start_token_id,
end_value=self.tokenizer.end_token_id,
sep_value=[self.tokenizer.end_token_id] * 2,
pad_value=self.tokenizer.pad_token_id,
truncate=self.truncate,
sequence_length=self.sequence_length,
)
self.built = True
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"truncate": self.truncate,
}
)
return config
def call(self, x, y=None, sample_weight=None):
x = convert_inputs_to_list_of_tensor_segments(x)
x = [self.tokenizer(segment) for segment in x]
token_ids, _ = self.packer(x)
x = {
"token_ids": token_ids,
"padding_mask": token_ids != self.tokenizer.pad_token_id,
}
return pack_x_y_sample_weight(x, y, sample_weight)
@property
def sequence_length(self):
"""The padded length of model input sequences."""
return self._sequence_length
@sequence_length.setter
def sequence_length(self, value):
self._sequence_length = value
if self.packer is not None:
self.packer.sequence_length = value
@classproperty
def tokenizer_cls(cls):
return XLMRobertaTokenizer
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py",
"repo_id": "keras-nlp",
"token_count": 3294
} | 120 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import re
import tensorflow as tf
import tree
from absl.testing import parameterized
from keras_nlp.backend import config
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.tokenizers.tokenizer import Tokenizer
from keras_nlp.utils.tensor_utils import is_float_dtype
from keras_nlp.utils.tensor_utils import standardize_dtype
def convert_to_comparible_type(x):
"""Convert tensors to comparable types.
    Any strings are converted to plain python types. Any jax or torch tensors
are converted to numpy.
"""
if getattr(x, "dtype", None) == tf.string:
if isinstance(x, tf.RaggedTensor):
x = x.to_list()
if isinstance(x, tf.Tensor):
x = x.numpy() if x.shape.rank == 0 else x.numpy().tolist()
return tree.map_structure(lambda x: x.decode("utf-8"), x)
if isinstance(x, (tf.Tensor, tf.RaggedTensor)):
return x
if hasattr(x, "__array__"):
return ops.convert_to_numpy(x)
return x
class TestCase(tf.test.TestCase, parameterized.TestCase):
"""Base test case class for KerasNLP."""
def assertAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
# This metric dict hack is only needed for tf.keras, and can be
# removed after we fully migrate to keras-core/Keras 3.
if x1.__class__.__name__ == "_MetricDict":
x1 = dict(x1)
if x2.__class__.__name__ == "_MetricDict":
x2 = dict(x2)
x1 = tree.map_structure(convert_to_comparible_type, x1)
x2 = tree.map_structure(convert_to_comparible_type, x2)
super().assertAllClose(x1, x2, atol=atol, rtol=rtol, msg=msg)
def assertEqual(self, x1, x2, msg=None):
x1 = tree.map_structure(convert_to_comparible_type, x1)
x2 = tree.map_structure(convert_to_comparible_type, x2)
super().assertEqual(x1, x2, msg=msg)
def assertAllEqual(self, x1, x2, msg=None):
x1 = tree.map_structure(convert_to_comparible_type, x1)
x2 = tree.map_structure(convert_to_comparible_type, x2)
super().assertAllEqual(x1, x2, msg=msg)
def assertDTypeEqual(self, x, expected_dtype, msg=None):
input_dtype = standardize_dtype(x.dtype)
super().assertEqual(input_dtype, expected_dtype, msg=msg)
def run_layer_test(
self,
cls,
init_kwargs,
input_data,
expected_output_shape,
expected_output_data=None,
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=0,
run_training_check=True,
run_precision_checks=True,
):
"""Run basic tests for a modeling layer."""
# Serialization test.
layer = cls(**init_kwargs)
self.run_serialization_test(layer)
def run_build_asserts(layer):
self.assertTrue(layer.built)
self.assertLen(
layer.trainable_weights,
expected_num_trainable_weights,
msg="Unexpected number of trainable_weights",
)
self.assertLen(
layer.non_trainable_weights,
expected_num_non_trainable_weights,
msg="Unexpected number of non_trainable_weights",
)
self.assertLen(
layer.non_trainable_variables,
expected_num_non_trainable_variables,
msg="Unexpected number of non_trainable_variables",
)
def run_output_asserts(layer, output, eager=False):
output_shape = tree.map_structure(
lambda x: None if x is None else x.shape, output
)
self.assertEqual(
expected_output_shape,
output_shape,
msg="Unexpected output shape",
)
output_dtype = tree.flatten(output)[0].dtype
self.assertEqual(
standardize_dtype(layer.dtype),
standardize_dtype(output_dtype),
msg="Unexpected output dtype",
)
if eager and expected_output_data is not None:
self.assertAllClose(expected_output_data, output)
def run_training_step(layer, input_data, output_data):
class TestModel(keras.Model):
def __init__(self, layer):
super().__init__()
self.layer = layer
def call(self, x):
if isinstance(x, dict):
return self.layer(**x)
else:
return self.layer(x)
input_data = tree.map_structure(
lambda x: ops.convert_to_numpy(x), input_data
)
output_data = tree.map_structure(
lambda x: ops.convert_to_numpy(x), output_data
)
model = TestModel(layer)
# Temporarily disable jit compilation on torch backend.
jit_compile = config.backend() != "torch"
model.compile(optimizer="sgd", loss="mse", jit_compile=jit_compile)
model.fit(input_data, output_data, verbose=0)
if config.keras_3():
# Build test.
layer = cls(**init_kwargs)
if isinstance(input_data, dict):
shapes = {k + "_shape": v.shape for k, v in input_data.items()}
layer.build(**shapes)
else:
layer.build(input_data.shape)
run_build_asserts(layer)
# Symbolic call test.
keras_tensor_inputs = tree.map_structure(
lambda x: keras.KerasTensor(x.shape, x.dtype), input_data
)
layer = cls(**init_kwargs)
if isinstance(keras_tensor_inputs, dict):
keras_tensor_outputs = layer(**keras_tensor_inputs)
else:
keras_tensor_outputs = layer(keras_tensor_inputs)
run_build_asserts(layer)
run_output_asserts(layer, keras_tensor_outputs)
# Eager call test and compiled training test.
layer = cls(**init_kwargs)
if isinstance(input_data, dict):
output_data = layer(**input_data)
else:
output_data = layer(input_data)
run_output_asserts(layer, output_data, eager=True)
if run_training_check:
run_training_step(layer, input_data, output_data)
if run_precision_checks:
self.run_precision_test(cls, init_kwargs, input_data)
def run_preprocessing_layer_test(
self,
cls,
init_kwargs,
input_data,
expected_output=None,
expected_detokenize_output=None,
):
"""Run basic tests for a preprocessing layer."""
layer = cls(**init_kwargs)
# Check serialization (without a full save).
self.run_serialization_test(layer)
ds = tf.data.Dataset.from_tensor_slices(input_data)
# Run with direct call.
if isinstance(input_data, tuple):
# Mimic tf.data unpacking behavior for preprocessing layers.
output = layer(*input_data)
else:
output = layer(input_data)
# For tokenizers only, also check detokenize.
if isinstance(layer, Tokenizer):
if not expected_detokenize_output:
expected_detokenize_output = input_data
detokenize_output = layer.detokenize(output)
self.assertAllEqual(detokenize_output, expected_detokenize_output)
# Run with an unbatched dataset.
output_ds = ds.map(layer).ragged_batch(1_000)
self.assertAllClose(output, output_ds.get_single_element())
# Run with a batched dataset.
output_ds = ds.batch(1_000).map(layer)
self.assertAllClose(output, output_ds.get_single_element())
if expected_output:
self.assertAllClose(output, expected_output)
def run_preprocessor_test(
self,
cls,
init_kwargs,
input_data,
expected_output=None,
expected_detokenize_output=None,
token_id_key="token_ids",
):
"""Run basic tests for a Model Preprocessor layer."""
self.run_preprocessing_layer_test(
cls,
init_kwargs,
input_data,
expected_output=expected_output,
expected_detokenize_output=expected_detokenize_output,
)
layer = cls(**self.init_kwargs)
if isinstance(input_data, tuple):
output = layer(*input_data)
else:
output = layer(input_data)
output, _, _ = keras.utils.unpack_x_y_sample_weight(output)
shape = ops.shape(output[token_id_key])
self.assertEqual(shape[-1], layer.sequence_length)
# Update the sequence length.
layer.sequence_length = 17
if isinstance(input_data, tuple):
output = layer(*input_data)
else:
output = layer(input_data)
output, _, _ = keras.utils.unpack_x_y_sample_weight(output)
shape = ops.shape(output[token_id_key])
self.assertEqual(shape[-1], 17)
def run_serialization_test(self, instance):
"""Check idempotency of serialize/deserialize.
        Note this is a much faster test than saving."""
run_dir_test = True
# Tokenizers will not initialize the tensorflow trackable system after
# clone, leading to some weird errors here.
if config.backend() == "tensorflow" and isinstance(instance, Tokenizer):
run_dir_test = False
# get_config roundtrip
cls = instance.__class__
cfg = instance.get_config()
cfg_json = json.dumps(cfg, sort_keys=True, indent=4)
ref_dir = dir(instance)[:]
revived_instance = cls.from_config(cfg)
revived_cfg = revived_instance.get_config()
revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4)
self.assertEqual(cfg_json, revived_cfg_json)
if run_dir_test:
self.assertEqual(set(ref_dir), set(dir(revived_instance)))
# serialization roundtrip
serialized = keras.saving.serialize_keras_object(instance)
serialized_json = json.dumps(serialized, sort_keys=True, indent=4)
revived_instance = keras.saving.deserialize_keras_object(
json.loads(serialized_json)
)
revived_cfg = revived_instance.get_config()
revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4)
self.assertEqual(cfg_json, revived_cfg_json)
if run_dir_test:
new_dir = dir(revived_instance)[:]
for lst in [ref_dir, new_dir]:
if "__annotations__" in lst:
lst.remove("__annotations__")
self.assertEqual(set(ref_dir), set(new_dir))
def run_precision_test(self, cls, init_kwargs, input_data):
        # Keras 2 has some errors with non-float32 precision.
if not config.keras_3():
return
# Never test mixed precision on torch CPU. Torch lacks support.
if config.backend() == "torch":
import torch
if not torch.cuda.is_available():
return
for policy in ["mixed_float16", "mixed_bfloat16", "bfloat16"]:
policy = keras.mixed_precision.Policy(policy)
layer = cls(**{**init_kwargs, "dtype": policy})
if isinstance(layer, keras.Model):
output_data = layer(input_data)
elif isinstance(input_data, dict):
output_data = layer(**input_data)
else:
output_data = layer(input_data)
for tensor in tree.flatten(output_data):
if is_float_dtype(tensor.dtype):
self.assertDTypeEqual(tensor, policy.compute_dtype)
for weight in layer.weights:
if is_float_dtype(weight.dtype):
self.assertDTypeEqual(weight, policy.variable_dtype)
for sublayer in layer._flatten_layers(include_self=False):
if isinstance(
sublayer, (keras.layers.Softmax, keras.layers.InputLayer)
):
continue
self.assertEqual(policy.compute_dtype, sublayer.compute_dtype)
self.assertEqual(policy.variable_dtype, sublayer.variable_dtype)
def run_model_saving_test(
self,
cls,
init_kwargs,
input_data,
):
"""Save and load a model from disk and assert output is unchanged."""
model = cls(**init_kwargs)
model_output = model(input_data)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(path)
# Check we got the real object back.
self.assertIsInstance(restored_model, cls)
# Check that output matches.
restored_output = restored_model(input_data)
self.assertAllClose(model_output, restored_output)
def run_backbone_test(
self,
cls,
init_kwargs,
input_data,
expected_output_shape,
variable_length_data=None,
run_mixed_precision_check=True,
):
"""Run basic tests for a backbone, including compilation."""
backbone = cls(**init_kwargs)
# Check serialization (without a full save).
self.run_serialization_test(backbone)
# Call model eagerly.
output = backbone(input_data)
if isinstance(expected_output_shape, dict):
for key in expected_output_shape:
self.assertEqual(output[key].shape, expected_output_shape[key])
else:
self.assertEqual(output.shape, expected_output_shape)
# Check we can embed tokens eagerly.
output = backbone.token_embedding(ops.zeros((2, 3), dtype="int32"))
# Check variable length sequences.
if variable_length_data is None:
# If no variable length data passed, assume the second axis of all
# inputs is our sequence axis and create it ourselves.
variable_length_data = [
tree.map_structure(lambda x: x[:, :seq_length, ...], input_data)
for seq_length in (2, 3, 4)
]
for batch in variable_length_data:
backbone(batch)
# Check compiled predict function.
backbone.predict(input_data)
# Convert to numpy first, torch GPU tensor -> tf.data will error.
numpy_data = tree.map_structure(ops.convert_to_numpy, input_data)
# Create a dataset.
input_dataset = tf.data.Dataset.from_tensor_slices(numpy_data).batch(2)
backbone.predict(input_dataset)
# Check name maps to classname.
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", cls.__name__)
name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
self.assertRegexpMatches(backbone.name, name)
self.run_precision_test(cls, init_kwargs, input_data)
def run_task_test(
self,
cls,
init_kwargs,
train_data,
expected_output_shape=None,
batch_size=2,
    ):
        """Run basic tests for a task, including compilation."""
task = cls(**init_kwargs)
# Check serialization (without a full save).
self.run_serialization_test(task)
preprocessor = task.preprocessor
ds = tf.data.Dataset.from_tensor_slices(train_data).batch(batch_size)
x, y, sw = keras.utils.unpack_x_y_sample_weight(train_data)
# Test predict.
output = task.predict(x)
if expected_output_shape is not None:
output_shape = tree.map_structure(lambda x: x.shape, output)
self.assertAllClose(output_shape, expected_output_shape)
# With a dataset.
output_ds = task.predict(ds)
self.assertAllClose(output, output_ds)
# With split preprocessing.
task.preprocessor = None
output_split = task.predict(ds.map(preprocessor))
task.preprocessor = preprocessor
self.assertAllClose(output, output_split)
# Test fit.
task.fit(x, y, sample_weight=sw)
# With a dataset.
task.fit(ds)
# With split preprocessing.
task.preprocessor = None
task.fit(ds.map(preprocessor))
task.preprocessor = preprocessor
def run_preset_test(
self,
cls,
preset,
input_data,
init_kwargs={},
expected_output=None,
expected_output_shape=None,
expected_partial_output=None,
):
"""Run instantiation and a forward pass for a preset."""
self.assertRegex(cls.from_preset.__doc__, preset)
with self.assertRaises(Exception):
cls.from_preset("clowntown", **init_kwargs)
instance = cls.from_preset(preset, **init_kwargs)
if isinstance(input_data, tuple):
# Mimic tf.data unpacking behavior for preprocessing layers.
output = instance(*input_data)
else:
output = instance(input_data)
if isinstance(instance, keras.Model):
instance = cls.from_preset(
preset, load_weights=False, **init_kwargs
)
instance(input_data)
if expected_output is not None:
self.assertAllClose(output, expected_output)
if expected_output_shape is not None:
output_shape = tree.map_structure(lambda x: x.shape, output)
self.assertAllClose(output_shape, expected_output_shape)
if expected_partial_output is not None:
# Allow passing a partial output snippet of the last dimension.
            # We want to check stability, but the full output is too long.
def compare(actual, expected):
expected = ops.convert_to_numpy(expected)
self.assertEqual(len(expected.shape), 1)
actual = ops.reshape(actual, (-1,))[: expected.shape[0]]
self.assertAllClose(actual, expected, atol=0.01, rtol=0.01)
tree.map_structure(compare, output, expected_partial_output)
def get_test_data_dir(self):
return str(pathlib.Path(__file__).parent / "test_data")
| keras-nlp/keras_nlp/tests/test_case.py/0 | {
"file_path": "keras-nlp/keras_nlp/tests/test_case.py",
"repo_id": "keras-nlp",
"token_count": 8887
} | 121 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import binascii
import os
from typing import List
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers import tokenizer
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import is_int_dtype
from keras_nlp.utils.tensor_utils import is_string_dtype
from keras_nlp.utils.tensor_utils import tensor_to_list
try:
import tensorflow_text as tf_text
except ImportError:
tf_text = None
VOCAB_FILENAME = "vocabulary.spm"
@keras_nlp_export("keras_nlp.tokenizers.SentencePieceTokenizer")
class SentencePieceTokenizer(tokenizer.Tokenizer):
"""A SentencePiece tokenizer layer.
This layer provides an implementation of SentencePiece tokenization
as described in the [SentencePiece paper](https://arxiv.org/abs/1808.06226)
and the [SentencePiece package](https://pypi.org/project/sentencepiece/).
    The tokenization will run entirely within the TensorFlow graph, and can
be saved inside a `keras.Model`.
By default, the layer will output a `tf.RaggedTensor` where the last
dimension of the output is ragged after whitespace splitting and sub-word
tokenizing. If `sequence_length` is set, the layer will output a dense
`tf.Tensor` where all inputs have been padded or truncated to
`sequence_length`. The output dtype can be controlled via the `dtype`
argument, which should be either an integer or string type.
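    For example (a minimal sketch; assumes `proto` already holds a trained
    SentencePiece model):
    ```python
    ragged = keras_nlp.tokenizers.SentencePieceTokenizer(proto=proto)
    ragged(["the quick brown fox"])  # `tf.RaggedTensor` of token ids.
    dense = keras_nlp.tokenizers.SentencePieceTokenizer(
        proto=proto, sequence_length=10
    )
    dense(["the quick brown fox"])  # Dense `tf.Tensor` of shape (1, 10).
    ```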
Args:
proto: Either a `string` path to a SentencePiece proto file, or a
`bytes` object with a serialized SentencePiece proto. See the
[SentencePiece repository](https://github.com/google/sentencepiece)
for more details on the format.
sequence_length: If set, the output will be converted to a dense
tensor and padded/trimmed so all outputs are of `sequence_length`.
References:
- [Kudo and Richardson, 2018](https://arxiv.org/abs/1808.06226)
Examples:
From bytes.
```python
def train_sentence_piece_bytes(ds, size):
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
sentence_iterator=ds.as_numpy_iterator(),
model_writer=bytes_io,
vocab_size=size,
)
return bytes_io.getvalue()
# Train a sentencepiece proto.
ds = tf.data.Dataset.from_tensor_slices(["the quick brown fox."])
proto = train_sentence_piece_bytes(ds, 20)
# Tokenize inputs.
tokenizer = keras_nlp.tokenizers.SentencePieceTokenizer(proto=proto)
ds = ds.map(tokenizer)
```
From a file.
```python
def train_sentence_piece_file(ds, path, size):
with open(path, "wb") as model_file:
sentencepiece.SentencePieceTrainer.train(
sentence_iterator=ds.as_numpy_iterator(),
model_writer=model_file,
vocab_size=size,
)
# Train a sentencepiece proto.
ds = tf.data.Dataset.from_tensor_slices(["the quick brown fox."])
proto = train_sentence_piece_file(ds, "model.spm", 20)
# Tokenize inputs.
tokenizer = keras_nlp.tokenizers.SentencePieceTokenizer(proto="model.spm")
ds = ds.map(tokenizer)
```
"""
def __init__(
self,
proto=None,
sequence_length: int = None,
dtype="int32",
**kwargs,
) -> None:
assert_tf_text_installed(self.__class__.__name__)
if not is_int_dtype(dtype) and not is_string_dtype(dtype):
raise ValueError(
"Output dtype must be an integer type or a string. "
f"Received: dtype={dtype}"
)
super().__init__(dtype=dtype, **kwargs)
self.proto = None
self.sequence_length = sequence_length
self.set_proto(proto)
def save_assets(self, dir_path):
path = os.path.join(dir_path, VOCAB_FILENAME)
with open(path, "wb") as file:
file.write(self.proto)
def load_assets(self, dir_path):
path = os.path.join(dir_path, VOCAB_FILENAME)
self.set_proto(path)
def set_proto(self, proto):
if proto is None:
self.proto = None
self._sentence_piece = None
return
if isinstance(proto, str):
# A string could be either a filepath, or a base64 encoded byte
# array (which we need for serialization). We will heuristically
            # try to distinguish by checking if the string is longer than 2048
            # characters and contains only valid base64 characters.
is_base64 = False
if len(proto) > 2048:
try:
proto_bytes = base64.b64decode(proto, validate=True)
is_base64 = True
except binascii.Error:
pass
if not is_base64:
proto_bytes = open(proto, "rb").read()
elif isinstance(proto, bytes):
proto_bytes = proto
else:
raise ValueError(
"SentencePiece `proto` argument should be either a `string` "
f"filepath or a `bytes` sequence. "
f"Received unknown type: {type(proto)}"
)
self._sentence_piece = tf_text.SentencepieceTokenizer(
model=proto_bytes,
out_type=self.compute_dtype,
)
        # Store the raw proto bytes; the vocabulary is saved as an asset file
        # rather than serialized in the layer config.
self.proto = proto_bytes
def vocabulary_size(self) -> int:
"""Get the size of the tokenizer vocabulary."""
self._check_vocabulary()
return int(self._sentence_piece.vocab_size().numpy())
def get_vocabulary(self) -> List[str]:
"""Get the tokenizer vocabulary."""
self._check_vocabulary()
return tensor_to_list(
self._sentence_piece.id_to_string(
tf.range(int(self._sentence_piece.vocab_size().numpy()))
)
)
def id_to_token(self, id: int) -> str:
"""Convert an integer id to a string token."""
self._check_vocabulary()
if id >= self.vocabulary_size() or id < 0:
raise ValueError(
f"`id` must be in range [0, {self.vocabulary_size() - 1}]. "
f"Received: {id}"
)
return tensor_to_list(self._sentence_piece.id_to_string(id))
def token_to_id(self, token: str) -> int:
"""Convert a string token to an integer id."""
self._check_vocabulary()
return int(self._sentence_piece.string_to_id(token).numpy())
def get_config(self):
config = super().get_config()
config.update(
{
"proto": None, # Save vocabulary via an asset!
"sequence_length": self.sequence_length,
}
)
return config
def _check_vocabulary(self):
if self.proto is None:
raise ValueError(
"No vocabulary has been set for SentencePieceTokenizer. Make "
"sure to pass a `proto` argument when creating the layer."
)
def tokenize(self, inputs):
self._check_vocabulary()
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
scalar_input = inputs.shape.rank == 0
if scalar_input:
inputs = tf.expand_dims(inputs, 0)
if self._sentence_piece is None:
            raise ValueError(
                "No vocabulary has been set for SentencePieceTokenizer. Make "
                "sure to pass a `proto` argument when creating the layer."
)
tokens = self._sentence_piece.tokenize(inputs)
# Convert to a dense output if `sequence_length` is set.
if self.sequence_length:
output_shape = tokens.shape.as_list()
output_shape[-1] = self.sequence_length
tokens = tokens.to_tensor(shape=output_shape)
# Convert to a dense output if input was a scalar.
if scalar_input:
tokens = tf.squeeze(tokens, 0)
tf.ensure_shape(tokens, shape=[self.sequence_length])
return tokens
def detokenize(self, inputs):
self._check_vocabulary()
inputs, unbatched, _ = convert_to_ragged_batch(inputs)
# tf-text sentencepiece does not handle int64.
inputs = tf.cast(inputs, "int32")
outputs = self._sentence_piece.detokenize(inputs)
if unbatched:
outputs = tf.squeeze(outputs, 0)
return outputs
@classproperty
def presets(cls):
return {}
@classmethod
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate {{model_name}} tokenizer from preset vocabulary.
Args:
preset: string. Must be one of "{{preset_names}}".
Examples:
```python
# Load a preset tokenizer.
tokenizer = {{model_name}}.from_preset("{{example_preset_name}}")
# Tokenize some input.
tokenizer("The quick brown fox tripped.")
# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
config_file = "tokenizer.json"
check_preset_class(preset, cls, config_file=config_file)
return load_from_preset(
preset,
config_file=config_file,
config_overrides=kwargs,
)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = (
SentencePieceTokenizer.from_preset.__doc__
)
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets), ""),
preset_names='", "'.join(cls.presets),
)(cls.from_preset.__func__)
| keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/sentence_piece_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 5124
} | 122 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.tests.test_case import TestCase
from keras_nlp.utils.pipeline_model import PipelineModel
class NoopPipeline(PipelineModel):
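    """Pipeline model with no preprocessing."""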
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(1)
def call(self, inputs):
return self.dense(inputs)
class FeaturePipeline(PipelineModel):
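    """Pipeline model that preprocesses string features into numbers."""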
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(1)
def preprocess_samples(self, x, y=None, sample_weight=None):
x = tf.strings.to_number(x)
return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
def call(self, inputs):
return self.dense(inputs)
class LabelPipeline(PipelineModel):
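    """Pipeline model that preprocesses string labels into numbers."""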
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(1)
def preprocess_samples(self, x, y=None, sample_weight=None):
if y is not None:
y = tf.strings.to_number(y)
return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
def call(self, inputs):
return self.dense(inputs)
class DataPipeline(PipelineModel):
"""This model generates labels straight from the input data."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(1)
def preprocess_samples(self, x, y=None, sample_weight=None):
y = x = tf.strings.to_number(x)
return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
def call(self, inputs):
return self.dense(inputs)
class FunctionalPipeline(PipelineModel):
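    """Functional API pipeline that preprocesses string features."""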
def __init__(self, **kwargs):
inputs = keras.Input(shape=(5,))
outputs = keras.layers.Dense(1)(inputs)
super().__init__(inputs, outputs, **kwargs)
def preprocess_samples(self, x, y=None, sample_weight=None):
x = tf.strings.to_number(x)
return keras.utils.pack_x_y_sample_weight(x, y, sample_weight)
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls(**config)
class TestNoopPipelineModel(TestCase):
def test_fit(self):
x = np.random.uniform(size=(8, 5))
y = np.random.uniform(size=(8, 1))
sw = np.random.uniform(size=(8, 1))
model = NoopPipeline()
model.compile(loss="mse")
# With sample weight.
model.fit(x=x, y=y, sample_weight=sw, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.fit(x=x, y=y, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_evaluate(self):
x = np.random.uniform(size=(8, 5))
y = np.random.uniform(size=(8, 1))
sw = np.random.uniform(size=(8, 1))
model = NoopPipeline()
model.compile(loss="mse")
# With sample weight.
model.evaluate(x=x, y=y, sample_weight=sw, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.evaluate(x=x, y=y, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_predict(self):
x = np.random.uniform(size=(8, 5))
model = NoopPipeline()
model.compile(loss="mse")
model.predict(x=x, batch_size=8)
model.predict(tf.data.Dataset.from_tensor_slices(x).batch(8))
def test_on_batch(self):
x = np.random.uniform(size=(8, 5))
y = np.random.uniform(size=(8, 1))
sw = np.random.uniform(size=(8, 1))
model = NoopPipeline()
model.compile(loss="mse")
# With sample weight.
model.train_on_batch(x=x, y=y, sample_weight=sw)
model.test_on_batch(x=x, y=y, sample_weight=sw)
# Without sample weight.
model.train_on_batch(x=x, y=y)
model.test_on_batch(x=x, y=y)
model.predict_on_batch(x=x)
def test_saved_model(self):
model = NoopPipeline()
x = np.random.uniform(size=(8, 5))
model_output = model.predict(x)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(
path, custom_objects={"NoopPipeline": NoopPipeline}
)
# Check we got the real object back.
self.assertIsInstance(restored_model, NoopPipeline)
# Check that output matches.
restored_output = restored_model.predict(x)
self.assertAllClose(model_output, restored_output)
class TestFeaturePreprocessingModel(TestCase):
def test_fit_with_preprocessing(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
y = np.random.uniform(size=(100, 1))
sw = np.random.uniform(size=(100, 1))
model = FeaturePipeline()
model.compile(loss="mse")
# With sample weight.
model.fit(x=x, y=y, sample_weight=sw, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.fit(x=x, y=y, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_evaluate_with_preprocessing(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
y = np.random.uniform(size=(100, 1))
sw = np.random.uniform(size=(100, 1))
model = FeaturePipeline()
model.compile(loss="mse")
# With sample weight.
model.evaluate(x=x, y=y, sample_weight=sw, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.evaluate(x=x, y=y, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_predict_with_preprocessing(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
model = FeaturePipeline()
model.compile(loss="mse")
model.predict(x=x, batch_size=8)
model.predict(tf.data.Dataset.from_tensor_slices(x).batch(8))
def test_on_batch(self):
x = tf.strings.as_string(np.random.uniform(size=(8, 5)))
y = np.random.uniform(size=(8, 1))
sw = np.random.uniform(size=(8, 1))
model = FeaturePipeline()
model.compile(loss="mse")
# With sample weight.
model.train_on_batch(x=x, y=y, sample_weight=sw)
model.test_on_batch(x=x, y=y, sample_weight=sw)
# Without sample weight.
model.train_on_batch(x=x, y=y)
model.test_on_batch(x=x, y=y)
model.predict_on_batch(x=x)
def test_saved_model(self):
model = FeaturePipeline()
x = tf.strings.as_string(np.random.uniform(size=(8, 5)))
model_output = model.predict(x)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(
path, custom_objects={"FeaturePipeline": FeaturePipeline}
)
# Check we got the real object back.
self.assertIsInstance(restored_model, FeaturePipeline)
# Check that output matches.
restored_output = restored_model.predict(x)
self.assertAllClose(model_output, restored_output)
class TestLabelPreprocessingModel(TestCase):
def test_fit_with_preprocessing(self):
x = np.random.uniform(size=(100, 5))
y = tf.strings.as_string(np.random.uniform(size=(100, 1)))
sw = np.random.uniform(size=(100, 1))
model = LabelPipeline()
model.compile(loss="mse")
# With sample weight.
model.fit(x=x, y=y, sample_weight=sw, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.fit(x=x, y=y, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_evaluate_with_preprocessing(self):
x = np.random.uniform(size=(100, 5))
y = tf.strings.as_string(np.random.uniform(size=(100, 1)))
sw = np.random.uniform(size=(100, 1))
model = LabelPipeline()
model.compile(loss="mse")
# With sample weight.
model.evaluate(x=x, y=y, sample_weight=sw, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.evaluate(x=x, y=y, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_predict_with_preprocessing(self):
x = np.random.uniform(size=(100, 5))
model = LabelPipeline()
model.compile(loss="mse")
model.predict(x=x, batch_size=8)
model.predict(tf.data.Dataset.from_tensor_slices(x).batch(8))
def test_on_batch(self):
x = np.random.uniform(size=(8, 5))
y = tf.strings.as_string(np.random.uniform(size=(8, 1)))
sw = np.random.uniform(size=(8, 1))
model = LabelPipeline()
model.compile(loss="mse")
# With sample weight.
model.train_on_batch(x=x, y=y, sample_weight=sw)
model.test_on_batch(x=x, y=y, sample_weight=sw)
# Without sample weight.
model.train_on_batch(x=x, y=y)
model.test_on_batch(x=x, y=y)
model.predict_on_batch(x=x)
def test_saved_model(self):
model = LabelPipeline()
x = np.random.uniform(size=(8, 5))
model_output = model.predict(x)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(
path, custom_objects={"LabelPipeline": LabelPipeline}
)
# Check we got the real object back.
self.assertIsInstance(restored_model, LabelPipeline)
# Check that output matches.
restored_output = restored_model.predict(x)
self.assertAllClose(model_output, restored_output)
class TestDataPreprocessingModel(TestCase):
def test_fit_with_preprocessing(self):
data = tf.strings.as_string(np.random.uniform(size=(100, 1)))
model = DataPipeline()
model.compile(loss="mse")
model.fit(x=data, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices(data).batch(8))
def test_evaluate_with_preprocessing(self):
data = tf.strings.as_string(np.random.uniform(size=(100, 1)))
model = DataPipeline()
model.compile(loss="mse")
model.evaluate(x=data, batch_size=8)
model.evaluate(tf.data.Dataset.from_tensor_slices(data).batch(8))
def test_predict_with_preprocessing(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 1)))
model = DataPipeline()
model.compile(loss="mse")
model.predict(x=x, batch_size=8)
model.predict(tf.data.Dataset.from_tensor_slices(x).batch(8))
def test_on_batch(self):
data = tf.strings.as_string(np.random.uniform(size=(8, 1)))
model = DataPipeline()
model.compile(loss="mse")
        # `DataPipeline` generates labels from the data itself, so there are
        # no labels or sample weights to pass.
        model.train_on_batch(x=data)
        model.test_on_batch(x=data)
        model.train_on_batch(x=data)
        model.test_on_batch(x=data)
model.predict_on_batch(x=data)
def test_saved_model(self):
model = DataPipeline()
data = tf.strings.as_string(np.random.uniform(size=(8, 1)))
model_output = model.predict(data)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(
path, custom_objects={"DataPipeline": DataPipeline}
)
# Check we got the real object back.
self.assertIsInstance(restored_model, DataPipeline)
# Check that output matches.
restored_output = restored_model.predict(data)
self.assertAllClose(model_output, restored_output)
class TestFunctional(TestCase):
def test_fit(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
y = np.random.uniform(size=(100, 1))
sw = np.random.uniform(size=(100, 1))
model = FunctionalPipeline()
model.compile(loss="mse")
# With sample weight.
model.fit(x=x, y=y, sample_weight=sw, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y, sw)).batch(8))
# Without sample weight.
model.fit(x=x, y=y, batch_size=8)
model.fit(tf.data.Dataset.from_tensor_slices((x, y)).batch(8))
def test_saved_model(self):
model = FunctionalPipeline()
x = tf.strings.as_string(np.random.uniform(size=(8, 5)))
model_output = model.predict(x)
path = os.path.join(self.get_temp_dir(), "model.keras")
model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(
path, custom_objects={"FunctionalPipeline": FunctionalPipeline}
)
# Check we got the real object back.
self.assertIsInstance(restored_model, FunctionalPipeline)
# Check that output matches.
restored_output = restored_model.predict(x)
self.assertAllClose(model_output, restored_output)
class TestFitArguments(TestCase):
def test_validation_data(self):
x = tf.strings.as_string(np.random.uniform(size=(80, 5)))
y = np.random.uniform(size=(80, 1))
val_x = tf.strings.as_string(np.random.uniform(size=(20, 5)))
val_y = np.random.uniform(size=(20, 1))
model = FeaturePipeline()
model.compile(loss="mse")
model.fit(x=x, y=y, validation_data=(val_x, val_y), batch_size=8)
model.fit(
x=tf.data.Dataset.from_tensor_slices((x, y)).batch(8),
validation_data=tf.data.Dataset.from_tensor_slices(
(val_x, val_y)
).batch(8),
)
def test_validation_split(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
y = np.random.uniform(size=(100, 1))
model = FeaturePipeline()
model.compile(loss="mse")
model.fit(x=x, y=y, validation_split=0.2, batch_size=8)
def test_error_dataset_and_invalid_arguments(self):
x = tf.strings.as_string(np.random.uniform(size=(100, 5)))
y = np.random.uniform(size=(100, 1))
sw = np.random.uniform(size=(100, 1))
ds = tf.data.Dataset.from_tensor_slices((x, y))
model = FeaturePipeline()
model.compile(loss="mse")
with self.assertRaises(ValueError):
model.fit(ds, validation_split=0.2)
with self.assertRaises(ValueError):
model.fit(ds, batch_size=0.2)
with self.assertRaises(ValueError):
model.fit(ds, y=y)
with self.assertRaises(ValueError):
model.fit(ds, sample_weight=sw)
class TestInputErrors(TestCase):
def test_unbatched_input_raises(self):
model = FeaturePipeline()
with self.assertRaisesRegex(ValueError, "must have a batch dimension"):
model.fit(x=tf.constant("test"))
with self.assertRaisesRegex(ValueError, "must have a batch dimension"):
model.fit(x=tf.constant(["test"]), y=tf.constant(0))
with self.assertRaisesRegex(ValueError, "must have a batch dimension"):
model.fit(
x=tf.constant(["test"]), y=tf.constant([0]), sample_weight=0.0
)
with self.assertRaisesRegex(ValueError, "must have a batch dimension"):
model.fit(x="test")
| keras-nlp/keras_nlp/utils/pipeline_model_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/utils/pipeline_model_test.py",
"repo_id": "keras-nlp",
"token_count": 7547
} | 123 |
# Copyright 2021 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
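# Example of the version line `get_version` looks for (illustrative):
#   __version__ = "0.8.0"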
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
if os.path.exists("keras_nlp/version_utils.py"):
VERSION = get_version("keras_nlp/version_utils.py")
else:
VERSION = get_version("keras_nlp/src/version_utils.py")
setup(
name="keras-nlp",
description=(
"Industry-strength Natural Language Processing extensions for Keras."
),
long_description=README,
long_description_content_type="text/markdown",
version=VERSION,
url="https://github.com/keras-team/keras-nlp",
author="Keras team",
author_email="[email protected]",
license="Apache License 2.0",
install_requires=[
"keras-core",
"absl-py",
"numpy",
"packaging",
"regex",
"rich",
"dm-tree",
"kagglehub",
# Don't require tensorflow-text on MacOS, there are no binaries for ARM.
# Also, we rely on tensorflow *transitively* through tensorflow-text.
        # This avoids a slowdown during `pip install keras-nlp` where pip would
        # download many versions of both libraries to find compatible versions.
"tensorflow-text; platform_system != 'Darwin'",
],
extras_require={
"extras": [
"rouge-score",
"sentencepiece",
],
},
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
| keras-nlp/setup.py/0 | {
"file_path": "keras-nlp/setup.py",
"repo_id": "keras-nlp",
"token_count": 1212
} | 124 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import numpy as np
import tensorflow as tf
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
import keras_nlp
PRESET_MAP = {
"albert_base_en_uncased": "albert-base-v2",
"albert_large_en_uncased": "albert-large-v2",
"albert_extra_large_en_uncased": "albert-xlarge-v2",
"albert_extra_extra_large_en_uncased": "albert-xxlarge-v2",
}
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def convert_checkpoints(hf_model):
print("\n-> Convert original weights to KerasNLP format.")
print("\n-> Load KerasNLP model.")
keras_nlp_model = keras_nlp.models.AlbertBackbone.from_preset(
FLAGS.preset, load_weights=False
)
hf_wts = hf_model.state_dict()
print("Original weights:")
print(list(hf_wts.keys()))
num_heads = keras_nlp_model.num_heads
hidden_dim = keras_nlp_model.hidden_dim
keras_nlp_model.get_layer("token_embedding").embeddings.assign(
hf_wts["embeddings.word_embeddings.weight"]
)
keras_nlp_model.get_layer("position_embedding").position_embeddings.assign(
hf_wts["embeddings.position_embeddings.weight"]
)
keras_nlp_model.get_layer("segment_embedding").embeddings.assign(
hf_wts["embeddings.token_type_embeddings.weight"]
)
keras_nlp_model.get_layer("embeddings_layer_norm").gamma.assign(
hf_wts["embeddings.LayerNorm.weight"]
)
keras_nlp_model.get_layer("embeddings_layer_norm").beta.assign(
hf_wts["embeddings.LayerNorm.bias"]
)
keras_nlp_model.get_layer("embedding_projection").kernel.assign(
hf_wts["encoder.embedding_hidden_mapping_in.weight"].T
)
keras_nlp_model.get_layer("embedding_projection").bias.assign(
hf_wts["encoder.embedding_hidden_mapping_in.bias"]
)
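    # HF stores each attention projection as a (hidden_dim, hidden_dim) matrix,
    # while the KerasNLP attention layers expect kernels of shape
    # (hidden_dim, num_heads, head_dim); hence the transpose + reshape below.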
for i in range(keras_nlp_model.num_groups):
for j in range(keras_nlp_model.num_inner_repetitions):
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._query_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.query.weight"
]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._query_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.query.bias"
]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._key_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.key.weight"
]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._key_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.key.bias"
]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._value_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.value.weight"
]
.transpose(1, 0)
.reshape((hidden_dim, num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._value_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.value.bias"
]
.reshape((num_heads, -1))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._output_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.dense.weight"
]
.transpose(1, 0)
.reshape((num_heads, -1, hidden_dim))
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer._output_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.dense.bias"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer_norm.gamma.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.LayerNorm.weight"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._self_attention_layer_norm.beta.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.attention.LayerNorm.bias"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_intermediate_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.ffn.weight"
]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_intermediate_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.ffn.bias"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_output_dense.kernel.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.ffn_output.weight"
]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_output_dense.bias.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.ffn_output.bias"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_layer_norm.gamma.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.full_layer_layer_norm.weight"
].numpy()
)
keras_nlp_model.get_layer(
f"group_{i}_inner_layer_{j}"
)._feedforward_layer_norm.beta.assign(
hf_wts[
f"encoder.albert_layer_groups.{i}.albert_layers.{j}.full_layer_layer_norm.bias"
].numpy()
)
keras_nlp_model.get_layer("pooled_dense").kernel.assign(
hf_wts["pooler.weight"].transpose(1, 0).numpy()
)
keras_nlp_model.get_layer("pooled_dense").bias.assign(
hf_wts["pooler.bias"].numpy()
)
# Save the model.
print("\n-> Save KerasNLP model weights.")
keras_nlp_model.save_weights(os.path.join(FLAGS.preset, "model.h5"))
return keras_nlp_model
def extract_vocab(hf_tokenizer):
spm_path = os.path.join(FLAGS.preset, "spiece.model")
print(f"\n-> Save KerasNLP SPM vocabulary file to `{spm_path}`.")
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "spiece.model"
),
spm_path,
)
keras_nlp_tokenizer = keras_nlp.models.AlbertTokenizer(
proto=spm_path,
)
keras_nlp_preprocessor = keras_nlp.models.AlbertPreprocessor(
keras_nlp_tokenizer
)
print("-> Print MD5 checksum of the vocab files.")
print(f"`{spm_path}` md5sum: ", get_md5_checksum(spm_path))
return keras_nlp_preprocessor
def check_output(
keras_nlp_preprocessor,
keras_nlp_model,
hf_tokenizer,
hf_model,
):
print("\n-> Check the outputs.")
sample_text = ["cricket is awesome, easily the best sport in the world!"]
# KerasNLP
keras_nlp_inputs = keras_nlp_preprocessor(tf.constant(sample_text))
keras_nlp_output = keras_nlp_model.predict(keras_nlp_inputs)[
"sequence_output"
]
# HF
hf_inputs = hf_tokenizer(
sample_text, padding="max_length", return_tensors="pt"
)
hf_output = hf_model(**hf_inputs).last_hidden_state
print("KerasNLP output:", keras_nlp_output[0, 0, :10])
print("HF output:", hf_output[0, 0, :10])
print("Difference:", np.mean(keras_nlp_output - hf_output.detach().numpy()))
# Show the MD5 checksum of the model weights.
print(
"Model md5sum: ",
get_md5_checksum(os.path.join(FLAGS.preset, "model.h5")),
)
def main(_):
os.makedirs(FLAGS.preset)
hf_model_name = PRESET_MAP[FLAGS.preset]
print("\n-> Load HF model and HF tokenizer.")
hf_model = transformers.AutoModel.from_pretrained(hf_model_name)
hf_model.eval()
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_model_name)
keras_nlp_model = convert_checkpoints(hf_model)
print("\n -> Load KerasNLP preprocessor.")
keras_nlp_preprocessor = extract_vocab(hf_tokenizer)
check_output(
keras_nlp_preprocessor,
keras_nlp_model,
hf_tokenizer,
hf_model,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_albert_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_albert_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 5804
} | 125 |
<jupyter_start><jupyter_text>keras-nlp installation<jupyter_code>!pip install -q git+https://github.com/keras-team/keras-nlp.git tensorflow --upgrade<jupyter_output>Preparing metadata (setup.py) ... [?25l[?25hdone<jupyter_text>Imports<jupyter_code>import keras_nlp
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>Data<jupyter_code># Load Dataset
train_ds, valid_ds = tfds.load(
"glue/sst2",
split=["train", "validation"],
batch_size=16,
)
def split_features(x):
# GLUE comes with dictionary data; we convert it to a uniform format
# (features, label), where features is a tuple consisting of all
# features.
features = x["sentence"]
label = x["label"]
return (features, label)
train_ds = train_ds.map(
split_features, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
valid_ds = valid_ds.map(
split_features, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
# Look at the first training set batch
# The format is (string_tensor, label_tensor)
train_ds.take(1).get_single_element()<jupyter_output><empty_output><jupyter_text>Model<jupyter_code># Create the Classifier Model
# For more details, please see https://keras.io/guides/keras_nlp/getting_started/
classifier = keras_nlp.models.BertClassifier.from_preset(
"bert_tiny_en_uncased", num_classes=2, dropout=0.1
)
# Add loss function, optimizer and metrics
classifier.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.experimental.AdamW(5e-5),
metrics=keras.metrics.SparseCategoricalAccuracy(),
jit_compile=True,
)
# To see the summary (layers) of the model, we need to build it (or call it once),
# but we don't need to do that if we just want to train it on a downstream task.
classifier.layers[-1].build(
[
None,
]
)
classifier.summary()
# Train the model
N_EPOCHS = 2
classifier.fit(
train_ds,
validation_data=valid_ds,
epochs=N_EPOCHS,
)
# Save the weights
model_name = f"bert_tiny_uncased_en_sst2-epochs={N_EPOCHS}.h5"
classifier.save_weights(f"/content/{model_name}")
# Load the weights
weights_path = f"bert_tiny_uncased_en_sst2-epochs={N_EPOCHS}.h5"
classifier.load_weights(weights_path)<jupyter_output><empty_output><jupyter_text>Test the model<jupyter_code># We will shuffle the valid dataset and take 1st example
shuffled_valid_ds = valid_ds.shuffle(55).rebatch(1)
element = shuffled_valid_ds.take(1).get_single_element()
pred_logits = classifier.predict(element[0])
pred = tf.argmax(pred_logits, axis=1)
print(
f"Text :: {element[0].numpy()[0].decode('utf-8')} \nLabel :: {element[1].numpy()[0]} "
f"\nModel Prediction :: {tf.argmax(pred_logits, axis=1).numpy()[0]}"
)
# You can test the model with your own statement!
label_dict = {0: "Bad Statement", 1: "Good Statement"} # As for this dataset
output = tf.argmax(
classifier.predict(["Lord of the rings is best"]), axis=1
).numpy()[0]
print(label_dict[output])<jupyter_output>1/1 [==============================] - 1s 902ms/step
Good Statement | keras-nlp/tools/checkpoint_training/bert_tiny_uncased_en_sst2_training.ipynb/0 | {
"file_path": "keras-nlp/tools/checkpoint_training/bert_tiny_uncased_en_sst2_training.ipynb",
"repo_id": "keras-nlp",
"token_count": 1230
} | 126 |
"""Utilities for real-time data augmentation on image data.
"""
import os
import warnings
import numpy as np
from .iterator import Iterator
from .utils import array_to_img
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of target data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
ignore_class_split: Boolean (default: False), whether to ignore
differences in the number of classes in labels across the train
and validation splits (useful for non-classification tasks)
dtype: Dtype to use for the generated arrays.
"""
def __new__(cls, *args, **kwargs):
try:
from tensorflow.keras.utils import Sequence as TFSequence
if TFSequence not in cls.__bases__:
cls.__bases__ = cls.__bases__ + (TFSequence,)
except ImportError:
pass
return super(NumpyArrayIterator, cls).__new__(cls)
def __init__(self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
ignore_class_split=False,
dtype='float32'):
self.dtype = dtype
if (type(x) is tuple) or (type(x) is list):
if type(x[1]) is not list:
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
'All of the arrays in `x` '
'should have the same length. '
'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
(len(x), len(xx)))
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError('`x` (images tensor) and `y` (labels) '
'should have the same length. '
'Found: x.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError('`x` (images tensor) and `sample_weight` '
'should have the same length. '
'Found: x.shape = %s, sample_weight.shape = %s' %
(np.asarray(x).shape, np.asarray(sample_weight).shape))
if subset is not None:
if subset not in {'training', 'validation'}:
raise ValueError('Invalid subset name:', subset,
'; expected "training" or "validation".')
split_idx = int(len(x) * image_data_generator._validation_split)
if (y is not None and not ignore_class_split and not
np.array_equal(np.unique(y[:split_idx]),
np.unique(y[split_idx:]))):
raise ValueError('Training and validation subsets '
'have different number of classes after '
'the split. If your numpy arrays are '
'sorted by the label, you might want '
'to shuffle them.')
if subset == 'validation':
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn('NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) +
'), i.e. expected either 1, 3, or 4 '
'channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' +
str(self.x.shape) + ' (' +
str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0],
batch_size,
shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=self.dtype)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if batch_x_miscs == []
else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
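# Illustrative usage sketch (an added example; `NumpyArrayIterator` is more
# commonly obtained via `ImageDataGenerator.flow(x, y)` than constructed
# directly):
#
#   import numpy as np
#   from keras_preprocessing.image import ImageDataGenerator, NumpyArrayIterator
#
#   x = np.random.random((8, 32, 32, 3)).astype("float32")
#   y = np.arange(8)
#   datagen = ImageDataGenerator(rotation_range=10)
#   it = NumpyArrayIterator(x, y, datagen, batch_size=4, shuffle=True)
#   batch_x, batch_y = next(it)  # batch_x: (4, 32, 32, 3), batch_y: (4,)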
| keras-preprocessing/keras_preprocessing/image/numpy_array_iterator.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/numpy_array_iterator.py",
"repo_id": "keras-preprocessing",
"token_count": 4345
} | 127 |
import importlib
import inspect
import re
from itertools import compress
import pytest
modules = ['keras_preprocessing',
'keras_preprocessing.image',
'keras_preprocessing.sequence',
'keras_preprocessing.text']
# Tokenizer is being refactored in PR #106
accepted_name = ['set_keras_submodules', 'get_keras_submodule', 'Tokenizer']
accepted_module = []
# Functions or classes with less than 'MIN_CODE_SIZE' lines can be ignored
MIN_CODE_SIZE = 10
def handle_class_init(name, member):
init_args = [
arg for arg in list(inspect.signature(member.__init__).parameters.keys())
if arg not in ['self', 'args', 'kwargs']
]
assert_args_presence(init_args, member.__doc__, member, name)
def handle_class(name, member):
if is_accepted(name, member):
return
if member.__doc__ is None and not member_too_small(member):
raise ValueError("{} class doesn't have any documentation".format(name),
member.__module__, inspect.getmodule(member).__file__)
handle_class_init(name, member)
for n, met in inspect.getmembers(member):
if inspect.ismethod(met):
handle_method(n, met)
def handle_function(name, member):
if is_accepted(name, member) or member_too_small(member):
# We don't need to check this one.
return
doc = member.__doc__
if doc is None:
raise ValueError("{} function doesn't have any documentation".format(name),
member.__module__, inspect.getmodule(member).__file__)
args = list(inspect.signature(member).parameters.keys())
assert_args_presence(args, doc, member, name)
assert_function_style(name, member, doc, args)
assert_doc_style(name, member, doc)
def assert_doc_style(name, member, doc):
lines = doc.split("\n")
first_line = lines[0]
if len(first_line.strip()) == 0:
raise ValueError(
"{} the documentation should be on the first line.".format(name),
member.__module__)
first_blank = [i for i, line in enumerate(lines) if not line.strip()]
if len(first_blank) > 0:
if lines[first_blank[0] - 1].strip()[-1] != '.':
raise ValueError("{} first line should end with a '.'".format(name),
member.__module__)
def assert_function_style(name, member, doc, args):
code = inspect.getsource(member)
has_return = re.findall(r"\s*return \S+", code, re.MULTILINE)
if has_return and "# Returns" not in doc:
innerfunction = [inspect.getsource(x) for x in member.__code__.co_consts if
inspect.iscode(x)]
return_in_sub = [ret for code_inner in innerfunction for ret in
re.findall(r"\s*return \S+", code_inner, re.MULTILINE)]
if len(return_in_sub) < len(has_return):
raise ValueError("{} needs a '# Returns' section".format(name),
member.__module__)
has_raise = re.findall(r"^\s*raise \S+", code, re.MULTILINE)
if has_raise and "# Raises" not in doc:
innerfunction = [inspect.getsource(x) for x in member.__code__.co_consts if
inspect.iscode(x)]
raise_in_sub = [ret for code_inner in innerfunction for ret in
re.findall(r"\s*raise \S+", code_inner, re.MULTILINE)]
if len(raise_in_sub) < len(has_raise):
raise ValueError("{} needs a '# Raises' section".format(name),
member.__module__)
if len(args) > 0 and "# Arguments" not in doc:
raise ValueError("{} needs a '# Arguments' section".format(name),
member.__module__)
assert_blank_before(name, member, doc, ['# Arguments', '# Raises', '# Returns'])
def assert_blank_before(name, member, doc, keywords):
doc_lines = [x.strip() for x in doc.split('\n')]
for keyword in keywords:
if keyword in doc_lines:
index = doc_lines.index(keyword)
if doc_lines[index - 1] != '':
raise ValueError(
"{} '{}' should have a blank line above.".format(name, keyword),
member.__module__)
def is_accepted(name, member):
if 'keras_preprocessing' not in str(member.__module__):
return True
return name in accepted_name or member.__module__ in accepted_module
def member_too_small(member):
code = inspect.getsource(member).split('\n')
return len(code) < MIN_CODE_SIZE
def assert_args_presence(args, doc, member, name):
args_not_in_doc = [arg not in doc for arg in args]
if any(args_not_in_doc):
raise ValueError(
"{} {} arguments are not present in documentation ".format(name, list(
compress(args, args_not_in_doc))), member.__module__, member)
words = doc.replace('*', '').split()
# Check arguments styling
styles = [arg + ":" not in words for arg in args]
if any(styles):
raise ValueError(
"{} {} are not style properly 'argument': documentation".format(
name,
list(compress(args, styles))),
member.__module__)
# Check arguments order
indexes = [words.index(arg + ":") for arg in args]
if indexes != sorted(indexes):
raise ValueError(
"{} arguments order is different from the documentation".format(name),
member.__module__, indexes)
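# Illustrative example (added for clarity, not part of the checks): a
# docstring that passes the rules above has its summary on the first line
# ending with '.', a blank line before '# Arguments' and '# Returns', and
# every argument documented as '<name>:' in signature order.
#
#   def scale(x, factor):
#       """Scales the input by a constant factor.
#
#       # Arguments
#           x: Numpy array, the values to scale.
#           factor: Float, the scaling factor.
#
#       # Returns
#           The scaled Numpy array.
#       """
#       return x * factor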
def handle_method(name, member):
if name in accepted_name or member.__module__ in accepted_module:
return
handle_function(name, member)
def handle_module(mod):
for name, mem in inspect.getmembers(mod):
if inspect.isclass(mem):
handle_class(name, mem)
elif inspect.isfunction(mem):
handle_function(name, mem)
elif 'keras_preprocessing' in name and inspect.ismodule(mem):
# Only test keras_preprocessing' modules
handle_module(mem)
def test_doc():
for module in modules:
mod = importlib.import_module(module)
handle_module(mod)
if __name__ == '__main__':
pytest.main([__file__])
| keras-preprocessing/tests/test_documentation.py/0 | {
"file_path": "keras-preprocessing/tests/test_documentation.py",
"repo_id": "keras-preprocessing",
"token_count": 2623
} | 128 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_tuner/tuners/'" />
| keras-tuner/docs/site/documentation/tuners/index.html/0 | {
"file_path": "keras-tuner/docs/site/documentation/tuners/index.html",
"repo_id": "keras-tuner",
"token_count": 37
} | 129 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Keras backend module.
This module adds a temporary Keras API surface that is fully under KerasTuner
control. This allows us to switch between Keras 3 and `tf.keras`, as well
as add shims to support older versions of `tf.keras`.
- `config`: check which backend is being run.
- `keras`: The full `keras` API (via `keras` 3 or `tf.keras`).
- `ops`: `keras.ops`, always tf backed if using `tf.keras`.
"""
from keras_tuner.backend import config
from keras_tuner.backend import io
from keras_tuner.backend import keras
from keras_tuner.backend import ops
from keras_tuner.backend import random
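# Illustrative usage (an added sketch; the exact branch taken depends on the
# installed Keras/TensorFlow versions):
#
#   from keras_tuner.backend import config, keras
#
#   if config.multi_backend():
#       print("Running against Keras 3")
#   layer = keras.layers.Dense(4)  # same `keras` API either way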
| keras-tuner/keras_tuner/backend/__init__.py/0 | {
"file_path": "keras-tuner/keras_tuner/backend/__init__.py",
"repo_id": "keras-tuner",
"token_count": 347
} | 130 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tuner base class."
import copy
import os
import traceback
import warnings
from keras_tuner import backend
from keras_tuner import config as config_module
from keras_tuner import errors
from keras_tuner import utils
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.distribute import utils as dist_utils
from keras_tuner.engine import hypermodel as hm_module
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import stateful
from keras_tuner.engine import trial as trial_module
from keras_tuner.engine import tuner_utils
@keras_tuner_export(["keras_tuner.engine.base_tuner.BaseTuner"])
class BaseTuner(stateful.Stateful):
"""Tuner base class.
`BaseTuner` is the super class of all `Tuner` classes. It defines the APIs
for the `Tuner` classes and serves as a wrapper class for the internal
logic.
`BaseTuner` supports parallel tuning. In parallel tuning, the communication
between `BaseTuner` and `Oracle` goes entirely through gRPC. There are
multiple running instances of `BaseTuner` but only one `Oracle`. This design
allows the user to run the same script on multiple machines to launch the
parallel tuning.
The `Oracle` instance should manage the life cycles of all the `Trial`s,
while a `BaseTuner` is a worker for running the `Trial`s. `BaseTuner`s
request `Trial`s from the `Oracle`, run them, and report the results back
to the `Oracle`. A `BaseTuner` also handles events happening while running
the `Trial`, like saving the model, logging, and error handling. Other than
these responsibilities, a `BaseTuner` should avoid managing a `Trial` since
the relevant contexts for a `Trial` are in the `Oracle`, which is only
accessible via gRPC.
The `BaseTuner` should be a general tuner for all types of models and avoid
any logic directly related to Keras. The Keras related logics should be
handled by the `Tuner` class, which is a subclass of `BaseTuner`.
Args:
oracle: Instance of Oracle class.
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a `Model` instance). It is optional
when `Tuner.run_trial()` is overridden and does not use
`self.hypermodel`.
directory: A string, the relative path to the working directory.
project_name: A string, the name to use as prefix for files saved by
this Tuner.
overwrite: Boolean, defaults to `False`. If `False`, reloads an
existing project of the same name if one is found. Otherwise,
overwrites the project.
**kwargs: Arguments for backward compatibility.
Attributes:
remaining_trials: Number of trials remaining, `None` if `max_trials` is
not set. This is useful when resuming a previously stopped search.
"""
def __init__(
self,
oracle,
hypermodel=None,
directory=None,
project_name=None,
overwrite=False,
**kwargs,
):
if not isinstance(oracle, oracle_module.Oracle):
raise ValueError(
"Expected `oracle` argument to be an instance of `Oracle`. "
f"Received: oracle={oracle} (of type ({type(oracle)})."
)
logger = kwargs.pop("logger", None)
if logger is not None:
warnings.warn(
"The `logger` argument in `BaseTuner.__init__() is "
"no longer supported and will be ignored.",
DeprecationWarning,
stacklevel=2,
)
self.logger = logger
if len(kwargs) > 0:
raise ValueError(
f"Unrecognized arguments {list(kwargs.keys())} "
"for `BaseTuner.__init__()`."
)
self.oracle = oracle
self.hypermodel = hm_module.get_hypermodel(hypermodel)
# Ops and metadata
self.directory = directory or "."
self.project_name = project_name or "untitled_project"
self.oracle._set_project_dir(self.directory, self.project_name)
if overwrite and backend.io.exists(self.project_dir):
backend.io.rmtree(self.project_dir)
# To support tuning distribution.
self.tuner_id = os.environ.get("KERASTUNER_TUNER_ID", "tuner0")
# Reloading state.
if not overwrite and backend.io.exists(self._get_tuner_fname()):
print(f"Reloading Tuner from {self._get_tuner_fname()}")
self.reload()
else:
# Only populate initial space if not reloading.
self._populate_initial_space()
# Run in distributed mode.
if dist_utils.has_chief_oracle() and not dist_utils.is_chief_oracle():
# Proxies requests to the chief oracle.
# Avoid import at the top, to avoid inconsistent protobuf versions.
from keras_tuner.distribute import oracle_client
self.oracle = oracle_client.OracleClient(self.oracle)
def _activate_all_conditions(self):
# Lists of stacks of conditions used during `explore_space()`.
scopes_never_active = []
scopes_once_active = []
hp = self.oracle.get_space()
while True:
self.hypermodel.build(hp)
self.oracle.update_space(hp)
# Update the recorded scopes.
for conditions in hp.active_scopes:
if conditions not in scopes_once_active:
scopes_once_active.append(copy.deepcopy(conditions))
if conditions in scopes_never_active:
scopes_never_active.remove(conditions)
for conditions in hp.inactive_scopes:
if conditions not in scopes_once_active:
scopes_never_active.append(copy.deepcopy(conditions))
# All conditional scopes are activated.
if not scopes_never_active:
break
# Generate new values to activate new conditions.
hp = self.oracle.get_space()
conditions = scopes_never_active[0]
for condition in conditions:
hp.values[condition.name] = condition.values[0]
hp.ensure_active_values()
def _populate_initial_space(self):
"""Populate initial search space for oracle.
Keep this function as a subroutine for AutoKeras to override. The space
may not be ready at the initialization of the tuner, but only after
seeing the training data.
It builds the hypermodel multiple times to find all conditional hps,
generating hp values based on the not-yet-activated `conditional_scopes`
found in the builds.
"""
if self.hypermodel is None:
return
# declare_hyperparameters is not overridden.
hp = self.oracle.get_space()
self.hypermodel.declare_hyperparameters(hp)
self.oracle.update_space(hp)
self._activate_all_conditions()
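# Illustrative note (an added sketch of a hypothetical hypermodel): a search
# space that needs several builds to fully populate could look like:
#
#   def build(hp):
#       model_type = hp.Choice("model_type", ["mlp", "cnn"])
#       with hp.conditional_scope("model_type", ["cnn"]):
#           hp.Int("filters", 32, 128)  # only active when model_type == "cnn"
#       ...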
def search(self, *fit_args, **fit_kwargs):
"""Performs a search for best hyperparameter configuations.
Args:
*fit_args: Positional arguments that should be passed to
`run_trial`, for example the training and validation data.
**fit_kwargs: Keyword arguments that should be passed to
`run_trial`, for example the training and validation data.
"""
if "verbose" in fit_kwargs:
verbose = fit_kwargs.get("verbose")
# Only set verbosity on chief or when not running in parallel.
if (
not dist_utils.has_chief_oracle()
or dist_utils.is_chief_oracle()
):
self.oracle.verbose = verbose
if dist_utils.is_chief_oracle():
# Blocks until all the trials are finished.
# Avoid import at the top, to avoid inconsistent protobuf versions.
from keras_tuner.distribute import oracle_chief
self.save()
oracle_chief.start_server(self.oracle)
return
self.on_search_begin()
while True:
self.pre_create_trial()
trial = self.oracle.create_trial(self.tuner_id)
if trial.status == trial_module.TrialStatus.STOPPED:
# Oracle triggered exit.
break
if trial.status == trial_module.TrialStatus.IDLE:
# Oracle is calculating, resend request.
continue
self.on_trial_begin(trial)
self._try_run_and_update_trial(trial, *fit_args, **fit_kwargs)
self.on_trial_end(trial)
self.on_search_end()
def _run_and_update_trial(self, trial, *fit_args, **fit_kwargs):
results = self.run_trial(trial, *fit_args, **fit_kwargs)
if self.oracle.get_trial(trial.trial_id).metrics.exists(
self.oracle.objective.name
):
# The oracle is updated by calling `self.oracle.update_trial()` in
# `Tuner.run_trial()`. For backward compatibility, we support this
# use case. No further action needed in this case.
warnings.warn(
"The use case of calling "
"`self.oracle.update_trial(trial_id, metrics)` "
"in `Tuner.run_trial()` to report the metrics is deprecated, "
"and will be removed in the future."
"Please remove the call and do 'return metrics' "
"in `Tuner.run_trial()` instead. ",
DeprecationWarning,
stacklevel=2,
)
return
tuner_utils.validate_trial_results(
results, self.oracle.objective, "Tuner.run_trial()"
)
self.oracle.update_trial(
trial.trial_id,
# Convert to dictionary before calling `update_trial()`
# to pass it from gRPC.
tuner_utils.convert_to_metrics_dict(
results,
self.oracle.objective,
),
step=tuner_utils.get_best_step(results, self.oracle.objective),
)
def _try_run_and_update_trial(self, trial, *fit_args, **fit_kwargs):
try:
self._run_and_update_trial(trial, *fit_args, **fit_kwargs)
trial.status = trial_module.TrialStatus.COMPLETED
return
except Exception as e:
if isinstance(e, errors.FatalError):
raise e
if config_module.DEBUG:
# Printing the stacktrace and the error.
traceback.print_exc()
if isinstance(e, errors.FailedTrialError):
trial.status = trial_module.TrialStatus.FAILED
else:
trial.status = trial_module.TrialStatus.INVALID
# Include the stack traces in the message.
message = traceback.format_exc()
trial.message = message
def run_trial(self, trial, *fit_args, **fit_kwargs):
"""Evaluates a set of hyperparameter values."""
raise NotImplementedError
def save_model(self, trial_id, model, step=0):
"""Saves a Model for a given trial.
Args:
trial_id: The ID of the `Trial` corresponding to this Model.
model: The trained model.
step: Integer, for models that report intermediate results to the
`Oracle`, the step the saved file correspond to. For example,
for Keras models this is the number of epochs trained.
"""
raise NotImplementedError
def load_model(self, trial):
"""Loads a Model from a given trial.
For models that report intermediate results to the `Oracle`, generally
`load_model` should load the best reported `step` by relying on
`trial.best_step`.
Args:
trial: A `Trial` instance, the `Trial` corresponding to the model
to load.
"""
raise NotImplementedError
def pre_create_trial(self):
"""Called before self.oracle.create_trial and before on_trial_begin."""
def on_trial_begin(self, trial):
"""Called at the beginning of a trial.
Args:
trial: A `Trial` instance.
"""
pass
def on_trial_end(self, trial):
"""Called at the end of a trial.
Args:
trial: A `Trial` instance.
"""
self.oracle.end_trial(trial)
self.save()
def on_search_begin(self):
"""Called at the beginning of the `search` method."""
pass
def on_search_end(self):
"""Called at the end of the `search` method."""
pass
def get_best_models(self, num_models=1):
"""Returns the best model(s), as determined by the objective.
This method is for querying the models trained during the search.
For best performance, it is recommended to retrain your Model on the
full dataset using the best hyperparameters found during `search`,
which can be obtained using `tuner.get_best_hyperparameters()`.
Args:
num_models: Optional number of best models to return.
Defaults to 1.
Returns:
List of trained models sorted from the best to the worst.
"""
best_trials = self.oracle.get_best_trials(num_models)
models = [self.load_model(trial) for trial in best_trials]
return models
def get_best_hyperparameters(self, num_trials=1):
"""Returns the best hyperparameters, as determined by the objective.
This method can be used to reinstantiate the (untrained) best model
found during the search process.
Example:
```python
best_hp = tuner.get_best_hyperparameters()[0]
model = tuner.hypermodel.build(best_hp)
```
Args:
num_trials: Optional number of `HyperParameters` objects to return.
Returns:
List of `HyperParameter` objects sorted from the best to the worst.
"""
return [
t.hyperparameters for t in self.oracle.get_best_trials(num_trials)
]
def search_space_summary(self, extended=False):
"""Print search space summary.
This method prints a summary of the hyperparameters in the search
space, which can be called before calling the `search` method.
Args:
extended: Optional boolean, whether to display an extended summary.
Defaults to False.
"""
print("Search space summary")
hp = self.oracle.get_space()
print(f"Default search space size: {len(hp.space)}")
for p in hp.space:
config = p.get_config()
name = config.pop("name")
print(f"{name} ({p.__class__.__name__})")
print(config)
def results_summary(self, num_trials=10):
"""Display tuning results summary.
The method prints a summary of the search results including the
hyperparameter values and evaluation results for each trial.
Args:
num_trials: Optional number of trials to display. Defaults to 10.
"""
print("Results summary")
print(f"Results in {self.project_dir}")
print("Showing %d best trials" % num_trials)
print(f"{self.oracle.objective}")
best_trials = self.oracle.get_best_trials(num_trials)
for trial in best_trials:
print()
trial.summary()
@property
def remaining_trials(self):
"""Returns the number of trials remaining.
Will return `None` if `max_trials` is not set. This is useful when
resuming a previously stopped search.
"""
return self.oracle.remaining_trials()
def get_state(self):
return {}
def set_state(self, state):
pass
def _is_worker(self):
"""Return true only if in parallel tuning and is a worker tuner."""
return (
dist_utils.has_chief_oracle() and not dist_utils.is_chief_oracle()
)
def save(self):
"""Saves this object to its project directory."""
if not self._is_worker():
self.oracle.save()
super().save(self._get_tuner_fname())
def reload(self):
"""Reloads this object from its project directory."""
if not self._is_worker():
self.oracle.reload()
super().reload(self._get_tuner_fname())
@property
def project_dir(self):
dirname = os.path.join(str(self.directory), self.project_name)
utils.create_directory(dirname)
return dirname
def get_trial_dir(self, trial_id):
dirname = os.path.join(str(self.project_dir), f"trial_{str(trial_id)}")
utils.create_directory(dirname)
return dirname
def _get_tuner_fname(self):
return os.path.join(str(self.project_dir), f"{str(self.tuner_id)}.json")
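# Illustrative sketch (an added example, not part of the library): a minimal
# `BaseTuner` subclass only needs to override `run_trial()` and return the
# objective value; `my_cross_val_score` is a hypothetical helper.
#
#   class SklearnStyleTuner(BaseTuner):
#       def run_trial(self, trial, x, y):
#           hp = trial.hyperparameters
#           c = hp.Float("c", 1e-3, 10, sampling="log")
#           return my_cross_val_score(c, x, y)  # single float objective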
| keras-tuner/keras_tuner/engine/base_tuner.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/base_tuner.py",
"repo_id": "keras-tuner",
"token_count": 7479
} | 131 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_tuner import protos
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine.hyperparameters import hp_types
def test_int_sampling_arg():
i = hp_module.Int("i", 0, 10, sampling="linear")
i = hp_module.Int.from_config(i.get_config())
assert i.sampling == "linear"
with pytest.raises(ValueError, match="sampling must be one of"):
hp_module.Int("j", 0, 10, sampling="invalid")
with pytest.raises(
ValueError,
match="min_value 1 is greater than the max_value 0",
):
hp_module.Int("k", 1, 0, sampling="linear")
with pytest.raises(
ValueError,
match="does not support negative values",
):
hp_module.Int("k", -10, -1, sampling="log")
with pytest.raises(
ValueError,
match="For HyperParameters.Int\(name='k'\), expected step > 1",
):
hp_module.Int("k", 1, 10, step=1, sampling="log")
def test_int():
rg = hp_module.Int("rg", min_value=5, max_value=9, step=1, default=6)
rg = hp_module.Int.from_config(rg.get_config())
assert rg.default == 6
assert 5 <= rg.random_sample() <= 9
assert isinstance(rg.random_sample(), int)
assert rg.random_sample(123) == rg.random_sample(123)
assert abs(rg.value_to_prob(6) - 0.3) < 1e-4
# No default
rg = hp_module.Int("rg", min_value=5, max_value=9, step=1)
assert rg.default == 5
def test_int_log_with_step():
rg = hp_module.Int("rg", min_value=2, max_value=32, step=2, sampling="log")
for _ in range(10):
assert rg.random_sample() in [2, 4, 8, 16, 32]
assert abs(rg.value_to_prob(4) - 0.3) < 1e-4
assert rg.prob_to_value(0.3) == 4
def test_int_log_without_step():
rg = hp_module.Int("rg", min_value=2, max_value=32, sampling="log")
assert rg.prob_to_value(rg.value_to_prob(4)) == 4
def test_int_proto():
hp = hp_module.Int("a", 1, 100, sampling="log")
proto = hp.to_proto()
assert proto.name == "a"
assert proto.min_value == 1
assert proto.max_value == 100
assert proto.sampling == protos.get_proto().Sampling.LOG
# Proto stores the implicit default.
assert proto.default == 1
assert proto.step == 0
new_hp = hp_module.Int.from_proto(proto)
assert new_hp._default == 1
# Pop the implicit default for comparison purposes.
new_hp._default = None
assert new_hp.get_config() == hp.get_config()
def test_int_raise_error_with_float_min_value():
with pytest.raises(ValueError, match="must be an int"):
hp_module.Int("j", 0.5, 10)
def test_repr_int_is_str():
assert "name: 'j'" in repr(hp_module.Int("j", 1, 10))
def test_serialize_deserialize_int():
hp = hp_module.Int("j", 1, 10)
new_hp = hp_module.deserialize(hp_module.serialize(hp))
assert repr(hp) == repr(new_hp)
hp = hp_module.Int("j", 1, 10)
new_hp = hp_types.deserialize(hp_types.serialize(hp))
assert repr(hp) == repr(new_hp)
def test_int_values_property_with_step():
assert list(hp_module.Int("int", 2, 8, 2).values) == [2, 4, 6, 8]
assert isinstance(list(hp_module.Int("int", 2, 8, 2).values)[0], int)
assert list(hp_module.Int("int", 2, 8, 2, sampling="log").values) == [
2,
4,
8,
]
def test_int_values_property_without_step():
assert list(hp_module.Int("int", 2, 4).values) == [2, 3, 4]
assert list(hp_module.Int("int", 2, 20).values) == list(range(2, 21))
assert len(list(hp_module.Int("int", 2, 1024, sampling="log").values)) == 10
def test_sampling_none_is_linear():
# This is for backward compatibility
assert hp_module.Int("int", 2, 4, sampling=None).sampling == "linear"
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/int_hp_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/int_hp_test.py",
"repo_id": "keras-tuner",
"token_count": 1767
} | 132 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
import pytest
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import metrics_tracking
from keras_tuner.engine import trial as trial_module
def test_trial_proto():
hps = hp_module.HyperParameters()
hps.Int("a", 0, 10, default=3)
trial = trial_module.Trial(hps, trial_id="trial1", status="COMPLETED")
trial.metrics.register("score", direction="max")
trial.metrics.update("score", 10, step=1)
proto = trial.to_proto()
assert len(proto.hyperparameters.space.int_space) == 1
assert proto.hyperparameters.values.values["a"].int_value == 3
assert not proto.HasField("score")
new_trial = trial_module.Trial.from_proto(proto)
assert new_trial.status == trial.status
assert new_trial.hyperparameters.get("a") == 3
assert new_trial.trial_id == trial.trial_id
assert new_trial.score == trial.score
assert new_trial.best_step == trial.best_step
trial.score = -10
trial.best_step = 3
proto = trial.to_proto()
assert proto.HasField("score")
assert proto.score.value == -10
assert proto.score.step == 3
new_trial = trial_module.Trial.from_proto(proto)
assert new_trial.score == -10
assert new_trial.best_step == 3
assert new_trial.metrics.get_history("score") == [
metrics_tracking.MetricObservation(10, step=1)
]
def test_trial_status_proto():
assert (
trial_module.TrialStatus.from_proto(
trial_module.TrialStatus.to_proto(None)
)
is None
)
assert (
trial_module.TrialStatus.from_proto(
trial_module.TrialStatus.to_proto(trial_module.TrialStatus.IDLE)
)
== trial_module.TrialStatus.IDLE
)
assert (
trial_module.TrialStatus.from_proto(
trial_module.TrialStatus.to_proto(trial_module.TrialStatus.FAILED)
)
== trial_module.TrialStatus.FAILED
)
assert (
trial_module.TrialStatus.from_proto(
trial_module.TrialStatus.to_proto(trial_module.TrialStatus.INVALID)
)
== trial_module.TrialStatus.INVALID
)
with pytest.raises(ValueError, match="Unknown status"):
trial_module.TrialStatus.to_proto("OTHER")
with pytest.raises(ValueError, match="Unknown status"):
trial_module.TrialStatus.from_proto(16)
def test_trial_error_in_summary():
error_message = "stack_trace\nerror_type\n"
trial = trial_module.Trial(
hyperparameters=hp_module.HyperParameters(),
trial_id="3",
status=trial_module.TrialStatus.FAILED,
message=error_message,
)
stdout = io.StringIO()
sys.stdout = stdout
trial.summary()
sys.stdout = sys.__stdout__
assert error_message in stdout.getvalue()
| keras-tuner/keras_tuner/engine/trial_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/trial_test.py",
"repo_id": "keras-tuner",
"token_count": 1312
} | 133 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
from keras_tuner import backend
from keras_tuner import utils
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import tuner as tuner_module
@keras_tuner_export("keras_tuner.oracles.HyperbandOracle")
class HyperbandOracle(oracle_module.Oracle):
"""Oracle class for Hyperband.
Note that to use this Oracle with your own subclassed Tuner, your Tuner
class must be able to handle in `Tuner.run_trial` three special
hyperparameters that will be set by this Tuner:
- "tuner/trial_id": String, optionally set. The trial_id of the Trial to
load from when starting this trial.
- "tuner/initial_epoch": Int, always set. The initial epoch the Trial should
be started from.
- "tuner/epochs": Int, always set. The cumulative number of epochs this
Trial should be trained.
These hyperparameters will be set during the "successive halving" portion
of the Hyperband algorithm.
Examples:
```python
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
if "tuner/trial_id" in hp:
past_trial = self.oracle.get_trial(hp['tuner/trial_id'])
model = self.load_model(past_trial)
else:
model = self.hypermodel.build(hp)
initial_epoch = hp['tuner/initial_epoch']
last_epoch = hp['tuner/epochs']
for epoch in range(initial_epoch, last_epoch):
self.on_epoch_begin(...)
for step in range(...):
# Run model training step here.
self.on_epoch_end(...)
```
Args:
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
objectives to minimize minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_epochs: Integer, the maximum number of epochs to train one model.
It is recommended to set this to a value slightly higher than the
expected epochs to convergence for your largest Model, and to use
early stopping during training (for example, via
`tf.keras.callbacks.EarlyStopping`). Defaults to 100.
factor: Integer, the reduction factor for the number of epochs
and number of models for each bracket. Defaults to 3.
hyperband_iterations: Integer, at least 1, the number of times to
iterate over the full Hyperband algorithm. One iteration will run
approximately `max_epochs * (math.log(max_epochs, factor) ** 2)`
cumulative epochs across all trials. It is recommended to set this
to as high a value as is within your resource budget. Defaults to
1.
seed: Optional integer, the random seed.
hyperparameters: Optional HyperParameters instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
max_retries_per_trial: Integer. Defaults to 0. The maximum number of
times to retry a `Trial` if the trial crashed or the results are
invalid.
max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
number of consecutive failed `Trial`s. When this number is reached,
the search will be stopped. A `Trial` is marked as failed when none
of the retries succeeded.
"""
def __init__(
self,
objective=None,
max_epochs=100,
factor=3,
hyperband_iterations=1,
seed=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
max_retries_per_trial=0,
max_consecutive_failed_trials=3,
):
super().__init__(
objective=objective,
hyperparameters=hyperparameters,
allow_new_entries=allow_new_entries,
tune_new_entries=tune_new_entries,
seed=seed,
max_retries_per_trial=max_retries_per_trial,
max_consecutive_failed_trials=max_consecutive_failed_trials,
)
if factor < 2:
raise ValueError("factor needs to be a int larger than 1.")
self.hyperband_iterations = hyperband_iterations or float("inf")
self.max_epochs = max_epochs
# Minimum epochs before successive halving; Hyperband sweeps through
# varying degrees of aggressiveness.
self.min_epochs = 1
self.factor = factor
self._current_iteration = 0
# Start with most aggressively halving bracket.
self._current_bracket = self._get_num_brackets() - 1
self._brackets = []
self._start_new_bracket()
def populate_space(self, trial_id):
"""Fill the hyperparameter space with values.
Args:
trial_id: A string, the ID for this Trial.
Returns:
A dictionary with keys "values" and "status", where "values" is
a mapping of parameter names to suggested values, and "status"
should be one of "RUNNING" (the trial can start normally), "IDLE"
(the oracle is waiting on something and cannot create a trial), or
"STOPPED" (the oracle has finished searching and no new trial should
be created).
"""
self._remove_completed_brackets()
for bracket in self._brackets:
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
if len(rounds[0]) < self._get_size(bracket_num, round_num=0):
# Populate the initial random trials for this bracket.
return self._random_trial(trial_id, bracket)
# Try to populate incomplete rounds for this bracket.
for round_num in range(1, len(rounds)):
round_info = rounds[round_num]
past_round_info = rounds[round_num - 1]
size = self._get_size(bracket_num, round_num)
past_size = self._get_size(bracket_num, round_num - 1)
# If more trials from the last round are ready than will be
# thrown out, we can select the best to run for the next round.
already_selected = [info["past_id"] for info in round_info]
candidates = [
self.trials[info["id"]]
for info in past_round_info
if info["id"] not in already_selected
]
candidates = [t for t in candidates if t.status == "COMPLETED"]
if len(candidates) > past_size - size:
sorted_candidates = sorted(
candidates,
key=lambda t: t.score,
reverse=self.objective.direction == "max",
)
best_trial = sorted_candidates[0]
values = best_trial.hyperparameters.values.copy()
values["tuner/trial_id"] = best_trial.trial_id
values["tuner/epochs"] = self._get_epochs(
bracket_num, round_num
)
values["tuner/initial_epoch"] = self._get_epochs(
bracket_num, round_num - 1
)
values["tuner/bracket"] = self._current_bracket
values["tuner/round"] = round_num
round_info.append(
{"past_id": best_trial.trial_id, "id": trial_id}
)
return {"status": "RUNNING", "values": values}
# This is reached if no trials from current brackets can be run.
# Max sweeps has been reached, no more brackets should be created.
if (
self._current_bracket == 0
and self._current_iteration + 1 == self.hyperband_iterations
):
# Stop creating new brackets, but wait to complete other brackets.
if self.ongoing_trials:
return {"status": "IDLE"}
return {"status": "STOPPED"}
# Create a new bracket.
else:
self._increment_bracket_num()
self._start_new_bracket()
return self._random_trial(trial_id, self._brackets[-1])
def _start_new_bracket(self):
rounds = []
rounds.extend(
[] for _ in range(self._get_num_rounds(self._current_bracket))
)
bracket = {"bracket_num": self._current_bracket, "rounds": rounds}
self._brackets.append(bracket)
def _increment_bracket_num(self):
self._current_bracket -= 1
if self._current_bracket < 0:
self._current_bracket = self._get_num_brackets() - 1
self._current_iteration += 1
def _remove_completed_brackets(self):
# Filter out completed brackets.
def _bracket_is_incomplete(bracket):
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
last_round = len(rounds) - 1
return len(rounds[last_round]) != self._get_size(
bracket_num, last_round
)
self._brackets = list(filter(_bracket_is_incomplete, self._brackets))
def _compute_values_hash(self, values):
values = copy.copy(values)
values.pop("tuner/epochs", None)
values.pop("tuner/initial_epoch", None)
values.pop("tuner/bracket", None)
values.pop("tuner/round", None)
return super()._compute_values_hash(values)
def _random_trial(self, trial_id, bracket):
bracket_num = bracket["bracket_num"]
rounds = bracket["rounds"]
values = self._random_values()
if values:
values["tuner/epochs"] = self._get_epochs(bracket_num, 0)
values["tuner/initial_epoch"] = 0
values["tuner/bracket"] = self._current_bracket
values["tuner/round"] = 0
rounds[0].append({"past_id": None, "id": trial_id})
return {"status": "RUNNING", "values": values}
elif self.ongoing_trials:
# Can't create new random values, but successive halvings may still
# be needed.
return {"status": "IDLE"}
else:
# Collision and no ongoing trials should trigger an exit.
return {"status": "STOPPED"}
def _get_size(self, bracket_num, round_num):
# Set up so that each bracket takes approx. the same amount of
# resources.
bracket0_end_size = math.ceil(
1 + math.log(self.max_epochs, self.factor)
)
bracket_end_size = bracket0_end_size / (bracket_num + 1)
return math.ceil(
bracket_end_size * self.factor ** (bracket_num - round_num)
)
def _get_epochs(self, bracket_num, round_num):
return math.ceil(
self.max_epochs / self.factor ** (bracket_num - round_num)
)
def _get_num_rounds(self, bracket_num):
# Bracket 0 just runs random search, others do successive halving.
return bracket_num + 1
def _get_num_brackets(self):
epochs = self.max_epochs
brackets = 0
while epochs >= self.min_epochs:
epochs = epochs / self.factor
brackets += 1
return brackets
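# Worked example (added for illustration; the numbers just follow the
# formulas above): with max_epochs=30 and factor=3 there are 4 brackets.
# For the most aggressive bracket (bracket_num=3), `_get_size` yields
# 34 -> 12 -> 4 -> 2 trials across rounds 0..3, while `_get_epochs`
# yields 2 -> 4 -> 10 -> 30 epochs per round.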
def get_state(self):
state = super().get_state()
state.update(
{
"hyperband_iterations": self.hyperband_iterations,
"max_epochs": self.max_epochs,
"min_epochs": self.min_epochs,
"factor": self.factor,
"brackets": self._brackets,
"current_bracket": self._current_bracket,
"current_iteration": self._current_iteration,
}
)
return state
def set_state(self, state):
super().set_state(state)
self.hyperband_iterations = state["hyperband_iterations"]
self.max_epochs = state["max_epochs"]
self.min_epochs = state["min_epochs"]
self.factor = state["factor"]
self._brackets = state["brackets"]
self._current_bracket = state["current_bracket"]
self._current_iteration = state["current_iteration"]
@keras_tuner_export(["keras_tuner.Hyperband", "keras_tuner.tuners.Hyperband"])
class Hyperband(tuner_module.Tuner):
"""Variation of HyperBand algorithm.
Reference:
Li, Lisha, and Kevin Jamieson.
["Hyperband: A Novel Bandit-Based
Approach to Hyperparameter Optimization."
Journal of Machine Learning Research 18 (2018): 1-52](
http://jmlr.org/papers/v18/16-558.html).
Args:
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a `Model` instance). It is optional
when `Tuner.run_trial()` is overridden and does not use
`self.hypermodel`.
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
objectives to minimize minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_epochs: Integer, the maximum number of epochs to train one model.
It is recommended to set this to a value slightly higher than the
expected epochs to convergence for your largest Model, and to use
early stopping during training (for example, via
`tf.keras.callbacks.EarlyStopping`). Defaults to 100.
factor: Integer, the reduction factor for the number of epochs
and number of models for each bracket. Defaults to 3.
hyperband_iterations: Integer, at least 1, the number of times to
iterate over the full Hyperband algorithm. One iteration will run
approximately `max_epochs * (math.log(max_epochs, factor) ** 2)`
cumulative epochs across all trials. It is recommended to set this
to as high a value as is within your resource budget. Defaults to
1.
seed: Optional integer, the random seed.
hyperparameters: Optional HyperParameters instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
max_retries_per_trial: Integer. Defaults to 0. The maximum number of
times to retry a `Trial` if the trial crashed or the results are
invalid.
max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
number of consecutive failed `Trial`s. When this number is reached,
the search will be stopped. A `Trial` is marked as failed when none
of the retries succeeded.
**kwargs: Keyword arguments relevant to all `Tuner` subclasses.
Please see the docstring for `Tuner`.
"""
def __init__(
self,
hypermodel=None,
objective=None,
max_epochs=100,
factor=3,
hyperband_iterations=1,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
max_retries_per_trial=0,
max_consecutive_failed_trials=3,
**kwargs
):
oracle = HyperbandOracle(
objective,
max_epochs=max_epochs,
factor=factor,
hyperband_iterations=hyperband_iterations,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
max_retries_per_trial=max_retries_per_trial,
max_consecutive_failed_trials=max_consecutive_failed_trials,
)
super().__init__(oracle=oracle, hypermodel=hypermodel, **kwargs)
def run_trial(self, trial, *fit_args, **fit_kwargs):
hp = trial.hyperparameters
if "tuner/epochs" in hp.values:
fit_kwargs["epochs"] = hp.values["tuner/epochs"]
fit_kwargs["initial_epoch"] = hp.values["tuner/initial_epoch"]
return super().run_trial(trial, *fit_args, **fit_kwargs)
def _build_hypermodel(self, hp):
model = super()._build_hypermodel(hp)
if "tuner/trial_id" in hp.values:
trial_id = hp.values["tuner/trial_id"]
# Load best checkpoint from this trial.
if backend.config.multi_backend():
model.build_from_config(
utils.load_json(self._get_build_config_fname(trial_id))
)
model.load_weights(self._get_checkpoint_fname(trial_id))
return model
| keras-tuner/keras_tuner/tuners/hyperband.py/0 | {
"file_path": "keras-tuner/keras_tuner/tuners/hyperband.py",
"repo_id": "keras-tuner",
"token_count": 8273
} | 134 |
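The Hyperband docstring above lists the constructor arguments; the snippet below is a minimal usage sketch, not part of the source file. The `build_model` function, the random training data, and the directory names are illustrative assumptions (it assumes a recent `keras` / `keras_tuner` install).

import keras
import keras_tuner
import numpy as np

def build_model(hp):
    # Hypothetical search space: only the hidden width is tuned.
    model = keras.Sequential([
        keras.layers.Dense(hp.Int("units", 32, 128, step=32), activation="relu"),
        keras.layers.Dense(10, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    return model

tuner = keras_tuner.Hyperband(
    build_model,
    objective="val_loss",
    max_epochs=9,          # small value for the sketch; see the docstring advice above
    factor=3,
    hyperband_iterations=1,
    directory="hyperband_demo",
    project_name="sketch",
)

# Hyperband injects "tuner/epochs" and "tuner/initial_epoch" into the fit
# kwargs, as shown in run_trial above; search() otherwise mirrors model.fit().
x = np.random.rand(256, 20).astype("float32")
y = np.random.randint(0, 10, size=(256,))
tuner.search(x, y, validation_split=0.2, verbose=0)
best_hp = tuner.get_best_hyperparameters(1)[0]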
pytest --cov-report xml:cov.xml --cov keras_tuner $1
| keras-tuner/shell/coverage.sh/0 | {
"file_path": "keras-tuner/shell/coverage.sh",
"repo_id": "keras-tuner",
"token_count": 24
} | 135 |
import numpy as np
import tensorflow as tf
import keras
from keras import layers
from keras import losses
from keras import metrics
from keras import models
from keras import optimizers
from keras.callbacks import LearningRateScheduler
def test_model_fit():
cpus = tf.config.list_physical_devices("CPU")
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
keras.utils.set_random_seed(1337)
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input((100,), batch_size=32)
x = layers.Dense(256, activation="relu")(inputs)
x = layers.Dense(256, activation="relu")(x)
x = layers.Dense(256, activation="relu")(x)
x = layers.BatchNormalization()(x)
outputs = layers.Dense(16)(x)
model = models.Model(inputs, outputs)
callbacks = [LearningRateScheduler(lambda _: 0.1)]
model.summary()
x = np.random.random((5000, 100))
y = np.random.random((5000, 16))
batch_size = 32
epochs = 2
# Fit from numpy arrays:
with strategy.scope():
model.compile(
optimizer=optimizers.LossScaleOptimizer(
optimizers.SGD(learning_rate=0.001, momentum=0.01)
),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(
x,
y,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2,
callbacks=callbacks,
)
print("History:")
print(history.history)
# Fit again from distributed dataset:
with strategy.scope():
dataset = tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)
dataset = strategy.experimental_distribute_dataset(dataset)
history = model.fit(dataset, epochs=epochs, callbacks=callbacks)
print("History:")
print(history.history)
if __name__ == "__main__":
test_model_fit()
| keras/integration_tests/tf_distribute_training_test.py/0 | {
"file_path": "keras/integration_tests/tf_distribute_training_test.py",
"repo_id": "keras",
"token_count": 940
} | 136 |
from unittest.mock import patch
from absl.testing import parameterized
from keras import backend
from keras import ops
from keras.backend.common import dtypes
from keras.backend.common.variables import ALLOWED_DTYPES
from keras.testing import test_case
from keras.testing.test_utils import named_product
class DtypesTest(test_case.TestCase, parameterized.TestCase):
"""Test the dtype to verify that the behavior matches JAX."""
if backend.backend() == "torch":
from keras.backend.torch.core import to_torch_dtype
# TODO: torch doesn't support uint64.
ALL_DTYPES = []
for x in ALLOWED_DTYPES:
if x not in ["string", "uint64"]:
x = str(to_torch_dtype(x)).split(".")[-1]
if x not in ALL_DTYPES: # skip duplicates created by remapping
ALL_DTYPES.append(x)
ALL_DTYPES += [None]
else:
ALL_DTYPES = [x for x in ALLOWED_DTYPES if x != "string"] + [None]
def setUp(self):
from jax.experimental import enable_x64
self.jax_enable_x64 = enable_x64()
self.jax_enable_x64.__enter__()
return super().setUp()
def tearDown(self) -> None:
self.jax_enable_x64.__exit__(None, None, None)
return super().tearDown()
@parameterized.named_parameters(
named_product(dtype1=ALL_DTYPES, dtype2=[bool, int, float])
)
def test_result_type_with_python_scalar_types(self, dtype1, dtype2):
import jax.numpy as jnp
out = backend.result_type(dtype1, dtype2)
expected = jnp.result_type(dtype1, dtype2).name
self.assertEqual(out, expected)
@parameterized.named_parameters(
named_product(dtype1=ALL_DTYPES, dtype2=ALL_DTYPES)
)
def test_result_type_with_tensor(self, dtype1, dtype2):
import jax.numpy as jnp
x1 = ops.ones((1,), dtype=dtype1)
x2 = ops.ones((1,), dtype=dtype2)
x1_jax = jnp.ones((1,), dtype=dtype1)
x2_jax = jnp.ones((1,), dtype=dtype2)
out = backend.result_type(x1.dtype, x2.dtype)
expected = jnp.result_type(x1_jax, x2_jax).name
self.assertEqual(out, expected)
def test_result_type_with_none(self):
import jax.numpy as jnp
self.assertEqual(backend.result_type(None), jnp.result_type(None).name)
def test_result_type_empty_list(self):
self.assertEqual(backend.result_type(), "float32")
def test_respect_weak_type_for_bool(self):
self.assertEqual(dtypes._respect_weak_type("bool", True), "bool")
def test_respect_weak_type_for_int(self):
self.assertEqual(dtypes._respect_weak_type("int32", True), "int")
def test_respect_weak_type_for_float(self):
self.assertEqual(dtypes._respect_weak_type("float32", True), "float")
def test_resolve_weak_type_for_bfloat16(self):
self.assertEqual(dtypes._resolve_weak_type("bfloat16"), "float32")
def test_resolve_weak_type_for_bfloat16_with_precision(self):
self.assertEqual(
dtypes._resolve_weak_type("bfloat16", precision="64"), "float64"
)
def test_invalid_dtype_for_keras_promotion(self):
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion."
):
dtypes._least_upper_bound("invalid_dtype")
def test_resolve_weak_type_for_invalid_dtype(self):
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `dtype`. Expected one of"
):
dtypes._resolve_weak_type("invalid_dtype")
def test_resolve_weak_type_for_invalid_precision(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value for argument `precision`. Expected one of",
):
dtypes._resolve_weak_type("int32", precision="invalid_precision")
def test_cycle_detection_in_make_lattice_upper_bounds(self):
original_lattice_function = dtypes._type_promotion_lattice
def mock_lattice():
lattice = original_lattice_function()
lattice["int32"].append("float32")
lattice["float32"].append("int32")
return lattice
dtypes._type_promotion_lattice = mock_lattice
with self.assertRaisesRegex(
ValueError, "cycle detected in type promotion lattice for node"
):
dtypes._make_lattice_upper_bounds()
dtypes._type_promotion_lattice = original_lattice_function
def test_respect_weak_type_for_invalid_dtype(self):
with self.assertRaisesRegex(
ValueError, "Invalid value for argument `dtype`. Expected one of"
):
dtypes._respect_weak_type("invalid_dtype", True)
def test_invalid_dtype_in_least_upper_bound(self):
invalid_dtype = "non_existent_dtype"
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion"
):
dtypes._least_upper_bound(invalid_dtype)
def test_empty_lub_in_least_upper_bound(self):
dtype1 = "float32"
dtype2 = "int32"
with patch.dict(
dtypes.LATTICE_UPPER_BOUNDS,
{"float32": set(), "int32": set()},
clear=True,
):
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound(dtype1, dtype2)
def test_valid_dtype_leading_to_single_lub_element(self):
self.assertEqual(
dtypes._least_upper_bound("float32", "int32"), "float32"
)
def test_valid_dtype_leading_to_keyerror_and_valueerror(self):
invalid_dtype = "non_existent_dtype"
with self.assertRaisesRegex(
ValueError, "is not a valid dtype for Keras type promotion"
):
dtypes._least_upper_bound(invalid_dtype)
def test_resolve_weak_type_bool(self):
self.assertEqual(dtypes._resolve_weak_type("bool"), "bool")
def test_resolve_weak_type_int(self):
self.assertEqual(
dtypes._resolve_weak_type("int32", precision="32"), "int32"
)
self.assertEqual(
dtypes._resolve_weak_type("int64", precision="64"), "int64"
)
def test_resolve_weak_type_uint(self):
self.assertEqual(
dtypes._resolve_weak_type("uint32", precision="32"), "uint32"
)
self.assertEqual(
dtypes._resolve_weak_type("uint64", precision="64"), "uint64"
)
def test_resolve_weak_type_float(self):
self.assertEqual(
dtypes._resolve_weak_type("float32", precision="32"), "float32"
)
self.assertEqual(
dtypes._resolve_weak_type("float64", precision="64"), "float64"
)
def test_least_upper_bound_ensure_order_independence(self):
# Test to ensure _least_upper_bound is order-independent.
result1 = dtypes._least_upper_bound("float32", "int32")
result2 = dtypes._least_upper_bound("int32", "float32")
self.assertEqual(result1, result2)
def test_least_upper_bound_single_element(self):
dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = {"test_dtype"}
self.assertEqual(dtypes._least_upper_bound("test_dtype"), "test_dtype")
def test_least_upper_bound_no_element(self):
dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = set()
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound("test_dtype")
def test_least_upper_bound_with_no_common_upper_bound(self):
with patch.dict(
dtypes.LATTICE_UPPER_BOUNDS,
{"test_dtype1": set(), "test_dtype2": set()},
clear=True,
):
with self.assertRaisesRegex(
ValueError, "no available implicit dtype promotion path"
):
dtypes._least_upper_bound("test_dtype1", "test_dtype2")
| keras/keras/backend/common/dtypes_test.py/0 | {
"file_path": "keras/keras/backend/common/dtypes_test.py",
"repo_id": "keras",
"token_count": 3708
} | 137 |
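The tests above compare `keras.backend.result_type` against JAX's promotion rules. A few standalone calls illustrate the behavior being verified; the expected values in the comments are assumptions about the JAX-style lattice, not output copied from the source.

from keras import backend

print(backend.result_type("int32", "float32"))   # expected: float32
print(backend.result_type("bool", "int8"))       # expected: int8
print(backend.result_type("int8", "uint8"))      # expected: int16 (smallest signed type holding both)
print(backend.result_type("int32", float))       # Python float is "weak": expected to promote to float32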
"""Test for distribution_lib.py."""
import functools
import os
from unittest import mock
import jax
import numpy as np
import pytest
from keras import backend
from keras import layers
from keras import models
from keras import testing
from keras.backend import distribution_lib as backend_dlib
from keras.distribution import distribution_lib
if backend.backend() == "jax":
# Due to https://github.com/google/jax/issues/17188, we can't
# override the XLA flag after the JAX backend init. We have to
# run this at top level to let JAX pick the flag value.
xla_flags = os.getenv("XLA_FLAGS") or ""
# Don't override user-specified device count, or other XLA flags.
if "xla_force_host_platform_device_count" not in xla_flags:
os.environ["XLA_FLAGS"] = (
xla_flags + " --xla_force_host_platform_device_count=8"
)
@pytest.mark.skipif(
backend.backend() != "jax",
reason="Backend specific test",
)
class JaxDistributionLibTest(testing.TestCase):
def test_list_devices(self):
self.assertEqual(len(distribution_lib.list_devices()), 8)
self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
def test_device_conversion(self):
devices = distribution_lib.list_devices("cpu")
jax_devices = jax.devices("cpu")
for d, jax_d in zip(devices, jax_devices):
converted_jax_device = backend_dlib._to_jax_device(d)
self.assertIsInstance(converted_jax_device, jax.Device)
self.assertEqual(jax_d, converted_jax_device)
@mock.patch.object(jax.distributed, "initialize", return_value=None)
def test_initialize_with_all_job_addresses(self, mock_jax_initialize):
backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 2, 0)
mock_jax_initialize.assert_called_once_with(
coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
)
def test_initialize_validate_job_and_process(self):
with self.assertRaisesRegex(
ValueError, "has 2 jobs, but num_processes is 3"
):
backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 3, 0)
@mock.patch.object(jax.distributed, "initialize", return_value=None)
def test_initialize_with_coordinator_address(self, mock_jax_initialize):
backend_dlib.initialize("10.0.0.1:1234", 2, 0)
mock_jax_initialize.assert_called_once_with(
coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0
)
def test_distribute_tensor(self):
jax_mesh = jax.sharding.Mesh(
np.array(jax.devices()).reshape(2, 4), ("batch", "model")
)
inputs = jax.numpy.array(np.random.normal(size=(16, 8)))
target_layout = jax.sharding.NamedSharding(
jax_mesh, jax.sharding.PartitionSpec("batch", None)
)
@functools.partial(jax.jit, static_argnames="target_layout")
def test_function(inputs, target_layout):
return distribution_lib.distribute_tensor(inputs, target_layout)
result = test_function(inputs, target_layout)
# Note that the returned tensor has a different sharding implementation
# which is GSPMDSharding, but it should be equivalent to the target
# layout specified.
self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))
# Test without jit
result = distribution_lib.distribute_tensor(inputs, target_layout)
self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))
def test_distribute_variable(self):
# This test only verifies the single worker/process behavior.
# The multi-process test lives in g3.
jax_mesh = jax.sharding.Mesh(
np.array(jax.devices()).reshape(2, 4), ("batch", "model")
)
variable = jax.numpy.array(np.random.normal(size=(16, 8)))
target_layout = jax.sharding.NamedSharding(
jax_mesh, jax.sharding.PartitionSpec("model", None)
)
result = backend_dlib.distribute_variable(variable, target_layout)
# Note that the returned tensor has a different sharding implementation
# which is GSPMDSharding, but it should be equivalent to the target
# layout specified.
self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))
def test_distribute_input_data(self):
# This test only verifies the single worker/process behavior.
# The multi-process test lives in g3.
jax_mesh = jax.sharding.Mesh(
np.array(jax.devices()).reshape(2, 4), ("batch", "model")
)
input_data = jax.numpy.array(np.random.normal(size=(16, 8)))
target_layout = jax.sharding.NamedSharding(
jax_mesh, jax.sharding.PartitionSpec("batch", None)
)
result = backend_dlib.distribute_variable(input_data, target_layout)
# Note that the returned tensor has a different sharding implementation
# which is GSPMDSharding, but it should be equivalent to the target
# layout specified.
self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2))
def test_processes(self):
self.assertEqual(backend_dlib.process_id(), 0)
self.assertEqual(backend_dlib.num_processes(), 1)
def test_to_jax_mesh(self):
devices = [f"cpu:{i}" for i in range(8)]
shape = (4, 2)
axis_names = ["batch", "model"]
mesh = distribution_lib.DeviceMesh(shape, axis_names, devices)
jax_mesh = backend_dlib._to_jax_mesh(mesh)
self.assertIsInstance(jax_mesh, jax.sharding.Mesh)
self.assertEqual(jax_mesh.devices.shape, shape)
self.assertEqual(jax_mesh.axis_names, ("batch", "model"))
def test_to_jax_layout(self):
axes = ["data", None]
mesh = distribution_lib.DeviceMesh(
(4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)]
)
layout = distribution_lib.TensorLayout(axes, mesh)
jax_sharding = backend_dlib._to_jax_layout(layout)
jax_mesh = backend_dlib._to_jax_mesh(mesh)
self.assertEqual(
jax_sharding,
jax.sharding.NamedSharding(
jax_mesh, jax.sharding.PartitionSpec("data", None)
),
)
def test_validation_for_device_mesh(self):
axes = ["data", None]
layout = distribution_lib.TensorLayout(axes, device_mesh=None)
with self.assertRaisesRegex(
ValueError, "Cannot create sharding when device mesh is not set"
):
backend_dlib._to_jax_layout(layout)
def test_variable_assignment_reuse_layout(self):
shape = (4, 2)
axis_names = ["batch", "model"]
device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, backend_dlib.list_devices()
)
layout_map = distribution_lib.LayoutMap(device_mesh)
layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
[None, "model"]
)
layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])
distribution = distribution_lib.ModelParallel(
device_mesh, layout_map, batch_dim_name="batch"
)
with distribution.scope():
dense_layer = layers.Dense(8)
dense_layer.build((16, 16))
self.assertEqual(
dense_layer.kernel._value.sharding.spec, (None, "model")
)
self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",))
# Assign a numpy value to dense layer to mimic the model weight loading
new_kernel = np.random.normal(size=(16, 8))
new_bias = np.random.normal(size=(8))
dense_layer.kernel.assign(new_kernel)
dense_layer.bias.assign(new_bias)
# Make sure the loaded value still uses the layout it was given when
# initialized, even outside of the distribution scope.
self.assertEqual(
dense_layer.kernel._value.sharding.spec, (None, "model")
)
self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",))
def test_e2e_data_parallel_model(self):
distribution = distribution_lib.DataParallel(
devices=backend_dlib.list_devices()
)
with distribution.scope():
inputs = layers.Input(shape=[28, 28, 1])
y = layers.Flatten()(inputs)
y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
y = layers.Dropout(0.4)(y)
y = layers.Dense(units=10, activation="softmax")(y)
model = models.Model(inputs=inputs, outputs=y)
# Make sure all the weights are properly sharded.
for weight in model.weights:
self.assertTrue(weight._value.sharding.is_fully_replicated)
inputs = np.random.normal(size=(32, 28, 28, 1))
labels = np.random.normal(size=(32, 10))
with distribution.scope():
model.compile(loss="mse")
model.fit(inputs, labels)
def test_e2e_model_parallel_model(self):
shape = (4, 2)
axis_names = ["batch", "model"]
device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, backend_dlib.list_devices()
)
layout_map = distribution_lib.LayoutMap(device_mesh)
layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
[None, "model"]
)
layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])
distribution = distribution_lib.ModelParallel(
device_mesh, layout_map, batch_dim_name="batch"
)
with distribution.scope():
inputs = layers.Input(shape=[28, 28, 1])
y = layers.Flatten()(inputs)
y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
y = layers.Dropout(0.4)(y)
y = layers.Dense(units=10, activation="softmax")(y)
model = models.Model(inputs=inputs, outputs=y)
for weight in model.weights:
if "kernel" in weight.name:
self.assertEqual(weight._value.sharding.spec, (None, "model"))
elif "bias" in weight.name:
self.assertEqual(weight._value.sharding.spec, ("model",))
else:
self.assertTrue(weight._value.sharding.is_fully_replicated)
inputs = np.random.normal(size=(32, 28, 28, 1))
labels = np.random.normal(size=(32, 10))
with distribution.scope():
model.compile(loss="mse")
model.fit(inputs, labels)
def test_e2e_model_parallel_with_output_sharding(self):
shape = (4, 2)
axis_names = ["batch", "model"]
device_mesh = distribution_lib.DeviceMesh(
shape, axis_names, backend_dlib.list_devices()
)
layout_map = distribution_lib.LayoutMap(device_mesh)
layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout(
[None, "model"]
)
layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"])
# Force the dense layer output to be batch parallel only, and not
# sharded on model dimension.
layout_map[".*dense.*output"] = ("batch", None)
distribution = distribution_lib.ModelParallel(
device_mesh, layout_map, batch_dim_name="batch"
)
sharding_capture = ShardingCaptureLayer()
with distribution.scope():
inputs = layers.Input(shape=[28, 28, 1])
y = layers.Flatten()(inputs)
y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
y = sharding_capture(y)
y = layers.Dropout(0.4)(y)
y = layers.Dense(units=10, activation="softmax")(y)
model = models.Model(inputs=inputs, outputs=y)
for weight in model.weights:
if "kernel" in weight.name:
self.assertEqual(weight._value.sharding.spec, (None, "model"))
elif "bias" in weight.name:
self.assertEqual(weight._value.sharding.spec, ("model",))
else:
self.assertTrue(weight._value.sharding.is_fully_replicated)
inputs = np.random.normal(size=(32, 28, 28, 1))
labels = np.random.normal(size=(32, 10))
with distribution.scope():
model.compile(loss="mse")
model.fit(inputs, labels)
# Note that the intermediate_tensor_layout is only captured during the
# actual training, and not at the model building time.
intermediate_tensor_layout = jax.sharding.NamedSharding(
backend_dlib._to_jax_mesh(distribution.device_mesh),
jax.sharding.PartitionSpec("batch", None),
)
self.assertTrue(
sharding_capture.captured_input_sharding.is_equivalent_to(
intermediate_tensor_layout, ndim=2
)
)
class ShardingCaptureLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.captured_input_sharding = None
self.supports_masking = True
def call(self, inputs):
jax.debug.inspect_array_sharding(
inputs, callback=lambda x: self.capture_input_sharding(x)
)
return inputs
def capture_input_sharding(self, sharding):
self.captured_input_sharding = sharding
| keras/keras/backend/jax/distribution_lib_test.py/0 | {
"file_path": "keras/keras/backend/jax/distribution_lib_test.py",
"repo_id": "keras",
"token_count": 6015
} | 138 |
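The JAX distribution tests above construct DeviceMesh/LayoutMap objects by hand. The sketch below shows the same DataParallel path as plain user code; it assumes a JAX backend and reuses the `xla_force_host_platform_device_count=8` trick from the top of the test file. Shapes and the toy model are illustrative.

import os
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"  # must be set before JAX init
os.environ["KERAS_BACKEND"] = "jax"

import numpy as np
from keras import distribution, layers, models

# Replicate weights and shard the batch across all host CPU devices.
data_parallel = distribution.DataParallel(devices=distribution.list_devices("cpu"))
distribution.set_distribution(data_parallel)

inputs = layers.Input(shape=(28, 28, 1))
y = layers.Flatten()(inputs)
y = layers.Dense(64, activation="relu")(y)
outputs = layers.Dense(10, activation="softmax")(y)
model = models.Model(inputs, outputs)

model.compile(loss="mse")
model.fit(
    np.random.normal(size=(32, 28, 28, 1)),
    np.random.normal(size=(32, 10)),
    epochs=1,
)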
import numpy as np
import scipy.linalg as sl
from keras.backend import standardize_dtype
from keras.backend.common import dtypes
from keras.backend.numpy.core import convert_to_tensor
def cholesky(a):
return np.linalg.cholesky(a)
def det(a):
return np.linalg.det(a)
def eig(a):
return np.linalg.eig(a)
def inv(a):
return np.linalg.inv(a)
def lu_factor(a):
if a.ndim == 2:
return sl.lu_factor(a)
m, n = a.shape[-2:]
signature = "(m,n) -> (m,n), "
signature += "(m)" if m <= n else "(n)"
_lu_factor_gufunc = np.vectorize(
sl.lu_factor,
signature=signature,
)
return _lu_factor_gufunc(a)
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if "int" in dtype or dtype == "bool":
dtype = dtypes.result_type(x.dtype, "float32")
return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(
dtype
)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return np.linalg.qr(x, mode=mode)
def solve(a, b):
return np.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
if a.ndim == 2:
return sl.solve_triangular(a, b, lower=lower)
_vectorized_solve_triangular = np.vectorize(
lambda a, b: sl.solve_triangular(a, b, lower=lower),
signature="(n,n),(n,m)->(n,m)",
)
if b.ndim == a.ndim - 1:
b = np.expand_dims(b, axis=-1)
return _vectorized_solve_triangular(a, b).squeeze(axis=-1)
return _vectorized_solve_triangular(a, b)
def svd(x, full_matrices=True, compute_uv=True):
return np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
| keras/keras/backend/numpy/linalg.py/0 | {
"file_path": "keras/keras/backend/numpy/linalg.py",
"repo_id": "keras",
"token_count": 906
} | 139 |
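The batched fallbacks in lu_factor and solve_triangular above rely on np.vectorize with a gufunc signature to map a 2-D SciPy routine over leading batch dimensions. A standalone sketch of that trick (shapes and values are illustrative):

import numpy as np
import scipy.linalg as sl

# Map a 2-D triangular solve over a leading batch dimension.
batched_solve = np.vectorize(
    lambda a, b: sl.solve_triangular(a, b, lower=True),
    signature="(n,n),(n,m)->(n,m)",
)
a = np.tril(np.random.rand(4, 3, 3)) + 3.0 * np.eye(3)  # well-conditioned lower-triangular batch
b = np.random.rand(4, 3, 2)
x = batched_solve(a, b)
print(x.shape)                 # (4, 3, 2)
print(np.allclose(a @ x, b))   # True: each batch element is solved independently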
import torch
from keras.backend.common.stateless_scope import in_stateless_scope
from keras.ops.operation import Operation
class TorchLayer(torch.nn.Module):
def _post_build(self):
# Do not track variables when in a stateless scope.
# The variables are not initialized.
if in_stateless_scope():
return
self._track_variables()
def _track_variables(self):
self.torch_params = torch.nn.ParameterList(
[variable.value for variable in self.variables]
)
def parameters(self, recurse=True):
if not hasattr(self, "torch_params"):
self._track_variables()
return torch.nn.Module.parameters(self, recurse=recurse)
def forward(self, *args, **kwargs):
return Operation.__call__(self, *args, **kwargs)
def _setattr_hook(self, name, value):
from keras.layers import Layer
if (
isinstance(value, torch.nn.Module)
and not isinstance(value, Layer)
and not name == "torch_params"
):
from keras.utils.torch_utils import TorchModuleWrapper
if not isinstance(self, TorchModuleWrapper):
value = TorchModuleWrapper(value)
return name, value
| keras/keras/backend/torch/layer.py/0 | {
"file_path": "keras/keras/backend/torch/layer.py",
"repo_id": "keras",
"token_count": 534
} | 140 |
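The _setattr_hook above is what lets a Keras Layer own a raw torch.nn.Module: the module is silently wrapped in TorchModuleWrapper so its parameters are tracked as Keras variables. A hedged sketch, assuming the torch backend (KERAS_BACKEND=torch) is active:

import torch
from keras import layers

class Block(layers.Layer):
    def __init__(self):
        super().__init__()
        # A plain torch module; the hook wraps it in TorchModuleWrapper.
        self.linear = torch.nn.Linear(4, 2)

    def call(self, x):
        return self.linear(x)

block = Block()
out = block(torch.ones(3, 4))
print(type(block.linear).__name__)    # expected: TorchModuleWrapper
print(len(block.trainable_weights))   # expected: 2 (weight and bias tracked as Keras variables)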
import torch
from keras import optimizers
from keras.backend.torch.optimizers import torch_parallel_optimizer
class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
if self.momentum != 0:
bufs = [
self.momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
for i in range(len(bufs)):
if bufs[i] is None:
bufs[i] = torch.clone(grads[i]).detach()
torch._foreach_mul_(bufs, self.momentum)
torch._foreach_add_(bufs, grads, alpha=-learning_rate)
if self.nesterov:
torch._foreach_add_(variables, grads, alpha=-learning_rate)
torch._foreach_add_(variables, bufs, alpha=self.momentum)
else:
torch._foreach_add_(variables, bufs)
else:
torch._foreach_add_(variables, grads, alpha=-learning_rate)
| keras/keras/backend/torch/optimizers/torch_sgd.py/0 | {
"file_path": "keras/keras/backend/torch/optimizers/torch_sgd.py",
"repo_id": "keras",
"token_count": 580
} | 141 |
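The fused `torch._foreach_*` calls above apply the classical momentum update list-wise across all variables. A small numeric check of the `momentum != 0`, `nesterov=False` branch (values are illustrative):

import torch

# Classical momentum, nesterov=False:
#   buf <- momentum * buf - lr * grad
#   var <- var + buf
lr, momentum = 0.1, 0.9
var = torch.tensor([1.0, 2.0])
grad = torch.tensor([0.5, 0.5])
buf = torch.zeros_like(var)

# Fused list-wise form, as in _parallel_update_step:
vars_, bufs, grads = [var.clone()], [buf.clone()], [grad]
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, grads, alpha=-lr)
torch._foreach_add_(vars_, bufs)

# Unfused reference computation:
buf_ref = momentum * buf - lr * grad
var_ref = var + buf_ref
print(torch.allclose(vars_[0], var_ref))  # True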