| text (string, 5-261k chars) | id (string, 16-106 chars) | metadata (dict) | __index_level_0__ (int64, 0-266) |
---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""random_crop_demo.py.py shows how to use the RandomCrop
preprocessing layer. Operates on an image of elephant. In this script the image
is loaded, then are passed through the preprocessing layers.
Finally, they are shown using matplotlib.
"""
import demo_utils
from keras_cv.layers.preprocessing import RandomCrop
def main():
many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
layer = RandomCrop(100, 200)
augmented = layer(many_elephants)
demo_utils.gallery_show(augmented.numpy())
if __name__ == "__main__":
main()
| keras-cv/examples/layers/preprocessing/classification/random_crop_demo.py/0 | {
"file_path": "keras-cv/examples/layers/preprocessing/classification/random_crop_demo.py",
"repo_id": "keras-cv",
"token_count": 339
} | 37 |
"""
Title: Plot an image gallery
Author: [lukewood](https://lukewood.xyz), updated by
[Suvaditya Mukherjee](https://twitter.com/halcyonrayes)
Date created: 2022/10/16
Last modified: 2022/06/24
Description: Plot a gallery of images from a TensorFlow dataset or from plain
NumPy arrays.
"""
"""
Plotting images from a TensorFlow dataset is easy with KerasCV. Behold:
"""
import numpy as np
import tensorflow_datasets as tfds
import keras_cv
train_ds = tfds.load(
"cats_vs_dogs",
split="train",
with_info=False,
shuffle_files=True,
)
keras_cv.visualization.plot_image_gallery(
train_ds,
value_range=(0, 255),
scale=3,
)
"""
If you want to use plain NumPy arrays, you can do that too:
"""
# Prepare some sample NumPy arrays from random noise
samples = np.random.randint(0, 255, (20, 224, 224, 3))
keras_cv.visualization.plot_image_gallery(
samples, value_range=(0, 255), scale=3, rows=4, cols=5
)
| keras-cv/examples/visualization/plot_image_gallery.py/0 | {
"file_path": "keras-cv/examples/visualization/plot_image_gallery.py",
"repo_id": "keras-cv",
"token_count": 352
} | 38 |
/* Copyright 2022 The KerasCV Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "keras_cv/custom_ops/box_util.h"
#include <algorithm>
#include <cmath>
namespace tensorflow {
namespace kerascv {
namespace box {
const double kEPS = 1e-8;
// Min/max box dimensions (length, width, height). Boxes with dimensions that
// exceed these values will have box intersections of 0.
constexpr double kMinBoxDim = 1e-3;
constexpr double kMaxBoxDim = 1e6;
// A line with the representation a*x + b*y + c = 0.
struct Line {
double a = 0;
double b = 0;
double c = 0;
Line(const Vertex& v1, const Vertex& v2)
: a(v2.y - v1.y), b(v1.x - v2.x), c(v2.x * v1.y - v2.y * v1.x) {}
// Computes the line value for a vertex v as a * v.x + b * v.y + c
double LineValue(const Vertex& v) const { return a * v.x + b * v.y + c; }
// Computes the intersection point with the other line.
Vertex IntersectionPoint(const Line& other) const {
const double w = a * other.b - b * other.a;
CHECK_GT(std::fabs(w), kEPS) << "No intersection between the two lines.";
return Vertex((b * other.c - c * other.b) / w,
(c * other.a - a * other.c) / w);
}
};
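// A quick sanity check of the formulas above (an illustrative example, not
// part of the original file): the x-axis through (0, 0) and (1, 0) has
// (a, b, c) = (0, -1, 0), and the line through (0, 1) and (1, 0) has
// (a, b, c) = (-1, -1, 1). IntersectionPoint then computes
// w = 0 * (-1) - (-1) * (-1) = -1 and returns
// ((-1 * 1 - 0 * (-1)) / -1, (0 * (-1) - 0 * 1) / -1) = (1, 0),
// which indeed lies on both lines.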
// Computes the coordinates of the four vertices of a given 2D rotated box.
std::vector<Vertex> ComputeBoxVertices(const double cx, const double cy,
const double w, const double h,
const double heading) {
const double dxcos = (w / 2.) * std::cos(heading);
const double dxsin = (w / 2.) * std::sin(heading);
const double dycos = (h / 2.) * std::cos(heading);
const double dysin = (h / 2.) * std::sin(heading);
return {Vertex(cx - dxcos + dysin, cy - dxsin - dycos),
Vertex(cx + dxcos + dysin, cy + dxsin - dycos),
Vertex(cx + dxcos - dysin, cy + dxsin + dycos),
Vertex(cx - dxcos - dysin, cy - dxsin + dycos)};
}
// Computes the intersection points between two rotated boxes, by following:
//
// 1. Initializes the current intersection points with the vertices of one box,
// and the other box is taken as the cutting box;
//
// 2. For each cutting line in the cutting box (four cutting lines in total):
// For each point in the current intersection points:
// If the point is inside of the cutting line:
// Adds it to the new intersection points;
// If the current point and its next point are on opposite sides of the
// cutting line:
// Computes the line through the current point and its next point as tmp_line;
// Computes the intersection point between the cutting line and
// tmp_line;
// Adds the intersection point to the new intersection points;
// After checking each cutting line, sets current intersection points as
// new intersection points;
//
// 3. Returns the final intersection points.
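//
// As an illustrative example (not from the original source): clipping the
// axis-aligned unit square centered at the origin against an identical square
// shifted by (0.5, 0.5) leaves the four corners of the overlap region
// [0, 0.5] x [0, 0.5] (up to vertex ordering), whose area is 0.25.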
std::vector<Vertex> ComputeIntersectionPoints(
const std::vector<Vertex>& rbox_1, const std::vector<Vertex>& rbox_2) {
std::vector<Vertex> intersection = rbox_1;
const int vertices_len = rbox_2.size();
for (int i = 0; i < vertices_len; ++i) {
const int len = intersection.size();
if (len <= 2) {
break;
}
const Vertex& p = rbox_2[i];
const Vertex& q = rbox_2[(i + 1) % vertices_len];
Line cutting_line(p, q);
// Computes line value.
std::vector<double> line_values;
line_values.reserve(len);
for (int j = 0; j < len; ++j) {
line_values.push_back(cutting_line.LineValue(intersection[j]));
}
// Updates current intersection points.
std::vector<Vertex> new_intersection;
for (int j = 0; j < len; ++j) {
const double s_val = line_values[j];
const Vertex& s = intersection[j];
// Adds the current vertex.
if (s_val <= 0 || std::fabs(s_val) <= kEPS) {
new_intersection.push_back(s);
}
const double t_val = line_values[(j + 1) % len];
// Skips the checking of intersection point if the next vertex is on the
// line.
if (std::fabs(t_val) <= kEPS) {
continue;
}
// Adds the intersection point.
if ((s_val > 0 && t_val < 0) || (s_val < 0 && t_val > 0)) {
Line s_t_line(s, intersection[(j + 1) % len]);
new_intersection.push_back(cutting_line.IntersectionPoint(s_t_line));
}
}
intersection = new_intersection;
}
return intersection;
}
// Computes the area of a convex polygon.
double ComputePolygonArea(const std::vector<Vertex>& convex_polygon) {
const int len = convex_polygon.size();
if (len <= 2) {
return 0;
}
double area = 0;
for (int i = 0; i < len; ++i) {
const Vertex& p = convex_polygon[i];
const Vertex& q = convex_polygon[(i + 1) % len];
area += p.x * q.y - p.y * q.x;
}
return std::fabs(0.5 * area);
}
RotatedBox2D::RotatedBox2D(const double cx, const double cy, const double w,
const double h, const double heading)
: cx_(cx), cy_(cy), w_(w), h_(h), heading_(heading) {
// Compute loose bounds on the dimensions of the box that don't require computing the
// full intersection. We can do this by trying to compute the largest circle
// swept by rotating the box around its center. The radius of that circle
// is the length of the ray from the center to the box corner. The upper
// bound for this value is the length of the longer dimension divided by two
// and then multiplied by root(2) (worst-case being a square box); we choose
// 1.5 as slightly higher than root(2), and then use these extrema to do
// simple extrema box checks without having to compute the true cos/sin value.
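// For example (illustration only): a square box with w = h = 2 has a true
// corner radius of sqrt(1^2 + 1^2) ~= 1.414, while the bound used below is
// max(2, 2) / 2 * 1.5 = 1.5, which safely covers it.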
double max_dim = std::max(w_, h_) / 2. * 1.5;
loose_min_x_ = cx_ - max_dim;
loose_max_x_ = cx_ + max_dim;
loose_min_y_ = cy_ - max_dim;
loose_max_y_ = cy_ + max_dim;
extreme_box_dim_ = (w_ <= kMinBoxDim || h_ <= kMinBoxDim);
extreme_box_dim_ |= (w_ >= kMaxBoxDim || h_ >= kMaxBoxDim);
}
double RotatedBox2D::Area() const {
if (area_ < 0) {
const double area = ComputePolygonArea(box_vertices());
area_ = std::fabs(area) <= kEPS ? 0 : area;
}
return area_;
}
const std::vector<Vertex>& RotatedBox2D::box_vertices() const {
if (box_vertices_.empty()) {
box_vertices_ = ComputeBoxVertices(cx_, cy_, w_, h_, heading_);
}
return box_vertices_;
}
double RotatedBox2D::MinX() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].x;
for (auto v : vertices_) {
res = std::min(res, v.x);
}
return res;
}
double RotatedBox2D::MaxX() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].x;
for (auto v : vertices_) {
res = std::max(res, v.x);
}
return res;
}
double RotatedBox2D::MinY() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].y;
for (auto v : vertices_) {
res = std::min(res, v.y);
}
return res;
}
double RotatedBox2D::MaxY() const {
const std::vector<Vertex>& vertices_ = this->box_vertices();
double res = vertices_[0].y;
for (auto v : vertices_) {
res = std::max(res, v.y);
}
return res;
}
bool RotatedBox2D::NonZeroAndValid() const { return !extreme_box_dim_; }
bool RotatedBox2D::MaybeIntersects(const RotatedBox2D& other) const {
// If the box dimensions of either box are too small / large,
// assume they are not well-formed boxes (otherwise we are
// subject to issues due to catastrophic cancellation).
if (extreme_box_dim_ || other.extreme_box_dim_) {
return false;
}
// Check whether the loose extrema overlap -- if not, then there is
// no chance that the two boxes overlap even when computing the true,
// more expensive overlap.
if ((loose_min_x_ > other.loose_max_x_) ||
(loose_max_x_ < other.loose_min_x_) ||
(loose_min_y_ > other.loose_max_y_) ||
(loose_max_y_ < other.loose_min_y_)) {
return false;
}
return true;
}
double RotatedBox2D::Intersection(const RotatedBox2D& other) const {
// Do a fast intersection check - if the boxes are not near each other
// then we can return early. If they are close enough to maybe overlap,
// we do the full check.
if (!MaybeIntersects(other)) {
return 0.0;
}
// Computes the intersection polygon.
const std::vector<Vertex> intersection_polygon =
ComputeIntersectionPoints(box_vertices(), other.box_vertices());
// Computes the intersection area.
const double intersection_area = ComputePolygonArea(intersection_polygon);
return std::fabs(intersection_area) <= kEPS ? 0 : intersection_area;
}
double RotatedBox2D::IoU(const RotatedBox2D& other) const {
// Computes the intersection area.
const double intersection_area = Intersection(other);
if (intersection_area == 0) {
return 0;
}
// Computes the union area.
const double union_area = Area() + other.Area() - intersection_area;
if (std::fabs(union_area) <= kEPS) {
return 0;
}
return intersection_area / union_area;
}
bool RotatedBox2D::left_hand_side(const Vertex& point, const Vertex& v1,
const Vertex& v2) const {
double d1 = (point.y - v1.y) * (v2.x - v1.x);
double d2 = (point.x - v1.x) * (v2.y - v1.y);
return d1 >= d2;
}
bool RotatedBox2D::WithinBox2D(const Vertex& point) const {
const std::vector<Vertex>& vertices = this->box_vertices();
if (Area() <= kEPS) {
return false;
}
if (!this->left_hand_side(point, vertices[0], vertices[1])) return false;
if (!this->left_hand_side(point, vertices[1], vertices[2])) return false;
if (!this->left_hand_side(point, vertices[2], vertices[3])) return false;
if (!this->left_hand_side(point, vertices[3], vertices[0])) return false;
return true;
}
std::vector<Upright3DBox> ParseBoxesFromTensor(const Tensor& boxes_tensor) {
int num_boxes = boxes_tensor.dim_size(0);
const auto t_boxes_tensor = boxes_tensor.matrix<float>();
std::vector<Upright3DBox> bboxes3d;
bboxes3d.reserve(num_boxes);
for (int i = 0; i < num_boxes; ++i) {
const double center_x = t_boxes_tensor(i, 0);
const double center_y = t_boxes_tensor(i, 1);
const double center_z = t_boxes_tensor(i, 2);
const double dimension_x = t_boxes_tensor(i, 3);
const double dimension_y = t_boxes_tensor(i, 4);
const double dimension_z = t_boxes_tensor(i, 5);
const double heading = t_boxes_tensor(i, 6);
const double z_min = center_z - dimension_z / 2;
const double z_max = center_z + dimension_z / 2;
RotatedBox2D box2d(center_x, center_y, dimension_x, dimension_y, heading);
if (dimension_x <= 0 || dimension_y <= 0) {
bboxes3d.emplace_back(RotatedBox2D(), z_min, z_max);
} else {
bboxes3d.emplace_back(box2d, z_min, z_max);
}
}
return bboxes3d;
}
std::vector<Vertex> ParseVerticesFromTensor(const Tensor& points_tensor) {
int num_points = points_tensor.dim_size(0);
const auto t_points_tensor = points_tensor.matrix<float>();
std::vector<Vertex> points3d;
points3d.reserve(num_points);
for (int i = 0; i < num_points; ++i) {
const double x = t_points_tensor(i, 0);
const double y = t_points_tensor(i, 1);
const double z = t_points_tensor(i, 2);
Vertex point(x, y, z);
points3d.emplace_back(point);
}
return points3d;
}
std::vector<int> GetMinXIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
// find the first element in points >= val
// returned index within [0, points_size]
// return points_size means all elements are < val
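// Illustrative example (not from the original source): with sorted
// points = {1.0, 3.0, 5.0} and x_min = 3.0, std::lower_bound yields index 1;
// with x_min = 6.0 it yields index 3 (== points.size()), meaning every
// point lies to the left of the box.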
double x_min = box.rbox.MinX();
int idx = std::lower_bound(p_begin, p_end, x_min) - p_begin;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMaxXIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
double x_max = box.rbox.MaxX();
// find the last element in points <= val
// returned index within [-1, points_size - 1]
// return -1 means all elements > val
int idx = std::upper_bound(p_begin, p_end, x_max) - p_begin - 1;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMinYIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
// find the first element in points >= val
// returned index within [0, points_size]
// return points_size means all elements are < val
double y_min = box.rbox.MinY();
int idx = std::lower_bound(p_begin, p_end, y_min) - p_begin;
res.emplace_back(idx);
}
return res;
}
std::vector<int> GetMaxYIndexFromBoxes(std::vector<Upright3DBox>& boxes,
std::vector<double>& points) {
std::vector<int> res;
res.reserve(boxes.size());
auto p_begin = points.begin();
auto p_end = points.end();
for (auto box : boxes) {
double y_max = box.rbox.MaxY();
// find the last element in points <= val
// returned index within [-1, points_size - 1]
// return -1 means all elements > val
int idx = std::upper_bound(p_begin, p_end, y_max) - p_begin - 1;
res.emplace_back(idx);
}
return res;
}
bool Upright3DBox::NonZeroAndValid() const {
// If min is larger than max, the upright box is invalid.
//
// If the min and max are equal, the height of the box is 0, and thus the box
// has zero volume.
if (z_min - z_max >= 0.) {
return false;
}
return rbox.NonZeroAndValid();
}
bool Upright3DBox::WithinBox3D(const Vertex& point) const {
if (point.z > this->z_max || point.z < this->z_min) return false;
return this->rbox.WithinBox2D(point);
}
double Upright3DBox::IoU(const Upright3DBox& other) const {
// Check that both boxes are non-zero and valid. Otherwise,
// return 0.
if (!NonZeroAndValid() || !other.NonZeroAndValid()) {
return 0;
}
// Quickly check whether z's overlap; if they don't, we can return 0.
const double z_inter =
std::max(.0, std::min(z_max, other.z_max) - std::max(z_min, other.z_min));
if (z_inter == 0) {
return 0;
}
const double base_inter = rbox.Intersection(other.rbox);
if (base_inter == 0) {
return 0;
}
const double volume_1 = rbox.Area() * (z_max - z_min);
const double volume_2 = other.rbox.Area() * (other.z_max - other.z_min);
const double volume_inter = base_inter * z_inter;
const double volume_union = volume_1 + volume_2 - volume_inter;
return volume_inter > 0 ? volume_inter / volume_union : 0;
}
double Upright3DBox::Overlap(const Upright3DBox& other) const {
// Check that both boxes are non-zero and valid. Otherwise,
// return 0.
if (!NonZeroAndValid() || !other.NonZeroAndValid()) {
return 0;
}
const double z_inter =
std::max(.0, std::min(z_max, other.z_max) - std::max(z_min, other.z_min));
if (z_inter == 0) {
return 0;
}
const double base_inter = rbox.Intersection(other.rbox);
if (base_inter == 0) {
return 0;
}
const double volume_1 = rbox.Area() * (z_max - z_min);
const double volume_inter = base_inter * z_inter;
// Normalizes intersection of volume by the volume of this box.
return volume_inter > 0 ? volume_inter / volume_1 : 0;
}
} // namespace box
} // namespace kerascv
} // namespace tensorflow
| keras-cv/keras_cv/custom_ops/box_util.cc/0 | {
"file_path": "keras-cv/keras_cv/custom_ops/box_util.cc",
"repo_id": "keras-cv",
"token_count": 6291
} | 39 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import sys
import tensorflow as tf
from absl import flags
from keras_cv.datasets.pascal_voc import segmentation
from keras_cv.tests.test_case import TestCase
extracted_dir = os.path.join("VOCdevkit", "VOC2012")
class PascalVocSegmentationDataTest(TestCase):
def setUp(self):
super().setUp()
self.tempdir = self.get_tempdir()
# Note that this will not work with bazel; it needs to be rewritten to
# rely on FLAGS.test_srcdir
self.test_data_tar_path = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
os.path.pardir,
"test_data",
"VOC_mini.tar",
)
)
def get_tempdir(self):
try:
flags.FLAGS.test_tmpdir
except flags.UnparsedFlagAccessError:
# Need to initialize flags when running `pytest`.
flags.FLAGS(sys.argv, known_only=True)
return self.create_tempdir().full_path
def test_download_data(self):
# Since the original data package is too large, we use a small package
# as a replacement.
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
test_data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
self.assertTrue(os.path.exists(test_data_dir))
# Make sure the data is unzipped correctly and populated with correct
# content.
expected_subdirs = [
"Annotations",
"ImageSets",
"JPEGImages",
"SegmentationClass",
"SegmentationObject",
]
for sub_dir in expected_subdirs:
self.assertTrue(
os.path.exists(os.path.join(test_data_dir, sub_dir))
)
def test_skip_download_and_override(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
test_data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
# Touch a file in the test_data_dir and make sure it exists (not being
# overridden) when invoking the _download_data_file again
os.makedirs(os.path.join(test_data_dir, "Annotations", "dummy_dir"))
segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
override_extract=False,
)
self.assertTrue(
os.path.exists(
os.path.join(test_data_dir, "Annotations", "dummy_dir")
)
)
def test_get_image_ids(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
train_ids = ["2007_000032", "2007_000039", "2007_000063"]
eval_ids = ["2007_000033"]
train_eval_ids = train_ids + eval_ids
self.assertEquals(
segmentation._get_image_ids(data_dir, "train"), train_ids
)
self.assertEquals(
segmentation._get_image_ids(data_dir, "eval"), eval_ids
)
self.assertEquals(
segmentation._get_image_ids(data_dir, "trainval"), train_eval_ids
)
def test_parse_annotation_file(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
# One of the train files.
annotation_file = os.path.join(
data_dir, "Annotations", "2007_000032.xml"
)
metadata = segmentation._parse_annotation_data(annotation_file)
expected_result = {
"height": 281,
"width": 500,
"objects": [
{
"label": 0,
"pose": "frontal",
"bbox": [78, 104, 183, 375],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 0,
"pose": "left",
"bbox": [88, 133, 123, 197],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [180, 195, 229, 213],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [189, 26, 238, 44],
"is_truncated": False,
"is_difficult": False,
},
],
}
self.assertEquals(metadata, expected_result)
def test_decode_png_mask(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
mask_file = os.path.join(
data_dir, "SegmentationClass", "2007_000032.png"
)
mask = tf.io.decode_png(tf.io.read_file(mask_file))
segmentation._maybe_populate_voc_color_mapping()
mask = segmentation._decode_png_mask(mask)
self.assertEquals(mask.shape, (281, 500, 1))
self.assertEquals(
tf.reduce_max(mask), 255
) # The 255 value is for the boundary
self.assertEquals(
tf.reduce_min(mask), 0
) # The 0 value is for the background
# The mask contains two classes, 1 and 15, see the label section in the
# previous test case.
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(mask, 1), tf.int32)), 4734
)
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(mask, 15), tf.int32)), 866
)
def test_parse_single_image(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_file = os.path.join(data_dir, "JPEGImages", "2007_000032.jpg")
result_dict = segmentation._parse_single_image(image_file)
expected_result = {
"image/filename": "2007_000032.jpg",
"image/file_path": image_file,
"height": 281,
"width": 500,
"objects": [
{
"label": 0,
"pose": "frontal",
"bbox": [78, 104, 183, 375],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 0,
"pose": "left",
"bbox": [88, 133, 123, 197],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [180, 195, 229, 213],
"is_truncated": False,
"is_difficult": False,
},
{
"label": 14,
"pose": "rear",
"bbox": [189, 26, 238, 44],
"is_truncated": False,
"is_difficult": False,
},
],
"labels": [0, 14],
"segmentation/class/file_path": os.path.join(
data_dir, "SegmentationClass", "2007_000032.png"
),
"segmentation/object/file_path": os.path.join(
data_dir, "SegmentationObject", "2007_000032.png"
),
}
self.assertEquals(result_dict, expected_result)
def test_build_metadata(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_ids = segmentation._get_image_ids(data_dir, "trainval")
metadata = segmentation._build_metadata(data_dir, image_ids)
self.assertEquals(
metadata["image/filename"],
[
"2007_000032.jpg",
"2007_000039.jpg",
"2007_000063.jpg",
"2007_000033.jpg",
],
)
expected_keys = [
"image/filename",
"image/file_path",
"segmentation/class/file_path",
"segmentation/object/file_path",
"labels",
"width",
"height",
"objects/label",
"objects/pose",
"objects/bbox",
"objects/is_truncated",
"objects/is_difficult",
]
for key in expected_keys:
self.assertLen(metadata[key], 4)
def test_build_dataset(self):
local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
data_dir = segmentation._download_data_file(
data_url=pathlib.Path(self.test_data_tar_path).as_uri(),
extracted_dir=extracted_dir,
local_dir_path=local_data_dir,
)
image_ids = segmentation._get_image_ids(data_dir, "train")
metadata = segmentation._build_metadata(data_dir, image_ids)
segmentation._maybe_populate_voc_color_mapping()
dataset = segmentation._build_dataset_from_metadata(metadata)
entry = next(dataset.take(1).as_numpy_iterator())
self.assertEquals(entry["image/filename"], b"2007_000032.jpg")
expected_keys = [
"image",
"image/filename",
"labels",
"width",
"height",
"objects/label",
"objects/pose",
"objects/bbox",
"objects/is_truncated",
"objects/is_difficult",
"class_segmentation",
"object_segmentation",
]
for key in expected_keys:
self.assertIn(key, entry)
# Check the mask png content
png = entry["class_segmentation"]
self.assertEquals(png.shape, (281, 500, 1))
self.assertEquals(
tf.reduce_max(png), 255
) # The 255 value is for the boundary
self.assertEquals(
tf.reduce_min(png), 0
) # The 0 value is for the background
# The mask contains two classes, 1 and 15, see the label section in the
# previous test case.
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(png, 1), tf.int32)), 4734
)
self.assertEquals(
tf.reduce_sum(tf.cast(tf.equal(png, 15), tf.int32)), 866
)
| keras-cv/keras_cv/datasets/pascal_voc/segmentation_test.py/0 | {
"file_path": "keras-cv/keras_cv/datasets/pascal_voc/segmentation_test.py",
"repo_id": "keras-cv",
"token_count": 6396
} | 40 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow.keras.layers import CenterCrop
from tensorflow.keras.layers import RandomHeight
from tensorflow.keras.layers import RandomWidth
from keras_cv.layers.augmenter import Augmenter
from keras_cv.layers.feature_pyramid import FeaturePyramid
from keras_cv.layers.fusedmbconv import FusedMBConvBlock
from keras_cv.layers.hierarchical_transformer_encoder import (
HierarchicalTransformerEncoder,
)
from keras_cv.layers.mbconv import MBConvBlock
from keras_cv.layers.object_detection.anchor_generator import AnchorGenerator
from keras_cv.layers.object_detection.box_matcher import BoxMatcher
from keras_cv.layers.object_detection.multi_class_non_max_suppression import (
MultiClassNonMaxSuppression,
)
from keras_cv.layers.object_detection.non_max_suppression import (
NonMaxSuppression,
)
from keras_cv.layers.object_detection_3d.centernet_label_encoder import (
CenterNetLabelEncoder,
)
from keras_cv.layers.object_detection_3d.voxelization import DynamicVoxelization
from keras_cv.layers.overlapping_patching_embedding import (
OverlappingPatchingAndEmbedding,
)
from keras_cv.layers.preprocessing.aug_mix import AugMix
from keras_cv.layers.preprocessing.auto_contrast import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.layers.preprocessing.cut_mix import CutMix
from keras_cv.layers.preprocessing.equalization import Equalization
from keras_cv.layers.preprocessing.fourier_mix import FourierMix
from keras_cv.layers.preprocessing.grayscale import Grayscale
from keras_cv.layers.preprocessing.grid_mask import GridMask
from keras_cv.layers.preprocessing.jittered_resize import JitteredResize
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.layers.preprocessing.mosaic import Mosaic
from keras_cv.layers.preprocessing.posterization import Posterization
from keras_cv.layers.preprocessing.rand_augment import RandAugment
from keras_cv.layers.preprocessing.random_apply import RandomApply
from keras_cv.layers.preprocessing.random_aspect_ratio import RandomAspectRatio
from keras_cv.layers.preprocessing.random_augmentation_pipeline import (
RandomAugmentationPipeline,
)
from keras_cv.layers.preprocessing.random_brightness import RandomBrightness
from keras_cv.layers.preprocessing.random_channel_shift import (
RandomChannelShift,
)
from keras_cv.layers.preprocessing.random_choice import RandomChoice
from keras_cv.layers.preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras_cv.layers.preprocessing.random_color_jitter import RandomColorJitter
from keras_cv.layers.preprocessing.random_contrast import RandomContrast
from keras_cv.layers.preprocessing.random_crop import RandomCrop
from keras_cv.layers.preprocessing.random_crop_and_resize import (
RandomCropAndResize,
)
from keras_cv.layers.preprocessing.random_cutout import RandomCutout
from keras_cv.layers.preprocessing.random_flip import RandomFlip
from keras_cv.layers.preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras_cv.layers.preprocessing.random_hue import RandomHue
from keras_cv.layers.preprocessing.random_jpeg_quality import RandomJpegQuality
from keras_cv.layers.preprocessing.random_rotation import RandomRotation
from keras_cv.layers.preprocessing.random_saturation import RandomSaturation
from keras_cv.layers.preprocessing.random_sharpness import RandomSharpness
from keras_cv.layers.preprocessing.random_shear import RandomShear
from keras_cv.layers.preprocessing.random_translation import RandomTranslation
from keras_cv.layers.preprocessing.random_zoom import RandomZoom
from keras_cv.layers.preprocessing.repeated_augmentation import (
RepeatedAugmentation,
)
from keras_cv.layers.preprocessing.rescaling import Rescaling
from keras_cv.layers.preprocessing.resizing import Resizing
from keras_cv.layers.preprocessing.solarization import Solarization
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import ( # noqa: E501
FrustumRandomDroppingPoints,
)
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_point_feature_noise import ( # noqa: E501
FrustumRandomPointFeatureNoise,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_dropping_points import ( # noqa: E501
GlobalRandomDroppingPoints,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_flip import (
GlobalRandomFlip,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_rotation import (
GlobalRandomRotation,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_scaling import (
GlobalRandomScaling,
)
from keras_cv.layers.preprocessing_3d.waymo.global_random_translation import (
GlobalRandomTranslation,
)
from keras_cv.layers.preprocessing_3d.waymo.group_points_by_bounding_boxes import ( # noqa: E501
GroupPointsByBoundingBoxes,
)
from keras_cv.layers.preprocessing_3d.waymo.random_copy_paste import (
RandomCopyPaste,
)
from keras_cv.layers.preprocessing_3d.waymo.random_drop_box import RandomDropBox
from keras_cv.layers.preprocessing_3d.waymo.swap_background import (
SwapBackground,
)
from keras_cv.layers.regularization.drop_path import DropPath
from keras_cv.layers.regularization.dropblock_2d import DropBlock2D
from keras_cv.layers.regularization.squeeze_excite import SqueezeAndExcite2D
from keras_cv.layers.regularization.stochastic_depth import StochasticDepth
from keras_cv.layers.segformer_multihead_attention import (
SegFormerMultiheadAttention,
)
from keras_cv.layers.spatial_pyramid import SpatialPyramidPooling
from keras_cv.layers.transformer_encoder import TransformerEncoder
from keras_cv.layers.vit_det_layers import AddRelativePositionalEmbedding
from keras_cv.layers.vit_det_layers import MultiHeadAttentionWithRelativePE
from keras_cv.layers.vit_det_layers import ViTDetPatchingAndEmbedding
from keras_cv.layers.vit_det_layers import WindowedTransformerEncoder
from keras_cv.layers.vit_det_layers import WindowPartitioning
from keras_cv.layers.vit_layers import PatchingAndEmbedding
| keras-cv/keras_cv/layers/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/layers/__init__.py",
"repo_id": "keras-cv",
"token_count": 2271
} | 41 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_cv import layers as cv_layers
from keras_cv.tests.test_case import TestCase
def decode_predictions_output_shapes():
num_classes = 10
predictions_shape = (8, 98208, 4 + num_classes)
predictions = tf.random.stateless_uniform(
shape=predictions_shape,
seed=(2, 3),
minval=0.0,
maxval=1.0,
dtype=tf.float32,
)
box_pred = predictions[..., :4]
class_prediction = predictions[..., 4:]
layer = cv_layers.MultiClassNonMaxSuppression(
bounding_box_format="xyxy",
from_logits=True,
max_detections=100,
)
result = layer(box_prediction=box_pred, class_prediction=class_prediction)
return result
@pytest.mark.tf_keras_only
class NmsPredictionDecoderTest(TestCase):
def test_decode_predictions_output_shapes(self):
result = decode_predictions_output_shapes()
self.assertEqual(result["boxes"].shape, [8, 100, 4])
self.assertEqual(result["classes"].shape, [8, 100])
self.assertEqual(result["confidence"].shape, [8, 100])
| keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression_test.py",
"repo_id": "keras-cv",
"token_count": 614
} | 42 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.object_detection_3d.centernet_label_encoder import (
CenterNetLabelEncoder,
)
from keras_cv.tests.test_case import TestCase
class CenterNetLabelEncoderTest(TestCase):
def test_voxelization_output_shape_no_z(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 2])
def test_voxelization_output_shape_with_z(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 10],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 4, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 4, 7])
# last dimension has x, y, z
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 3])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 3])
def test_voxelization_output_shape_missing_topk(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 0],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"], None)
| keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 2396
} | 43 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers.preprocessing.channel_shuffle import ChannelShuffle
from keras_cv.tests.test_case import TestCase
class ChannelShuffleTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
layer = ChannelShuffle(groups=3)
xs = layer(xs, training=True)
self.assertEqual(xs.shape, (2, 512, 512, 3))
def test_channel_shuffle_call_results_one_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 1)), 2 * tf.ones((40, 40, 1))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_channel_shuffle_call_results_multi_channel(self):
xs = tf.cast(
tf.stack(
[3 * tf.ones((40, 40, 20)), 2 * tf.ones((40, 40, 20))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=5)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 3.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 2.0))
def test_non_square_image(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((1024, 512, 1)), tf.ones((1024, 512, 1))],
axis=0,
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
@tf.function
def augment(x):
return layer(x, training=True)
xs = augment(xs)
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_in_single_image(self):
xs = tf.cast(
tf.ones((512, 512, 1)),
dtype=tf.float32,
)
layer = ChannelShuffle(groups=1)
xs = layer(xs, training=True)
self.assertTrue(np.any(ops.convert_to_numpy(xs) == 1.0))
@pytest.mark.skip(reason="flaky")
def test_channel_shuffle_on_batched_images_independently(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = ChannelShuffle(groups=3)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_config_with_custom_name(self):
layer = ChannelShuffle(name="image_preproc")
config = layer.get_config()
layer_1 = ChannelShuffle.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = ChannelShuffle(groups=1)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = ChannelShuffle(groups=1, dtype="uint8")
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = ChannelShuffle(groups=5)
config = layer.get_config()
self.assertEqual(config["groups"], 5)
| keras-cv/keras_cv/layers/preprocessing/channel_shuffle_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/channel_shuffle_test.py",
"repo_id": "keras-cv",
"token_count": 2063
} | 44 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
@keras_cv_export("keras_cv.layers.RandomChoice")
class RandomChoice(BaseImageAugmentationLayer):
"""RandomChoice constructs a pipeline based on provided arguments.
The implemented policy does the following: for each input provided in
`call`(), the policy selects a random layer from the provided list of
`layers`. It then calls the `layer()` on the inputs.
Usage:
```python
# construct a list of layers
layers = keras_cv.layers.RandAugment.get_standard_policy(
value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3
)
layers = layers[:4]  # slice out some layers you don't want, for whatever reason
layers = layers + [keras_cv.layers.GridMask()]
# create the pipeline.
pipeline = keras_cv.layers.RandomChoice(layers=layers)
augmented_images = pipeline(images)
```
Args:
layers: a list of `keras.Layers`. One of these is randomly selected during
augmentation to augment the inputs passed in `call()`. The layers
passed should subclass `BaseImageAugmentationLayer`.
auto_vectorize: whether to use `tf.vectorized_map` or `tf.map_fn` to
apply the augmentations. This offers a significant performance
boost, but can only be used if all the layers provided to the
`layers` argument support auto vectorization.
batchwise: Boolean, whether to pass entire batches to the
underlying layer. When set to `True`, each batch is passed to a
single layer, instead of each sample to an independent layer. This
is useful when using `MixUp()`, `CutMix()`, `Mosaic()`, etc.
Defaults to `False`.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
layers,
auto_vectorize=False,
batchwise=False,
seed=None,
**kwargs,
):
super().__init__(**kwargs, seed=seed)
self.layers = layers
self.auto_vectorize = auto_vectorize
self.batchwise = batchwise
self.seed = seed
def _curry_call_layer(self, inputs, layer):
def call_layer():
return layer(inputs)
return call_layer
def _batch_augment(self, inputs):
if self.batchwise:
return self._augment(inputs)
else:
return super()._batch_augment(inputs)
def _augment(self, inputs, *args, **kwargs):
selected_op = self._random_generator.uniform(
(), minval=0, maxval=len(self.layers), dtype=tf.int32
)
# Warning:
# Do not replace the currying function with a lambda.
# Originally we used a lambda, but due to Python's
# lack of loop level scope this causes unexpected
# behavior running outside of graph mode.
#
# Autograph has an edge case where the behavior of Python for loop
# variables is inconsistent between Python and graph execution.
# By using a list comprehension and currying, we mitigate
# our code against both of these cases.
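# Illustrative example of the pitfall (not part of the original file):
# `fns = [lambda: layer(inputs) for layer in self.layers]` would leave every
# branch bound to the *last* layer, because `layer` is looked up when the
# lambda is called, not when the list is built.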
branch_fns = [
(i, self._curry_call_layer(inputs, layer))
for (i, layer) in enumerate(self.layers)
]
return tf.switch_case(
branch_index=selected_op,
branch_fns=branch_fns,
default=lambda: inputs,
)
def get_config(self):
config = super().get_config()
config.update(
{
"layers": self.layers,
"auto_vectorize": self.auto_vectorize,
"seed": self.seed,
"batchwise": self.batchwise,
}
)
return config
| keras-cv/keras_cv/layers/preprocessing/random_choice.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_choice.py",
"repo_id": "keras-cv",
"token_count": 1772
} | 45 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomGaussianBlur")
class RandomGaussianBlur(BaseImageAugmentationLayer):
"""Applies a Gaussian Blur with random strength to an image.
Args:
kernel_size: int, 2 element tuple or 2 element list. x and y dimensions
for the kernel used. If tuple or list, first element is used for the
x dimension and second element is used for y dimension. If int,
kernel will be squared.
factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `factor` controls the extent to which the
image is blurred. Mathematically, `factor` represents the `sigma`
value in a gaussian blur. `factor=0.0` makes this layer perform a
no-op operation, and high values make the blur stronger. In order to
ensure the value is always the same, please pass a tuple with two
identical floats: `(0.5, 0.5)`.
"""
def __init__(self, kernel_size, factor, **kwargs):
super().__init__(**kwargs)
self.factor = preprocessing.parse_factor(
factor, min_value=0.0, max_value=None, param_name="factor"
)
self.kernel_size = kernel_size
if isinstance(kernel_size, (tuple, list)):
self.x = kernel_size[0]
self.y = kernel_size[1]
else:
if isinstance(kernel_size, int):
self.x = self.y = kernel_size
else:
raise ValueError(
"`kernel_size` must be list, tuple or integer "
", got {} ".format(type(self.kernel_size))
)
def get_random_transformation(self, **kwargs):
# `factor` must not become too small otherwise numerical issues occur.
# keras.backend.epsilon() behaves like 0 without causing `nan`s
factor = tf.math.maximum(self.factor(), keras.backend.epsilon())
blur_v = RandomGaussianBlur.get_kernel(factor, self.y)
blur_h = RandomGaussianBlur.get_kernel(factor, self.x)
blur_v = tf.reshape(blur_v, [self.y, 1, 1, 1])
blur_h = tf.reshape(blur_h, [1, self.x, 1, 1])
return (blur_v, blur_h)
def augment_image(self, image, transformation=None, **kwargs):
image = tf.expand_dims(image, axis=0)
num_channels = tf.shape(image)[-1]
blur_v, blur_h = transformation
blur_h = tf.cast(
tf.tile(blur_h, [1, 1, num_channels, 1]), dtype=self.compute_dtype
)
blur_v = tf.cast(
tf.tile(blur_v, [1, 1, num_channels, 1]), dtype=self.compute_dtype
)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding="SAME"
)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding="SAME"
)
return tf.squeeze(blurred, axis=0)
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_label(self, label, transformation=None, **kwargs):
return label
def augment_segmentation_mask(
self, segmentation_mask, transformation, **kwargs
):
return segmentation_mask
@staticmethod
def get_kernel(factor, filter_size):
# We are running this in float32, regardless of layer's
# self.compute_dtype. Calculating blur_filter in lower precision will
# corrupt the final results.
x = tf.cast(
tf.range(-filter_size // 2 + 1, filter_size // 2 + 1),
dtype=tf.float32,
)
blur_filter = tf.exp(
-tf.pow(x, 2.0)
/ (2.0 * tf.pow(tf.cast(factor, dtype=tf.float32), 2.0))
)
blur_filter /= tf.reduce_sum(blur_filter)
return blur_filter
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor, "kernel_size": self.kernel_size})
return config
| keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_gaussian_blur.py",
"repo_id": "keras-cv",
"token_count": 2033
} | 46 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.keras import backend
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
# In order to support both unbatched and batched inputs, the horizontal
# and vertical axes are reverse indexed
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomZoom")
class RandomZoom(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly zooms images.
This layer will randomly zoom in or out on each axis of an image
independently, filling empty space according to `fill_mode`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype. By default, the layer will output
floats.
Args:
height_factor: a float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound for zooming vertically. When
represented as a single float, this value is used for both the upper and
lower bound. A positive value means zooming out, while a negative value
means zooming in. For instance, `height_factor=(0.2, 0.3)` results in an
output zoomed out by a random amount in the range `[+20%, +30%]`.
`height_factor=(-0.3, -0.2)` results in an output zoomed in by a random
amount in the range `[-30%, -20%]`.
width_factor: a float represented as fraction of value, or a tuple of size
2 representing lower and upper bound for zooming horizontally. When
represented as a single float, this value is used for both the upper and
lower bound. For instance, `width_factor=(0.2, 0.3)` results in an output
zoomed out by between 20% and 30%. `width_factor=(-0.3, -0.2)` results in an
output zoomed in by between 20% and 30%. Defaults to `None`, i.e., zooming in
both the vertical and horizontal directions while preserving the aspect ratio. If
height_factor=0 and width_factor=None, it would result in images with
no zoom at all.
fill_mode: Points outside the boundaries of the input are filled according
to the given mode (one of `{"constant", "reflect", "wrap", "nearest"}`).
- *reflect*: `(d c b a | a b c d | d c b a)` The input is extended by
reflecting about the edge of the last pixel.
- *constant*: `(k k k k | a b c d | k k k k)` The input is extended by
filling all values beyond the edge with the same constant value k = 0.
- *wrap*: `(a b c d | a b c d | a b c d)` The input is extended by
wrapping around to the opposite edge.
- *nearest*: `(a a a a | a b c d | d d d d)` The input is extended by
the nearest pixel.
interpolation: Interpolation mode. Supported values: `"nearest"`,
`"bilinear"`.
seed: Integer. Used to create a random seed.
fill_value: a float represents the value to be filled outside the
boundaries when `fill_mode="constant"`.
Example:
>>> input_img = np.random.random((32, 224, 224, 3))
>>> layer = keras_cv.layers.RandomZoom(.5, .2)
>>> out_img = layer(input_img)
>>> out_img.shape
TensorShape([32, 224, 224, 3])
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
"""
def __init__(
self,
height_factor,
width_factor=None,
fill_mode="reflect",
interpolation="bilinear",
seed=None,
fill_value=0.0,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = height_factor
if isinstance(height_factor, (tuple, list)):
self.height_lower = height_factor[0]
self.height_upper = height_factor[1]
else:
self.height_lower = -height_factor
self.height_upper = height_factor
if abs(self.height_lower) > 1.0 or abs(self.height_upper) > 1.0:
raise ValueError(
"`height_factor` must have values between [-1, 1], "
f"got {height_factor}"
)
self.width_factor = width_factor
if width_factor is not None:
if isinstance(width_factor, (tuple, list)):
self.width_lower = width_factor[0]
self.width_upper = width_factor[1]
else:
self.width_lower = -width_factor
self.width_upper = width_factor
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`width_factor` must have values larger than -1, "
f"got {width_factor}"
)
preprocessing_utils.check_fill_mode_and_interpolation(
fill_mode, interpolation
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.interpolation = interpolation
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
height_zooms = self._random_generator.uniform(
shape=[batch_size, 1],
minval=1.0 + self.height_lower,
maxval=1.0 + self.height_upper,
)
if self.width_factor is not None:
width_zooms = self._random_generator.uniform(
shape=[batch_size, 1],
minval=1.0 + self.width_lower,
maxval=1.0 + self.width_upper,
)
else:
width_zooms = height_zooms
return {"height_zooms": height_zooms, "width_zooms": width_zooms}
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
width_zooms = transformation["width_zooms"]
height_zooms = transformation["height_zooms"]
transformation = {
"height_zooms": tf.expand_dims(height_zooms, axis=0),
"width_zooms": tf.expand_dims(width_zooms, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
images = preprocessing_utils.ensure_tensor(images, self.compute_dtype)
original_shape = images.shape
image_shape = tf.shape(images)
img_hd = tf.cast(image_shape[H_AXIS], tf.float32)
img_wd = tf.cast(image_shape[W_AXIS], tf.float32)
width_zooms = transformations["width_zooms"]
height_zooms = transformations["height_zooms"]
zooms = tf.cast(
tf.concat([width_zooms, height_zooms], axis=1), dtype=tf.float32
)
outputs = preprocessing_utils.transform(
images,
self.get_zoom_matrix(zooms, img_hd, img_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation=self.interpolation,
)
outputs.set_shape(original_shape)
return outputs
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
segmentation_masks = preprocessing_utils.ensure_tensor(
segmentation_masks, self.compute_dtype
)
original_shape = segmentation_masks.shape
mask_shape = tf.shape(segmentation_masks)
mask_hd = tf.cast(mask_shape[H_AXIS], tf.float32)
mask_wd = tf.cast(mask_shape[W_AXIS], tf.float32)
width_zooms = transformations["width_zooms"]
height_zooms = transformations["height_zooms"]
zooms = tf.cast(
tf.concat([width_zooms, height_zooms], axis=1), dtype=tf.float32
)
outputs = preprocessing_utils.transform(
segmentation_masks,
self.get_zoom_matrix(zooms, mask_hd, mask_wd),
fill_mode=self.fill_mode,
fill_value=self.fill_value,
interpolation="nearest",
)
outputs.set_shape(original_shape)
return outputs
def get_zoom_matrix(self, zooms, image_height, image_width, name=None):
"""Returns projective transform(s) for the given zoom(s).
Args:
zooms: A matrix of 2-element lists representing `[zx, zy]` to zoom for
each image (for a batch of images).
image_height: Height of the image(s) to be transformed.
image_width: Width of the image(s) to be transformed.
name: The name of the op.
Returns:
A tensor of shape `(num_images, 8)`. Projective transforms which can be
given to operation `image_projective_transform_v2`.
If one row of transforms is
`[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
"""
with backend.name_scope(name or "zoom_matrix"):
num_zooms = tf.shape(zooms)[0]
# The zoom matrix looks like:
# [[zx 0 0]
# [0 zy 0]
# [0 0 1]]
# where the last entry is implicit.
# Zoom matrices are always float32.
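            # Worked example (values derived from the formulas below, numbers
            # purely illustrative): for a 224-pixel-wide image and a zoom
            # value of 2.0 (i.e. a positive factor of 1.0, zooming out),
            # x_offset = ((224 - 1) / 2) * (1 - 2.0) = -111.5, so an output
            # pixel x maps back to input pixel 2.0 * x - 111.5. The center
            # (x = 111.5) maps to itself, while border pixels sample outside
            # the original image and are filled according to `fill_mode`.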
x_offset = ((image_width - 1.0) / 2.0) * (1.0 - zooms[:, 0, None])
y_offset = ((image_height - 1.0) / 2.0) * (1.0 - zooms[:, 1, None])
return tf.concat(
values=[
zooms[:, 0, None],
tf.zeros((num_zooms, 1), tf.float32),
x_offset,
tf.zeros((num_zooms, 1), tf.float32),
zooms[:, 1, None],
y_offset,
tf.zeros((num_zooms, 2), tf.float32),
],
axis=1,
)
def get_config(self):
config = {
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
return cls(**config)
| keras-cv/keras_cv/layers/preprocessing/random_zoom.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_zoom.py",
"repo_id": "keras-cv",
"token_count": 5006
} | 47 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import random
POINT_CLOUDS = "point_clouds"
BOUNDING_BOXES = "bounding_boxes"
OBJECT_POINT_CLOUDS = "object_point_clouds"
OBJECT_BOUNDING_BOXES = "object_bounding_boxes"
ADDITIONAL_POINT_CLOUDS = "additional_point_clouds"
ADDITIONAL_BOUNDING_BOXES = "additional_bounding_boxes"
BOX_LABEL_INDEX = 7
POINTCLOUD_LABEL_INDEX = 3
POINTCLOUD_FEATURE_INDEX = 4
@keras_cv_export("keras_cv.layers.BaseAugmentationLayer3D")
class BaseAugmentationLayer3D(keras.layers.Layer):
"""Abstract base layer for data augmentation for 3D perception.
    This layer contains base functionalities for preprocessing layers that
    augment 3D-perception-related data, e.g. point clouds and, in the future,
    images. Subclassing it helps avoid common mistakes and reduces code
    duplication.
    This layer requires you to implement one method:
    `augment_point_clouds_bounding_boxes()`, which augments one frame (or a
    sequence of frames) of point clouds together with its bounding boxes
    during training. There is an additional method that you can implement for
    added functionality on the layer:
    `get_random_transformation()`, which should produce a random transformation
    setting. The transformation object, which could be any type, will be passed
    to `augment_point_clouds_bounding_boxes` to coordinate the randomness
    behavior, e.g., in the RotateZ layer, the point_clouds and
    bounding_boxes should be changed in the same way.
    The `call()` method supports two formats of inputs:
    1. A dict of tensors with stable keys. The supported keys are
    `"point_clouds"` and `"bounding_boxes"` at the moment. We might add
    more keys in the future when we support more types of augmentation.
    2. A dict in the model format, with `"point_clouds"` and `"3d_boxes"`
    keys holding nested dicts of tensors; this is converted to the first
    format internally and converted back before the output is returned.
    The output of `call()` will have the same structure as the inputs.
    `call()` handles the logic of detecting the training/inference mode,
    unpacking the inputs, forwarding them to the correct function, and packing
    the output back into the same structure as the inputs.
By default, the `call()` method leverages the `tf.vectorized_map()`
function. Auto-vectorization can be disabled by setting
`self.auto_vectorize = False` in your `__init__()` method. When disabled,
`call()` instead relies on `tf.map_fn()`. For example:
```python
    class SubclassLayer(keras_cv.layers.BaseAugmentationLayer3D):
def __init__(self):
super().__init__()
self.auto_vectorize = False
```
Example:
```python
    class RandomRotateZ(keras_cv.layers.BaseAugmentationLayer3D):
def __init__(self, max_rotation, **kwargs):
super().__init__(**kwargs)
self._max_rotation = max_rotation
      def augment_point_clouds_bounding_boxes(
          self, point_clouds, bounding_boxes, transformation, **kwargs):
        pose = transformation['pos']
        # Rotate the xyz coordinates of every point.
        point_clouds_xyz = geometry.CoordinateTransform(point_clouds[..., :3], pose)
        point_clouds = tf.concat([point_clouds_xyz, point_clouds[..., 3:]], axis=-1)
        return point_clouds, bounding_boxes
```
"""
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
self.auto_vectorize = False
self.seed = seed
self._random_generator = random.SeedGenerator(seed=self.seed)
@property
def auto_vectorize(self):
"""Control whether automatic vectorization occurs.
By default, the `call()` method leverages the `tf.vectorized_map()`
function. Auto-vectorization can be disabled by setting
`self.auto_vectorize = False` in your `__init__()` method. When
disabled, `call()` instead relies on `tf.map_fn()`. For example:
```python
        class SubclassLayer(BaseAugmentationLayer3D):
def __init__(self):
super().__init__()
self.auto_vectorize = False
```
"""
return getattr(self, "_auto_vectorize", True)
@auto_vectorize.setter
def auto_vectorize(self, auto_vectorize):
self._auto_vectorize = auto_vectorize
@property
def _map_fn(self):
if self.auto_vectorize:
return tf.vectorized_map
else:
return tf.map_fn
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
"""Augment a single point cloud frame during training.
Args:
point_clouds: 3D point cloud input tensor to the layer. Forwarded from
`layer.call()`.
bounding_boxes: 3D bounding boxes to the layer. Forwarded from
`call()`.
transformation: The transformation object produced by
`get_random_transformation`. Used to coordinate the randomness
            between point clouds and bounding boxes.
Returns:
          output 3D tensor, which will be forwarded to `layer.call()`.
"""
raise NotImplementedError()
def get_random_transformation(self, point_clouds=None, bounding_boxes=None):
"""Produce random transformation config for one single input.
This is used to produce same randomness between
image/label/bounding_box.
Args:
point_clouds: 3D point clouds tensor from inputs.
bounding_boxes: 3D bounding boxes tensor from inputs.
Returns:
            Any type of object, which will be forwarded to
            `augment_point_clouds_bounding_boxes` as the `transformation`
            parameter.
"""
return None
def call(self, inputs):
if "3d_boxes" in inputs.keys():
# TODO(ianstenbit): Consider using the better format internally
# (in the KPL implementations) instead of wrapping it at call time.
point_clouds, bounding_boxes = convert_from_model_format(inputs)
inputs = {
POINT_CLOUDS: point_clouds,
BOUNDING_BOXES: bounding_boxes,
}
use_model_format = True
else:
point_clouds = inputs[POINT_CLOUDS]
bounding_boxes = inputs[BOUNDING_BOXES]
use_model_format = False
if point_clouds.shape.rank == 3 and bounding_boxes.shape.rank == 3:
outputs = self._augment(inputs)
elif point_clouds.shape.rank == 4 and bounding_boxes.shape.rank == 4:
outputs = self._batch_augment(inputs)
else:
raise ValueError(
"Point clouds augmentation layers are expecting inputs "
"point clouds and bounding boxes to be rank 3D (Frame, Point, "
"Feature) or 4D (Batch, Frame, Point, Feature) tensors. Got "
"shape: {} and {}".format(
point_clouds.shape, bounding_boxes.shape
)
)
if use_model_format:
return convert_to_model_format(outputs)
else:
return outputs
def _augment(self, inputs):
point_clouds = inputs.get(POINT_CLOUDS, None)
bounding_boxes = inputs.get(BOUNDING_BOXES, None)
transformation = self.get_random_transformation(
point_clouds=point_clouds,
bounding_boxes=bounding_boxes,
)
point_clouds, bounding_boxes = self.augment_point_clouds_bounding_boxes(
point_clouds,
bounding_boxes=bounding_boxes,
transformation=transformation,
)
result = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
# preserve any additional inputs unmodified by this layer.
for key in inputs.keys() - result.keys():
result[key] = inputs[key]
return result
def _batch_augment(self, inputs):
return self._map_fn(self._augment, inputs)
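# A minimal illustrative subclass (not part of the public API) showing the one
# method a subclass must implement. The class name and the fixed z-shift used
# below are hypothetical, chosen purely for demonstration.
class _ExampleZShift(BaseAugmentationLayer3D):
    """Shifts all points and box centers up by a constant amount along z."""
    def augment_point_clouds_bounding_boxes(
        self, point_clouds, bounding_boxes, transformation, **kwargs
    ):
        z_shift = 0.1
        # Point features are laid out as [x, y, z, ...]; only z is shifted.
        point_clouds = tf.concat(
            [
                point_clouds[..., :2],
                point_clouds[..., 2:3] + z_shift,
                point_clouds[..., 3:],
            ],
            axis=-1,
        )
        # Boxes follow the CENTER_XYZ_DXDYDZ_PHI layout, so only the z center
        # (index 2) is shifted.
        bounding_boxes = tf.concat(
            [
                bounding_boxes[..., :2],
                bounding_boxes[..., 2:3] + z_shift,
                bounding_boxes[..., 3:],
            ],
            axis=-1,
        )
        return point_clouds, bounding_boxes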
def convert_to_model_format(inputs):
point_clouds = {
"point_xyz": inputs["point_clouds"][..., :3],
"point_feature": inputs["point_clouds"][..., 3:-1],
"point_mask": tf.cast(inputs["point_clouds"][..., -1], tf.bool),
}
boxes = {
"boxes": inputs["bounding_boxes"][..., :7],
"classes": inputs["bounding_boxes"][..., 7],
"mask": tf.cast(inputs["bounding_boxes"][..., 8], tf.bool),
}
# Special case for when we have a difficulty field
if inputs["bounding_boxes"].shape[-1] > 8:
boxes["difficulty"] = inputs["bounding_boxes"][..., -1]
return {
"point_clouds": point_clouds,
"3d_boxes": boxes,
}
def convert_from_model_format(inputs):
point_clouds = tf.concat(
[
inputs["point_clouds"]["point_xyz"],
inputs["point_clouds"]["point_feature"],
tf.expand_dims(
tf.cast(
inputs["point_clouds"]["point_mask"],
inputs["point_clouds"]["point_xyz"].dtype,
),
axis=-1,
),
],
axis=-1,
)
box_tensors = [
inputs["3d_boxes"]["boxes"],
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["classes"], inputs["3d_boxes"]["boxes"].dtype
),
axis=-1,
),
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["mask"], inputs["3d_boxes"]["boxes"].dtype
),
axis=-1,
),
]
# Special case for when we have a difficulty field
if "difficulty" in inputs["3d_boxes"].keys():
box_tensors.append(
tf.expand_dims(
tf.cast(
inputs["3d_boxes"]["difficulty"],
inputs["3d_boxes"]["boxes"].dtype,
),
axis=-1,
)
)
boxes = tf.concat(box_tensors, axis=-1)
return point_clouds, boxes
| keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/base_augmentation_layer_3d.py",
"repo_id": "keras-cv",
"token_count": 4405
} | 48 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_scaling import (
GlobalRandomScaling,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalScalingTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_point_clouds_and_bounding_boxes_with_same_scaling(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
preserve_aspect_ratio=True,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(1.0, 1.0),
y_factor=(1.0, 1.0),
z_factor=(1.0, 1.0),
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_2x_scaling_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(2.0, 2.0),
y_factor=(2.0, 2.0),
z_factor=(2.0, 2.0),
)
point_clouds = np.array(
[[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
bounding_boxes = np.array([[[0, 1, 2, 3, 4, 5, 6]] * 2] * 2).astype(
"float32"
)
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
scaled_point_clouds = np.array(
[[[0, 2, 4, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
scaled_bounding_boxes = np.array(
[[[0, 2, 4, 6, 8, 10, 6]] * 2] * 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], scaled_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], scaled_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(0.5, 1.5),
y_factor=(0.5, 1.5),
z_factor=(0.5, 1.5),
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomScaling(
x_factor=(1.0, 1.0),
y_factor=(1.0, 1.0),
z_factor=(1.0, 1.0),
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling_test.py",
"repo_id": "keras-cv",
"token_count": 2073
} | 49 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.layers.SqueezeAndExcite2D")
class SqueezeAndExcite2D(keras.layers.Layer):
"""
Implements Squeeze and Excite block as in
[Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf).
This layer tries to use a content aware mechanism to assign channel-wise
    weights adaptively. It first squeezes each feature map into a single value
    using global average pooling; these values are then fed into two Conv2D
    layers with 1x1 kernels, which act like fully-connected layers. The first
    layer reduces the dimensionality of the feature maps, and the second layer
    restores it to its original value.
    The resultant values are the adaptive weights for each channel. These
    weights are then multiplied with the original inputs, scaling each channel
    according to its learned importance.
Args:
filters: Number of input and output filters. The number of input and
        output filters is the same.
bottleneck_filters: (Optional) Number of bottleneck filters. Defaults
to `0.25 * filters`
squeeze_activation: (Optional) String, callable (or
keras.layers.Layer) or keras.activations.Activation instance
denoting activation to be applied after squeeze convolution.
Defaults to `relu`.
excite_activation: (Optional) String, callable (or
keras.layers.Layer) or keras.activations.Activation instance
denoting activation to be applied after excite convolution.
Defaults to `sigmoid`.
Usage:
```python
# (...)
input = tf.ones((1, 5, 5, 16), dtype=tf.float32)
x = keras.layers.Conv2D(16, (3, 3))(input)
    output = keras_cv.layers.SqueezeAndExcite2D(16)(x)
# (...)
```
"""
def __init__(
self,
filters,
bottleneck_filters=None,
squeeze_activation="relu",
excite_activation="sigmoid",
**kwargs,
):
super().__init__(**kwargs)
self.filters = filters
if bottleneck_filters and bottleneck_filters >= filters:
raise ValueError(
"`bottleneck_filters` should be smaller than `filters`. Got "
f"`filters={filters}`, and "
f"`bottleneck_filters={bottleneck_filters}`."
)
if filters <= 0 or not isinstance(filters, int):
raise ValueError(
f"`filters` should be a positive integer. Got {filters}"
)
self.bottleneck_filters = bottleneck_filters or (filters // 4)
self.squeeze_activation = squeeze_activation
self.excite_activation = excite_activation
self.global_average_pool = keras.layers.GlobalAveragePooling2D(
keepdims=True
)
self.squeeze_conv = keras.layers.Conv2D(
self.bottleneck_filters,
(1, 1),
activation=self.squeeze_activation,
)
self.excite_conv = keras.layers.Conv2D(
self.filters, (1, 1), activation=self.excite_activation
)
def call(self, inputs, training=None):
x = self.global_average_pool(inputs) # x: (batch_size, 1, 1, filters)
x = self.squeeze_conv(x) # x: (batch_size, 1, 1, bottleneck_filters)
x = self.excite_conv(x) # x: (batch_size, 1, 1, filters)
x = ops.multiply(x, inputs) # x: (batch_size, h, w, filters)
return x
def get_config(self):
config = {
"filters": self.filters,
"bottleneck_filters": self.bottleneck_filters,
"squeeze_activation": self.squeeze_activation,
"excite_activation": self.excite_activation,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["squeeze_activation"], dict):
config["squeeze_activation"] = (
keras.saving.deserialize_keras_object(
config["squeeze_activation"]
)
)
if isinstance(config["excite_activation"], dict):
config["excite_activation"] = keras.saving.deserialize_keras_object(
config["excite_activation"]
)
return cls(**config)
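# A minimal illustrative sketch (not part of the public API) showing the layer
# applied to a feature map inside a small functional model. The helper name,
# filter counts and input shape are hypothetical.
def _example_squeeze_excite_model():  # pragma: no cover
    inputs = keras.layers.Input(shape=(64, 64, 16))
    x = keras.layers.Conv2D(16, (3, 3), padding="same")(inputs)
    # Channel-wise re-weighting; the output shape matches the feature map.
    x = SqueezeAndExcite2D(filters=16, bottleneck_filters=4)(x)
    return keras.Model(inputs, x)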
| keras-cv/keras_cv/layers/regularization/squeeze_excite.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/squeeze_excite.py",
"repo_id": "keras-cv",
"token_count": 2068
} | 50 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
import keras_cv
from keras_cv.tests.test_case import TestCase
class CenterNetBoxLoss(TestCase):
@parameterized.named_parameters(
(
"none",
"none",
(
2,
10,
),
),
("sum", "sum", ()),
("sum_over_batch_size", "sum_over_batch_size", ()),
)
def test_proper_output_shapes(self, reduction, target_size):
loss = keras_cv.losses.CenterNetBoxLoss(
num_heading_bins=4, anchor_size=[1.0, 1.0, 1.0], reduction=reduction
)
result = loss(
y_true=np.random.uniform(size=(2, 10, 7)),
# Predictions have xyz,lwh, and 2*4 values for heading.
y_pred=np.random.uniform(size=(2, 10, 6 + 2 * 4)),
)
self.assertEqual(result.shape, target_size)
| keras-cv/keras_cv/losses/centernet_box_loss_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/centernet_box_loss_test.py",
"repo_id": "keras-cv",
"token_count": 611
} | 51 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loading pretrained model presets."""
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.models.backbones.csp_darknet.csp_darknet_aliases import (
CSPDarkNetMBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.tests.test_case import TestCase
@pytest.mark.large
class CSPDarkNetPresetSmokeTest(TestCase):
"""
A smoke test for CSPDarkNet presets we run continuously.
This only tests the smallest weights we have available. Run with:
`pytest keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py --run_large` # noqa: E501
"""
def setUp(self):
self.input_batch = tf.ones(shape=(2, 224, 224, 3))
def test_backbone_output(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny")
model(self.input_batch)
def test_backbone_output_with_weights_tiny(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny_imagenet")
outputs = model(tf.ones(shape=(1, 512, 512, 3)))
# The forward pass from a preset should be stable!
# This test should catch cases where we unintentionally change our
# network code in a way that would invalidate our preset weights.
# We should only update these numbers if we are updating a weights
# file, or have found a discrepancy with the upstream source.
expected = [-0.16216235, 0.7333651, 0.4312072, 0.738807, -0.2515305]
# Keep a high tolerance, so we are robust to different hardware.
self.assertAllClose(
ops.convert_to_numpy(outputs[0, 0, 0, :5]),
expected,
atol=0.01,
rtol=0.01,
)
def test_applications_model_output(self):
model = CSPDarkNetMBackbone()
model(self.input_batch)
def test_applications_model_output_with_preset(self):
model = CSPDarkNetBackbone.from_preset("csp_darknet_tiny_imagenet")
model(self.input_batch)
def test_preset_docstring(self):
"""Check we did our docstring formatting correctly."""
for name in CSPDarkNetBackbone.presets:
self.assertRegex(
CSPDarkNetBackbone.from_preset.__doc__,
name,
)
def test_unknown_preset_error(self):
# Not a preset name
with self.assertRaises(ValueError):
CSPDarkNetBackbone.from_preset("unknown_weights")
def test_load_weights_error(self):
# Try to load weights when none available
with self.assertRaises(ValueError):
CSPDarkNetBackbone.from_preset(
"csp_darknet_tiny", load_weights=True
)
@pytest.mark.extra_large
class CSPDarkNetPresetFullTest(TestCase):
"""
Test the full enumeration of our preset.
This tests every preset for CSPDarkNet and is only run manually.
Run with:
`pytest keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py --run_extra_large` # noqa: E501
"""
def test_load_csp_darknet(self):
input_data = tf.ones(shape=(2, 512, 512, 3))
for preset in CSPDarkNetBackbone.presets:
model = CSPDarkNetBackbone.from_preset(preset)
model(input_data)
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets_test.py",
"repo_id": "keras-cv",
"token_count": 1546
} | 52 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.models.backbones.efficientnet_v1.efficientnet_v1_backbone import (
EfficientNetV1Backbone,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946)
(ICML 2019)
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
""" # noqa: E501
class EfficientNetV1B0Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B1Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B2Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B3Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B4Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b4", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B5Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b5", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B6Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b6", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
class EfficientNetV1B7Backbone(EfficientNetV1Backbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return EfficientNetV1Backbone.from_preset("efficientnetv1_b7", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
EfficientNetV1B0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B0"),
)
setattr(
EfficientNetV1B1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B1"),
)
setattr(
EfficientNetV1B2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B2"),
)
setattr(
EfficientNetV1B3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B3"),
)
setattr(
EfficientNetV1B4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B4"),
)
setattr(
EfficientNetV1B5Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B5"),
)
setattr(
EfficientNetV1B6Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B6"),
)
setattr(
EfficientNetV1B7Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="EfficientNetV1B7"),
)
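# Illustrative usage sketch (not part of this module): each alias packs its
# arguments and delegates to `EfficientNetV1Backbone.from_preset`, so the two
# calls below are expected to build equivalent architectures. The helper name
# and argument values are hypothetical.
def _example_build_backbones():  # pragma: no cover
    via_alias = EfficientNetV1B0Backbone(input_shape=(224, 224, 3))
    via_preset = EfficientNetV1Backbone.from_preset(
        "efficientnetv1_b0", input_shape=(224, 224, 3)
    )
    return via_alias, via_preset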
| keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_v1/efficientnet_v1_aliases.py",
"repo_id": "keras-cv",
"token_count": 3998
} | 53 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import (
MiTB0Backbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import (
MiTBackbone,
)
from keras_cv.tests.test_case import TestCase
class MixTransformerBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = MiTB0Backbone()
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = MiTB0Backbone(
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(self.get_temp_dir(), "mit_backbone.keras")
model.save(save_path)
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, MiTBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
model = MiTB0Backbone(
input_shape=(224, 224, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, 7, 7, 256))
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 888
} | 54 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ResNetV2 model preset configurations."""
backbone_presets_no_weights = {
"resnet18_v2": {
"metadata": {
"description": (
"ResNet model with 18 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 11183488,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet18_v2/2",
},
"resnet34_v2": {
"metadata": {
"description": (
"ResNet model with 34 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 21299072,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet34_v2/2",
},
"resnet50_v2": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 23564800,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2/2",
},
"resnet101_v2": {
"metadata": {
"description": (
"ResNet model with 101 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 42626560,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet101_v2/2",
},
"resnet152_v2": {
"metadata": {
"description": (
"ResNet model with 152 layers where the batch normalization "
"and ReLU activation precede the convolution layers (v2 style)."
),
"params": 58331648,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet152_v2/2",
},
}
backbone_presets_with_weights = {
"resnet50_v2_imagenet": {
"metadata": {
"description": (
"ResNet model with 50 layers where the batch normalization and "
"ReLU activation precede the convolution layers (v2 style). "
"Trained on Imagenet 2012 classification task."
),
"params": 23564800,
"official_name": "ResNetV2",
"path": "resnet_v2",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2_imagenet/2",
},
}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
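# Illustrative note (not part of this config module): the preset names above
# are consumed through `from_preset` on the corresponding backbone class, for
# example:
#
# from keras_cv.models import ResNetV2Backbone
# backbone = ResNetV2Backbone.from_preset("resnet50_v2_imagenet")
#
# Only the presets in `backbone_presets_with_weights` ship pretrained weights;
# the remaining presets define architecture configurations only.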
| keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1671
} | 55 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
CrossStagePartial,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
class YoloXPAFPN(keras.layers.Layer):
"""The YoloX PAFPN.
YoloX PAFPN is an FPN layer used in YoloX models. The YoloX PAFPN is based
on the feature pyramid module used in Path Aggregation networks (PANet).
Arguments:
        depth_multiplier: A float value used to calculate the base depth of the
            model; this changes based on the detection model being used.
            Defaults to 1.0.
        width_multiplier: A float value used to calculate the base width of the
            model; this changes based on the detection model being used.
            Defaults to 1.0.
        in_channels: A list representing the number of filters in the FPN
            output. The length of the list will be the same as the number of
            outputs. Defaults to `(256, 512, 1024)`.
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block. Defaults to
`False`.
activation: the activation applied after the BatchNorm layer. One of
`"silu"`, `"relu"` or `"leaky_relu"`. Defaults to `"silu"`.
"""
def __init__(
self,
depth_multiplier=1.0,
width_multiplier=1.0,
in_channels=(256, 512, 1024),
use_depthwise=False,
activation="silu",
**kwargs
):
super().__init__(**kwargs)
self.in_channels = in_channels
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
self.lateral_conv0 = DarknetConvBlock(
filters=int(in_channels[1] * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
self.C3_p4 = CrossStagePartial(
filters=int(in_channels[1] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.reduce_conv1 = DarknetConvBlock(
filters=int(in_channels[0] * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
self.C3_p3 = CrossStagePartial(
filters=int(in_channels[0] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.bu_conv2 = ConvBlock(
filters=int(in_channels[0] * width_multiplier),
kernel_size=3,
strides=2,
activation=activation,
)
self.C3_n3 = CrossStagePartial(
filters=int(in_channels[1] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.bu_conv1 = ConvBlock(
filters=int(in_channels[1] * width_multiplier),
kernel_size=3,
strides=2,
activation=activation,
)
self.C3_n4 = CrossStagePartial(
filters=int(in_channels[2] * width_multiplier),
num_bottlenecks=round(3 * depth_multiplier),
residual=False,
use_depthwise=use_depthwise,
activation=activation,
)
self.concat = keras.layers.Concatenate(axis=-1)
self.upsample_2x = keras.layers.UpSampling2D(2)
def call(self, inputs, training=False):
c3_output, c4_output, c5_output = inputs[3], inputs[4], inputs[5]
fpn_out0 = self.lateral_conv0(c5_output)
f_out0 = self.upsample_2x(fpn_out0)
f_out0 = self.concat([f_out0, c4_output])
f_out0 = self.C3_p4(f_out0)
fpn_out1 = self.reduce_conv1(f_out0)
f_out1 = self.upsample_2x(fpn_out1)
f_out1 = self.concat([f_out1, c3_output])
pan_out2 = self.C3_p3(f_out1)
p_out1 = self.bu_conv2(pan_out2)
p_out1 = self.concat([p_out1, fpn_out1])
pan_out1 = self.C3_n3(p_out1)
p_out0 = self.bu_conv1(pan_out1)
p_out0 = self.concat([p_out0, fpn_out0])
pan_out0 = self.C3_n4(p_out0)
return pan_out2, pan_out1, pan_out0
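# A minimal illustrative sketch (not part of the public API): the layer
# consumes multi-scale backbone features keyed by level (3, 4, 5) and returns
# three fused feature maps. The helper name and shapes are hypothetical but
# follow the usual stride-8/16/32 pyramid with the default `in_channels`.
def _example_pafpn_forward():  # pragma: no cover
    import tensorflow as tf
    pafpn = YoloXPAFPN()
    features = {
        3: tf.ones((1, 52, 52, 256)),
        4: tf.ones((1, 26, 26, 512)),
        5: tf.ones((1, 13, 13, 1024)),
    }
    return pafpn(features)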
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_pafpn.py",
"repo_id": "keras-cv",
"token_count": 2419
} | 56 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import DeepLabV3Plus
from keras_cv.models import ResNet18V2Backbone
from keras_cv.models.backbones.test_backbone_presets import (
test_backbone_presets,
)
from keras_cv.tests.test_case import TestCase
class DeepLabV3PlusTest(TestCase):
def test_deeplab_v3_plus_construction(self):
backbone = ResNet18V2Backbone(input_shape=[256, 256, 3])
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
@pytest.mark.large
def test_deeplab_v3_plus_call(self):
backbone = ResNet18V2Backbone(input_shape=[256, 256, 3])
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
images = np.random.uniform(size=(2, 256, 256, 3))
_ = model(images)
_ = model.predict(images)
@pytest.mark.large
def test_weights_change(self):
target_size = [256, 256, 3]
images = np.ones([1] + target_size)
labels = np.random.uniform(size=[1] + target_size)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.repeat(2)
ds = ds.batch(2)
backbone = ResNet18V2Backbone(input_shape=target_size)
model = DeepLabV3Plus(backbone=backbone, num_classes=3)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
original_weights = model.segmentation_head.get_weights()
model.fit(ds, epochs=1)
updated_weights = model.segmentation_head.get_weights()
for w1, w2 in zip(original_weights, updated_weights):
self.assertNotAllEqual(w1, w2)
self.assertFalse(ops.any(ops.isnan(w2)))
@pytest.mark.large
def test_with_model_preset_forward_pass(self):
if not keras_3():
self.skipTest("TODO: #2246 Not supported for Keras 2")
model = DeepLabV3Plus.from_preset(
"deeplab_v3_plus_resnet50_pascalvoc",
num_classes=21,
input_shape=[256, 256, 3],
)
image = np.ones((1, 256, 256, 3))
output = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
expected_output = np.zeros((1, 256, 256, 1))
self.assertAllClose(output, expected_output)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
target_size = [256, 256, 3]
backbone = ResNet18V2Backbone(input_shape=target_size)
model = DeepLabV3Plus(backbone=backbone, num_classes=2)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, DeepLabV3Plus)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large
class DeepLabV3PlusSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in test_backbone_presets]
)
def test_backbone_preset(self, preset):
model = DeepLabV3Plus.from_preset(
preset,
num_classes=3,
)
xs = np.random.uniform(size=(1, 128, 128, 3))
output = model(xs)
self.assertEqual(output.shape, (1, 128, 128, 3))
| keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/deeplab_v3_plus/deeplab_v3_plus_test.py",
"repo_id": "keras-cv",
"token_count": 1945
} | 57 |
# Stable Diffusion v1-4 Model Card
Stable Diffusion is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input.
For more information about how Stable Diffusion functions, please have a look at [KerasCV's tutorial covering StableDiffusion](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/).
The **Stable-Diffusion-v1-4** checkpoint was initialized with the weights of the [Stable-Diffusion-v1-2](https://huggingface.co/CompVis/stable-diffusion-v1-2)
checkpoint and subsequently fine-tuned for 225k steps at resolution 512x512 on "laion-aesthetics v2 5+" with 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598).
By loading this model you accept the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE
## Model Details
- **Developed by:** Robin Rombach, Patrick Esser
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based.
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487).
- **Resources for more information:** [GitHub Repository](https://github.com/CompVis/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752).
- **Cite as:**
      @InProceedings{Rombach_2022_CVPR,
          author    = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn},
          title     = {High-Resolution Image Synthesis With Latent Diffusion Models},
          booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
          month     = {June},
          year      = {2022},
          pages     = {10684-10695}
      }
# Uses
## Direct Use
The model is intended for research purposes only. Possible research areas and
tasks include
- Safe deployment of models which have the potential to generate harmful content.
- Probing and understanding the limitations and biases of generative models.
- Generation of artworks and use in design and other artistic processes.
- Applications in educational or creative tools.
- Research on generative models.
Excluded uses are described below.
### Misuse, Malicious Use, and Out-of-Scope Use
_Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_.
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
#### Out-of-Scope Use
The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.
#### Misuse and Malicious Use
Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to:
- Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc.
- Intentionally promoting or propagating discriminatory content or harmful stereotypes.
- Impersonating individuals without their consent.
- Sexual content without consent of the people who might see it.
- Mis- and disinformation
- Representations of egregious violence and gore
- Sharing of copyrighted or licensed material in violation of its terms of use.
- Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use.
## Limitations and Bias
### Limitations
- The model does not achieve perfect photorealism
- The model cannot render legible text
- The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere”
- Faces and people in general may not be generated properly.
- The model was trained mainly with English captions and will not work as well in other languages.
- The auto-encoding part of the model is lossy
- The model was trained on a large-scale dataset
[LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material
and is not fit for product use without additional safety mechanisms and
considerations.
- No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data.
The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images.
### Bias
While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/),
which consists of images that are primarily limited to English descriptions.
Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for.
This affects the overall output of the model, as white and western cultures are often set as the default. Further, the
ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts.
## More information
More information on StableDiffusion can be found in the [HuggingFace model card](https://huggingface.co/CompVis/stable-diffusion-v1-4)
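## Example usage in KerasCV
The snippet below is a minimal sketch of how this checkpoint is typically used from KerasCV (see the tutorial linked at the top of this card for a full walkthrough):
```python
import keras_cv
model = keras_cv.models.StableDiffusion(img_width=512, img_height=512)
images = model.text_to_image(
    "a photograph of an astronaut riding a horse",
    batch_size=3,
)
```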
| keras-cv/keras_cv/models/stable_diffusion/README.md/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/README.md",
"repo_id": "keras-cv",
"token_count": 1753
} | 58 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import metrics
from tensorflow.keras import optimizers
from keras_cv.layers import preprocessing
from keras_cv.losses import SimCLRLoss
from keras_cv.models import DenseNet121Backbone
from keras_cv.tests.test_case import TestCase
from keras_cv.training import ContrastiveTrainer
# TODO(jbischof): revisit "extra_large" tag once development resumes.
# These tests are currently some of the slowest in our repo.
@pytest.mark.extra_large
class ContrastiveTrainerTest(TestCase):
def test_probe_requires_probe_optimizer(self):
trainer = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(),
)
with self.assertRaises(ValueError):
trainer.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
def test_targets_required_if_probing(self):
trainer_with_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(),
)
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
trainer_with_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
probe_optimizer=optimizers.Adam(),
probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True),
)
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
with self.assertRaises(ValueError):
trainer_with_probing.fit(images)
def test_train_with_probing(self):
trainer_with_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=self.build_probe(num_classes=20),
)
images = tf.random.uniform((1, 50, 50, 3))
targets = np.ones((1, 20))
trainer_with_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
probe_metrics=[
metrics.TopKCategoricalAccuracy(3, "top3_probe_accuracy")
],
probe_optimizer=optimizers.Adam(),
probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True),
)
trainer_with_probing.fit(images, targets)
def test_train_without_probing(self):
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
targets = np.ones((1, 20))
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
trainer_without_probing.fit(images)
trainer_without_probing.fit(images, targets)
def test_inference_not_supported(self):
trainer = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
trainer.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
with self.assertRaises(NotImplementedError):
trainer(np.ones((1, 50, 50, 3)))
def test_encoder_must_have_flat_output(self):
with self.assertRaises(ValueError):
_ = ContrastiveTrainer(
# A DenseNet without pooling does not have a flat output
encoder=DenseNet121Backbone(include_rescaling=False),
augmenter=self.build_augmenter(),
projector=self.build_projector(),
probe=None,
)
def test_with_multiple_augmenters_and_projectors(self):
augmenter0 = preprocessing.RandomFlip("horizontal")
augmenter1 = preprocessing.RandomFlip("vertical")
projector0 = layers.Dense(64, name="projector0")
projector1 = keras.Sequential(
[projector0, layers.ReLU(), layers.Dense(64, name="projector1")]
)
trainer_without_probing = ContrastiveTrainer(
encoder=self.build_encoder(),
augmenter=(augmenter0, augmenter1),
projector=(projector0, projector1),
probe=None,
)
images = tf.random.uniform((1, 50, 50, 3))
trainer_without_probing.compile(
encoder_optimizer=optimizers.Adam(),
encoder_loss=SimCLRLoss(temperature=0.5),
)
trainer_without_probing.fit(images)
def build_augmenter(self):
return preprocessing.RandomFlip("horizontal")
def build_encoder(self):
return keras.Sequential(
[
DenseNet121Backbone(include_rescaling=False),
layers.GlobalAveragePooling2D(name="avg_pool"),
],
)
def build_projector(self):
return layers.Dense(128)
def build_probe(self, num_classes=20):
return layers.Dense(num_classes)
| keras-cv/keras_cv/training/contrastive/contrastive_trainer_test.py/0 | {
"file_path": "keras-cv/keras_cv/training/contrastive/contrastive_trainer_test.py",
"repo_id": "keras-cv",
"token_count": 2856
} | 59 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from tensorflow import keras
from keras_cv import core
def exhaustive_compare(obj1, obj2):
"""Exhaustively compared config of any two python
or Keras objects recursively.
If objects are python objects, a standard equality check is run.
If the objects are Keras objects a `get_config()` call is made.
The subsequent configs are then compared to determine if equality holds.
Args:
obj1: any object, can be a Keras object or python object.
obj2: any object, can be a Keras object or python object.
"""
classes_supporting_get_config = (
core.FactorSampler,
keras.layers.Layer,
keras.losses.Loss,
)
# If both objects are either one of list or tuple then their individual
# elements also must be checked exhaustively.
if isinstance(obj1, (list, tuple)) and isinstance(obj2, (list, tuple)):
# Length based checks.
if len(obj1) == 0 and len(obj2) == 0:
return True
if len(obj1) != len(obj2):
return False
# Exhaustive check for all elements.
        for v1, v2 in zip(obj1, obj2):
            if not exhaustive_compare(v1, v2):
                return False
        return True
# If the objects are dicts then we simply call the `config_equals` function
# which supports dicts.
elif isinstance(obj1, (dict)) and isinstance(obj2, (dict)):
        return config_equals(obj1, obj2)
# If both objects are subclasses of Keras classes that support `get_config`
# method, then we compare their individual attributes using `config_equals`.
elif isinstance(obj1, classes_supporting_get_config) and isinstance(
obj2, classes_supporting_get_config
):
return config_equals(obj1.get_config(), obj2.get_config())
# Following checks are if either of the objects are _functions_, not methods
# or callables, since Layers and other unforeseen objects may also fit into
# this category. Specifically for Keras activation functions.
elif inspect.isfunction(obj1) and inspect.isfunction(obj2):
return keras.utils.serialize_keras_object(
obj1
) == keras.utils.serialize_keras_object(obj2)
elif inspect.isfunction(obj1) and not inspect.isfunction(obj2):
return keras.utils.serialize_keras_object(obj1) == obj2
elif inspect.isfunction(obj2) and not inspect.isfunction(obj1):
return obj1 == keras.utils.serialize_keras_object(obj2)
# Lastly check for primitive datatypes and objects that don't need
# additional preprocessing.
else:
return obj1 == obj2
def config_equals(config1, config2):
# Both `config1` and `config2` are python dicts. So the first check is to
# see if both of them have same keys.
if config1.keys() != config2.keys():
return False
# Iterate over all keys of the configs and compare each entry exhaustively.
for key in list(config1.keys()):
v1, v2 = config1[key], config2[key]
if not exhaustive_compare(v1, v2):
return False
return True
| keras-cv/keras_cv/utils/test_utils.py/0 | {
"file_path": "keras-cv/keras_cv/utils/test_utils.py",
"repo_id": "keras-cv",
"token_count": 1280
} | 60 |
# Keras FAQ: Kerasに関するよくある質問
- [Kerasを引用するには?](#keras)
- [KerasをGPUで動かすには?](#kerasgpu)
- [KerasをマルチGPUで動かすには?](#how-can-i-run-a-keras-model-on-multiple-gpus)
- ["sample","batch","epoch" の意味は?](#samplebatchepoch)
- [Keras modelを保存するには?](#keras-model)
- [training lossがtesting lossよりもはるかに大きいのはなぜ?](#training-losstesting-loss)
- [中間レイヤーの出力を得るには?](#_7)
- [メモリに載らない大きさのデータを扱うには?](#_8)
- [validation lossが減らなくなったときに学習を中断するには?](#validation-loss)
- [validation splitはどのように実行されますか?](#validation-split)
- [訓練時にデータはシャッフルされますか?](#_9)
- [各epochのtraining/validationのlossやaccuracyを記録するには?](#epochtrainingvalidationlossaccuracy)
- [レイヤーを "freeze" するには?](#freeze)
- [stateful RNNを利用するには?](#stateful-rnn)
- [Sequentialモデルからレイヤーを取り除くには?](#sequential)
- [Kerasで事前学習したモデルを使うには?](#keras_1)
- [KerasでHDF5ファイルを入力に使うには?](#kerashdf5)
- [Kerasの設定ファイルの保存場所は?](#keras_2)
- [開発中にKerasを用いて再現可能な結果を得るには?](#how-can-i-obtain-reproducible-results-using-keras-during-development)
- [Kerasでモデルを保存するためにHDF5やh5pyをインストールするには?](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)
---
### Kerasを引用するには?
Kerasがあなたの仕事の役に立ったなら,ぜひ著書のなかでKerasを引用してください.BibTexの例は以下の通りです:
```
@misc{chollet2015keras,
title={Keras},
author={Chollet, Fran\c{c}ois and others},
year={2015},
howpublished={\url{https://keras.io}},
}
```
---
### KerasをGPUで動かすには?
バックエンドで**TensorFlow**か**CNTK**を使っている場合,利用可能なGPUがあれば自動的にGPUが使われます.
バックエンドが**Theano**の場合,以下の方法があります:
**方法1**: Theanoフラグを使う:
```bash
THEANO_FLAGS=device=gpu,floatX=float32 python my_keras_script.py
```
'gpu'の部分はデバイス識別子に合わせて変更してください(例: `gpu0`,`gpu1`など).
**方法2**: `.theanorc`を使う:
[使い方](http://deeplearning.net/software/theano/library/config.html)
**方法3**: コードの先頭で,`theano.config.device`,`theano.config.floatX`を手動で設定:
```python
import theano
theano.config.device = 'gpu'
theano.config.floatX = 'float32'
```
---
### KerasをマルチGPUで動かすには?
**TensorFlow**バックエンドの使用を推奨します.複数のGPUで1つのモデルを実行するには**データ並列化**と**デバイス並列化**の2つの方法があります.
多くの場合,必要となるのはデータ並列化でしょう.
#### データ並列化
データ並列化は,ターゲットのモデルをデバイス毎に1つずつ複製することと,それぞれのレプリカを入力データ内の異なる部分の処理に用いることから成ります.Kerasには組み込みのユーティリティとして`keras.utils.multi_gpu_model`があり,どんなモデルに対してもデータ並列化バージョンを作成できて,最大8個のGPUで準線形の高速化を達成しています.
より詳細な情報は[マルチGPUモデル](/utils/#multi_gpu_model)を参照してください.簡単な例は次の通りです:
```python
from keras.utils import multi_gpu_model
# Replicates `model` on 8 GPUs.
# This assumes that your machine has 8 available GPUs.
parallel_model = multi_gpu_model(model, gpus=8)
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# This `fit` call will be distributed on 8 GPUs.
# Since the batch size is 256, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=256)
```
#### デバイス並列化
デバイス並列化は同じモデルを異なるデバイスで実行することから成っています.並列アーキテクチャを持つモデルには最適でしょう.例としては2つのブランチを持つようなモデルがあります.
これはTensorFlowのデバイススコープを使用することで実現できます.簡単な例は次の通りです:
```python
# Model where a shared LSTM is used to encode two different sequences in parallel
input_a = keras.Input(shape=(140, 256))
input_b = keras.Input(shape=(140, 256))
shared_lstm = keras.layers.LSTM(64)
# Process the first sequence on one GPU
with tf.device('/gpu:0'):
    encoded_a = shared_lstm(input_a)
# Process the next sequence on another GPU
with tf.device('/gpu:1'):
    encoded_b = shared_lstm(input_b)
# Concatenate results on CPU
with tf.device('/cpu:0'):
merged_vector = keras.layers.concatenate([encoded_a, encoded_b],
axis=-1)
```
---
### "sample","batch","epoch" の意味は?
Kerasを正しく使うためには,以下の定義を知り,理解しておく必要があります:
- **Sample**: データセットの一つの要素.
- *例:* 一つの画像は畳み込みネットワークの一つの**sample**です
- *例:* 一つの音声ファイルは音声認識モデルのための一つの**sample**です
- **Batch**: *N*のsampleのまとまり.**batch**内のサンプルは独立して並列に処理されます. 訓練中は,batchの処理結果によりモデルが1回更新されます.
- 一般的に**batch**は,それぞれの入力のみの場合に比べて,入力データのばらつきをよく近似します.batchが大きいほど,その近似は精度が良くなります.しかし,そのようなbatchの処理には時間がかかるにも関わらず更新が一度しかされません.推論(もしくは評価,予測)のためには,メモリ領域を超えなくて済む最大のbatchサイズを選ぶのをおすすめします.(なぜなら,batchが大きければ,通常は高速な評価や予測につながるからです)
- **Epoch**: "データセット全体に対する1回の処理単位"と一般的に定義されている,任意の区切りのこと.訓練のフェーズを明確に区切って,ロギングや周期的な評価するのに利用されます.
    - `validation_data` もしくは `validation_split` がKeras modelの `fit` 関数とともに使われるとき,その評価は,各**epoch**が終わる度に行われます.
- Kerasでは,**epoch**の終わりに実行されるように [callbacks](https://keras.io/ja/callbacks/) を追加することができます.これにより例えば,学習率を変化させることやモデルのチェックポイント(保存)が行えます.
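以下は,これらの用語が`fit`の呼び出しにどのように対応するかを示す簡単な例です(データ数やモデルは説明のための仮定です):
```python
# x_trainに1000個のsample,y_trainに対応するラベルがあると仮定します
model.fit(x_train, y_train,
          batch_size=32,  # 1回の重み更新に使われるsample数(= 1 batch)
          epochs=10)      # データセット全体を10回処理します
# この場合,1 epochあたり ceil(1000 / 32) = 32 回の更新が行われます
```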
---
### Keras modelを保存するには?
#### モデル全体の保存/読み込み(アーキテクチャ + 重み + オプティマイザの状態)
*Kerasのモデルを保存するのに,pickleやcPickleを使うことは推奨されません.*
`model.save(filepath)`を使うことで,単一のHDF5ファイルにKerasのモデルを保存できます.このHDF5ファイルは以下を含みます.
- 再構築可能なモデルの構造
- モデルの重み
- 学習時の設定 (loss,optimizer)
- optimizerの状態.これにより,学習を終えた時点から正確に学習を再開できます
`keras.models.load_model(filepath)`によりモデルを再インスタンス化できます.
`load_model`は,学習時の設定を利用して,モデルのコンパイルも行います(ただし,最初にモデルを定義した際に,一度もコンパイルされなかった場合を除く).
例:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
#### モデルのアーキテクチャのみの保存/読み込み
**モデルのアーキテクチャ**(weightパラメータや学習時の設定は含まない)のみを保存する場合は,以下のように行ってください:
```python
# save as JSON
json_string = model.to_json()
# save as YAML
yaml_string = model.to_yaml()
```
生成されたJSON / YAMLファイルは,人が読むことができ,必要に応じて編集可能です.
保存したデータから,以下のように新しいモデルを作成できます:
```python
# model reconstruction from JSON:
from keras.models import model_from_json
model = model_from_json(json_string)
# model reconstruction from YAML
from keras.models import model_from_yaml
model = model_from_yaml(yaml_string)
```
#### モデルの重みのみのセーブ/ロード
**モデルの重み**を保存する必要がある場合,以下のコードのようにHDF5を利用できます.
```python
model.save_weights('my_model_weights.h5')
```
モデルのインスタンス作成後,*同じ*アーキテクチャのモデルへ,予め保存しておいたweightパラメータをロードできます:
```python
model.load_weights('my_model_weights.h5')
```
例えば,ファインチューニングや転移学習のために,*異なる*アーキテクチャのモデル(ただしいくつか共通のレイヤーを保持)へweightsパラメータをロードする必要がある場合,*レイヤーの名前*を指定することでweightsパラメータをロードできます:
```python
model.load_weights('my_model_weights.h5', by_name=True)
```
`h5py`をインストールする方法については[Kerasでモデルを保存するためにHDF5やh5pyをインストールするには?](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)も参照してください.
例:
```python
"""
Assuming the original model looks like this:
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1'))
model.add(Dense(3, name='dense_2'))
...
model.save_weights(fname)
"""
# new model
model = Sequential()
model.add(Dense(2, input_dim=3, name='dense_1')) # will be loaded
model.add(Dense(10, name='new_dense')) # will not be loaded
# load weights from first model; will only affect the first layer, dense_1.
model.load_weights(fname, by_name=True)
```
#### 保存済みモデルでのカスタムレイヤ(またはその他カスタムオブジェクト)の取り扱い
読み込もうとしているモデルにカスタムレイヤーやその他カスタムされたクラスや関数が含まれている場合,`custom_objects`引数を使ってロード機構にそのカスタムレイヤーなどを渡すことができます.
```python
from keras.models import load_model
# Assuming your model includes instance of an "AttentionLayer" class
model = load_model('my_model.h5', custom_objects={'AttentionLayer': AttentionLayer})
```
あるいは [custom object scope](https://keras.io/ja/utils/#customobjectscope)を使うことも出来ます:
```python
from keras.utils import CustomObjectScope
with CustomObjectScope({'AttentionLayer': AttentionLayer}):
model = load_model('my_model.h5')
```
カスタムオブジェクトは`load_model`, `model_from_json`, `model_from_yaml`と同じように取り扱えます:
```python
from keras.models import model_from_json
model = model_from_json(json_string, custom_objects={'AttentionLayer': AttentionLayer})
```
---
### training lossがtesting lossよりもはるかに大きいのはなぜ?
Kerasモデルにはtrainingとtestingという2つのモードがあります.DropoutやL1/L2正則化のような,正則化手法はtestingの際には機能しません.
さらに,training lossは訓練データの各バッチのlossの平均です.モデルは変化していくため,各epochの最初のバッチの誤差は最後のバッチの誤差よりもかなり大きくなります.一方,testing lossは各epochの最後の状態のモデルを使って計算されるため,誤差が小さくなります.
---
### 中間レイヤーの出力を得るには?
シンプルな方法は,着目しているレイヤーの出力を行うための新しい`Model`を作成することです:
```python
from keras.models import Model
model = ... # create the original model
layer_name = 'my_layer'
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
intermediate_output = intermediate_layer_model.predict(data)
```
別の方法として,ある入力が与えられたときに,あるレイヤーの出力を返すKeras functionを以下のように記述することでも可能です:
```python
from keras import backend as K
# with a Sequential model
get_3rd_layer_output = K.function([model.layers[0].input],
[model.layers[3].output])
layer_output = get_3rd_layer_output([x])[0]
```
同様に,TheanoやTensorFlowのfunctionを直接利用することもできます.
ただし,学習時とテスト時でモデルの振る舞いが異なる場合(例えば`Dropout`や`BatchNormalization`の利用時など),以下のようにlearning phaseフラグを利用してください:
```python
get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()],
[model.layers[3].output])
# output in test mode = 0
layer_output = get_3rd_layer_output([x, 0])[0]
# output in train mode = 1
layer_output = get_3rd_layer_output([x, 1])[0]
```
---
### メモリに載らない大きさのデータを扱うには?
`model.train_on_batch(x, y)`と`model.test_on_batch(x, y)`を使うことでバッチ学習ができます.詳細は[モデルに関するドキュメント](/models/sequential)を見てください.
代わりに,訓練データのバッチを生成するジェネレータを記述して,`model.fit_generator(data_generator, samples_per_epoch, epochs)`の関数を使うこともできます.
実際のバッチ学習の方法については,[CIFAR10 example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py)を見てください.
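以下は,ジェネレータと`fit_generator`を使う場合の簡単なスケッチです(Keras 2のAPIを想定しており,`load_next_batch`はディスクなどからバッチを読み込む仮定のヘルパー関数です):
```python
def data_generator(batch_size):
    while True:
        # メモリに載らないデータを少しずつ読み込むと仮定します
        x_batch, y_batch = load_next_batch(batch_size)  # 仮定のヘルパー関数
        yield x_batch, y_batch
model.fit_generator(data_generator(batch_size=32),
                    steps_per_epoch=1000,  # 1 epochあたりのバッチ数
                    epochs=10)
```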
---
### validation lossが減らなくなったときに学習を中断するには?
コールバック関数の`EarlyStopping`を利用してください:
```python
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
model.fit(x, y, validation_split=0.2, callbacks=[early_stopping])
```
詳細は[コールバックに関するドキュメント](/callbacks)を見てください.
---
### validation splitはどのように実行されますか?
`model.fit`の引数`validation_split`を例えば0.1に設定すると,データの*最後の10%*が検証のために利用されます.例えば,0.25に設定すると,データの最後の25%が検証に使われます.ここで,validation splitからデータを抽出する際にはデータがシャッフルされないことに注意してください.なので,検証は文字通り入力データの*最後の*x%のsampleに対して行われます.
(同じ`fit`関数が呼ばれるならば)全てのepochにおいて,同じ検証用データが使われます.
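簡単な例(`x`と`y`は定義済みと仮定します):
```python
# データの最後の20%が検証用に使われます(シャッフルはされません)
model.fit(x, y, validation_split=0.2)
```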
---
### 訓練時にデータはシャッフルされますか?
`model.fit`の引数`shuffle`が`True`であればシャッフルされます(初期値はTrueです).各epochで訓練データはランダムにシャッフルされます.
検証用データはシャッフルされません.
---
### 各epochのtraining/validationのlossやaccuracyを記録するには?
`model.fit`が返す`History`コールバックの`history`を見てください.`history`はlossや他の指標のリストを含んでいます.
```python
hist = model.fit(x, y, validation_split=0.2)
print(hist.history)
```
---
### レイヤーを"freeze"するには?
レイヤーを"freeze"することは学習からそのレイヤーを除外することを意味します,その場合,そのレイヤーの重みは更新されなくなります.
このことはモデルのファインチューニングやテキスト入力のための固定されたembeddingsを使用する際に有用です.
レイヤーのコンストラクタの`trainable`引数に真理値を渡すことで,レイヤーを訓練しないようにできます.
```python
frozen_layer = Dense(32, trainable=False)
```
加えて,インスタンス化後にレイヤーの`trainable`プロパティに`True`か`False`を設定することができます.設定の有効化のためには,`trainable`プロパティの変更後のモデルで`compile()`を呼ぶ必要があります.以下にその例を示します:
```python
x = Input(shape=(32,))
layer = Dense(32)
layer.trainable = False
y = layer(x)
frozen_model = Model(x, y)
# in the model below, the weights of `layer` will not be updated during training
frozen_model.compile(optimizer='rmsprop', loss='mse')
layer.trainable = True
trainable_model = Model(x, y)
# with this model the weights of the layer will be updated during training
# (which will also affect the above model since it uses the same layer instance)
trainable_model.compile(optimizer='rmsprop', loss='mse')
frozen_model.fit(data, labels) # this does NOT update the weights of `layer`
trainable_model.fit(data, labels) # this updates the weights of `layer`
```
---
### stateful RNNを利用するには?
RNNをstatefulにするとは,各バッチのサンプルの状態が,次のバッチのサンプルのための初期状態として再利用されるということを意味します.
stateful RNNが使われるときには以下のような状態となっているはずです:
- 全てのバッチのサンプル数が同じである
- `x1`と`x2`が連続するバッチであるとき,つまり各`i`について`x2[i]`は`x1[i]`のfollow-upシーケンスになっている
実際にstateful RNNを利用するには,以下を行う必要があります:
- `batch_size`引数をモデルの最初のレイヤーに渡して,バッチサイズを明示的に指定してください. 例えば,サンプル数が32,タイムステップが10,特徴量の次元が16の場合には,`batch_size=32`としてください.
- RNNレイヤーで`stateful=True`を指定してください.
- fit()を呼ぶときには`shuffle=False`を指定してください.
蓄積された状態をリセットするには:
- モデルの全てのレイヤーの状態をリセットするには,`model.reset_states()`を利用してください
- 特定のstateful RNNレイヤーの状態をリセットするには,`layer.reset_states()`を利用してください
例:
```python
x # this is our input data, of shape (32, 21, 16)
# we will feed it to our model in sequences of length 10
model = Sequential()
model.add(LSTM(32, input_shape=(10, 16), batch_size=32, stateful=True))
model.add(Dense(16, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# we train the network to predict the 11th timestep given the first 10:
model.train_on_batch(x[:, :10, :], np.reshape(x[:, 10, :], (32, 16)))
# the state of the network has changed. We can feed the follow-up sequences:
model.train_on_batch(x[:, 10:20, :], np.reshape(x[:, 20, :], (32, 16)))
# let's reset the states of the LSTM layer:
model.reset_states()
# another way to do it in this case:
model.layers[0].reset_states()
```
`predict`, `fit`, `train_on_batch`, `predict_classes`などの関数は*いずれも*statefulレイヤーの状態を更新することに注意してください.そのため,statefulな訓練だけでなく,statefulな予測も可能となります.
---
### Sequentialモデルからレイヤーを取り除くには?
`.pop()`を使うことで,Sequentialモデルへ最後に追加したレイヤーを削除できます:
```python
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=784))
model.add(Dense(32, activation='relu'))
print(len(model.layers)) # "2"
model.pop()
print(len(model.layers)) # "1"
```
---
### Kerasで事前学習したモデルを使うには?
以下の画像分類のためのモデルのコードと事前学習した重みが利用可能です:
- Xception
- VGG16
- VGG19
- ResNet50
- Inception v3
- Inception-ResNet v2
- MobileNet v1
これらのモデルは`keras.applications`からインポートできます:
```python
from keras.applications.xception import Xception
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.mobilenet import MobileNet
model = VGG16(weights='imagenet', include_top=True)
```
シンプルな使用例については, [Applications moduleについてのドキュメント](/applications)を見てください.
特徴量抽出やfine-tuningのために事前学習したモデルの使用例の詳細は,[このブログ記事](http://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)を見てください.
また,VGG16はいくつかのKerasのサンプルスクリプトの基礎になっています.
- [Style transfer](https://github.com/keras-team/keras/blob/master/examples/neural_style_transfer.py)
- [Feature visualization](https://github.com/keras-team/keras/blob/master/examples/conv_filter_visualization.py)
- [Deep dream](https://github.com/keras-team/keras/blob/master/examples/deep_dream.py)
---
### KerasでHDF5ファイルを入力に使うには?
`keras.utils.io_utils`から`HDF5Matrix`を使うことができます.
詳細は[HDF5Matrixに関するドキュメント](/utils/#hdf5matrix)を確認してください.
また,HDF5のデータセットを直接使うこともできます:
```python
import h5py
with h5py.File('input/file.hdf5', 'r') as f:
x_data = f['x_data']
model.predict(x_data)
```
`h5py`をインストールする方法については[Kerasでモデルを保存するためにHDF5やh5pyをインストールするには?](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)も参照してください.
---
### Kerasの設定ファイルの保存場所は?
Kerasの全てのデータが格納されているデフォルトのディレクトリは以下の場所です:
```bash
$HOME/.keras/
```
Windowsユーザは`$HOME`を`%USERPROFILE%`に置換する必要があることに注意してください.
(パーミッション等の問題によって,)Kerasが上記のディレクトリを作成できない場合には,`/tmp/.keras/`がバックアップとして使われます.
Kerasの設定ファイルはJSON形式で`$HOME/.keras/keras.json`に格納されます.
デフォルトの設定ファイルは以下のようになっています:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
この設定ファイルは次のような項目を含んでいます:
- デフォルトで画像処理のレイヤーやユーティリティで使われる画像データのフォーマット(`channels_last`もしくは`channels_first`).
- 数値演算におけるゼロ除算を防ぐために使われる,数値の微小量`epsilon`.
- デフォルトの浮動小数点数データの型.
- デフォルトのバックエンド.[backendに関するドキュメント](/backend)を参照してください.
同様に,[`get_file()`](/utils/#get_file)でダウンロードされた,キャッシュ済のデータセットのファイルは,デフォルトでは`$HOME/.keras/datasets/`に格納されます.
---
### 開発中にKerasを用いて再現可能な結果を得るには?
モデルの開発中に,パフォーマンスの変化が実際のモデルやデータの変更によるものなのか,単に新しいランダムサンプルの結果によるものなのかを判断するために,実行毎に再現性のある結果を得られると便利な場合があります.以下のコードスニペットは,再現可能な結果を取得する方法の例を示しています.これは,Python 3環境のTensorFlowバックエンド向けです.
```python
import numpy as np
import tensorflow as tf
import random as rn
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# Rest of code follows ...
```
---
### Kerasでモデルを保存するためにHDF5やh5pyをインストールするには?
KerasのモデルをHDF5ファイルとして保存する場合(例えば`keras.callbacks.ModelCheckpoint`を用いるような時),KerasではPythonパッケージのh5pyを使います.Kerasはこのパッケージと依存関係があり,デフォルトでインストールされるはずです.Debianベースのディストリビューションでは`libhdf5`のインストールも追加で必要かもしれません:
```
sudo apt-get install libhdf5-serial-dev
```
h5pyがインストールされているかわからない場合はPythonシェルを開いて次のようにモジュールをロードできます.
```
import h5py
```
エラーなしでインポートできたらh5pyはインストールされています.そうでなければ詳細なインストール方法をこちらでご覧ください:http://docs.h5py.org/en/latest/build.html
| keras-docs-ja/sources/getting-started/faq.md/0 | {
"file_path": "keras-docs-ja/sources/getting-started/faq.md",
"repo_id": "keras-docs-ja",
"token_count": 12063
} | 61 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L92)</span>
### TimeDistributed
```python
keras.layers.wrappers.TimeDistributed(layer)
```
このラッパーにより,入力のすべての時間スライスにレイヤーを適用できます.
入力は少なくとも3次元である必要があります.
インデックスの次元は時間次元と見なされます.
例えば,32個のサンプルを持つバッチを考えます.各サンプルは16次元で構成される10個のベクトルを持ちます.
このバッチの入力のshapeは`(32, 10, 16)`となります(`input_shape`はサンプル数の次元を含まないため,`(10, 16)`となります).
このとき,10個のタイムスタンプのレイヤーそれぞれに`Dense`を適用するために,`TimeDistributed`を利用できます:
```python
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
# now model.output_shape == (None, 10, 8)
# subsequent layers: no need for input_shape
model.add(TimeDistributed(Dense(32)))
# now model.output_shape == (None, 10, 32)
```
出力のshapeは`(32, 10, 8)`です.
`TimeDistributed`は`Dense`だけでなく任意のレイヤーに使えます.
例えば,`Conv2D`に対して:
```python
model = Sequential()
model.add(TimeDistributed(Conv2D(64, (3, 3)),
input_shape=(10, 299, 299, 3)))
```
__引数__
- __layer__: レイヤーインスタンス.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/wrappers.py#L206)</span>
### Bidirectional
```python
keras.layers.wrappers.Bidirectional(layer, merge_mode='concat', weights=None)
```
RNNのBidirectionalなラッパー.
__引数__
- __layer__: `Recurrent`のインスタンス.
- __merge_mode__: RNNのforwardとbackwardの出力同士を組み合わせる際のモード.{'sum', 'mul', 'concat', 'ave', None}のいずれか.Noneの場合,出力はリストになります.
__Raises__
- __ValueError__: `merge_mode`引数が不正な場合.
__例__
```python
model = Sequential()
model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10)))
model.add(Bidirectional(LSTM(10)))
model.add(Dense(5))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```
| keras-docs-ja/sources/layers/wrappers.md/0 | {
"file_path": "keras-docs-ja/sources/layers/wrappers.md",
"repo_id": "keras-docs-ja",
"token_count": 1099
} | 62 |
## 콜백 함수의 사용법<sub>Usage of callbacks</sub>
콜백은 학습 과정에서 특정 단계에 적용할 함수 세트를 의미합니다. 콜백을 사용해서 학습 중인 모델의 내부 상태와 통계값을 확인할 수 있습니다. `Sequential`이나 `Model` 클래스의 `.fit()` 메서드에 키워드 인자 `callbacks`를 통해 콜백의 리스트를 전달할 수 있습니다. 학습의 각 단계마다 관련된 콜백이 호출됩니다.
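다음은 콜백 리스트를 `fit()`에 전달하는 간단한 예시입니다(`model`, `x_train`, `y_train`은 이미 정의되어 있다고 가정합니다).
```python
from keras.callbacks import EarlyStopping, ModelCheckpoint
callbacks_list = [
    # 검증 손실이 2 에폭 동안 향상되지 않으면 학습을 멈춥니다
    EarlyStopping(monitor='val_loss', patience=2),
    # 가장 좋은 모델만 저장합니다
    ModelCheckpoint(filepath='model.h5', save_best_only=True),
]
model.fit(x_train, y_train,
          validation_split=0.2,
          callbacks=callbacks_list)
```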
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L537)</span>
### ProgbarLogger
```python
keras.callbacks.ProgbarLogger(count_mode='samples', stateful_metrics=None)
```
표준입출력<sub>stdout</sub>으로 평가 지표를 출력하는 콜백
__인자__
- __count_mode__: "steps"와 "samples" 중 하나.
검사한 샘플의 수와 검사한 단계(배치) 수 중 진행표시바<sub>progressbar</sub>에 표시할 항목.
- __stateful_metrics__: 평균으로 표시하지 *않을* 평가 지표의 `string` 이름을 담은 iterable 객체.
이 리스트의 평가 지표는 원래값 그대로 로그합니다.
그 외 평가 지표의 에폭의 평균값으로 로그합니다 (예. 손실 등).
__오류__
- __ValueError__: 유효하지 않은 `count_mode`의 경우 오류가 발생합니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L633)</span>
### ModelCheckpoint
```python
keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
```
에폭이 끝날 때마다 모델을 저장합니다.
`filepath`는 (`on_epoch_end`에서 전달된)
`epoch` 값과 `logs` 키를 이용하여 설정할 수 있습니다.
예를 들어 `filepath`가 `weights.{epoch:02d}-{val_loss:.2f}.hdf5`라면,
파일 이름에 에폭 번호와 검증 손실값<sub>validation loss</sub>을 넣어
모델의 체크포인트를 저장합니다.
__인자__
- __filepath__: `string`, 모델 파일을 저장할 경로.
- __monitor__: 기록할 항목.
- __verbose__: 상세 정보 표시 정도, 0 혹은 1.
- __save_best_only__: `True`인 경우
`__monitor__`값을 기준으로 최신이고 가장 좋은 모델은 덮어쓰지 않습니다.
- __save_weights_only__: `True`인 경우 모델의 가중치만 저장되고
(`model.save_weights(filepath)`), 아닌 경우
전체 모델이 저장됩니다 (`model.save(filepath)`).
- __mode__: {auto, min, max} 중 하나.
`save_best_only=True`이면
`monitor`값을 최대화할지 최소화할지에 따라
현재 저장 파일을 덮어쓸지 결정합니다. `monitor=val_acc`의 경우
`mode=max`가 되어야 하며, `monitor=val_loss`라면
`mode=min`이 되어야 합니다. `auto`의 경우
`monitor`값을 통해 자동으로 설정됩니다.
- __period__: 체크포인트를 저장할 간격(에폭의 수).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L851)</span>
### RemoteMonitor
```python
keras.callbacks.RemoteMonitor(root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None, send_as_json=False)
```
이벤트를 서버에 스트리밍하는 콜백
`requests` 라이브러리가 필요합니다.
기본값으로 이벤트를 `root + '/publish/epoch/end/'`으로 보냅니다.
요청은 HTTP POST를 이용합니다. `data`인자에 JSON 형식의 딕셔너리로 이벤트 데이터를 전달합니다.
`send_as_json=True`인 경우, `content-type`은
`application/json`입니다. 이외의 경우에는 직렬화<sub>serialized</sub>된 JSON을 전달합니다.
__인자__
- __root__: `string`, 표적 서버의 최상위 url.
- __path__: `string`, 이벤트가 보내질 `root`를 기준으로 한 상대 경로.
- __field__: `string`, 데이터가 저장될 JSON 필드.
폼 내에 payload가 보내지는 경우에만 필드가 사용됩니다
(`send_as_json=False`인 경우).
- __headers__: `dictaionary`, 선택적 커스텀 HTTP 헤더.
- __send_as_json__: `bool`, 요청을 `application/json`으로
보낼지 여부.
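__예시__
다음은 간단한 사용 예시입니다(이벤트를 받는 서버 주소는 예시용 가정입니다).
```python
from keras.callbacks import RemoteMonitor
# http://localhost:9000 에서 이벤트를 받는 서버가 있다고 가정합니다
monitor = RemoteMonitor(root='http://localhost:9000', send_as_json=True)
model.fit(x_train, y_train, callbacks=[monitor])
```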
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L946)</span>
### ReduceLROnPlateau
```python
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0)
```
평가 지표가 향상되지 않는 경우 학습률을 감소시킵니다.
학습이 잘 되지 않을 때 학습률을 2-10배 감소시키면
학습이 향상되기도 합니다. 이 콜백은 `patience` 개의 에폭 동안
평가 지표를 확인하여 학습에 향상이 없으면 학습률을 감소시킵니다.
__예시__
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
__인자__
- __monitor__: 관찰할 항목. 학습률을 감소시킬지 판단할 때 기준이 되는 항목.
- __factor__: 학습률을 줄이는 정도.
new_lr = lr * factor
- __patience__: 학습률을 감소시키기 전에 관찰하는 항목이 향상되지 않은 에폭의 수.
    `patience`개의 에폭 동안 관찰하는 항목이 향상되지 않으면 학습률을 감소시킵니다.
    검증 빈도 (`model.fit(validation_freq=5)`)가 1보다 크다면 매 에폭마다 검증 값<sub>validation quantity</sub>을 계산하지 않습니다.
- __verbose__: `int`. 0: 메세지 없음, 1: 메시지를 업데이트합니다.
- __mode__: {auto, min, max} 중 하나. `min` 모드에서는 관찰하는 항목이
    더 이상 감소하지 않을 때, `max` 모드에서는 관찰하는 항목이
    더 이상 증가하지 않을 때 학습률을 감소시킵니다. `auto` 모드에서는
    관찰하는 항목에 따라 자동으로 설정됩니다.
- __min_delta__: 새로운 최적값을 계산할 기준점. 유의미한 변화에서만 값을 업데이트하기 위해서입니다.
- __cooldown__: 학습률을 감소시킨 뒤 학습을 다시 정상적으로 진행하기 위해 기다려야하는 에폭의 수
- __min_lr__: 학습률의 하한선.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L275)</span>
### Callback
```python
keras.callbacks.Callback()
```
새로운 콜백을 만드는데 사용되는 추상 베이스 클래스.
__속성__
- __params__: `dictionary`. 학습 매개변수
(예. 메세지 출력 여부, 배치 크기, 에폭 수…).
- __model__: `keras.models.Model`의 인스턴스.
학습 중인 모델의 참조.
콜백 메서드가 인자로 받는
`logs` 딕셔너리는 현재 배치 혹은 에폭과 관련된
값에 대한 키를 포함합니다.
현재 `Sequential` 모델 클래스의 `.fit()` 메서드는
콜백에 전달하는 `logs`에 다음의
값을 포함합니다:
on_epoch_end: 로그에 `acc`와 `loss`를 포함하고
`val_loss`와 (`fit`에서 검증을 사용하는 경우)
`val_acc`는(검증과 정확도 모니터링을 사용하는 경우)
선택적으로 포함됩니다.
on_batch_begin: 로그에 `size`와
현재 배치의 샘플의 수를 포함합니다.
on_batch_end: 로그에 `loss`를 포함하고, 선택적으로 `acc`를 포함합니다
(정확도 모니터링을 사용하는 경우).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L477)</span>
### BaseLogger
```python
keras.callbacks.BaseLogger(stateful_metrics=None)
```
평가 지표의 에폭 평균을 축적하는 콜백.
이 콜백은 모든 케라스 모델에 자동으로 적용됩니다.
__인자__
- __stateful_metrics__: 에폭에 걸쳐 평균을 내면 *안 되는*
평가 지표 이름의 `string` Iterable.
`on_epoch_end`에서는 이 리스트의 평가 지표를 원래 값 그대로 로그합니다.
그 외 평가 지표는 `on_epoch_end`에서 평균을 구해 로그합니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L524)</span>
### TerminateOnNaN
```python
keras.callbacks.TerminateOnNaN()
```
NaN 손실이 발생했을 때 학습을 종료시키는 콜백.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L614)</span>
### History
```python
keras.callbacks.History()
```
`History` 객체에 이벤트를 기록하는 콜백.
이 콜백은 모든 케라스 모델에
자동적으로 적용됩니다. `History` 객체는
모델의 `fit` 메서드를 통해 반환됩니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L733)</span>
### EarlyStopping
```python
keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
```
관찰하는 항목이 향상되지 않으면 학습을 멈춥니다.
__인자__
- __monitor__: 관찰할 항목. 학습을 멈출지 판단할 때 기준이 되는 항목.
- __min_delta__: 관찰하는 항목이 향상되었다고 판단하는
최소한의 변화량, 다시 말해 min_delta보다
절대 변화량이 작다면 향상되었다고 판단하지 않습니다.
- __patience__: 학습을 멈추기 전에 관찰하는 항목이 향상되지 않은 에폭의 수.
`patience`개의 에폭 동안 `monitor`의 값에 진전이 없으면 학습을 멈춥니다.
검증 빈도 (`model.fit(validation_freq=5)`)가 1보다 크다면 매 에폭마다 검증 값<sub>validation quantity</sub>을 계산하지 않습니다.
- __verbose__: 상세 정보 표시 정도.
- __mode__: {auto, min, max} 중 하나. `min` 모드에서는
관찰하는 항목이 더 이상 감소하지
않으면 학습을 멈춥니다. `max` 모드에서는
관찰하는 항목이 더 이상 증가하지
않으면 학습이 멈춥니다. `auto` 모드에서는
관찰하는 항목에 따라 자동으로 설정됩니다.
- __baseline__: 관찰하는 항목이 도달해야 하는 최소값.
모델의 향상도가 이 값보다 작으면 학습을 멈춥니다.
- __restore_best_weights__: 관찰 항목이 가장 좋은 값을 보인 에폭의 모델 가중치를 사용할지 여부.
`restore_best_weights=False`인 경우, 가장 최신 단계의 학습에서 나온
모델 가중치를 사용합니다.
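__예시__
다음은 간단한 사용 예시입니다(`model`과 데이터는 이미 정의되어 있다고 가정합니다).
```python
from keras.callbacks import EarlyStopping
# 검증 손실이 3 에폭 동안 향상되지 않으면 학습을 멈추고,
# 가장 좋았던 시점의 모델 가중치를 복원합니다
early_stopping = EarlyStopping(monitor='val_loss', patience=3,
                               restore_best_weights=True)
model.fit(x_train, y_train, validation_split=0.2,
          callbacks=[early_stopping])
```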
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L910)</span>
### LearningRateScheduler
```python
keras.callbacks.LearningRateScheduler(schedule, verbose=0)
```
학습률 스케쥴러.
__인자__
- __schedule__: 에폭 인덱스(`int`, 인덱스는 0에서 시작)와 현재
학습률을 입력값으로 받고, 새로운 학습률(`float`)을 반환하는 함수.
- __verbose__: `int`. 0: 메세지 없음, 1: 메시지를 업데이트합니다.
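__예시__
다음은 간단한 사용 예시입니다(스케쥴 함수의 구체적인 감소 방식은 예시용 가정입니다).
```python
from keras.callbacks import LearningRateScheduler
# 처음 10 에폭 동안은 학습률을 유지하고, 이후에는 매 에폭마다 10%씩 감소시킵니다
def schedule(epoch, lr):
    if epoch < 10:
        return lr
    return lr * 0.9
lr_scheduler = LearningRateScheduler(schedule, verbose=1)
model.fit(x_train, y_train, callbacks=[lr_scheduler])
```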
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/tensorboard_v1.py#L20)</span>
### TensorBoard
```python
keras.callbacks.tensorboard_v1.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
```
TensorBoard 기초 시각화.
[TensorBoard](https://www.tensorflow.org/guide/summaries_and_tensorboard)는
텐서플로우가 제공하는 시각화 도구입니다.
이 콜백은 TensorBoard에 로그를 기록하여
학습과 테스트의 평가 지표를 동적 그래프와
모델 내 다양한 층에 대한 활성화 히스토그램을 통해
시각화를 돕습니다.
텐서플로우를 pip으로 설치했다면, 다음과 같이
명령줄<sub>command line</sub>에서 TensorBoard를 실행할 수 있습니다.
```sh
tensorboard --logdir=/full_path_to_your_logs
```
텐서플로우가 설치만 되어있다면
텐서플로우 외의 백엔드를 사용하는 경우에도
TensorBoard가 동작합니다. 하지만
손실과 평가 지표의 그래프를 보여주는 기능만 사용할 수 있습니다.
__인자__
- __log_dir__: TensorBoard에서 사용할 로그 파일을
저장할 위치 경로.
- __histogram_freq__: 모델의 층에 대해 활성화와 가중치 히스토그램을 계산할
(에폭 내) 빈도. 0인 경우 히스토그램을 계산하지
    않습니다. 히스토그램 시각화를 하려면 검증 데이터가 지정되어야 합니다. (또는, 데이터가 분할<sub>split</sub>되어 있어야 합니다.)
- __batch_size__: 히스토그램을 계산하기 위해 네트워크에 전달할
입력값 배치의 크기.
- __write_graph__: TensorBoard에서 그래프를 시각화할지 여부.
`write_graph=True`인 경우 로그 파일이 상당히 커질 수 있습니다.
- __write_grads__: TensorBoard에서 그래디언트 히스토그램를 시각화할지 여부.
`histogram_freq`이 0보다 커야 합니다.
- __write_images__: TensorBoard에서 이미지로 시각화하기 위해 모델 가중치를
기록할지 여부.
- __embeddings_freq__: 선택된 임베딩 층을 저장할 (에폭 내) 빈도.
0인 경우, 임베딩을 계산하지 않습니다.
TensorBoard의 Embedding 탭에서 시각화할 데이터는
`embeddings_data`로 전달되어야 합니다.
- __embeddings_layer_names__: 관찰할 층 이름의 리스트.
`None`이나 빈 리스트의 경우 모든 임베딩 레이어를 관찰됩니다.
- __embeddings_metadata__: 층 이름을 층의 메타데이터가 저장되는
파일 이름에 매핑하는 `dictionary`. 메타데이터 파일 형식은
[세부사항](https://www.tensorflow.org/guide/embedding#metadata)을
참고하십시오. 모든 임베딩 층에서 동일한 메타데이터
파일을 사용하는 경우 `string`을 전달할 수 있습니다.
- __embeddings_data__: `embeddings_layer_names`에서 설정한 층에
임베딩할 데이터. Numpy 배열 (모델이 하나의 입력값을 갖는 경우) 혹은
Numpy 배열의 리스트 (모델이 여러개의 입력값을 갖는 경우).
임베딩에 대해서 [더 알아보려면](
https://www.tensorflow.org/guide/embedding).
- __update_freq__: `'batch'`, `'epoch'`, 혹은 `int`. `'batch'`를 사용하는 경우
각 배치 이후 손실과 평가 지표를 TensorBoard에 기록합니다.
`'epoch'`의 경우에도 동일합니다. `int`인 경우는 다음의 예시와 같습니다.
예를 들어 `10000`이라면, 10000개의 샘플마다 평가 지표와 손실을
TensorBoard에 기록합니다. 너무 자주 기록하면 학습이 느려질 수 있습니다.
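__예시__
다음은 간단한 사용 예시입니다(로그 경로와 데이터는 예시용 가정이며, 보통 `keras.callbacks.TensorBoard`로 임포트해서 사용합니다).
```python
from keras.callbacks import TensorBoard
tensorboard = TensorBoard(log_dir='./logs',  # 로그를 저장할 경로 (예시용 가정)
                          histogram_freq=1,  # 매 에폭마다 히스토그램을 계산합니다
                          write_graph=True)
# histogram_freq > 0 이므로 검증 데이터를 함께 전달합니다
model.fit(x_train, y_train,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard])
```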
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L1071)</span>
### CSVLogger
```python
keras.callbacks.CSVLogger(filename, separator=',', append=False)
```
에폭의 결과를 csv 파일로 스트리밍하는 콜백.
np.ndarray와 같이 1D iterable을 포함하여
`string`으로 표현할 수 있는 모든 값을 지원합니다.
__예시__
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
__인자__
- __filename__: csv 파일의 파일 이름, 예: 'run/log.csv'.
- __separator__: csv 파일에서 구분자로 사용할 `string`.
- __append__: `append=True`인 경우, 파일이 존재하면 파일의 뒤에 덧붙입니다(학습을 이어나갈 때
유용합니다). `append=False`인 경우, 기존의 파일을 덮어 씁니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks/callbacks.py#L1159)</span>
### LambdaCallback
```python
keras.callbacks.LambdaCallback(on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None)
```
간단한 커스텀 콜백을 즉석에서 만드는 콜백.
이 콜백은 적절한 시점에 호출될 익명 함수를 통해 생성됩니다.
콜백은 다음과 같이 위치 인자를 전달받습니다.
- `on_epoch_begin`과 `on_epoch_end`는 다음과 같은 위치 인자를 전달 받습니다.
`epoch`, `logs`
- `on_batch_begin`과 `on_batch_end`는 다음과 같은 위치 인자를 전달 받습니다.
`batch`, `logs`
- `on_train_begin`과 `on_train_end`는 다음 위치적 인자를 전달 받습니다:
`logs`
__인자__
- __on_epoch_begin__: 각 에폭의 시작에 호출됩니다.
- __on_epoch_end__: 각 에폭의 끝에 호출됩니다.
- __on_batch_begin__: 각 배치의 시작에 호출됩니다.
- __on_batch_end__: 각 배치의 끝에 호출됩니다.
- __on_train_begin__: 모델 학습의 시작에 호출됩니다.
- __on_train_end__: 모델 학습의 끝에 호출됩니다.
__예시__
```python
# 각 배치가 시작할 때 배치 번호를 출력합니다
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch, logs: print(batch))
# 에폭 손실을 JSON 형식으로 파일에 스트림합니다
# 파일 내용은 완벽한 형식의 JSON이 아니며, 줄 마다 JSON 객체가 있는 형식입니다.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# 모델 학습을 끝낸 후 몇 개의 프로세스를 종료합니다.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
---
# 콜백 만들기
베이스 클래스인 `keras.callbacks.Callback`를 확장해서 커스텀 콜백을 만들 수 있습니다. 콜백은 클래스 속성인 `self.model`을 통해서 관련 모델에 접근할 수 있습니다.
다음은 학습 과정 중 각 배치의 손실 리스트를 저장하는 간단한 예시입니다:
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
```
---
### 예시: 손실값의 기록
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
history = LossHistory()
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, callbacks=[history])
print(history.losses)
# 출력값
'''
[0.66047596406559383, 0.3547245744908703, ..., 0.25953155204159617, 0.25901699725311789]
'''
```
---
### 예시: 모델 체크포인트
```python
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
'''
saves the model weights after each epoch if the validation loss decreased
'''
checkpointer = ModelCheckpoint(filepath='/tmp/weights.hdf5', verbose=1, save_best_only=True)
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, validation_data=(X_test, Y_test), callbacks=[checkpointer])
```
| keras-docs-ko/sources/callbacks.md/0 | {
"file_path": "keras-docs-ko/sources/callbacks.md",
"repo_id": "keras-docs-ko",
"token_count": 13242
} | 63 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L58)</span>
### GaussianDropout
```python
keras.layers.GaussianDropout(rate)
```
평균이 1인 가우시안 분포를 가지는 노이즈를 곱합니다.
규제화<sub>regularization</sub> 층이므로, 학습 과정 중에만 활성화됩니다.
__인자__
- __rate__: `float`. `Dropout`과 동일한 개념의 드롭 확률. 곱해지는 노이즈는 `sqrt(rate / (1 - rate))`의 표준편차를 갖습니다.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `GaussianDropout`을
사용하려면 키워드 인자 `input_shape`을 함께 사용하여 형태를 지정해야 합니다.
`input_shape`는 `int`의 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일합니다.
__참조__
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L14)</span>
### GaussianNoise
```python
keras.layers.GaussianNoise(stddev)
```
평균이 0인 가우시안 분포를 가지는 노이즈를 더합니다.
이는 무작위 데이터 증강<sub>augmentation</sub> 기법의 하나로 과적합<sub>overfitting</sub>을 완화하는데 유용합니다.
가우시안 노이즈(GS)는 실수 입력값을 변형할 때 사용됩니다.
규제화 층이므로, 학습 과정 중에만 활성화됩니다.
__인자__
- __stddev__: `float`. 노이즈 분포의 표준 편차<sub>standard deviation</sub>.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `GaussianNoise`을
사용하려면 키워드 인자 `input_shape`을 함께 사용하여 형태를 지정해야 합니다.
`input_shape`는 `int`의 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일합니다.
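다음은 `GaussianNoise`를 모델 입력에 적용하는 최소한의 예시입니다(입력 차원과 층 구성은 예시용 가정입니다).
```python
from keras.models import Sequential
from keras.layers import GaussianNoise, Dense
model = Sequential()
# 학습 중에만 표준편차 0.1의 가우시안 노이즈가 입력에 더해집니다
model.add(GaussianNoise(0.1, input_shape=(64,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(1))
```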
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L106)</span>
### AlphaDropout
```python
keras.layers.AlphaDropout(rate, noise_shape=None, seed=None)
```
입력에 알파 드롭아웃을 적용합니다.
알파 드롭아웃은 드롭아웃 이후에도 자기-정규화<sub>self-normalizing</sub> 특성이 유지
되도록 입력의 평균과 분산을 원래 값으로 유지하는 `Dropout`입니다.
알파 드롭아웃은 음수 포화<sub>saturation</sub> 값에서 무작위로 활성화 값을 지정하기 때문에,
Scaled Exponential Linear Unit(SELU)에서 학습이 잘 됩니다.
__인자__
- __rate__: `float`. `Dropout`과 동일한 개념의 드롭 확률. 곱해지는 노이즈는 `sqrt(rate / (1 - rate))`의 표준편차를 갖습니다.
- __noise_shape__: `int32`의 1차원 텐서. 무작위로 생성된 보관/삭제 플래그의 형태입니다.
- __seed__: `int`. 난수 생성에 사용할 시드.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `AlphaDropout`을
사용하려면 키워드 인자 `input_shape`을 함께 사용하여 형태를 지정해야 합니다.
`input_shape`는 `int`의 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일합니다.
__참조__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
| keras-docs-ko/sources/layers/noise.md/0 | {
"file_path": "keras-docs-ko/sources/layers/noise.md",
"repo_id": "keras-docs-ko",
"token_count": 2362
} | 64 |
# Scikit-Learn API의 래퍼
`keras.wrappers.scikit_learn.py`의 래퍼를 통해 `Sequential` 케라스 모델을 (단일 입력에 한정하여) Scikit-Learn 작업의 일부로 사용할 수 있습니다.
두 가지 래퍼가 이용가능합니다
`keras.wrappers.scikit_learn.KerasClassifier(build_fn=None, **sk_params)`는 Scikit-Learn 분류 인터페이스를 시행하고,
`keras.wrappers.scikit_learn.KerasRegressor(build_fn=None, **sk_params)`는 Scikit-Learn 회귀 인터페이스를 시행합니다.
### 인자
- __build_fn__: 호출가능한 함수 혹은 클래스 인스턴스
- __sk_params__: 모델 생성 및 학습에 사용되는 매개변수(아래 `sk_params`설명 참조)
`build_fn`은 케라스 모델을 생성하고, 컴파일하고, 반환하여,
모델이 학습/예측할 수 있도록 합니다.
`build_fn`은 다음의 세 가지 값 중 하나를 전달받습니다.
1. 함수
2. `__call__` 메소드를 시행하는 클래스의 인스턴스
3. 비워두기. 이는 `KerasClassifier` 혹은 `KerasRegressor`를 상속받는 클래스를
만들어야 함을 뜻합니다. 이 경우 현재 클래스의 `__call__` 메소드가
기본 `build_fn`이 됩니다.
`sk_params`는 모델 매개변수와 조정 매개변수 둘 모두 전달받습니다.
유효한 모델 매개변수는 `build_fn`의 인자입니다.
`build_fn`은 scikit-learn의 다른 에스티메이터처럼 의무적으로 인자에 대한
기본값을 넣도록 하여, `sk_params`에 따로 값을 전달하지 않고 에스티메이터를 만들 수 있도록 한다는 점을
참고하십시오.
`sk_params`는 또한 `fit`, `predict`, `predict_proba`, `score` 메소드를
호출하는데 필요한 매개변수를 전달받습니다(예시: `epochs`, `batch_size`).
조정(예측) 매개변수는 다음과 같은 순서로 선택됩니다.
1. `fit`, `predict`, `predict_proba`, `score` 메소드에
등록된 값들
2. `sk_params`에 전달되는 값
3. `keras.models.Sequential`, `fit`, `predict`, `predict_proba`, `score` 메소드의
기본값
scikit-learn의 `grid_search` API를 사용하는 경우, `sk_params`에 전달되는 모델 생성 및 학습에 사용되는 매개변수는
조정 가능한 매개변수 입니다.
다시 말해, `grid_search`를 사용하여
최적의 `batch_size`나 `epochs`, 그리고 모델 매개변수를 찾아낼 수 있습니다.
| keras-docs-ko/sources/scikit-learn-api.md/0 | {
"file_path": "keras-docs-ko/sources/scikit-learn-api.md",
"repo_id": "keras-docs-ko",
"token_count": 1761
} | 65 |
site_name: Keras 中文文档
theme:
name: null
custom_dir: theme
static_templates:
- 404.html
include_search_page: true
search_index_only: false
highlightjs: true
hljs_languages: []
include_homepage_in_sidebar: true
prev_next_buttons_location: bottom
navigation_depth: 4
titles_only: false
sticky_navigation: true
collapse_navigation: true
docs_dir: sources
repo_url: http://github.com/keras-team/keras-docs-zh
site_url: http://keras.io/zh/
site_description: 'Keras,Python 深度学习库中文文档。'
dev_addr: '0.0.0.0:8000'
google_analytics: ['UA-61785484-1', 'keras.io']
nav:
- 主页: index.md
- 为什么选择 Keras?: why-use-keras.md
- 快速开始:
- Sequential 顺序模型指引: getting-started/sequential-model-guide.md
- 函数式 API 指引: getting-started/functional-api-guide.md
- FAQ 常见问题解答: getting-started/faq.md
- 模型:
- 关于 Keras 模型: models/about-keras-models.md
- Sequential 顺序模型: models/sequential.md
- Model (函数式 API): models/model.md
- 网络层:
- 关于 Keras 网络层: layers/about-keras-layers.md
- 核心网络层: layers/core.md
- 卷积层 Convolutional Layers: layers/convolutional.md
- 池化层 Pooling Layers: layers/pooling.md
- 局部连接层 Locally-connected Layers: layers/local.md
- 循环层 Recurrent Layers: layers/recurrent.md
- 嵌入层 Embedding Layers: layers/embeddings.md
- 融合层 Merge Layers: layers/merge.md
- 高级激活层 Advanced Activations Layers: layers/advanced-activations.md
- 标准化层 Normalization Layers: layers/normalization.md
- 噪声层 Noise layers: layers/noise.md
- 层封装器 Layer wrappers: layers/wrappers.md
- 编写你自己的层: layers/writing-your-own-keras-layers.md
- 数据预处理:
- 序列预处理: preprocessing/sequence.md
- 文本预处理: preprocessing/text.md
- 图像预处理: preprocessing/image.md
- 损失函数 Losses: losses.md
- 评估标准 Metrics: metrics.md
- 优化器 Optimizers: optimizers.md
- 激活函数 Activations: activations.md
- 回调 Callbacks: callbacks.md
- 常用数据集 Datasets: datasets.md
- 应用 Applications: applications.md
- 后端 Backend: backend.md
- 初始化 Initializers: initializers.md
- 正则化 Regularizers: regularizers.md
- 约束项 Constraints: constraints.md
- 可视化 Visualization: visualization.md
- Scikit-learn API: scikit-learn-api.md
- 工具 Utils: utils.md
- 贡献: contributing.md
- 经典样例:
- RNN 加法: examples/addition_rnn.md
- 自定义层 - antirectifier: examples/antirectifier.md
- Baby RNN: examples/babi_rnn.md
- Baby MemNN: examples/babi_memnn.md
- CIFAR-10 CNN: examples/cifar10_cnn.md
- CIFAR-10 ResNet: examples/cifar10_resnet.md
- 卷积滤波器可视化: examples/conv_filter_visualization.md
- 卷积 LSTM: examples/conv_lstm.md
- Deep Dream: examples/deep_dream.md
- 图片 OCR: examples/image_ocr.md
- 双向 LSTM: examples/imdb_bidirectional_lstm.md
- 1D CNN 文本分类: examples/imdb_cnn.md
- CNN-LSTM 情感分类: examples/imdb_cnn_lstm.md
- Fasttext 文本分类: examples/imdb_fasttext.md
- LSTM 情感分类: examples/imdb_lstm.md
- Sequence to sequence - 训练: examples/lstm_seq2seq.md
- Sequence to sequence - 预测: examples/lstm_seq2seq_restore.md
- Stateful LSTM: examples/lstm_stateful.md
- LSTM for 文本生成: examples/lstm_text_generation.md
- GAN 辅助分类器: examples/mnist_acgan.md
| keras-docs-zh/mkdocs.yml/0 | {
"file_path": "keras-docs-zh/mkdocs.yml",
"repo_id": "keras-docs-zh",
"token_count": 1619
} | 66 |
# 可视化 VGG16 的过滤器,通过输入空间梯度提升。
该脚本可以在几分钟内在 CPU 上运行完。
结果示例: 
```python
from __future__ import print_function
import time
import numpy as np
from PIL import Image as pil_image
from keras.preprocessing.image import save_img
from keras import layers
from keras.applications import vgg16
from keras import backend as K
def normalize(x):
"""用于标准化张量的实用函数。
# 参数
x: 输入张量。
# 返回
标准化的输入张量。
"""
return x / (K.sqrt(K.mean(K.square(x))) + K.epsilon())
def deprocess_image(x):
"""用于将 float 数组转换为有效 uint8 图像的实用函数。
# 参数
x: 表示生成图像的 numpy 数组。
# 返回
经处理的 numpy 阵列,可用于 imshow 等。
"""
# 标准化张量: center 为 0., 保证 std 为 0.25
x -= x.mean()
x /= (x.std() + K.epsilon())
x *= 0.25
# 裁剪为 [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# 转换为 RGB 数组
x *= 255
if K.image_data_format() == 'channels_first':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def process_image(x, former):
"""用于将 float 数组转换为有效 uint8 图像转换回 float 数组的实用函数。
`deprocess_image` 反向操作。
# 参数
x: numpy 数组,可用于 imshow 等。
former: 前身 numpy 数组,
需要确定前者的均值和方差。
# 返回
一个处理后的 numpy 数组,表示一幅生成图像。
"""
if K.image_data_format() == 'channels_first':
x = x.transpose((2, 0, 1))
return (x / 255 - 0.5) * 4 * former.std() + former.mean()
def visualize_layer(model,
layer_name,
step=1.,
epochs=15,
upscaling_steps=9,
upscaling_factor=1.2,
output_dim=(412, 412),
filter_range=(0, None)):
"""可视化某个模型中一个转换层的最相关过滤器。
# 参数
model: 包含 layer_name 的模型。
layer_name: 需要可视化的层的名称。
必须是模型的一部分。
step: 梯度提升步长。
epochs: 梯度提升迭代轮次。
upscaling_steps: upscaling 步数。
起始图像为 (80, 80)。
upscaling_factor: 将图像缓慢提升到 output_dim 的因子。
output_dim: [img_width, img_height] 输出图像维度。
filter_range: 元组 [lower, upper]
决定需要计算的过滤器数目。
如果第二个值为 `None`,
最后一个过滤器将被推断为上边界。
"""
def _generate_filter_image(input_img,
layer_output,
filter_index):
"""为一个特定的过滤器生成图像。
# 参数
input_img: 输入图像张量。
layer_output: 输出图像张量。
filter_index: 需要处理的过滤器数目。
假设可用。
# 返回
要么是 None,如果无法生成图像。
要么是图像(数组)本身以及最后的 loss 组成的元组。
"""
s_time = time.time()
# 构建一个损失函数,使所考虑的层的第 n 个过滤器的激活最大化
if K.image_data_format() == 'channels_first':
loss = K.mean(layer_output[:, filter_index, :, :])
else:
loss = K.mean(layer_output[:, :, :, filter_index])
# 计算这种损失的输入图像的梯度
grads = K.gradients(loss, input_img)[0]
# 标准化技巧:将梯度标准化
grads = normalize(grads)
# 此函数返回给定输入图片的损失和梯度
iterate = K.function([input_img], [loss, grads])
# 从带有一些随机噪音的灰色图像开始
intermediate_dim = tuple(
int(x / (upscaling_factor ** upscaling_steps)) for x in output_dim)
if K.image_data_format() == 'channels_first':
input_img_data = np.random.random(
(1, 3, intermediate_dim[0], intermediate_dim[1]))
else:
input_img_data = np.random.random(
(1, intermediate_dim[0], intermediate_dim[1], 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# 缓慢放大原始图像的尺寸可以防止可视化结构的主导高频现象发生
# (如果我们直接计算 412d-image 时该现象就会发生。)
# 作为每个后续维度的更好起点,因此它避免了较差的局部最小值
for up in reversed(range(upscaling_steps)):
# 执行 20 次梯度提升
for _ in range(epochs):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
                # 一些过滤器被卡在了 0,我们可以跳过它们
if loss_value <= K.epsilon():
return None
# 计算放大维度
intermediate_dim = tuple(
int(x / (upscaling_factor ** up)) for x in output_dim)
# 放大
img = deprocess_image(input_img_data[0])
img = np.array(pil_image.fromarray(img).resize(intermediate_dim,
pil_image.BICUBIC))
input_img_data = np.expand_dims(
process_image(img, input_img_data[0]), 0)
# 解码生成的输入图像
img = deprocess_image(input_img_data[0])
e_time = time.time()
print('Costs of filter {:3}: {:5.0f} ( {:4.2f}s )'.format(filter_index,
loss_value,
e_time - s_time))
return img, loss_value
def _draw_filters(filters, n=None):
"""在 nxn 网格中绘制最佳过滤器。
# 参数
filters: 每个已处理过滤器的生成图像及其相应的损失的列表。
n: 网格维度。
如果为 None,将使用最大可能的方格
"""
if n is None:
n = int(np.floor(np.sqrt(len(filters))))
# 假设损失最大的过滤器看起来更好看。
# 我们只保留顶部 n*n 过滤器。
filters.sort(key=lambda x: x[1], reverse=True)
filters = filters[:n * n]
# 构建一个有足够空间的黑色图像
# 例如,8 x 8 个过滤器,总尺寸为 412 x 412,每个过滤器 5px 间隔的图像
MARGIN = 5
width = n * output_dim[0] + (n - 1) * MARGIN
height = n * output_dim[1] + (n - 1) * MARGIN
stitched_filters = np.zeros((width, height, 3), dtype='uint8')
# 用我们保存的过滤器填充图像
for i in range(n):
for j in range(n):
img, _ = filters[i * n + j]
width_margin = (output_dim[0] + MARGIN) * i
height_margin = (output_dim[1] + MARGIN) * j
stitched_filters[
width_margin: width_margin + output_dim[0],
height_margin: height_margin + output_dim[1], :] = img
# 将结果保存到磁盘
save_img('vgg_{0:}_{1:}x{1:}.png'.format(layer_name, n), stitched_filters)
# 这是输入图像的占位符
assert len(model.inputs) == 1
input_img = model.inputs[0]
# 获取每个『关键』图层的符号输出(我们给它们唯一的名称)。
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
output_layer = layer_dict[layer_name]
assert isinstance(output_layer, layers.Conv2D)
# 计算要处理的过滤范围
filter_lower = filter_range[0]
filter_upper = (filter_range[1]
if filter_range[1] is not None
else len(output_layer.get_weights()[1]))
assert(filter_lower >= 0
and filter_upper <= len(output_layer.get_weights()[1])
and filter_upper > filter_lower)
print('Compute filters {:} to {:}'.format(filter_lower, filter_upper))
# 迭代每个过滤器并生成其相应的图像
processed_filters = []
for f in range(filter_lower, filter_upper):
img_loss = _generate_filter_image(input_img, output_layer.output, f)
if img_loss is not None:
processed_filters.append(img_loss)
print('{} filter processed.'.format(len(processed_filters)))
# Finally draw and store the best filters to disk
_draw_filters(processed_filters)
if __name__ == '__main__':
# 我们想要可视化的图层的名称
# (see model definition at keras/applications/vgg16.py)
LAYER_NAME = 'block5_conv1'
# 构建 ImageNet 权重预训练的 VGG16 网络
vgg = vgg16.VGG16(weights='imagenet', include_top=False)
print('Model loaded.')
vgg.summary()
# 调用示例函数
visualize_layer(vgg, LAYER_NAME)
```
| keras-docs-zh/sources/examples/conv_filter_visualization.md/0 | {
"file_path": "keras-docs-zh/sources/examples/conv_filter_visualization.md",
"repo_id": "keras-docs-zh",
"token_count": 5560
} | 67 |
# 使用分层 RNN (HRNN) 对 MNIST 数字进行分类的示例。
HRNN 可以跨复杂序列跨多个时间层次学习。
通常,HRNN 的第一循环层将句子(例如单词向量)编码成句子向量。
然后,第二循环层将这样的向量序列(由第一层编码)编码为文档向量。
该文档向量被认为既可以保留上下文的单词级结构也可以保留句子级结构。
# 参考文献
- [A Hierarchical Neural Autoencoder for Paragraphs and Documents](https://arxiv.org/abs/1506.01057)
使用 HRNN 对段落和文档进行编码。
结果表明,HRNN 优于标准 RNN,并且可能在摘要或问题解答等更复杂的生成任务中发挥某些作用。
- [Hierarchical recurrent neural network for skeleton based action recognition](http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7298714)
通过 3 级双向 HRNN 与完全连接的层相结合,在基于骨骼的动作识别方面取得了最新的成果。
在下面的 MNIST 示例中,第一 LSTM 层首先将形状 (28, 1) 的每一列像素编码为形状 (128,) 的列矢量。
然后,第二个 LSTM 层将形状 (28, 128) 的这 28 个列向量编码为代表整个图像的图像向量。
添加最终的密集层以进行预测。
5 个轮次后:train acc:0.9858, val acc:0.9864
```python
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Dense, TimeDistributed
from keras.layers import LSTM
# 训练参数。
batch_size = 32
num_classes = 10
epochs = 5
# 嵌入尺寸。
row_hidden = 128
col_hidden = 128
# 数据,分为训练集和测试集。
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# 将数据重塑为 4D 以进行分层 RNN。
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# 将类向量转换为二进制类矩阵。
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
row, col, pixel = x_train.shape[1:]
# 4D 输入。
x = Input(shape=(row, col, pixel))
# 使用 TimeDistributed Wrapper 对一行像素进行编码。
encoded_rows = TimeDistributed(LSTM(row_hidden))(x)
# 对已编码行的列进行编码。
encoded_columns = LSTM(col_hidden)(encoded_rows)
# 最终预测和模型。
prediction = Dense(num_classes, activation='softmax')(encoded_columns)
model = Model(x, prediction)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# 训练。
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
# 评估。
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
``` | keras-docs-zh/sources/examples/mnist_hierarchical_rnn.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_hierarchical_rnn.md",
"repo_id": "keras-docs-zh",
"token_count": 1699
} | 68 |
# 开始使用 Keras 函数式 API
Keras 函数式 API 是定义复杂模型(如多输出模型、有向无环图或具有共享层的模型)的方法。
这部分文档假设你已经对 `Sequential` 顺序模型比较熟悉。
让我们先从一些简单的示例开始。
-----
## 例一:全连接网络
`Sequential` 模型可能是实现这种网络的一个更好选择,但这个例子能够帮助我们进行一些简单的理解。
- 网络层的实例是可调用的,它以张量为参数,并且返回一个张量
- 输入和输出均为张量,它们都可以用来定义一个模型(`Model`)
- 这样的模型同 Keras 的 `Sequential` 模型一样,都可以被训练
```python
from keras.layers import Input, Dense
from keras.models import Model
# 这部分返回一个张量
inputs = Input(shape=(784,))
# 层的实例是可调用的,它以张量为参数,并且返回一个张量
output_1 = Dense(64, activation='relu')(inputs)
output_2 = Dense(64, activation='relu')(output_1)
predictions = Dense(10, activation='softmax')(output_2)
# 这部分创建了一个包含输入层和三个全连接层的模型
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels) # 开始训练
```
-----
## 所有的模型都可调用,就像网络层一样
利用函数式 API,可以轻易地重用训练好的模型:可以将任何模型看作是一个层,然后通过传递一个张量来调用它。注意,在调用模型时,您不仅重用模型的*结构*,还重用了它的权重。
```python
x = Input(shape=(784,))
# 这是可行的,并且返回上面定义的 10-way softmax。
y = model(x)
```
这种方式能允许我们快速创建可以处理*序列输入*的模型。只需一行代码,你就将图像分类模型转换为视频分类模型。
```python
from keras.layers import TimeDistributed
# 输入张量是 20 个时间步的序列,
# 每一个时间为一个 784 维的向量
input_sequences = Input(shape=(20, 784))
# 这部分将我们之前定义的模型应用于输入序列中的每个时间步。
# 之前定义的模型的输出是一个 10-way softmax,
# 因而下面的层的输出将是维度为 10 的 20 个向量的序列。
processed_sequences = TimeDistributed(model)(input_sequences)
```
-----
## 多输入多输出模型
以下是函数式 API 的一个很好的例子:具有多个输入和输出的模型。函数式 API 使处理大量交织的数据流变得容易。
来考虑下面的模型。我们试图预测 Twitter 上的一条新闻标题有多少转发和点赞数。模型的主要输入将是新闻标题本身,即一系列词语,但是为了增添趣味,我们的模型还添加了其他的辅助输入来接收额外的数据,例如新闻标题的发布的时间等。
该模型也将通过两个损失函数进行监督学习。较早地在模型中使用主损失函数,是深度学习模型的一个良好正则方法。
模型结构如下图所示:
<img src="/img/multi-input-multi-output-graph.png" alt="multi-input-multi-output-graph" style="width: 400px;"/>
让我们用函数式 API 来实现它。
主要输入接收新闻标题本身,即一个整数序列(每个整数编码一个词)。这些整数在 1 到 10,000 之间(10,000 个词的词汇表),且序列长度为 100 个词。
```python
from keras.layers import Input, Embedding, LSTM, Dense
from keras.models import Model
import numpy as np
np.random.seed(0) # 设置随机种子,用于复现结果
# 标题输入:接收一个含有 100 个整数的序列,每个整数在 1 到 10000 之间。
# 注意我们可以通过传递一个 "name" 参数来命名任何层。
main_input = Input(shape=(100,), dtype='int32', name='main_input')
# Embedding 层将输入序列编码为一个稠密向量的序列,
# 每个向量维度为 512。
x = Embedding(output_dim=512, input_dim=10000, input_length=100)(main_input)
# LSTM 层把向量序列转换成单个向量,
# 它包含整个序列的上下文信息
lstm_out = LSTM(32)(x)
```
在这里,我们插入辅助损失,使得即使在模型主损失很高的情况下,LSTM 层和 Embedding 层都能被平稳地训练。
```python
auxiliary_output = Dense(1, activation='sigmoid', name='aux_output')(lstm_out)
```
此时,我们将辅助输入数据与 LSTM 层的输出连接起来,输入到模型中:
```python
auxiliary_input = Input(shape=(5,), name='aux_input')
x = keras.layers.concatenate([lstm_out, auxiliary_input])
# 堆叠多个全连接网络层
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
# 最后添加主要的逻辑回归层
main_output = Dense(1, activation='sigmoid', name='main_output')(x)
```
然后定义一个具有两个输入和两个输出的模型:
```python
model = Model(inputs=[main_input, auxiliary_input], outputs=[main_output, auxiliary_output])
```
现在编译模型,并给辅助损失分配一个 0.2 的权重。如果要为不同的输出指定不同的 `loss_weights` 或 `loss`,可以使用列表或字典。
在这里,我们给 `loss` 参数传递单个损失函数,这个损失将用于所有的输出。
```python
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
loss_weights=[1., 0.2])
```
我们可以通过传递输入数组和目标数组的列表来训练模型:
```python
headline_data = np.round(np.abs(np.random.rand(12, 100) * 100))
additional_data = np.random.randn(12, 5)
headline_labels = np.random.randn(12, 1)
additional_labels = np.random.randn(12, 1)
model.fit([headline_data, additional_data], [headline_labels, additional_labels],
epochs=50, batch_size=32)
```
由于输入和输出均被命名了(在定义时传递了一个 `name` 参数),我们也可以通过以下方式编译模型:
```python
model.compile(optimizer='rmsprop',
loss={'main_output': 'binary_crossentropy', 'aux_output': 'binary_crossentropy'},
loss_weights={'main_output': 1., 'aux_output': 0.2})
# 然后使用以下方式训练:
model.fit({'main_input': headline_data, 'aux_input': additional_data},
{'main_output': headline_labels, 'aux_output': additional_labels},
epochs=50, batch_size=32)
```
若使用此模型做推理,可以
```python
model.predict({'main_input': headline_data, 'aux_input': additional_data})
```
或者
```python
pred = model.predict([headline_data, additional_data])
```
-----
## Shared layers
Another good use for the functional API are models that use shared layers. Let's take a look at shared layers.
Consider a dataset of tweets. We want to build a model that can tell whether two tweets are from the same person or not (this can allow us to compare users by the similarity of their tweets, for instance).
One way to achieve this is to build a model that encodes two tweets into two vectors, concatenates the vectors, and then adds a logistic regression on top; this outputs the probability that the two tweets share the same author. The model would then be trained on pairs of positive and negative tweet pairs.
Because the problem is symmetric, the mechanism that encodes the first tweet should be fully reused (weights and all) to encode the second tweet. Here we use a shared LSTM layer to encode the tweets.
Let's build this with the functional API. First, we will transform a tweet into a matrix of shape `(280, 256)`, i.e. 280 characters per tweet, with each character encoded as a 256-dimensional one-hot vector (covering 256 frequent characters).
```python
import keras
from keras.layers import Input, LSTM, Dense
from keras.models import Model
tweet_a = Input(shape=(280, 256))
tweet_b = Input(shape=(280, 256))
```
要在不同的输入上共享同一个层,只需实例化该层一次,然后根据需要传入你想要的输入即可:
```python
# This layer can take as input a matrix and will return a vector of size 64
shared_lstm = LSTM(64)
# When we reuse the same layer instance multiple times, the weights of the layer are also being reused (it is effectively *the same* layer)
encoded_a = shared_lstm(tweet_a)
encoded_b = shared_lstm(tweet_b)
# We can then concatenate the two vectors:
merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1)
# And add a logistic regression on top
predictions = Dense(1, activation='sigmoid')(merged_vector)
# We define a trainable model linking the tweet inputs to the predictions
model = Model(inputs=[tweet_a, tweet_b], outputs=predictions)
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit([data_a, data_b], labels, epochs=10)
```
Let's pause for a moment to look at how to read the output, or the output shape, of a shared layer.
-----
## The concept of layer "node"
Whenever you call a layer on some input, you create a new tensor (the output of the layer), and you add a "node" to the layer, linking the input tensor to the output tensor. When you call the same layer multiple times, that layer owns multiple nodes indexed as 0, 1, 2...
In previous versions of Keras, you could obtain the output tensor of a layer instance via `layer.get_output()`, or its output shape via `layer.output_shape`. You still can (except that `get_output()` has been replaced by the property `output`). But what if a layer is connected to multiple inputs?
As long as a layer is only connected to one input, there is no confusion, and `.output` will return the one output of the layer:
```python
a = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
assert lstm.output == encoded_a
```
But that does not work if the layer has multiple inputs:
```python
a = Input(shape=(280, 256))
b = Input(shape=(280, 256))
lstm = LSTM(32)
encoded_a = lstm(a)
encoded_b = lstm(b)
lstm.output
```
```
>> AttributeError: Layer lstm_1 has multiple inbound nodes,
hence the notion of "layer output" is ill-defined.
Use `get_output_at(node_index)` instead.
```
Okay then. The following works:
```python
assert lstm.get_output_at(0) == encoded_a
assert lstm.get_output_at(1) == encoded_b
```
Simple enough, right?
The same is true for the properties `input_shape` and `output_shape`: as long as the layer has only one node, or as long as all nodes have the same input/output shapes, then the notion of "layer output/input shape" is well defined, and that one shape will be returned by `layer.output_shape` / `layer.input_shape`. But if, for instance, you apply the same `Conv2D` layer first to an input of shape `(32, 32, 3)` and then to an input of shape `(64, 64, 3)`, the layer will have multiple input/output shapes, and you will have to fetch them by specifying the index of the node they belong to:
```python
a = Input(shape=(32, 32, 3))
b = Input(shape=(64, 64, 3))
conv = Conv2D(16, (3, 3), padding='same')
conved_a = conv(a)
# Only one input so far, the following will work:
assert conv.input_shape == (None, 32, 32, 3)
conved_b = conv(b)
# Now the `.input_shape` property wouldn't work, but this does:
assert conv.get_input_shape_at(0) == (None, 32, 32, 3)
assert conv.get_input_shape_at(1) == (None, 64, 64, 3)
```
-----
## More examples
Code examples are still the best way to get started, so here are a few more.
### Inception model
For more information about the Inception architecture, see [Going Deeper with Convolutions](http://arxiv.org/abs/1409.4842).
```python
from keras.layers import Conv2D, MaxPooling2D, Input
input_img = Input(shape=(256, 256, 3))
tower_1 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_1 = Conv2D(64, (3, 3), padding='same', activation='relu')(tower_1)
tower_2 = Conv2D(64, (1, 1), padding='same', activation='relu')(input_img)
tower_2 = Conv2D(64, (5, 5), padding='same', activation='relu')(tower_2)
tower_3 = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input_img)
tower_3 = Conv2D(64, (1, 1), padding='same', activation='relu')(tower_3)
output = keras.layers.concatenate([tower_1, tower_2, tower_3], axis=1)
```
### Residual connection on a convolution layer
For more information about Residual Networks, see [Deep Residual Learning for Image Recognition](http://arxiv.org/abs/1512.03385).
```python
from keras.layers import Conv2D, Input
# input tensor for a 3-channel 256x256 image
x = Input(shape=(256, 256, 3))
# a 3x3 convolution with 3 output channels (same as the input channels)
y = Conv2D(3, (3, 3), padding='same')(x)
# this returns x + y
z = keras.layers.add([x, y])
```
### Shared vision model
This model reuses the same image-processing module on two inputs, to classify whether two MNIST digits are the same digit or different digits.
```python
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten
from keras.models import Model
# First, define the vision model
digit_input = Input(shape=(27, 27, 1))
x = Conv2D(64, (3, 3))(digit_input)
x = Conv2D(64, (3, 3))(x)
x = MaxPooling2D((2, 2))(x)
out = Flatten()(x)
vision_model = Model(digit_input, out)
# Then, define the tell-digits-apart model
digit_a = Input(shape=(27, 27, 1))
digit_b = Input(shape=(27, 27, 1))
# The vision model will be shared, weights and all
out_a = vision_model(digit_a)
out_b = vision_model(digit_b)
concatenated = keras.layers.concatenate([out_a, out_b])
out = Dense(1, activation='sigmoid')(concatenated)
classification_model = Model([digit_a, digit_b], out)
```
### Visual question answering model
This model can select the correct one-word answer when asked a natural-language question about a picture.
It works by encoding the question and the image into vectors, concatenating the two, and training a logistic regression on top to pick one likely answer word from a vocabulary.
```python
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import Input, LSTM, Embedding, Dense
from keras.models import Model, Sequential
# First, let's define a vision model using a Sequential model.
# This model will encode an image into a vector.
vision_model = Sequential()
vision_model.add(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(224, 224, 3)))
vision_model.add(Conv2D(64, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
vision_model.add(Conv2D(128, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(Conv2D(256, (3, 3), activation='relu'))
vision_model.add(MaxPooling2D((2, 2)))
vision_model.add(Flatten())
# Now let's get a tensor with the output of our vision model:
image_input = Input(shape=(224, 224, 3))
encoded_image = vision_model(image_input)
# Next, let's define a language model to encode the question into a vector.
# Each question will be at most 100 words long, with word indices from 1 to 9999.
question_input = Input(shape=(100,), dtype='int32')
embedded_question = Embedding(input_dim=10000, output_dim=256, input_length=100)(question_input)
encoded_question = LSTM(256)(embedded_question)
# Let's concatenate the question vector and the image vector:
merged = keras.layers.concatenate([encoded_question, encoded_image])
# And let's train a logistic regression over 1000 words on top:
output = Dense(1000, activation='softmax')(merged)
# This is our final model:
vqa_model = Model(inputs=[image_input, question_input], outputs=output)
# The next stage would be training this model on actual data.
```
### Video question answering model
Now that we have trained our image QA model, we can quickly turn it into a video QA model. With appropriate training, you will be able to show it a short video (e.g. 100 frames of a human action) and ask it a natural-language question about the video (e.g. "What sport is this person playing?" -> "football").
```python
from keras.layers import TimeDistributed
video_input = Input(shape=(100, 224, 224, 3))
# This is our video encoded via the previously defined vision model (weights are reused)
encoded_frame_sequence = TimeDistributed(vision_model)(video_input) # the output will be a sequence of vectors
encoded_video = LSTM(256)(encoded_frame_sequence) # the output will be a vector
# This is a model-level representation of the question encoder, reusing the same weights as before:
question_encoder = Model(inputs=question_input, outputs=encoded_question)
# Let's use it to encode the question:
video_question_input = Input(shape=(100,), dtype='int32')
encoded_video_question = question_encoder(video_question_input)
# And this is our video question answering model:
merged = keras.layers.concatenate([encoded_video, encoded_video_question])
output = Dense(1000, activation='softmax')(merged)
video_qa_model = Model(inputs=[video_input, video_question_input], outputs=output)
```
| keras-docs-zh/sources/getting-started/functional-api-guide.md/0 | {
"file_path": "keras-docs-zh/sources/getting-started/functional-api-guide.md",
"repo_id": "keras-docs-zh",
"token_count": 9481
} | 69 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/embeddings.py#L16)</span>
### Embedding
```python
keras.layers.Embedding(input_dim, output_dim, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=False, input_length=None)
```
Turns positive integers (indexes) into dense vectors of fixed size.
e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
This layer can only be used as the first layer in a model.
__Example__
```python
model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# The model will take as input an integer matrix of size (batch, input_length).
# The largest integer (i.e. word index) in the input should be no larger than 999 (the vocabulary size).
# Now model.output_shape == (None, 10, 64), where None is the batch dimension.
input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64)
```
__Arguments__
- __input_dim__: int > 0. Size of the vocabulary,
  i.e. maximum integer index + 1.
- __output_dim__: int >= 0. Dimension of the dense embedding.
- __embeddings_initializer__: Initializer for the `embeddings` matrix
  (see [initializers](../initializers.md)).
- __embeddings_regularizer__: Regularizer function applied to the `embeddings` matrix
  (see [regularizer](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer (its "activation")
  (see [regularizer](../regularizers.md)).
- __embeddings_constraint__: Constraint function applied to the `embeddings` matrix
  (see [constraints](../constraints.md)).
- __mask_zero__: Whether or not the input value 0 is a special "padding" value that should be masked out.
  This is useful for variable-length [recurrent layers](recurrent.md).
  If this is `True`, then all subsequent layers in the model need to support masking, otherwise an exception will be raised.
  As a consequence, if mask_zero is `True`, index 0 cannot be used in the vocabulary
  (input_dim should equal vocabulary size + 1).
- __input_length__: Length of input sequences, when it is constant.
  This argument is required if you are going to connect `Flatten` then `Dense` layers upstream
  (without it, the shape of the dense outputs cannot be computed).
__Input shape__
2D tensor with shape: `(batch_size, sequence_length)`.
__Output shape__
3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
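As an additional illustration (not part of the original reference), here is a minimal sketch of using `mask_zero=True` with a masking-aware downstream layer; note that index 0 is reserved for padding, so `input_dim` here is the vocabulary size + 1:
```python
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense

model = Sequential()
# 1000-word vocabulary + 1 reserved padding index
model.add(Embedding(input_dim=1001, output_dim=64,
                    mask_zero=True, input_length=10))
model.add(LSTM(32))  # LSTM supports the mask produced by the Embedding layer
model.add(Dense(1, activation='sigmoid'))
model.compile('rmsprop', 'binary_crossentropy')
```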
__References__
- [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
| keras-docs-zh/sources/layers/embeddings.md/0 | {
"file_path": "keras-docs-zh/sources/layers/embeddings.md",
"repo_id": "keras-docs-zh",
"token_count": 1317
} | 70 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/sequence.py#L16)</span>
### TimeseriesGenerator
```python
keras.preprocessing.sequence.TimeseriesGenerator(data, targets, length, sampling_rate=1, stride=1, start_index=0, end_index=None, shuffle=False, reverse=False, batch_size=128)
```
Utility class for generating batches of temporal data.
This class takes in a sequence of data points gathered at equal intervals, along with time-series parameters such as stride, length of history, etc., to produce batches for training/validation.
__Arguments__
- __data__: Indexable generator (such as a list or a Numpy array) containing consecutive data points (timesteps). The data should be 2D, with axis 0 being the time dimension.
- __targets__: Targets corresponding to the timesteps in `data`. It should have the same length as `data`.
- __length__: Length of the output sequences (in number of timesteps).
- __sampling_rate__: Period between successive individual timesteps within sequences. For rate `r`, timesteps `data[i]`, `data[i-r]`, ... `data[i - length]` are used to create a sample sequence.
- __stride__: Period between successive output sequences. For stride `s`, consecutive output samples would be `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
- __start_index__: Data points earlier than `start_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation.
- __end_index__: Data points later than `end_index` will not be used in the output sequences. This is useful to reserve part of the data for test or validation.
- __shuffle__: Whether to shuffle output samples, or instead draw them in chronological order.
- __reverse__: Boolean: if `true`, timesteps in each output sample will be in reverse chronological order.
- __batch_size__: Number of timeseries samples in each batch (except maybe the last one).
__Returns__
A [Sequence](https://keras.io/zh/utils/#sequence) instance.
__Example__
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = TimeseriesGenerator(data, targets,
length=10, sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
batch_0 = data_gen[0]
x, y = batch_0
assert np.array_equal(x,
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]]))
assert np.array_equal(y,
np.array([[10], [11]]))
```
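As a further illustration (not part of the original reference), the `stride` and `reverse` arguments can be combined with the setup above; only the batch shape is checked here, since the exact windows depend on the chosen arguments:
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np

data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
# Take every 3rd window, with timesteps in reverse chronological order.
data_gen = TimeseriesGenerator(data, targets,
                               length=10, sampling_rate=2,
                               stride=3, reverse=True,
                               batch_size=2)
x, y = data_gen[0]
print(x.shape)  # (2, 5, 1): two samples of 5 timesteps each
```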
---
### pad_sequences
```python
keras.preprocessing.sequence.pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.0)
```
Pads or truncates multiple sequences to the same length.
This function transforms a list of `num_samples` sequences (lists of integers) into a 2D Numpy array of shape `(num_samples, num_timesteps)`. `num_timesteps` is either the `maxlen` argument if provided, or the length of the longest sequence.
Sequences that are shorter than `num_timesteps` are padded with `value` at the end.
Sequences longer than `num_timesteps` are truncated so that they fit the desired length. The positions where padding and truncation happen are determined by the `padding` and `truncating` arguments, respectively.
Pre-padding is the default.
__Arguments__
- __sequences__: List of lists, where each element is a sequence.
- __maxlen__: Int, maximum length of all sequences.
- __dtype__: Type of the output sequences.
  To pad sequences with variable-length strings, you can use `object`.
- __padding__: String, 'pre' or 'post': pad either before or after each sequence.
- __truncating__: String, 'pre' or 'post': remove values from sequences larger than `maxlen`, either at the beginning or at the end of the sequences.
- __value__: Float, padding value.
__Returns__
- __x__: Numpy array with shape `(len(sequences), maxlen)`.
__Raises__
- ValueError: In case of invalid values for `truncating` or `padding`, or in case of an invalid shape for a `sequences` entry.
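__Example__
A short usage sketch (not part of the original reference) illustrating the default pre-padding as well as explicit post-padding/truncation:
```python
from keras.preprocessing.sequence import pad_sequences

sequences = [[1, 2, 3], [4, 5], [6]]
# Default: pad at the front ('pre') up to the length of the longest sequence.
print(pad_sequences(sequences))
# [[1 2 3]
#  [0 4 5]
#  [0 0 6]]
# Pad/truncate at the end to a fixed length of 2.
print(pad_sequences(sequences, maxlen=2, padding='post', truncating='post'))
# [[1 2]
#  [4 5]
#  [6 0]]
```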
---
### skipgrams
```python
keras.preprocessing.sequence.skipgrams(sequence, vocabulary_size, window_size=4, negative_samples=1.0, shuffle=True, categorical=False, sampling_table=None, seed=None)
```
Generates skipgram word pairs.
This function transforms a sequence of word indexes (a list of integers) into tuples of words of the form:
- (word, word from the same window), with label 1 (positive samples).
- (word, random word from the vocabulary), with label 0 (negative samples).
To learn more about Skipgram, see this classic paper by Mikolov et al.: [Efficient Estimation of Word Representations in Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)
__Arguments__
- __sequence__: A word sequence (sentence), encoded as a list of word indices (integers). If using a `sampling_table`, word indices are expected to match the rank of the words in a reference dataset (e.g. 10 would encode the 10th most frequently occurring word). Note that index 0 in the vocabulary is a non-word and will be skipped.
- __vocabulary_size__: Int, maximum possible word index + 1.
- __window_size__: Int, size of the sampling window (technically half the window). The window of the word `w_i` will be `[i - window_size, i + window_size+1]`.
- __negative_samples__: Float >= 0. 0 for no negative (i.e. random) samples. 1 for the same number as positive samples.
- __shuffle__: Whether to shuffle the word couples before returning them.
- __categorical__: Boolean. If False, labels will be integers (e.g. `[0, 1, 1 .. ]`), if True, labels will be categorical, e.g. `[[1,0],[0,1],[0,1] .. ]`.
- __sampling_table__: 1D array of size `vocabulary_size` where the entry i encodes the probability to sample the word of rank i.
- __seed__: Random seed.
__Returns__
couples, labels: where `couples` are int pairs and `labels` are either 0 or 1.
__Note__
By convention, index 0 in the vocabulary is a non-word and will be skipped.
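__Example__
A minimal sketch (not part of the original reference) of generating skipgram pairs from a toy sentence; the exact pairs returned depend on shuffling and on the random negative samples:
```python
from keras.preprocessing.sequence import skipgrams

# Toy "sentence" of word indices; index 0 is reserved and skipped.
sequence = [1, 2, 3, 4, 5]
couples, labels = skipgrams(sequence, vocabulary_size=6,
                            window_size=2, negative_samples=1.0, seed=42)
print(couples[:3], labels[:3])
```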
---
### make_sampling_table
```python
keras.preprocessing.sequence.make_sampling_table(size, sampling_factor=1e-05)
```
Generates a word rank-based probabilistic sampling table.
Used for generating the `sampling_table` argument of `skipgrams`. `sampling_table[i]` is the probability of sampling the i-th most common word in a dataset (more common words should be sampled less frequently, for balance).
The sampling probabilities are generated according to the sampling distribution used in word2vec:
```python
p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
(word_frequency / sampling_factor)))
```
We assume that the word frequencies follow Zipf's law (s=1) to derive a numerical approximation of frequency(rank):
`frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`, where `gamma` is the Euler-Mascheroni constant.
__Arguments__
- __size__: Int, number of possible words to sample.
- __sampling_factor__: The sampling factor in the word2vec formula.
__Returns__
A 1D Numpy array of length `size`, where the i-th entry is the probability that a word of rank i should be sampled.
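__Example__
A short sketch (not part of the original reference) showing how the table is typically passed to `skipgrams`; the sentence below is a hypothetical list of word-rank indices:
```python
from keras.preprocessing.sequence import make_sampling_table, skipgrams

vocabulary_size = 10000
sampling_table = make_sampling_table(vocabulary_size)
sequence = [10, 253, 7, 1020, 33]
couples, labels = skipgrams(sequence, vocabulary_size,
                            window_size=4, sampling_table=sampling_table)
```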
| keras-docs-zh/sources/preprocessing/sequence.md/0 | {
"file_path": "keras-docs-zh/sources/preprocessing/sequence.md",
"repo_id": "keras-docs-zh",
"token_count": 4011
} | 71 |
container-test:
docker build -t keras-io .
docker run --rm -d -p 8000:8000 --name keras-io-server keras-io
sleep 10
docker exec keras-io-server echo I am alive
docker kill keras-io-server
| keras-io/Makefile/0 | {
"file_path": "keras-io/Makefile",
"repo_id": "keras-io",
"token_count": 69
} | 72 |
<jupyter_start><jupyter_text>Denoising Diffusion Implicit Models**Author:** [András Béres](https://www.linkedin.com/in/andras-beres-789190210)**Date created:** 2022/06/24**Last modified:** 2022/06/24**Description:** Generating images of flowers with denoising diffusion implicit models. Introduction What are diffusion models?Recently, [denoising diffusion models](https://arxiv.org/abs/2006.11239), including[score-based generative models](https://arxiv.org/abs/1907.05600), gained popularity as apowerful class of generative models, that can [rival](https://arxiv.org/abs/2105.05233)even [generative adversarial networks (GANs)](https://arxiv.org/abs/1406.2661) in imagesynthesis quality. They tend to generate more diverse samples, while being stable totrain and easy to scale. Recent large diffusion models, such as[DALL-E 2](https://openai.com/dall-e-2/) and [Imagen](https://imagen.research.google/),have shown incredible text-to-image generation capability. One of their drawbacks ishowever, that they are slower to sample from, because they require multiple forward passesfor generating an image.Diffusion refers to the process of turning a structured signal (an image) into noisestep-by-step. By simulating diffusion, we can generate noisy images from our trainingimages, and can train a neural network to try to denoise them. Using the trained networkwe can simulate the opposite of diffusion, reverse diffusion, which is the process of animage emerging from noise.One-sentence summary: **diffusion models are trained to denoise noisy images, and cangenerate images by iteratively denoising pure noise.** Goal of this exampleThis code example intends to be a minimal but feature-complete (with a generation qualitymetric) implementation of diffusion models, with modest compute requirements andreasonable performance. My implementation choices and hyperparameter tuning were donewith these goals in mind.Since currently the literature of diffusion models is[mathematically quite complex](https://arxiv.org/abs/2206.00364)with multiple theoretical frameworks([score matching](https://arxiv.org/abs/1907.05600),[differential equations](https://arxiv.org/abs/2011.13456),[Markov chains](https://arxiv.org/abs/2006.11239)) and sometimes even[conflicting notations (see Appendix C.2)](https://arxiv.org/abs/2010.02502),it can be daunting trying to understandthem. My view of these models in this example will be that they learn to separate anoisy image into its image and Gaussian noise components.In this example I made effort to break down all long mathematical expressions intodigestible pieces and gave all variables explanatory names. I also included numerouslinks to relevant literature to help interested readers dive deeper into the topic, inthe hope that this code example will become a good starting point for practitionerslearning about diffusion models.In the following sections, we will implement a continuous time version of[Denoising Diffusion Implicit Models (DDIMs)](https://arxiv.org/abs/2010.02502)with deterministic sampling. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import keras
from keras import layers
from keras import ops<jupyter_output><empty_output><jupyter_text>Hyperparameters<jupyter_code># data
dataset_name = "oxford_flowers102"
dataset_repetitions = 5
num_epochs = 1 # train for at least 50 epochs for good results
image_size = 64
# KID = Kernel Inception Distance, see related section
kid_image_size = 75
kid_diffusion_steps = 5
plot_diffusion_steps = 20
# sampling
min_signal_rate = 0.02
max_signal_rate = 0.95
# architecture
embedding_dims = 32
embedding_max_frequency = 1000.0
widths = [32, 64, 96, 128]
block_depth = 2
# optimization
batch_size = 64
ema = 0.999
learning_rate = 1e-3
weight_decay = 1e-4<jupyter_output><empty_output><jupyter_text>Data pipelineWe will use the[Oxford Flowers 102](https://www.tensorflow.org/datasets/catalog/oxford_flowers102)dataset forgenerating images of flowers, which is a diverse natural dataset containing around 8,000images. Unfortunately the official splits are imbalanced, as most of the images arecontained in the test split. We create new splits (80% train, 20% validation) using the[Tensorflow Datasets slicing API](https://www.tensorflow.org/datasets/splits). We applycenter crops as preprocessing, and repeat the dataset multiple times (reason given in thenext section).<jupyter_code>def preprocess_image(data):
# center crop image
height = ops.shape(data["image"])[0]
width = ops.shape(data["image"])[1]
crop_size = ops.minimum(height, width)
image = tf.image.crop_to_bounding_box(
data["image"],
(height - crop_size) // 2,
(width - crop_size) // 2,
crop_size,
crop_size,
)
# resize and clip
# for image downsampling it is important to turn on antialiasing
image = tf.image.resize(image, size=[image_size, image_size], antialias=True)
return ops.clip(image / 255.0, 0.0, 1.0)
def prepare_dataset(split):
# the validation dataset is shuffled as well, because data order matters
# for the KID estimation
return (
tfds.load(dataset_name, split=split, shuffle_files=True)
.map(preprocess_image, num_parallel_calls=tf.data.AUTOTUNE)
.cache()
.repeat(dataset_repetitions)
.shuffle(10 * batch_size)
.batch(batch_size, drop_remainder=True)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# load dataset
train_dataset = prepare_dataset("train[:80%]+validation[:80%]+test[:80%]")
val_dataset = prepare_dataset("train[80%:]+validation[80%:]+test[80%:]")<jupyter_output><empty_output><jupyter_text>Kernel inception distance[Kernel Inception Distance (KID)](https://arxiv.org/abs/1801.01401) is an image qualitymetric which was proposed as a replacement for the popular[Frechet Inception Distance (FID)](https://arxiv.org/abs/1706.08500).I prefer KID to FID because it is simpler toimplement, can be estimated per-batch, and is computationally lighter. More details[here](https://keras.io/examples/generative/gan_ada/kernel-inception-distance).In this example, the images are evaluated at the minimal possible resolution of theInception network (75x75 instead of 299x299), and the metric is only measured on thevalidation set for computational efficiency. We also limit the number of sampling stepsat evaluation to 5 for the same reason.Since the dataset is relatively small, we go over the train and validation splitsmultiple times per epoch, because the KID estimation is noisy and compute-intensive, sowe want to evaluate only after many iterations, but for many iterations.<jupyter_code>@keras.saving.register_keras_serializable()
class KID(keras.metrics.Metric):
def __init__(self, name, **kwargs):
super().__init__(name=name, **kwargs)
# KID is estimated per batch and is averaged across batches
self.kid_tracker = keras.metrics.Mean(name="kid_tracker")
# a pretrained InceptionV3 is used without its classification layer
# transform the pixel values to the 0-255 range, then use the same
# preprocessing as during pretraining
self.encoder = keras.Sequential(
[
keras.Input(shape=(image_size, image_size, 3)),
layers.Rescaling(255.0),
layers.Resizing(height=kid_image_size, width=kid_image_size),
layers.Lambda(keras.applications.inception_v3.preprocess_input),
keras.applications.InceptionV3(
include_top=False,
input_shape=(kid_image_size, kid_image_size, 3),
weights="imagenet",
),
layers.GlobalAveragePooling2D(),
],
name="inception_encoder",
)
def polynomial_kernel(self, features_1, features_2):
feature_dimensions = ops.cast(ops.shape(features_1)[1], dtype="float32")
return (
features_1 @ ops.transpose(features_2) / feature_dimensions + 1.0
) ** 3.0
def update_state(self, real_images, generated_images, sample_weight=None):
real_features = self.encoder(real_images, training=False)
generated_features = self.encoder(generated_images, training=False)
# compute polynomial kernels using the two sets of features
kernel_real = self.polynomial_kernel(real_features, real_features)
kernel_generated = self.polynomial_kernel(
generated_features, generated_features
)
kernel_cross = self.polynomial_kernel(real_features, generated_features)
# estimate the squared maximum mean discrepancy using the average kernel values
batch_size = real_features.shape[0]
batch_size_f = ops.cast(batch_size, dtype="float32")
mean_kernel_real = ops.sum(kernel_real * (1.0 - ops.eye(batch_size))) / (
batch_size_f * (batch_size_f - 1.0)
)
mean_kernel_generated = ops.sum(
kernel_generated * (1.0 - ops.eye(batch_size))
) / (batch_size_f * (batch_size_f - 1.0))
mean_kernel_cross = ops.mean(kernel_cross)
kid = mean_kernel_real + mean_kernel_generated - 2.0 * mean_kernel_cross
# update the average KID estimate
self.kid_tracker.update_state(kid)
def result(self):
return self.kid_tracker.result()
def reset_state(self):
self.kid_tracker.reset_state()<jupyter_output><empty_output><jupyter_text>Network architectureHere we specify the architecture of the neural network that we will use for denoising. Webuild a [U-Net](https://arxiv.org/abs/1505.04597) with identical input and outputdimensions. U-Net is a popular semantic segmentation architecture, whose main idea isthat it progressively downsamples and then upsamples its input image, and adds skipconnections between layers having the same resolution. These help with gradient flow andavoid introducing a representation bottleneck, unlike usual[autoencoders](https://www.deeplearningbook.org/contents/autoencoders.html). Based onthis, one can view[diffusion models as denoising autoencoders](https://benanne.github.io/2022/01/31/diffusion.html)without a bottleneck.The network takes two inputs, the noisy images and the variances of their noisecomponents. The latter is required since denoising a signal requires different operationsat different levels of noise. We transform the noise variances using sinusoidalembeddings, similarly to positional encodings used both in[transformers](https://arxiv.org/abs/1706.03762) and[NeRF](https://arxiv.org/abs/2003.08934). This helps the network to be[highly sensitive](https://arxiv.org/abs/2006.10739) to the noise level, which iscrucial for good performance. We implement sinusoidal embeddings using a[Lambda layer](https://keras.io/api/layers/core_layers/lambda/).Some other considerations:* We build the network using the[Keras Functional API](https://keras.io/guides/functional_api/), and use[closures](https://twitter.com/fchollet/status/1441927912836321280) to build blocks oflayers in a consistent style.* [Diffusion models](https://arxiv.org/abs/2006.11239) embed the index of the timestep ofthe diffusion process instead of the noise variance, while[score-based models (Table 1)](https://arxiv.org/abs/2206.00364)usually use some function of the noise level. Iprefer the latter so that we can change the sampling schedule at inference time, withoutretraining the network.* [Diffusion models](https://arxiv.org/abs/2006.11239) input the embedding to eachconvolution block separately. We only input it at the start of the network forsimplicity, which in my experience barely decreases performance, because the skip andresidual connections help the information propagate through the network properly.* In the literature it is common to use[attention layers](https://keras.io/api/layers/attention_layers/multi_head_attention/)at lower resolutions for better global coherence. I omitted it for simplicity.* We disable the learnable center and scale parameters of the batch normalization layers,since the following convolution layers make them redundant.* We initialize the last convolution's kernel to all zeros as a good practice, making thenetwork predict only zeros after initialization, which is the mean of its targets. Thiswill improve behaviour at the start of training and make the mean squared error lossstart at exactly 1.<jupyter_code>@keras.saving.register_keras_serializable()
def sinusoidal_embedding(x):
embedding_min_frequency = 1.0
frequencies = ops.exp(
ops.linspace(
ops.log(embedding_min_frequency),
ops.log(embedding_max_frequency),
embedding_dims // 2,
)
)
angular_speeds = ops.cast(2.0 * math.pi * frequencies, "float32")
embeddings = ops.concatenate(
[ops.sin(angular_speeds * x), ops.cos(angular_speeds * x)], axis=3
)
return embeddings
def ResidualBlock(width):
def apply(x):
input_width = x.shape[3]
if input_width == width:
residual = x
else:
residual = layers.Conv2D(width, kernel_size=1)(x)
x = layers.BatchNormalization(center=False, scale=False)(x)
x = layers.Conv2D(width, kernel_size=3, padding="same", activation="swish")(x)
x = layers.Conv2D(width, kernel_size=3, padding="same")(x)
x = layers.Add()([x, residual])
return x
return apply
def DownBlock(width, block_depth):
def apply(x):
x, skips = x
for _ in range(block_depth):
x = ResidualBlock(width)(x)
skips.append(x)
x = layers.AveragePooling2D(pool_size=2)(x)
return x
return apply
def UpBlock(width, block_depth):
def apply(x):
x, skips = x
x = layers.UpSampling2D(size=2, interpolation="bilinear")(x)
for _ in range(block_depth):
x = layers.Concatenate()([x, skips.pop()])
x = ResidualBlock(width)(x)
return x
return apply
def get_network(image_size, widths, block_depth):
noisy_images = keras.Input(shape=(image_size, image_size, 3))
noise_variances = keras.Input(shape=(1, 1, 1))
e = layers.Lambda(sinusoidal_embedding, output_shape=(1, 1, 32))(noise_variances)
e = layers.UpSampling2D(size=image_size, interpolation="nearest")(e)
x = layers.Conv2D(widths[0], kernel_size=1)(noisy_images)
x = layers.Concatenate()([x, e])
skips = []
for width in widths[:-1]:
x = DownBlock(width, block_depth)([x, skips])
for _ in range(block_depth):
x = ResidualBlock(widths[-1])(x)
for width in reversed(widths[:-1]):
x = UpBlock(width, block_depth)([x, skips])
x = layers.Conv2D(3, kernel_size=1, kernel_initializer="zeros")(x)
return keras.Model([noisy_images, noise_variances], x, name="residual_unet")<jupyter_output><empty_output><jupyter_text>This showcases the power of the Functional API. Note how we built a relatively complexU-Net with skip connections, residual blocks, multiple inputs, and sinusoidal embeddingsin 80 lines of code! Diffusion model Diffusion scheduleLet us say, that a diffusion process starts at time = 0, and ends at time = 1. Thisvariable will be called diffusion time, and can be either discrete (common in diffusionmodels) or continuous (common in score-based models). I choose the latter, so that thenumber of sampling steps can be changed at inference time.We need to have a function that tells us at each point in the diffusion process the noiselevels and signal levels of the noisy image corresponding to the actual diffusion time.This will be called the diffusion schedule (see `diffusion_schedule()`).This schedule outputs two quantities: the `noise_rate` and the `signal_rate`(corresponding to sqrt(1 - alpha) and sqrt(alpha) in the DDIM paper, respectively). Wegenerate the noisy image by weighting the random noise and the training image by theircorresponding rates and adding them together.Since the (standard normal) random noises and the (normalized) images both have zero meanand unit variance, the noise rate and signal rate can be interpreted as the standarddeviation of their components in the noisy image, while the squares of their rates can beinterpreted as their variance (or their power in the signal processing sense). The rateswill always be set so that their squared sum is 1, meaning that the noisy images willalways have unit variance, just like its unscaled components.We will use a simplified, continuous version of the[cosine schedule (Section 3.2)](https://arxiv.org/abs/2102.09672),that is quite commonly used in the literature.This schedule is symmetric, slow towards the start and end of the diffusion process, andit also has a nice geometric interpretation, using the[trigonometric properties of the unit circle](https://en.wikipedia.org/wiki/Unit_circle/media/File:Circle-trig6.svg): Training processThe training procedure (see `train_step()` and `denoise()`) of denoising diffusion modelsis the following: we sample random diffusion times uniformly, and mix the training imageswith random gaussian noises at rates corresponding to the diffusion times. Then, we trainthe model to separate the noisy image to its two components.Usually, the neural network is trained to predict the unscaled noise component, fromwhich the predicted image component can be calculated using the signal and noise rates.Pixelwise[mean squared error](https://keras.io/api/losses/regression_losses/mean_squared_error-function) shouldbe used theoretically, however I recommend using[mean absolute error](https://keras.io/api/losses/regression_losses/mean_absolute_error-function)instead (similarly to[this](https://github.com/lucidrains/denoising-diffusion-pytorch/blob/master/denoising_diffusion_pytorch/denoising_diffusion_pytorch.pyL371)implementation), which produces better results on this dataset. Sampling (reverse diffusion)When sampling (see `reverse_diffusion()`), at each step we take the previous estimate ofthe noisy image and separate it into image and noise using our network. 
Then we recombinethese components using the signal and noise rate of the following step.Though a similar view is shown in[Equation 12 of DDIMs](https://arxiv.org/abs/2010.02502), I believe the above explanationof the sampling equation is not widely known.This example only implements the deterministic sampling procedure from DDIM, whichcorresponds to *eta = 0* in the paper. One can also use stochastic sampling (in whichcase the model becomes a[Denoising Diffusion Probabilistic Model (DDPM)](https://arxiv.org/abs/2006.11239)),where a part of the predicted noise isreplaced with the same or larger amount of random noise([see Equation 16 and below](https://arxiv.org/abs/2010.02502)).Stochastic sampling can be used without retraining the network (since both models aretrained the same way), and it can improve sample quality, while on the other handrequiring more sampling steps usually.<jupyter_code>@keras.saving.register_keras_serializable()
class DiffusionModel(keras.Model):
def __init__(self, image_size, widths, block_depth):
super().__init__()
self.normalizer = layers.Normalization()
self.network = get_network(image_size, widths, block_depth)
self.ema_network = keras.models.clone_model(self.network)
def compile(self, **kwargs):
super().compile(**kwargs)
self.noise_loss_tracker = keras.metrics.Mean(name="n_loss")
self.image_loss_tracker = keras.metrics.Mean(name="i_loss")
self.kid = KID(name="kid")
@property
def metrics(self):
return [self.noise_loss_tracker, self.image_loss_tracker, self.kid]
def denormalize(self, images):
# convert the pixel values back to 0-1 range
images = self.normalizer.mean + images * self.normalizer.variance**0.5
return ops.clip(images, 0.0, 1.0)
def diffusion_schedule(self, diffusion_times):
# diffusion times -> angles
start_angle = ops.cast(ops.arccos(max_signal_rate), "float32")
end_angle = ops.cast(ops.arccos(min_signal_rate), "float32")
diffusion_angles = start_angle + diffusion_times * (end_angle - start_angle)
# angles -> signal and noise rates
signal_rates = ops.cos(diffusion_angles)
noise_rates = ops.sin(diffusion_angles)
# note that their squared sum is always: sin^2(x) + cos^2(x) = 1
return noise_rates, signal_rates
def denoise(self, noisy_images, noise_rates, signal_rates, training):
# the exponential moving average weights are used at evaluation
if training:
network = self.network
else:
network = self.ema_network
# predict noise component and calculate the image component using it
pred_noises = network([noisy_images, noise_rates**2], training=training)
pred_images = (noisy_images - noise_rates * pred_noises) / signal_rates
return pred_noises, pred_images
def reverse_diffusion(self, initial_noise, diffusion_steps):
# reverse diffusion = sampling
num_images = initial_noise.shape[0]
step_size = 1.0 / diffusion_steps
# important line:
# at the first sampling step, the "noisy image" is pure noise
# but its signal rate is assumed to be nonzero (min_signal_rate)
next_noisy_images = initial_noise
for step in range(diffusion_steps):
noisy_images = next_noisy_images
# separate the current noisy image to its components
diffusion_times = ops.ones((num_images, 1, 1, 1)) - step * step_size
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=False
)
# network used in eval mode
# remix the predicted components using the next signal and noise rates
next_diffusion_times = diffusion_times - step_size
next_noise_rates, next_signal_rates = self.diffusion_schedule(
next_diffusion_times
)
next_noisy_images = (
next_signal_rates * pred_images + next_noise_rates * pred_noises
)
# this new noisy image will be used in the next step
return pred_images
def generate(self, num_images, diffusion_steps):
# noise -> images -> denormalized images
initial_noise = keras.random.normal(
shape=(num_images, image_size, image_size, 3)
)
generated_images = self.reverse_diffusion(initial_noise, diffusion_steps)
generated_images = self.denormalize(generated_images)
return generated_images
def train_step(self, images):
# normalize images to have standard deviation of 1, like the noises
images = self.normalizer(images, training=True)
noises = keras.random.normal(shape=(batch_size, image_size, image_size, 3))
# sample uniform random diffusion times
diffusion_times = keras.random.uniform(
shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
)
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
# mix the images with noises accordingly
noisy_images = signal_rates * images + noise_rates * noises
with tf.GradientTape() as tape:
# train the network to separate noisy images to their components
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=True
)
noise_loss = self.loss(noises, pred_noises) # used for training
image_loss = self.loss(images, pred_images) # only used as metric
gradients = tape.gradient(noise_loss, self.network.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.network.trainable_weights))
self.noise_loss_tracker.update_state(noise_loss)
self.image_loss_tracker.update_state(image_loss)
# track the exponential moving averages of weights
for weight, ema_weight in zip(self.network.weights, self.ema_network.weights):
ema_weight.assign(ema * ema_weight + (1 - ema) * weight)
# KID is not measured during the training phase for computational efficiency
return {m.name: m.result() for m in self.metrics[:-1]}
def test_step(self, images):
# normalize images to have standard deviation of 1, like the noises
images = self.normalizer(images, training=False)
noises = keras.random.normal(shape=(batch_size, image_size, image_size, 3))
# sample uniform random diffusion times
diffusion_times = keras.random.uniform(
shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
)
noise_rates, signal_rates = self.diffusion_schedule(diffusion_times)
# mix the images with noises accordingly
noisy_images = signal_rates * images + noise_rates * noises
# use the network to separate noisy images to their components
pred_noises, pred_images = self.denoise(
noisy_images, noise_rates, signal_rates, training=False
)
noise_loss = self.loss(noises, pred_noises)
image_loss = self.loss(images, pred_images)
self.image_loss_tracker.update_state(image_loss)
self.noise_loss_tracker.update_state(noise_loss)
# measure KID between real and generated images
# this is computationally demanding, kid_diffusion_steps has to be small
images = self.denormalize(images)
generated_images = self.generate(
num_images=batch_size, diffusion_steps=kid_diffusion_steps
)
self.kid.update_state(images, generated_images)
return {m.name: m.result() for m in self.metrics}
def plot_images(self, epoch=None, logs=None, num_rows=3, num_cols=6):
# plot random generated images for visual evaluation of generation quality
generated_images = self.generate(
num_images=num_rows * num_cols,
diffusion_steps=plot_diffusion_steps,
)
plt.figure(figsize=(num_cols * 2.0, num_rows * 2.0))
for row in range(num_rows):
for col in range(num_cols):
index = row * num_cols + col
plt.subplot(num_rows, num_cols, index + 1)
plt.imshow(generated_images[index])
plt.axis("off")
plt.tight_layout()
plt.show()
plt.close()<jupyter_output><empty_output><jupyter_text>Training<jupyter_code># create and compile the model
model = DiffusionModel(image_size, widths, block_depth)
# below tensorflow 2.9:
# pip install tensorflow_addons
# import tensorflow_addons as tfa
# optimizer=tfa.optimizers.AdamW
model.compile(
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
loss=keras.losses.mean_absolute_error,
)
# pixelwise mean absolute error is used as loss
# save the best model based on the validation KID metric
checkpoint_path = "checkpoints/diffusion_model.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
save_weights_only=True,
monitor="val_kid",
mode="min",
save_best_only=True,
)
# calculate mean and variance of training dataset for normalization
model.normalizer.adapt(train_dataset)
# run training and plot generated images periodically
model.fit(
train_dataset,
epochs=num_epochs,
validation_data=val_dataset,
callbacks=[
keras.callbacks.LambdaCallback(on_epoch_end=model.plot_images),
checkpoint_callback,
],
)<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code># load the best model and generate images
model.load_weights(checkpoint_path)
model.plot_images()<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/ddim.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/ddim.ipynb",
"repo_id": "keras-io",
"token_count": 9744
} | 73 |
<jupyter_start><jupyter_text>Text Generation using FNet **Author:** [Darshan Deshpande](https://twitter.com/getdarshan) **Date created:** 2021/10/05 **Last modified:** 2021/10/05 **Description:** FNet transformer for text generation in Keras. Introduction The original transformer implementation (Vaswani et al., 2017) was one of the major breakthroughs in Natural Language Processing, giving rise to important architectures such as BERT and GPT. However, the drawback of these architectures is that the self-attention mechanism they use is computationally expensive. The FNet architecture proposes to replace this self-attention mechanism with a leaner one: a Fourier transformation-based linear mixer for input tokens. The FNet model was able to achieve 92-97% of BERT's accuracy while training 80% faster on GPUs and almost 70% faster on TPUs. This type of design provides an efficient and small model size, leading to faster inference times. In this example, we will implement and train this architecture on the Cornell Movie Dialog corpus to show the applicability of this model to text generation. Imports<jupyter_code>import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import os
# Defining hyperparameters
VOCAB_SIZE = 8192
MAX_SAMPLES = 50000
BUFFER_SIZE = 20000
MAX_LENGTH = 40
EMBED_DIM = 256
LATENT_DIM = 512
NUM_HEADS = 8
BATCH_SIZE = 64<jupyter_output><empty_output><jupyter_text>Loading dataWe will be using the Cornell Dialog Corpus. We will parse the movie conversations intoquestions and answers sets.<jupyter_code>path_to_zip = keras.utils.get_file(
"cornell_movie_dialogs.zip",
origin="http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip",
extract=True,
)
path_to_dataset = os.path.join(
os.path.dirname(path_to_zip), "cornell movie-dialogs corpus"
)
path_to_movie_lines = os.path.join(path_to_dataset, "movie_lines.txt")
path_to_movie_conversations = os.path.join(path_to_dataset, "movie_conversations.txt")
def load_conversations():
# Helper function for loading the conversation splits
id2line = {}
with open(path_to_movie_lines, errors="ignore") as file:
lines = file.readlines()
for line in lines:
parts = line.replace("\n", "").split(" +++$+++ ")
id2line[parts[0]] = parts[4]
inputs, outputs = [], []
with open(path_to_movie_conversations, "r") as file:
lines = file.readlines()
for line in lines:
parts = line.replace("\n", "").split(" +++$+++ ")
# get conversation in a list of line ID
conversation = [line[1:-1] for line in parts[3][1:-1].split(", ")]
for i in range(len(conversation) - 1):
inputs.append(id2line[conversation[i]])
outputs.append(id2line[conversation[i + 1]])
if len(inputs) >= MAX_SAMPLES:
return inputs, outputs
return inputs, outputs
questions, answers = load_conversations()
# Splitting training and validation sets
train_dataset = tf.data.Dataset.from_tensor_slices((questions[:40000], answers[:40000]))
val_dataset = tf.data.Dataset.from_tensor_slices((questions[40000:], answers[40000:]))<jupyter_output><empty_output><jupyter_text>Preprocessing and Tokenization<jupyter_code>def preprocess_text(sentence):
sentence = tf.strings.lower(sentence)
# Adding a space between the punctuation and the last word to allow better tokenization
sentence = tf.strings.regex_replace(sentence, r"([?.!,])", r" \1 ")
# Replacing multiple continuous spaces with a single space
sentence = tf.strings.regex_replace(sentence, r"\s\s+", " ")
# Replacing non english words with spaces
sentence = tf.strings.regex_replace(sentence, r"[^a-z?.!,]+", " ")
sentence = tf.strings.strip(sentence)
sentence = tf.strings.join(["[start]", sentence, "[end]"], separator=" ")
return sentence
vectorizer = layers.TextVectorization(
VOCAB_SIZE,
standardize=preprocess_text,
output_mode="int",
output_sequence_length=MAX_LENGTH,
)
# We will adapt the vectorizer to both the questions and answers
# This dataset is batched to parallelize and speed up the process
vectorizer.adapt(tf.data.Dataset.from_tensor_slices((questions + answers)).batch(128))<jupyter_output><empty_output><jupyter_text>Tokenizing and padding sentences using `TextVectorization`<jupyter_code>def vectorize_text(inputs, outputs):
inputs, outputs = vectorizer(inputs), vectorizer(outputs)
# One extra padding token to the right to match the output shape
outputs = tf.pad(outputs, [[0, 1]])
return (
{"encoder_inputs": inputs, "decoder_inputs": outputs[:-1]},
{"outputs": outputs[1:]},
)
train_dataset = train_dataset.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
train_dataset = (
train_dataset.cache()
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE)
.prefetch(tf.data.AUTOTUNE)
)
val_dataset = val_dataset.cache().batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Creating the FNet EncoderThe FNet paper proposes a replacement for the standard attention mechanism used by theTransformer architecture (Vaswani et al., 2017).The outputs of the FFT layer are complex numbers. To avoid dealing with complex layers,only the real part (the magnitude) is extracted.The dense layers that follow the Fourier transformation act as convolutions applied onthe frequency domain.<jupyter_code>class FNetEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.dense_proj = keras.Sequential(
[
layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
def call(self, inputs):
# Casting the inputs to complex64
inp_complex = tf.cast(inputs, tf.complex64)
# Projecting the inputs to the frequency domain using FFT2D and
# extracting the real part of the output
fft = tf.math.real(tf.signal.fft2d(inp_complex))
proj_input = self.layernorm_1(inputs + fft)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)<jupyter_output><empty_output><jupyter_text>Creating the DecoderThe decoder architecture remains the same as the one proposed by (Vaswani et al., 2017)in the original transformer architecture, consisting of an embedding, positionalencoding, two masked multi-head attention layers and finally the dense output layers.The architecture that follows is taken from[Deep Learning with Python, second edition, chapter 11](https://www.manning.com/books/deep-learning-with-python-second-edition).<jupyter_code>class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=embed_dim
)
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.embed_dim = embed_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
return tf.math.not_equal(inputs, 0)
class FNetDecoder(layers.Layer):
def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.latent_dim = latent_dim
self.num_heads = num_heads
self.attention_1 = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.attention_2 = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.dense_proj = keras.Sequential(
[
layers.Dense(latent_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
self.layernorm_3 = layers.LayerNormalization()
self.supports_masking = True
def call(self, inputs, encoder_outputs, mask=None):
causal_mask = self.get_causal_attention_mask(inputs)
if mask is not None:
padding_mask = tf.cast(mask[:, tf.newaxis, :], dtype="int32")
padding_mask = tf.minimum(padding_mask, causal_mask)
attention_output_1 = self.attention_1(
query=inputs, value=inputs, key=inputs, attention_mask=causal_mask
)
out_1 = self.layernorm_1(inputs + attention_output_1)
attention_output_2 = self.attention_2(
query=out_1,
value=encoder_outputs,
key=encoder_outputs,
attention_mask=padding_mask,
)
out_2 = self.layernorm_2(out_1 + attention_output_2)
proj_output = self.dense_proj(out_2)
return self.layernorm_3(out_2 + proj_output)
def get_causal_attention_mask(self, inputs):
input_shape = tf.shape(inputs)
batch_size, sequence_length = input_shape[0], input_shape[1]
i = tf.range(sequence_length)[:, tf.newaxis]
j = tf.range(sequence_length)
mask = tf.cast(i >= j, dtype="int32")
mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
mult = tf.concat(
[tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)],
axis=0,
)
return tf.tile(mask, mult)
def create_model():
encoder_inputs = keras.Input(shape=(None,), dtype="int32", name="encoder_inputs")
x = PositionalEmbedding(MAX_LENGTH, VOCAB_SIZE, EMBED_DIM)(encoder_inputs)
encoder_outputs = FNetEncoder(EMBED_DIM, LATENT_DIM)(x)
encoder = keras.Model(encoder_inputs, encoder_outputs)
decoder_inputs = keras.Input(shape=(None,), dtype="int32", name="decoder_inputs")
encoded_seq_inputs = keras.Input(
shape=(None, EMBED_DIM), name="decoder_state_inputs"
)
x = PositionalEmbedding(MAX_LENGTH, VOCAB_SIZE, EMBED_DIM)(decoder_inputs)
x = FNetDecoder(EMBED_DIM, LATENT_DIM, NUM_HEADS)(x, encoded_seq_inputs)
x = layers.Dropout(0.5)(x)
decoder_outputs = layers.Dense(VOCAB_SIZE, activation="softmax")(x)
decoder = keras.Model(
[decoder_inputs, encoded_seq_inputs], decoder_outputs, name="outputs"
)
decoder_outputs = decoder([decoder_inputs, encoder_outputs])
fnet = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs, name="fnet")
return fnet<jupyter_output><empty_output><jupyter_text>Creating and Training the model<jupyter_code>fnet = create_model()
fnet.compile("adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>Here, the `epochs` parameter is set to a single epoch, but in practice the model will take around**20-30 epochs** of training to start outputting comprehensible sentences. Although accuracyis not a good measure for this task, we will use it just to get a hint of the improvementof the network.<jupyter_code>fnet.fit(train_dataset, epochs=1, validation_data=val_dataset)<jupyter_output><empty_output><jupyter_text>Performing inference<jupyter_code>VOCAB = vectorizer.get_vocabulary()
def decode_sentence(input_sentence):
# Mapping the input sentence to tokens and adding start and end tokens
tokenized_input_sentence = vectorizer(
tf.constant("[start] " + preprocess_text(input_sentence) + " [end]")
)
# Initializing the initial sentence consisting of only the start token.
tokenized_target_sentence = tf.expand_dims(VOCAB.index("[start]"), 0)
decoded_sentence = ""
for i in range(MAX_LENGTH):
# Get the predictions
predictions = fnet.predict(
{
"encoder_inputs": tf.expand_dims(tokenized_input_sentence, 0),
"decoder_inputs": tf.expand_dims(
tf.pad(
tokenized_target_sentence,
[[0, MAX_LENGTH - tf.shape(tokenized_target_sentence)[0]]],
),
0,
),
}
)
# Calculating the token with maximum probability and getting the corresponding word
sampled_token_index = tf.argmax(predictions[0, i, :])
sampled_token = VOCAB[sampled_token_index.numpy()]
# If sampled token is the end token then stop generating and return the sentence
if tf.equal(sampled_token_index, VOCAB.index("[end]")):
break
decoded_sentence += sampled_token + " "
tokenized_target_sentence = tf.concat(
[tokenized_target_sentence, [sampled_token_index]], 0
)
return decoded_sentence
decode_sentence("Where have you been all this time?")<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/text_generation_fnet.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/text_generation_fnet.ipynb",
"repo_id": "keras-io",
"token_count": 5365
} | 74 |
# Teach StableDiffusion new concepts via Textual Inversion
**Authors:** Ian Stenbit, [lukewood](https://lukewood.xyz)<br>
**Date created:** 2022/12/09<br>
**Last modified:** 2022/12/09<br>
**Description:** Learning new visual concepts with KerasCV's StableDiffusion implementation.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/fine_tune_via_textual_inversion.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/fine_tune_via_textual_inversion.py)
---
## Textual Inversion
Since its release, StableDiffusion has quickly become a favorite amongst
the generative machine learning community.
The high volume of traffic has led to open source contributed improvements,
heavy prompt engineering, and even the invention of novel algorithms.
Perhaps the most impressive new algorithm being used is
[Textual Inversion](https://github.com/rinongal/textual_inversion), presented in
[_An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion_](https://textual-inversion.github.io/).
Textual Inversion is the process of teaching an image generator a specific visual concept
through the use of fine-tuning. In the diagram below, you can see an
example of this process where the authors teach the model new concepts, calling them
"S_*".

Conceptually, textual inversion works by learning a token embedding for a new text
token, keeping the remaining components of StableDiffusion frozen.
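To make this concrete, here is a minimal, framework-level sketch of what "learning a single new token embedding" means. The vocabulary size, embedding width, and variable names below are illustrative assumptions only, not the actual KerasCV implementation used later in this guide:
```python
import tensorflow as tf

# Illustrative sizes only.
vocab_size, embed_dim = 49408, 768

# The pretrained token embeddings stay frozen...
frozen_embeddings = tf.random.normal((vocab_size, embed_dim))
# ...while the single new token owns the only trainable vector.
new_token_embedding = tf.Variable(tf.random.normal((1, embed_dim)))

def embed_tokens(token_ids):
    # The new token is assigned id `vocab_size`, appended after the frozen table.
    table = tf.concat([frozen_embeddings, new_token_embedding], axis=0)
    return tf.gather(table, token_ids)
```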
This guide shows you how to fine-tune the StableDiffusion model shipped in KerasCV
using the Textual-Inversion algorithm. By the end of the guide, you will be able to
write the "Gandalf the Gray as a <my-funny-cat-token>".

First, let's import the packages we need, and create a
StableDiffusion instance so we can use some of its subcomponents for fine-tuning.
```python
!pip install -q git+https://github.com/keras-team/keras-cv.git
!pip install -q tensorflow==2.11.0
```
```python
import math
import keras_cv
import numpy as np
import tensorflow as tf
from keras_cv import layers as cv_layers
from keras_cv.models.stable_diffusion import NoiseScheduler
from tensorflow import keras
import matplotlib.pyplot as plt
stable_diffusion = keras_cv.models.StableDiffusion()
```
<div class="k-default-codeblock">
```
By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE
```
</div>
Next, let's define a visualization utility to show off the generated images:
```python
def plot_images(images):
plt.figure(figsize=(20, 20))
for i in range(len(images)):
ax = plt.subplot(1, len(images), i + 1)
plt.imshow(images[i])
plt.axis("off")
```
---
## Assembling a text-image pair dataset
In order to train the embedding of our new token, we first must assemble a dataset
consisting of text-image pairs.
Each sample from the dataset must contain an image of the concept we are teaching
StableDiffusion, as well as a caption accurately representing the content of the image.
In this tutorial, we will teach StableDiffusion the concept of Luke and Ian's GitHub
avatars:

First, let's construct an image dataset of cat dolls:
```python
def assemble_image_dataset(urls):
# Fetch all remote files
files = [tf.keras.utils.get_file(origin=url) for url in urls]
# Resize images
resize = keras.layers.Resizing(height=512, width=512, crop_to_aspect_ratio=True)
images = [keras.utils.load_img(img) for img in files]
images = [keras.utils.img_to_array(img) for img in images]
images = np.array([resize(img) for img in images])
# The StableDiffusion image encoder requires images to be normalized to the
# [-1, 1] pixel value range
images = images / 127.5 - 1
# Create the tf.data.Dataset
image_dataset = tf.data.Dataset.from_tensor_slices(images)
# Shuffle and introduce random noise
image_dataset = image_dataset.shuffle(50, reshuffle_each_iteration=True)
image_dataset = image_dataset.map(
cv_layers.RandomCropAndResize(
target_size=(512, 512),
crop_area_factor=(0.8, 1.0),
aspect_ratio_factor=(1.0, 1.0),
),
num_parallel_calls=tf.data.AUTOTUNE,
)
image_dataset = image_dataset.map(
cv_layers.RandomFlip(mode="horizontal"),
num_parallel_calls=tf.data.AUTOTUNE,
)
return image_dataset
```
Next, we assemble a text dataset:
```python
MAX_PROMPT_LENGTH = 77
placeholder_token = "<my-funny-cat-token>"
def pad_embedding(embedding):
return embedding + (
[stable_diffusion.tokenizer.end_of_text] * (MAX_PROMPT_LENGTH - len(embedding))
)
stable_diffusion.tokenizer.add_tokens(placeholder_token)
def assemble_text_dataset(prompts):
prompts = [prompt.format(placeholder_token) for prompt in prompts]
embeddings = [stable_diffusion.tokenizer.encode(prompt) for prompt in prompts]
embeddings = [np.array(pad_embedding(embedding)) for embedding in embeddings]
text_dataset = tf.data.Dataset.from_tensor_slices(embeddings)
text_dataset = text_dataset.shuffle(100, reshuffle_each_iteration=True)
return text_dataset
```
Finally, we zip our datasets together to produce a text-image pair dataset.
```python
def assemble_dataset(urls, prompts):
image_dataset = assemble_image_dataset(urls)
text_dataset = assemble_text_dataset(prompts)
# the image dataset is quite short, so we repeat it to match the length of the
# text prompt dataset
image_dataset = image_dataset.repeat()
# we use the text prompt dataset to determine the length of the dataset. Due to
# the fact that there are relatively few prompts we repeat the dataset 5 times.
# we have found that this anecdotally improves results.
text_dataset = text_dataset.repeat(5)
return tf.data.Dataset.zip((image_dataset, text_dataset))
```
In order to keep our prompts accurate while remaining broadly applicable, we use extremely generic prompt templates.
Let's try this out with some sample images and prompts.
```python
train_ds = assemble_dataset(
urls=[
"https://i.imgur.com/VIedH1X.jpg",
"https://i.imgur.com/eBw13hE.png",
"https://i.imgur.com/oJ3rSg7.png",
"https://i.imgur.com/5mCL6Df.jpg",
"https://i.imgur.com/4Q6WWyI.jpg",
],
prompts=[
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
],
)
```
---
## On the importance of prompt accuracy
During our first attempt at writing this guide we included images of groups of these cat
dolls in our dataset but continued to use the generic prompts listed above.
Our results were anecdotally poor. For example, here's a cat doll Gandalf generated using this method:

It's conceptually close, but it isn't as great as it can be.
In order to remedy this, we began experimenting with splitting our images into images of
singular cat dolls and groups of cat dolls.
Following this split, we came up with new prompts for the group shots.
Training on text-image pairs that accurately represent the content boosted the quality
of our results *substantially*. This speaks to the importance of prompt accuracy.
In addition to separating the images into singular and group images, we also removed some
inaccurate prompts, such as "a dark photo of the {}".
Keeping this in mind, we assemble our final training dataset below:
```python
single_ds = assemble_dataset(
urls=[
"https://i.imgur.com/VIedH1X.jpg",
"https://i.imgur.com/eBw13hE.png",
"https://i.imgur.com/oJ3rSg7.png",
"https://i.imgur.com/5mCL6Df.jpg",
"https://i.imgur.com/4Q6WWyI.jpg",
],
prompts=[
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
],
)
```

Looks great!
Next, we assemble a dataset of groups of our GitHub avatars:
```python
group_ds = assemble_dataset(
urls=[
"https://i.imgur.com/yVmZ2Qa.jpg",
"https://i.imgur.com/JbyFbZJ.jpg",
"https://i.imgur.com/CCubd3q.jpg",
],
prompts=[
"a photo of a group of {}",
"a rendering of a group of {}",
"a cropped photo of the group of {}",
"the photo of a group of {}",
"a photo of a clean group of {}",
"a photo of my group of {}",
"a photo of a cool group of {}",
"a close-up photo of a group of {}",
"a bright photo of the group of {}",
"a cropped photo of a group of {}",
"a photo of the group of {}",
"a good photo of the group of {}",
"a photo of one group of {}",
"a close-up photo of the group of {}",
"a rendition of the group of {}",
"a photo of the clean group of {}",
"a rendition of a group of {}",
"a photo of a nice group of {}",
"a good photo of a group of {}",
"a photo of the nice group of {}",
"a photo of the small group of {}",
"a photo of the weird group of {}",
"a photo of the large group of {}",
"a photo of a cool group of {}",
"a photo of a small group of {}",
],
)
```

Finally, we concatenate the two datasets:
```python
train_ds = single_ds.concatenate(group_ds)
train_ds = train_ds.batch(1).shuffle(
train_ds.cardinality(), reshuffle_each_iteration=True
)
```
---
## Adding a new token to the text encoder
Next, we create a new text encoder for the StableDiffusion model and add our new
embedding for '<my-funny-cat-token>' into the model.
```python
tokenized_initializer = stable_diffusion.tokenizer.encode("cat")[1]
new_weights = stable_diffusion.text_encoder.layers[2].token_embedding(
tf.constant(tokenized_initializer)
)
# Get len of .vocab instead of tokenizer
new_vocab_size = len(stable_diffusion.tokenizer.vocab)
# The embedding layer is the 2nd layer in the text encoder
old_token_weights = stable_diffusion.text_encoder.layers[
2
].token_embedding.get_weights()
old_position_weights = stable_diffusion.text_encoder.layers[
2
].position_embedding.get_weights()
old_token_weights = old_token_weights[0]
new_weights = np.expand_dims(new_weights, axis=0)
new_weights = np.concatenate([old_token_weights, new_weights], axis=0)
```
Let's construct a new TextEncoder and prepare it.
```python
# Have to set download_weights False so we can init (otherwise tries to load weights)
new_encoder = keras_cv.models.stable_diffusion.TextEncoder(
keras_cv.models.stable_diffusion.stable_diffusion.MAX_PROMPT_LENGTH,
vocab_size=new_vocab_size,
download_weights=False,
)
for index, layer in enumerate(stable_diffusion.text_encoder.layers):
# Layer 2 is the embedding layer, so we omit it from our weight-copying
if index == 2:
continue
new_encoder.layers[index].set_weights(layer.get_weights())
new_encoder.layers[2].token_embedding.set_weights([new_weights])
new_encoder.layers[2].position_embedding.set_weights(old_position_weights)
stable_diffusion._text_encoder = new_encoder
stable_diffusion._text_encoder.compile(jit_compile=True)
```
---
## Training
Now we can move on to the exciting part: training!
In Textual Inversion, the only piece of the model that is trained is the embedding vector.
Let's freeze the rest of the model.
```python
stable_diffusion.diffusion_model.trainable = False
stable_diffusion.decoder.trainable = False
stable_diffusion.text_encoder.trainable = True
stable_diffusion.text_encoder.layers[2].trainable = True
def traverse_layers(layer):
if hasattr(layer, "layers"):
for layer in layer.layers:
yield layer
if hasattr(layer, "token_embedding"):
yield layer.token_embedding
if hasattr(layer, "position_embedding"):
yield layer.position_embedding
for layer in traverse_layers(stable_diffusion.text_encoder):
if isinstance(layer, keras.layers.Embedding) or "clip_embedding" in layer.name:
layer.trainable = True
else:
layer.trainable = False
new_encoder.layers[2].position_embedding.trainable = False
```
Let's confirm the proper weights are set to trainable.
```python
all_models = [
stable_diffusion.text_encoder,
stable_diffusion.diffusion_model,
stable_diffusion.decoder,
]
print([[w.shape for w in model.trainable_weights] for model in all_models])
```
<div class="k-default-codeblock">
```
[[TensorShape([49409, 768])], [], []]
```
</div>
---
## Training the new embedding
In order to train the embedding, we need a couple of utilities.
We import a NoiseScheduler from KerasCV, and define the following utilities below:
- `sample_from_encoder_outputs` is a wrapper around the base StableDiffusion image
encoder which samples from the statistical distribution produced by the image
encoder, rather than taking just the mean (like many other SD applications)
- `get_timestep_embedding` produces an embedding for a specified timestep for the
diffusion model
- `get_position_ids` produces a tensor of position IDs for the text encoder (simply the
sequence `0` through `MAX_PROMPT_LENGTH - 1`)
```python
# Remove the top layer from the image encoder, which normally discards the variance and
# returns only the mean; we want both so we can sample from the latent distribution ourselves
training_image_encoder = keras.Model(
stable_diffusion.image_encoder.input,
stable_diffusion.image_encoder.layers[-2].output,
)
def sample_from_encoder_outputs(outputs):
mean, logvar = tf.split(outputs, 2, axis=-1)
logvar = tf.clip_by_value(logvar, -30.0, 20.0)
std = tf.exp(0.5 * logvar)
sample = tf.random.normal(tf.shape(mean))
return mean + std * sample
def get_timestep_embedding(timestep, dim=320, max_period=10000):
half = dim // 2
freqs = tf.math.exp(
-math.log(max_period) * tf.range(0, half, dtype=tf.float32) / half
)
args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs
embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0)
return embedding
def get_position_ids():
return tf.convert_to_tensor([list(range(MAX_PROMPT_LENGTH))], dtype=tf.int32)
```
Next, we implement a `StableDiffusionFineTuner`, which is a subclass of `keras.Model`
that overrides `train_step` to train the token embeddings of our text encoder.
This is the core of the Textual Inversion algorithm.
Abstractly speaking, the train step takes a sample from the output of the frozen SD
image encoder's latent distribution for a training image, adds noise to that sample, and
then passes that noisy sample to the frozen diffusion model.
The hidden state of the diffusion model is the output of the text encoder for the prompt
corresponding to the image.
Our final goal state is that the diffusion model is able to separate the noise from the
sample using the text encoding as hidden state, so our loss is the mean-squared error of
the noise and the output of the diffusion model (which has, ideally, removed the image
latents from the noise).
We compute gradients for only the token embeddings of the text encoder, and in the
train step we zero-out the gradients for all tokens other than the token that we're
learning.
See in-line code comments for more details about the train step.
```python
class StableDiffusionFineTuner(keras.Model):
def __init__(self, stable_diffusion, noise_scheduler, **kwargs):
super().__init__(**kwargs)
self.stable_diffusion = stable_diffusion
self.noise_scheduler = noise_scheduler
def train_step(self, data):
images, embeddings = data
with tf.GradientTape() as tape:
# Sample from the predicted distribution for the training image
latents = sample_from_encoder_outputs(training_image_encoder(images))
# The latents must be downsampled to match the scale of the latents used
# in the training of StableDiffusion. This number is truly just a "magic"
# constant that they chose when training the model.
latents = latents * 0.18215
# Produce random noise in the same shape as the latent sample
noise = tf.random.normal(tf.shape(latents))
batch_dim = tf.shape(latents)[0]
# Pick a random timestep for each sample in the batch
timesteps = tf.random.uniform(
(batch_dim,),
minval=0,
                maxval=self.noise_scheduler.train_timesteps,
dtype=tf.int64,
)
# Add noise to the latents based on the timestep for each sample
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
# Encode the text in the training samples to use as hidden state in the
# diffusion model
encoder_hidden_state = self.stable_diffusion.text_encoder(
[embeddings, get_position_ids()]
)
# Compute timestep embeddings for the randomly-selected timesteps for each
# sample in the batch
timestep_embeddings = tf.map_fn(
fn=get_timestep_embedding,
elems=timesteps,
fn_output_signature=tf.float32,
)
# Call the diffusion model
noise_pred = self.stable_diffusion.diffusion_model(
[noisy_latents, timestep_embeddings, encoder_hidden_state]
)
# Compute the mean-squared error loss and reduce it.
loss = self.compiled_loss(noise_pred, noise)
loss = tf.reduce_mean(loss, axis=2)
loss = tf.reduce_mean(loss, axis=1)
loss = tf.reduce_mean(loss)
# Load the trainable weights and compute the gradients for them
trainable_weights = self.stable_diffusion.text_encoder.trainable_weights
grads = tape.gradient(loss, trainable_weights)
# Gradients are stored in indexed slices, so we have to find the index
# of the slice(s) which contain the placeholder token.
index_of_placeholder_token = tf.reshape(tf.where(grads[0].indices == 49408), ())
condition = grads[0].indices == 49408
condition = tf.expand_dims(condition, axis=-1)
# Override the gradients, zeroing out the gradients for all slices that
# aren't for the placeholder token, effectively freezing the weights for
# all other tokens.
grads[0] = tf.IndexedSlices(
values=tf.where(condition, grads[0].values, 0),
indices=grads[0].indices,
dense_shape=grads[0].dense_shape,
)
self.optimizer.apply_gradients(zip(grads, trainable_weights))
return {"loss": loss}
```
Before we start training, let's take a look at what StableDiffusion produces for our
token.
```python
generated = stable_diffusion.text_to_image(
f"an oil painting of {placeholder_token}", seed=1337, batch_size=3
)
plot_images(generated)
```
<div class="k-default-codeblock">
```
25/25 [==============================] - 19s 314ms/step
```
</div>

As you can see, the model still thinks of our token as a cat, as this was the seed token
we used to initialize our custom token.
Now, to get started with training, we can just `compile()` our model like any other
Keras model. Before doing so, we also instantiate a noise scheduler for training and
configure our training parameters such as learning rate and optimizer.
```python
noise_scheduler = NoiseScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
train_timesteps=1000,
)
trainer = StableDiffusionFineTuner(stable_diffusion, noise_scheduler, name="trainer")
EPOCHS = 50
learning_rate = keras.optimizers.schedules.CosineDecay(
initial_learning_rate=1e-4, decay_steps=train_ds.cardinality() * EPOCHS
)
optimizer = keras.optimizers.Adam(
weight_decay=0.004, learning_rate=learning_rate, epsilon=1e-8, global_clipnorm=10
)
trainer.compile(
optimizer=optimizer,
# We are performing reduction manually in our train step, so none is required here.
loss=keras.losses.MeanSquaredError(reduction="none"),
)
```
To monitor training, we can produce a `keras.callbacks.Callback` to produce a few images
every epoch using our custom token.
We create three callbacks with different prompts so that we can see how they progress
over the course of training. We use a fixed seed so that we can easily see the
progression of the learned token.
```python
class GenerateImages(keras.callbacks.Callback):
def __init__(
self, stable_diffusion, prompt, steps=50, frequency=10, seed=None, **kwargs
):
super().__init__(**kwargs)
self.stable_diffusion = stable_diffusion
self.prompt = prompt
self.seed = seed
self.frequency = frequency
self.steps = steps
def on_epoch_end(self, epoch, logs):
if epoch % self.frequency == 0:
images = self.stable_diffusion.text_to_image(
self.prompt, batch_size=3, num_steps=self.steps, seed=self.seed
)
plot_images(
images,
)
cbs = [
GenerateImages(
stable_diffusion, prompt=f"an oil painting of {placeholder_token}", seed=1337
),
GenerateImages(
stable_diffusion, prompt=f"gandalf the gray as a {placeholder_token}", seed=1337
),
GenerateImages(
stable_diffusion,
prompt=f"two {placeholder_token} getting married, photorealistic, high quality",
seed=1337,
),
]
```
Now, all that is left to do is to call `model.fit()`!
```python
trainer.fit(
train_ds,
epochs=EPOCHS,
callbacks=cbs,
)
```
<div class="k-default-codeblock">
```
Epoch 1/50
50/50 [==============================] - 16s 318ms/step
50/50 [==============================] - 16s 318ms/step
50/50 [==============================] - 16s 318ms/step
250/250 [==============================] - 194s 469ms/step - loss: 0.1533
Epoch 2/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1557
Epoch 3/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1359
Epoch 4/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1693
Epoch 5/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1475
Epoch 6/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1472
Epoch 7/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1533
Epoch 8/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1450
Epoch 9/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1639
Epoch 10/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1351
Epoch 11/50
50/50 [==============================] - 16s 316ms/step
50/50 [==============================] - 16s 316ms/step
50/50 [==============================] - 16s 317ms/step
250/250 [==============================] - 116s 464ms/step - loss: 0.1474
Epoch 12/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1737
Epoch 13/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1427
Epoch 14/50
250/250 [==============================] - 68s 269ms/step - loss: 0.1698
Epoch 15/50
250/250 [==============================] - 68s 270ms/step - loss: 0.1424
Epoch 16/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1339
Epoch 17/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1397
Epoch 18/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1469
Epoch 19/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1649
Epoch 20/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1582
Epoch 21/50
50/50 [==============================] - 16s 315ms/step
50/50 [==============================] - 16s 316ms/step
50/50 [==============================] - 16s 316ms/step
250/250 [==============================] - 116s 462ms/step - loss: 0.1331
Epoch 22/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1319
Epoch 23/50
250/250 [==============================] - 68s 267ms/step - loss: 0.1521
Epoch 24/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1486
Epoch 25/50
250/250 [==============================] - 68s 267ms/step - loss: 0.1449
Epoch 26/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1349
Epoch 27/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1454
Epoch 28/50
250/250 [==============================] - 68s 268ms/step - loss: 0.1394
Epoch 29/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1489
Epoch 30/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1338
Epoch 31/50
50/50 [==============================] - 16s 315ms/step
50/50 [==============================] - 16s 320ms/step
50/50 [==============================] - 16s 315ms/step
250/250 [==============================] - 116s 462ms/step - loss: 0.1328
Epoch 32/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1693
Epoch 33/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1420
Epoch 34/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1255
Epoch 35/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1239
Epoch 36/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1558
Epoch 37/50
250/250 [==============================] - 68s 267ms/step - loss: 0.1527
Epoch 38/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1461
Epoch 39/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1555
Epoch 40/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1515
Epoch 41/50
50/50 [==============================] - 16s 315ms/step
50/50 [==============================] - 16s 315ms/step
50/50 [==============================] - 16s 315ms/step
250/250 [==============================] - 116s 461ms/step - loss: 0.1291
Epoch 42/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1474
Epoch 43/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1908
Epoch 44/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1506
Epoch 45/50
250/250 [==============================] - 68s 267ms/step - loss: 0.1424
Epoch 46/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1601
Epoch 47/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1312
Epoch 48/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1524
Epoch 49/50
250/250 [==============================] - 67s 266ms/step - loss: 0.1477
Epoch 50/50
250/250 [==============================] - 67s 267ms/step - loss: 0.1397
<keras.callbacks.History at 0x7f183aea3eb8>
```
</div>















It's pretty fun to see how the model learns our new token over time. Play around with it
and see how you can tune training parameters and your training dataset to produce the
best images!
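If you want to reuse the learned concept later without retraining, one option is to save the
embedding vector of the placeholder token. Here is a minimal sketch of how that could look
(this is not part of the original workflow; the file name is arbitrary, and it relies on the
fact that the new token was appended as the last row of the token-embedding matrix above):
```python
# Pull the token-embedding matrix out of the fine-tuned text encoder.
token_embeddings = stable_diffusion.text_encoder.layers[2].token_embedding.get_weights()[0]
# The placeholder token was appended as the final row of the embedding matrix.
placeholder_embedding = token_embeddings[-1]
# Save it so it can be re-inserted into a fresh text encoder later.
np.save("placeholder_token_embedding.npy", placeholder_embedding)
```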
---
## Taking the Fine Tuned Model for a Spin
Now for the really fun part. We've learned a token embedding for our custom token, so
now we can generate images with StableDiffusion the same way we would for any other
token!
Here are some fun example prompts to get you started, with sample outputs from our cat
doll token!
```python
generated = stable_diffusion.text_to_image(
f"Gandalf as a {placeholder_token} fantasy art drawn by disney concept artists, "
"golden colour, high quality, highly detailed, elegant, sharp focus, concept art, "
"character concepts, digital painting, mystery, adventure",
batch_size=3,
)
plot_images(generated)
```
<div class="k-default-codeblock">
```
25/25 [==============================] - 8s 316ms/step
```
</div>

```python
generated = stable_diffusion.text_to_image(
f"A masterpiece of a {placeholder_token} crying out to the heavens. "
f"Behind the {placeholder_token}, an dark, evil shade looms over it - sucking the "
"life right out of it.",
batch_size=3,
)
plot_images(generated)
```
<div class="k-default-codeblock">
```
25/25 [==============================] - 8s 314ms/step
```
</div>

```python
generated = stable_diffusion.text_to_image(
f"An evil {placeholder_token}.", batch_size=3
)
plot_images(generated)
```
<div class="k-default-codeblock">
```
25/25 [==============================] - 8s 322ms/step
```
</div>

```python
generated = stable_diffusion.text_to_image(
f"A mysterious {placeholder_token} approaches the great pyramids of egypt.",
batch_size=3,
)
plot_images(generated)
```
<div class="k-default-codeblock">
```
25/25 [==============================] - 8s 315ms/step
```
</div>

---
## Conclusions
Using the Textual Inversion algorithm you can teach StableDiffusion new concepts!
Some possible next steps to follow:
- Try out your own prompts
- Teach the model a style
- Gather a dataset of your favorite pet cat or dog and teach the model about it
# Vector-Quantized Variational Autoencoders
**Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br>
**Date created:** 2021/07/21<br>
**Last modified:** 2021/06/27<br>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/vq_vae.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/vq_vae.py)
**Description:** Training a VQ-VAE for image reconstruction and codebook sampling for generation.
In this example, we develop a Vector Quantized Variational Autoencoder (VQ-VAE).
VQ-VAE was proposed in
[Neural Discrete Representation Learning](https://arxiv.org/abs/1711.00937)
by van den Oord et al. In standard VAEs, the latent space is continuous and is sampled
from a Gaussian distribution. It is generally harder to learn such a continuous
distribution via gradient descent. VQ-VAEs, on the other hand,
operate on a discrete latent space, making the optimization problem simpler. It does so
by maintaining a discrete *codebook*. The codebook is developed by
discretizing the distance between continuous embeddings and the encoded
outputs. These discrete code words are then fed to the decoder, which is trained
to generate reconstructed samples.
For an overview of VQ-VAEs, please refer to the original paper and
[this video explanation](https://www.youtube.com/watch?v=VZFVUrYcig0).
If you need a refresher on VAEs, you can refer to
[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/).
VQ-VAEs are one of the main recipes behind [DALL-E](https://openai.com/blog/dall-e/)
and the idea of a codebook is used in [VQ-GANs](https://arxiv.org/abs/2012.09841).
This example uses implementation details from the
[official VQ-VAE tutorial](https://github.com/deepmind/sonnet/blob/master/sonnet/examples/vqvae_example.ipynb)
from DeepMind.
## Requirements
To run this example, you will need TensorFlow 2.5 or higher, as well as
TensorFlow Probability, which can be installed using the command below.
```python
!pip install -q tensorflow-probability
```
---
## Imports
```python
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_probability as tfp
import tensorflow as tf
```
---
## `VectorQuantizer` layer
First, we implement a custom layer for the vector quantizer, which is the layer in between
the encoder and decoder. Consider an output from the encoder, with shape `(batch_size, height, width,
num_filters)`. The vector quantizer will first flatten this output, only keeping the
`num_filters` dimension intact. So, the shape would become `(batch_size * height * width,
num_filters)`. The rationale behind this is to treat the total number of filters as the size for
the latent embeddings.
An embedding table is then initialized to learn a codebook. We measure the L2 distance
between the flattened encoder outputs and the code words of this codebook. We take the
code that yields the minimum distance, and we apply one-hot encoding to achieve quantization.
This way, the code yielding the minimum distance to the corresponding encoder output is
mapped as one and the remaining codes are mapped as zeros.
Since the quantization process is not differentiable, we apply a
[straight-through estimator](https://www.hassanaskary.com/python/pytorch/deep%20learning/2020/09/19/intuitive-explanation-of-straight-through-estimators.html)
in between the decoder and the encoder, so that the decoder gradients are directly propagated
to the encoder. As the encoder and decoder share the same channel space, the decoder gradients are
still meaningful to the encoder.
```python
class VectorQuantizer(layers.Layer):
def __init__(self, num_embeddings, embedding_dim, beta=0.25, **kwargs):
super().__init__(**kwargs)
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
# The `beta` parameter is best kept between [0.25, 2] as per the paper.
self.beta = beta
# Initialize the embeddings which we will quantize.
w_init = tf.random_uniform_initializer()
self.embeddings = tf.Variable(
initial_value=w_init(
shape=(self.embedding_dim, self.num_embeddings), dtype="float32"
),
trainable=True,
name="embeddings_vqvae",
)
def call(self, x):
# Calculate the input shape of the inputs and
# then flatten the inputs keeping `embedding_dim` intact.
input_shape = tf.shape(x)
flattened = tf.reshape(x, [-1, self.embedding_dim])
# Quantization.
encoding_indices = self.get_code_indices(flattened)
encodings = tf.one_hot(encoding_indices, self.num_embeddings)
quantized = tf.matmul(encodings, self.embeddings, transpose_b=True)
# Reshape the quantized values back to the original input shape
quantized = tf.reshape(quantized, input_shape)
# Calculate vector quantization loss and add that to the layer. You can learn more
# about adding losses to different layers here:
# https://keras.io/guides/making_new_layers_and_models_via_subclassing/. Check
# the original paper to get a handle on the formulation of the loss function.
commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2)
codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2)
self.add_loss(self.beta * commitment_loss + codebook_loss)
# Straight-through estimator.
quantized = x + tf.stop_gradient(quantized - x)
return quantized
def get_code_indices(self, flattened_inputs):
        # Calculate the (squared) L2 distance between the inputs and the code words.
similarity = tf.matmul(flattened_inputs, self.embeddings)
distances = (
tf.reduce_sum(flattened_inputs ** 2, axis=1, keepdims=True)
+ tf.reduce_sum(self.embeddings ** 2, axis=0)
- 2 * similarity
)
# Derive the indices for minimum distances.
encoding_indices = tf.argmin(distances, axis=1)
return encoding_indices
```
**A note on straight-through estimation**:
This line of code does the straight-through estimation part: `quantized = x +
tf.stop_gradient(quantized - x)`. During backpropagation, `(quantized - x)` won't be
included in the computation graph and the gradients obtained for `quantized`
will be copied for `inputs`. Thanks to [this video](https://youtu.be/VZFVUrYcig0?t=1393)
for helping me understand this technique.
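To make the mechanics concrete, here is a tiny, self-contained demonstration (separate from the
VQ-VAE model) that uses `tf.round` as a stand-in for the non-differentiable quantization step:
```python
x = tf.Variable([0.4, 1.6, 2.2])
with tf.GradientTape() as tape:
    rounded = tf.round(x)  # non-differentiable: no useful gradient on its own
    straight_through = x + tf.stop_gradient(rounded - x)
    loss = tf.reduce_sum(straight_through**2)
# The gradient flows to `x` as if `straight_through` were `x` itself:
# d(loss)/dx = 2 * round(x) = [0., 4., 4.]
print(tape.gradient(loss, x))
```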
---
## Encoder and decoder
Now for the encoder and the decoder for the VQ-VAE. We will keep them small so
that their capacity is a good fit for the MNIST dataset. The implementation of the encoder and
decoder come from
[this example](https://keras.io/examples/generative/vae).
Note that activations _other than ReLU_ may not work for the encoder and decoder layers in the
quantization architecture: Leaky ReLU activated layers, for example, have proven difficult to
train, resulting in intermittent loss spikes that the model has trouble recovering from.
```python
def get_encoder(latent_dim=16):
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(
encoder_inputs
)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
encoder_outputs = layers.Conv2D(latent_dim, 1, padding="same")(x)
return keras.Model(encoder_inputs, encoder_outputs, name="encoder")
def get_decoder(latent_dim=16):
latent_inputs = keras.Input(shape=get_encoder(latent_dim).output.shape[1:])
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(
latent_inputs
)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, padding="same")(x)
return keras.Model(latent_inputs, decoder_outputs, name="decoder")
```
---
## Standalone VQ-VAE model
```python
def get_vqvae(latent_dim=16, num_embeddings=64):
vq_layer = VectorQuantizer(num_embeddings, latent_dim, name="vector_quantizer")
encoder = get_encoder(latent_dim)
decoder = get_decoder(latent_dim)
inputs = keras.Input(shape=(28, 28, 1))
encoder_outputs = encoder(inputs)
quantized_latents = vq_layer(encoder_outputs)
reconstructions = decoder(quantized_latents)
return keras.Model(inputs, reconstructions, name="vq_vae")
get_vqvae().summary()
```
<div class="k-default-codeblock">
```
Model: "vq_vae"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 28, 28, 1)] 0
_________________________________________________________________
encoder (Functional) (None, 7, 7, 16) 19856
_________________________________________________________________
vector_quantizer (VectorQuan (None, 7, 7, 16) 1024
_________________________________________________________________
decoder (Functional) (None, 28, 28, 1) 28033
=================================================================
Total params: 48,913
Trainable params: 48,913
Non-trainable params: 0
_________________________________________________________________
```
</div>
Note that the output channels of the encoder should match the `latent_dim` for the vector
quantizer.
---
## Wrapping up the training loop inside `VQVAETrainer`
```python
class VQVAETrainer(keras.models.Model):
def __init__(self, train_variance, latent_dim=32, num_embeddings=128, **kwargs):
super().__init__(**kwargs)
self.train_variance = train_variance
self.latent_dim = latent_dim
self.num_embeddings = num_embeddings
self.vqvae = get_vqvae(self.latent_dim, self.num_embeddings)
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.vq_loss_tracker = keras.metrics.Mean(name="vq_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.vq_loss_tracker,
]
def train_step(self, x):
with tf.GradientTape() as tape:
# Outputs from the VQ-VAE.
reconstructions = self.vqvae(x)
# Calculate the losses.
reconstruction_loss = (
tf.reduce_mean((x - reconstructions) ** 2) / self.train_variance
)
total_loss = reconstruction_loss + sum(self.vqvae.losses)
# Backpropagation.
grads = tape.gradient(total_loss, self.vqvae.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.vqvae.trainable_variables))
# Loss tracking.
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.vq_loss_tracker.update_state(sum(self.vqvae.losses))
# Log results.
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"vqvae_loss": self.vq_loss_tracker.result(),
}
```
---
## Load and preprocess the MNIST dataset
```python
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
x_train_scaled = (x_train / 255.0) - 0.5
x_test_scaled = (x_test / 255.0) - 0.5
data_variance = np.var(x_train / 255.0)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
```
</div>
---
## Train the VQ-VAE model
```python
vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=keras.optimizers.Adam())
vqvae_trainer.fit(x_train_scaled, epochs=30, batch_size=128)
```
<div class="k-default-codeblock">
```
Epoch 1/30
469/469 [==============================] - 18s 6ms/step - loss: 2.2962 - reconstruction_loss: 0.3869 - vqvae_loss: 1.5950
Epoch 2/30
469/469 [==============================] - 3s 6ms/step - loss: 2.2980 - reconstruction_loss: 0.1692 - vqvae_loss: 2.1108
Epoch 3/30
469/469 [==============================] - 3s 6ms/step - loss: 1.1356 - reconstruction_loss: 0.1281 - vqvae_loss: 0.9997
Epoch 4/30
469/469 [==============================] - 3s 6ms/step - loss: 0.6112 - reconstruction_loss: 0.1030 - vqvae_loss: 0.5031
Epoch 5/30
469/469 [==============================] - 3s 6ms/step - loss: 0.4375 - reconstruction_loss: 0.0883 - vqvae_loss: 0.3464
Epoch 6/30
469/469 [==============================] - 3s 6ms/step - loss: 0.3579 - reconstruction_loss: 0.0788 - vqvae_loss: 0.2775
Epoch 7/30
469/469 [==============================] - 3s 5ms/step - loss: 0.3197 - reconstruction_loss: 0.0725 - vqvae_loss: 0.2457
Epoch 8/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2960 - reconstruction_loss: 0.0673 - vqvae_loss: 0.2277
Epoch 9/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2798 - reconstruction_loss: 0.0640 - vqvae_loss: 0.2152
Epoch 10/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2681 - reconstruction_loss: 0.0612 - vqvae_loss: 0.2061
Epoch 11/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2578 - reconstruction_loss: 0.0590 - vqvae_loss: 0.1986
Epoch 12/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2551 - reconstruction_loss: 0.0574 - vqvae_loss: 0.1974
Epoch 13/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2526 - reconstruction_loss: 0.0560 - vqvae_loss: 0.1961
Epoch 14/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2485 - reconstruction_loss: 0.0546 - vqvae_loss: 0.1936
Epoch 15/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2462 - reconstruction_loss: 0.0533 - vqvae_loss: 0.1926
Epoch 16/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2445 - reconstruction_loss: 0.0523 - vqvae_loss: 0.1920
Epoch 17/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2427 - reconstruction_loss: 0.0515 - vqvae_loss: 0.1911
Epoch 18/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2405 - reconstruction_loss: 0.0505 - vqvae_loss: 0.1898
Epoch 19/30
469/469 [==============================] - 3s 6ms/step - loss: 0.2368 - reconstruction_loss: 0.0495 - vqvae_loss: 0.1871
Epoch 20/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2310 - reconstruction_loss: 0.0486 - vqvae_loss: 0.1822
Epoch 21/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2245 - reconstruction_loss: 0.0475 - vqvae_loss: 0.1769
Epoch 22/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2205 - reconstruction_loss: 0.0469 - vqvae_loss: 0.1736
Epoch 23/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2195 - reconstruction_loss: 0.0465 - vqvae_loss: 0.1730
Epoch 24/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2187 - reconstruction_loss: 0.0461 - vqvae_loss: 0.1726
Epoch 25/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2180 - reconstruction_loss: 0.0458 - vqvae_loss: 0.1721
Epoch 26/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2163 - reconstruction_loss: 0.0454 - vqvae_loss: 0.1709
Epoch 27/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2156 - reconstruction_loss: 0.0452 - vqvae_loss: 0.1704
Epoch 28/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2146 - reconstruction_loss: 0.0449 - vqvae_loss: 0.1696
Epoch 29/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2139 - reconstruction_loss: 0.0447 - vqvae_loss: 0.1692
Epoch 30/30
469/469 [==============================] - 3s 5ms/step - loss: 0.2127 - reconstruction_loss: 0.0444 - vqvae_loss: 0.1682
<tensorflow.python.keras.callbacks.History at 0x7f96402f4e50>
```
</div>
---
## Reconstruction results on the test set
```python
def show_subplot(original, reconstructed):
plt.subplot(1, 2, 1)
plt.imshow(original.squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(reconstructed.squeeze() + 0.5)
plt.title("Reconstructed")
plt.axis("off")
plt.show()
trained_vqvae_model = vqvae_trainer.vqvae
idx = np.random.choice(len(x_test_scaled), 10)
test_images = x_test_scaled[idx]
reconstructions_test = trained_vqvae_model.predict(test_images)
for test_image, reconstructed_image in zip(test_images, reconstructions_test):
show_subplot(test_image, reconstructed_image)
```










These results look decent. You are encouraged to play with different hyperparameters
(especially the number of embeddings and the dimensions of the embeddings) and observe how
they affect the results.
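For example, a run with a larger latent space and codebook could be configured as follows
(shown for illustration; it is not executed in this guide):
```python
# More codes and a larger latent dimension give the model more capacity,
# at the cost of slower training and a harder job for the prior model later on.
bigger_vqvae_trainer = VQVAETrainer(data_variance, latent_dim=32, num_embeddings=256)
bigger_vqvae_trainer.compile(optimizer=keras.optimizers.Adam())
# bigger_vqvae_trainer.fit(x_train_scaled, epochs=30, batch_size=128)
```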
---
## Visualizing the discrete codes
```python
encoder = vqvae_trainer.vqvae.get_layer("encoder")
quantizer = vqvae_trainer.vqvae.get_layer("vector_quantizer")
encoded_outputs = encoder.predict(test_images)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
for i in range(len(test_images)):
plt.subplot(1, 2, 1)
plt.imshow(test_images[i].squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(codebook_indices[i])
plt.title("Code")
plt.axis("off")
plt.show()
```










The figure above shows that the discrete codes have been able to capture some
regularities from the dataset. Now, how do we sample from this codebook to create
novel images? Since these codes are discrete and we imposed a categorical distribution
on them, we cannot use them yet to generate anything meaningful until we can generate likely
sequences of codes that we can give to the decoder.
The authors use a PixelCNN to train these codes so that they can be used as powerful priors to
generate novel examples. PixelCNN was proposed in
[Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)
by van den Oord et al. We borrow the implementation from
[this PixelCNN example](https://keras.io/examples/generative/pixelcnn/). It's an autoregressive
generative model where the outputs are conditional on the prior ones. In other words, a PixelCNN
generates an image on a pixel-by-pixel basis. For the purposes of this example, however, its task
is to generate codebook indices instead of pixels directly. The trained VQ-VAE decoder is used
to map the indices generated by the PixelCNN back into the pixel space.
---
## PixelCNN hyperparameters
```python
num_residual_blocks = 2
num_pixelcnn_layers = 2
pixelcnn_input_shape = encoded_outputs.shape[1:-1]
print(f"Input shape of the PixelCNN: {pixelcnn_input_shape}")
```
<div class="k-default-codeblock">
```
Input shape of the PixelCNN: (7, 7)
```
</div>
This input shape represents the reduction in the resolution performed by the encoder. With "same" padding,
this exactly halves the "resolution" of the output shape for each stride-2 convolution layer. So, with these
two layers, we end up with an encoder output tensor of 7x7 on axes 2 and 3, with the first axis as the batch
size and the last axis being the code book embedding size. Since the quantization layer in the autoencoder
maps these 7x7 tensors to indices of the code book, these output layer axis sizes must be matched by the
PixelCNN as the input shape. The task of the PixelCNN for this architecture is to generate _likely_ 7x7
arrangements of codebook indices.
Note that this shape is something to optimize for in larger-sized image domains, along with the code
book sizes. Since the PixelCNN is autoregressive, it needs to pass over each codebook index sequentially
in order to generate novel images from the codebook. Each stride-2 (or rather more correctly a
stride (2, 2)) convolution layer will divide the image generation time by four. Note, however, that there
is probably a lower bound on this part: when the number of codes for the image to reconstruct is too small,
it has insufficient information for the decoder to represent the level of detail in the image, so the
output quality will suffer. This can be amended at least to some extent by using a larger code book.
Since the autoregressive part of the image generation procedure uses codebook indices, there is far less of
a performance penalty on using a larger code book as the lookup time for a larger-sized code from a larger
code book is much smaller in comparison to iterating over a larger sequence of code book indices, although
the size of the code book does impact on the batch size that can pass through the image generation procedure.
Finding the sweet spot for this trade-off can require some architecture tweaking and could very well differ
per dataset.
---
## PixelCNN model
The majority of this code comes from
[this example](https://keras.io/examples/generative/pixelcnn/).
## Notes
Thanks to [Rein van 't Veer](https://github.com/reinvantveer) for improving this example with
copy-edits and minor code clean-ups.
```python
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
class PixelConvLayer(layers.Layer):
def __init__(self, mask_type, **kwargs):
super().__init__()
self.mask_type = mask_type
self.conv = layers.Conv2D(**kwargs)
def build(self, input_shape):
# Build the conv2d layer to initialize kernel variables
self.conv.build(input_shape)
# Use the initialized kernel to create the mask
kernel_shape = self.conv.kernel.get_shape()
self.mask = np.zeros(shape=kernel_shape)
self.mask[: kernel_shape[0] // 2, ...] = 1.0
self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
if self.mask_type == "B":
self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0
def call(self, inputs):
self.conv.kernel.assign(self.conv.kernel * self.mask)
return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
def __init__(self, filters, **kwargs):
super().__init__(**kwargs)
self.conv1 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
self.pixel_conv = PixelConvLayer(
mask_type="B",
filters=filters // 2,
kernel_size=3,
activation="relu",
padding="same",
)
self.conv2 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
def call(self, inputs):
x = self.conv1(inputs)
x = self.pixel_conv(x)
x = self.conv2(x)
return keras.layers.add([inputs, x])
pixelcnn_inputs = keras.Input(shape=pixelcnn_input_shape, dtype=tf.int32)
ohe = tf.one_hot(pixelcnn_inputs, vqvae_trainer.num_embeddings)
x = PixelConvLayer(
mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(ohe)
for _ in range(num_residual_blocks):
x = ResidualBlock(filters=128)(x)
for _ in range(num_pixelcnn_layers):
x = PixelConvLayer(
mask_type="B",
filters=128,
kernel_size=1,
strides=1,
activation="relu",
padding="valid",
)(x)
out = keras.layers.Conv2D(
filters=vqvae_trainer.num_embeddings, kernel_size=1, strides=1, padding="valid"
)(x)
pixel_cnn = keras.Model(pixelcnn_inputs, out, name="pixel_cnn")
pixel_cnn.summary()
```
<div class="k-default-codeblock">
```
Model: "pixel_cnn"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_9 (InputLayer) [(None, 7, 7)] 0
_________________________________________________________________
tf.one_hot (TFOpLambda) (None, 7, 7, 128) 0
_________________________________________________________________
pixel_conv_layer (PixelConvL (None, 7, 7, 128) 802944
_________________________________________________________________
residual_block (ResidualBloc (None, 7, 7, 128) 98624
_________________________________________________________________
residual_block_1 (ResidualBl (None, 7, 7, 128) 98624
_________________________________________________________________
pixel_conv_layer_3 (PixelCon (None, 7, 7, 128) 16512
_________________________________________________________________
pixel_conv_layer_4 (PixelCon (None, 7, 7, 128) 16512
_________________________________________________________________
conv2d_21 (Conv2D) (None, 7, 7, 128) 16512
=================================================================
Total params: 1,049,728
Trainable params: 1,049,728
Non-trainable params: 0
_________________________________________________________________
```
</div>
---
## Prepare data to train the PixelCNN
We will train the PixelCNN to learn a categorical distribution of the discrete codes.
First, we will generate code indices using the encoder and vector quantizer we just
trained. Our training objective will be to minimize the crossentropy loss between these
indices and the PixelCNN outputs. Here, the number of categories is equal to the number
of embeddings present in our codebook (128 in our case). The PixelCNN model is
trained to learn a distribution (as opposed to minimizing the L1/L2 loss), which is where
it gets its generative capabilities from.
```python
# Generate the codebook indices.
encoded_outputs = encoder.predict(x_train_scaled)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
print(f"Shape of the training data for PixelCNN: {codebook_indices.shape}")
```
<div class="k-default-codeblock">
```
Shape of the training data for PixelCNN: (60000, 7, 7)
```
</div>
---
## PixelCNN training
```python
pixel_cnn.compile(
optimizer=keras.optimizers.Adam(3e-4),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
pixel_cnn.fit(
x=codebook_indices,
y=codebook_indices,
batch_size=128,
epochs=30,
validation_split=0.1,
)
```
<div class="k-default-codeblock">
```
Epoch 1/30
422/422 [==============================] - 4s 8ms/step - loss: 1.8550 - accuracy: 0.5959 - val_loss: 1.3127 - val_accuracy: 0.6268
Epoch 2/30
422/422 [==============================] - 3s 7ms/step - loss: 1.2207 - accuracy: 0.6402 - val_loss: 1.1722 - val_accuracy: 0.6482
Epoch 3/30
422/422 [==============================] - 3s 7ms/step - loss: 1.1412 - accuracy: 0.6536 - val_loss: 1.1313 - val_accuracy: 0.6552
Epoch 4/30
422/422 [==============================] - 3s 7ms/step - loss: 1.1060 - accuracy: 0.6601 - val_loss: 1.1058 - val_accuracy: 0.6596
Epoch 5/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0828 - accuracy: 0.6646 - val_loss: 1.1020 - val_accuracy: 0.6603
Epoch 6/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0649 - accuracy: 0.6682 - val_loss: 1.0809 - val_accuracy: 0.6638
Epoch 7/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0515 - accuracy: 0.6710 - val_loss: 1.0712 - val_accuracy: 0.6659
Epoch 8/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0406 - accuracy: 0.6733 - val_loss: 1.0647 - val_accuracy: 0.6671
Epoch 9/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0312 - accuracy: 0.6752 - val_loss: 1.0633 - val_accuracy: 0.6674
Epoch 10/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0235 - accuracy: 0.6771 - val_loss: 1.0554 - val_accuracy: 0.6695
Epoch 11/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0162 - accuracy: 0.6788 - val_loss: 1.0518 - val_accuracy: 0.6694
Epoch 12/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0105 - accuracy: 0.6799 - val_loss: 1.0541 - val_accuracy: 0.6693
Epoch 13/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0050 - accuracy: 0.6811 - val_loss: 1.0481 - val_accuracy: 0.6705
Epoch 14/30
422/422 [==============================] - 3s 7ms/step - loss: 1.0011 - accuracy: 0.6820 - val_loss: 1.0462 - val_accuracy: 0.6709
Epoch 15/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9964 - accuracy: 0.6831 - val_loss: 1.0459 - val_accuracy: 0.6709
Epoch 16/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9922 - accuracy: 0.6840 - val_loss: 1.0444 - val_accuracy: 0.6704
Epoch 17/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9884 - accuracy: 0.6848 - val_loss: 1.0405 - val_accuracy: 0.6725
Epoch 18/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9846 - accuracy: 0.6859 - val_loss: 1.0400 - val_accuracy: 0.6722
Epoch 19/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9822 - accuracy: 0.6864 - val_loss: 1.0394 - val_accuracy: 0.6728
Epoch 20/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9787 - accuracy: 0.6872 - val_loss: 1.0393 - val_accuracy: 0.6717
Epoch 21/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9761 - accuracy: 0.6878 - val_loss: 1.0398 - val_accuracy: 0.6725
Epoch 22/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9733 - accuracy: 0.6884 - val_loss: 1.0376 - val_accuracy: 0.6726
Epoch 23/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9708 - accuracy: 0.6890 - val_loss: 1.0352 - val_accuracy: 0.6732
Epoch 24/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9685 - accuracy: 0.6894 - val_loss: 1.0369 - val_accuracy: 0.6723
Epoch 25/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9660 - accuracy: 0.6901 - val_loss: 1.0384 - val_accuracy: 0.6733
Epoch 26/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9638 - accuracy: 0.6908 - val_loss: 1.0355 - val_accuracy: 0.6728
Epoch 27/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9619 - accuracy: 0.6912 - val_loss: 1.0325 - val_accuracy: 0.6739
Epoch 28/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9594 - accuracy: 0.6917 - val_loss: 1.0334 - val_accuracy: 0.6736
Epoch 29/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9582 - accuracy: 0.6920 - val_loss: 1.0366 - val_accuracy: 0.6733
Epoch 30/30
422/422 [==============================] - 3s 7ms/step - loss: 0.9561 - accuracy: 0.6926 - val_loss: 1.0336 - val_accuracy: 0.6728
<tensorflow.python.keras.callbacks.History at 0x7f95838ef750>
```
</div>
We can improve these scores with more training and hyperparameter tuning.
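For instance, one simple direction (not run here) is to train for more epochs while monitoring
the validation loss with early stopping:
```python
# Illustrative only: train the PixelCNN longer, but stop once the validation loss
# stops improving and restore the best weights seen so far.
early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=5, restore_best_weights=True
)
# pixel_cnn.fit(
#     x=codebook_indices,
#     y=codebook_indices,
#     batch_size=128,
#     epochs=100,
#     validation_split=0.1,
#     callbacks=[early_stopping],
# )
```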
---
## Codebook sampling
Now that our PixelCNN is trained, we can sample distinct codes from its outputs and pass
them to our decoder to generate novel images.
```python
# Create a mini sampler model.
inputs = layers.Input(shape=pixel_cnn.input_shape[1:])
outputs = pixel_cnn(inputs, training=False)
categorical_layer = tfp.layers.DistributionLambda(tfp.distributions.Categorical)
outputs = categorical_layer(outputs)
sampler = keras.Model(inputs, outputs)
```
We now construct a prior to generate images. Here, we will generate 10 images.
```python
# Create an empty array of priors.
batch = 10
priors = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols = priors.shape
# Iterate over the priors because generation has to be done sequentially pixel by pixel.
for row in range(rows):
for col in range(cols):
        # Feed the whole array and retrieve the pixel value probabilities for the next
        # pixel.
probs = sampler.predict(priors)
# Use the probabilities to pick pixel values and append the values to the priors.
priors[:, row, col] = probs[:, row, col]
print(f"Prior shape: {priors.shape}")
```
<div class="k-default-codeblock">
```
Prior shape: (10, 7, 7)
```
</div>
We can now use our decoder to generate the images.
```python
# Perform an embedding lookup.
pretrained_embeddings = quantizer.embeddings
priors_ohe = tf.one_hot(priors.astype("int32"), vqvae_trainer.num_embeddings).numpy()
quantized = tf.matmul(
priors_ohe.astype("float32"), pretrained_embeddings, transpose_b=True
)
quantized = tf.reshape(quantized, (-1, *(encoded_outputs.shape[1:])))
# Generate novel images.
decoder = vqvae_trainer.vqvae.get_layer("decoder")
generated_samples = decoder.predict(quantized)
for i in range(batch):
plt.subplot(1, 2, 1)
plt.imshow(priors[i])
plt.title("Code")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(generated_samples[i].squeeze() + 0.5)
plt.title("Generated Sample")
plt.axis("off")
plt.show()
```










We can enhance the quality of these generated samples by tweaking the PixelCNN.
---
## Additional notes
* After the VQ-VAE paper was initially released, the authors developed an exponential
moving average (EMA) scheme to update the embeddings inside the quantizer. If you're
interested you can check out
[this snippet](https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/nets/vqvae.py#L124);
a simplified sketch of this update is shown after this list.
* To further enhance the quality of the generated samples,
[VQ-VAE-2](https://arxiv.org/abs/1906.00446) was proposed, which follows a cascaded
approach to learn the codebook and to generate the images.
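As promised above, here is a simplified, illustrative sketch of the EMA codebook update. In this
variant the codebook is no longer learned through the `codebook_loss` gradient; instead, each
code word is pulled towards the running average of the encoder outputs assigned to it. The names
`ema_cluster_size` and `ema_dw` are placeholders for EMA state you would maintain yourself (see
the Sonnet snippet linked above for the reference implementation), and `embeddings` is assumed to
have shape `(embedding_dim, num_embeddings)`, as in this example.
```python
def ema_codebook_update(
    embeddings, ema_cluster_size, ema_dw, flattened_inputs, encodings,
    decay=0.99, epsilon=1e-5,
):
    # `encodings`: one-hot code assignments, shape (num_vectors, num_embeddings).
    # `flattened_inputs`: flattened encoder outputs, shape (num_vectors, embedding_dim).
    num_embeddings = tf.cast(tf.shape(encodings)[-1], tf.float32)
    # EMA of how many vectors were assigned to each code.
    ema_cluster_size.assign(
        decay * ema_cluster_size + (1 - decay) * tf.reduce_sum(encodings, axis=0)
    )
    # EMA of the sum of encoder outputs assigned to each code,
    # shape (embedding_dim, num_embeddings) to match `embeddings`.
    dw = tf.matmul(flattened_inputs, encodings, transpose_a=True)
    ema_dw.assign(decay * ema_dw + (1 - decay) * dw)
    # Laplace smoothing avoids dividing by zero for rarely-used codes.
    n = tf.reduce_sum(ema_cluster_size)
    smoothed_size = (ema_cluster_size + epsilon) / (n + num_embeddings * epsilon) * n
    # Each code word becomes the (smoothed) average of its assigned encoder outputs.
    embeddings.assign(ema_dw / smoothed_size[tf.newaxis, :])
```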
"""
Title: Graph attention network (GAT) for node classification
Author: [akensert](https://github.com/akensert)
Date created: 2021/09/13
Last modified: 2021/12/26
Description: An implementation of a Graph Attention Network (GAT) for node classification.
Accelerator: GPU
"""
"""
## Introduction
[Graph neural networks](https://en.wikipedia.org/wiki/Graph_neural_network)
is the prefered neural network architecture for processing data structured as
graphs (for example, social networks or molecule structures), yielding
better results than fully-connected networks or convolutional networks.
In this tutorial, we will implement a specific graph neural network known as a
[Graph Attention Network](https://arxiv.org/abs/1710.10903) (GAT) to predict labels of
scientific papers based on what type of papers cite them (using the
[Cora](https://linqs.soe.ucsc.edu/data) dataset).
### References
For more information on GAT, see the original paper
[Graph Attention Networks](https://arxiv.org/abs/1710.10903) as well as
[DGL's Graph Attention Networks](https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html)
documentation.
"""
"""
### Import packages
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import os
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 6)
pd.set_option("display.max_rows", 6)
np.random.seed(2)
"""
## Obtain the dataset
The preparation of the [Cora dataset](https://linqs.soe.ucsc.edu/data) follows that of the
[Node classification with Graph Neural Networks](https://keras.io/examples/graph/gnn_citations/)
tutorial. Refer to this tutorial for more details on the dataset and exploratory data analysis.
In brief, the Cora dataset consists of two files: `cora.cites` which contains *directed links* (citations) between
papers; and `cora.content` which contains *features* of the corresponding papers and one
of seven labels (the *subject* of the paper).
"""
zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"),
sep="\t",
header=None,
names=["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"],
)
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
print(citations)
print(papers)
"""
### Split the dataset
"""
# Obtain random indices
random_indices = np.random.permutation(range(papers.shape[0]))
# 50/50 split
train_data = papers.iloc[random_indices[: len(random_indices) // 2]]
test_data = papers.iloc[random_indices[len(random_indices) // 2 :]]
"""
### Prepare the graph data
"""
# Obtain paper indices which will be used to gather node states
# from the graph later on when training the model
train_indices = train_data["paper_id"].to_numpy()
test_indices = test_data["paper_id"].to_numpy()
# Obtain ground truth labels corresponding to each paper_id
train_labels = train_data["subject"].to_numpy()
test_labels = test_data["subject"].to_numpy()
# Define graph, namely an edge tensor and a node feature tensor
edges = tf.convert_to_tensor(citations[["target", "source"]])
node_states = tf.convert_to_tensor(papers.sort_values("paper_id").iloc[:, 1:-1])
# Print shapes of the graph
print("Edges shape:\t\t", edges.shape)
print("Node features shape:", node_states.shape)
"""
## Build the model
GAT takes as input a graph (namely an edge tensor and a node feature tensor) and
outputs \[updated\] node states. The node states are, for each target node, neighborhood
aggregated information of *N*-hops (where *N* is decided by the number of layers of the
GAT). Importantly, in contrast to the
[graph convolutional network](https://arxiv.org/abs/1609.02907) (GCN),
the GAT makes use of attention mechanisms
to aggregate information from neighboring nodes (or *source nodes*). In other words, instead of simply
averaging/summing node states from source nodes (*source papers*) to the target node (*target paper*),
GAT first applies normalized attention scores to each source node state and then sums.
"""
"""
### (Multi-head) graph attention layer
The GAT model implements multi-head graph attention layers. The `MultiHeadGraphAttention`
layer is simply a concatenation (or averaging) of multiple graph attention layers
(`GraphAttention`), each with separate learnable weights `W`. The `GraphAttention` layer
does the following:
Consider inputs node states `h^{l}` which are linearly transformed by `W^{l}`, resulting in `z^{l}`.
For each target node:
1. Computes pair-wise attention scores `a^{l}^{T}(z^{l}_{i}||z^{l}_{j})` for all `j`,
resulting in `e_{ij}` (for all `j`).
`||` denotes a concatenation, `_{i}` corresponds to the target node, and `_{j}`
corresponds to a given 1-hop neighbor/source node.
2. Normalizes `e_{ij}` via softmax, so that the sum of incoming edges' attention scores
to the target node (`sum_{k}{e_{norm}_{ik}}`) adds up to 1.
3. Applies attention scores `e_{norm}_{ij}` to `z_{j}`
and adds it to the new target node state `h^{l+1}_{i}`, for all `j`.
"""
class GraphAttention(layers.Layer):
def __init__(
self,
units,
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
**kwargs,
):
super().__init__(**kwargs)
self.units = units
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[0][-1], self.units),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel",
)
self.kernel_attention = self.add_weight(
shape=(self.units * 2, 1),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel_attention",
)
self.built = True
def call(self, inputs):
node_states, edges = inputs
# Linearly transform node states
node_states_transformed = tf.matmul(node_states, self.kernel)
# (1) Compute pair-wise attention scores
node_states_expanded = tf.gather(node_states_transformed, edges)
node_states_expanded = tf.reshape(
node_states_expanded, (tf.shape(edges)[0], -1)
)
attention_scores = tf.nn.leaky_relu(
tf.matmul(node_states_expanded, self.kernel_attention)
)
attention_scores = tf.squeeze(attention_scores, -1)
# (2) Normalize attention scores
attention_scores = tf.math.exp(tf.clip_by_value(attention_scores, -2, 2))
attention_scores_sum = tf.math.unsorted_segment_sum(
data=attention_scores,
segment_ids=edges[:, 0],
num_segments=tf.reduce_max(edges[:, 0]) + 1,
)
attention_scores_sum = tf.repeat(
attention_scores_sum, tf.math.bincount(tf.cast(edges[:, 0], "int32"))
)
attention_scores_norm = attention_scores / attention_scores_sum
# (3) Gather node states of neighbors, apply attention scores and aggregate
node_states_neighbors = tf.gather(node_states_transformed, edges[:, 1])
out = tf.math.unsorted_segment_sum(
data=node_states_neighbors * attention_scores_norm[:, tf.newaxis],
segment_ids=edges[:, 0],
num_segments=tf.shape(node_states)[0],
)
return out
class MultiHeadGraphAttention(layers.Layer):
def __init__(self, units, num_heads=8, merge_type="concat", **kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.merge_type = merge_type
self.attention_layers = [GraphAttention(units) for _ in range(num_heads)]
def call(self, inputs):
atom_features, pair_indices = inputs
# Obtain outputs from each attention head
outputs = [
attention_layer([atom_features, pair_indices])
for attention_layer in self.attention_layers
]
# Concatenate or average the node states from each head
if self.merge_type == "concat":
outputs = tf.concat(outputs, axis=-1)
else:
outputs = tf.reduce_mean(tf.stack(outputs, axis=-1), axis=-1)
# Activate and return node states
return tf.nn.relu(outputs)
"""
### Implement training logic with custom `train_step`, `test_step`, and `predict_step` methods
Notice, the GAT model operates on the entire graph (namely, `node_states` and
`edges`) in all phases (training, validation and testing). Hence, `node_states` and
`edges` are passed to the constructor of the `keras.Model` and used as attributes.
The difference between the phases are the indices (and labels), which gathers
certain outputs (`tf.gather(outputs, indices)`).
"""
class GraphAttentionNetwork(keras.Model):
def __init__(
self,
node_states,
edges,
hidden_units,
num_heads,
num_layers,
output_dim,
**kwargs,
):
super().__init__(**kwargs)
self.node_states = node_states
self.edges = edges
self.preprocess = layers.Dense(hidden_units * num_heads, activation="relu")
self.attention_layers = [
MultiHeadGraphAttention(hidden_units, num_heads) for _ in range(num_layers)
]
self.output_layer = layers.Dense(output_dim)
def call(self, inputs):
node_states, edges = inputs
x = self.preprocess(node_states)
for attention_layer in self.attention_layers:
x = attention_layer([x, edges]) + x
outputs = self.output_layer(x)
return outputs
def train_step(self, data):
indices, labels = data
with tf.GradientTape() as tape:
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Compute gradients
grads = tape.gradient(loss, self.trainable_weights)
# Apply gradients (update weights)
optimizer.apply_gradients(zip(grads, self.trainable_weights))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}
def predict_step(self, data):
indices = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute probabilities
return tf.nn.softmax(tf.gather(outputs, indices))
def test_step(self, data):
indices, labels = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}
"""
### Train and evaluate
"""
# Define hyper-parameters
HIDDEN_UNITS = 100
NUM_HEADS = 8
NUM_LAYERS = 3
OUTPUT_DIM = len(class_values)
NUM_EPOCHS = 100
BATCH_SIZE = 256
VALIDATION_SPLIT = 0.1
LEARNING_RATE = 3e-1
MOMENTUM = 0.9
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.SGD(LEARNING_RATE, momentum=MOMENTUM)
accuracy_fn = keras.metrics.SparseCategoricalAccuracy(name="acc")
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", min_delta=1e-5, patience=5, restore_best_weights=True
)
# Build model
gat_model = GraphAttentionNetwork(
node_states, edges, HIDDEN_UNITS, NUM_HEADS, NUM_LAYERS, OUTPUT_DIM
)
# Compile model
gat_model.compile(loss=loss_fn, optimizer=optimizer, metrics=[accuracy_fn])
gat_model.fit(
x=train_indices,
y=train_labels,
validation_split=VALIDATION_SPLIT,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
callbacks=[early_stopping],
verbose=2,
)
_, test_accuracy = gat_model.evaluate(x=test_indices, y=test_labels, verbose=0)
print("--" * 38 + f"\nTest Accuracy {test_accuracy*100:.1f}%")
"""
### Predict (probabilities)
"""
test_probs = gat_model.predict(x=test_indices)
mapping = {v: k for (k, v) in class_idx.items()}
for i, (probs, label) in enumerate(zip(test_probs[:10], test_labels[:10])):
print(f"Example {i+1}: {mapping[label]}")
for j, c in zip(probs, class_idx.keys()):
print(f"\tProbability of {c: <24} = {j*100:7.3f}%")
print("---" * 20)
"""
## Conclusions
The results look OK! The GAT model seems to correctly predict the subjects of the papers,
based on what they cite, about 80% of the time. Further improvements could be
made by fine-tuning the hyper-parameters of the GAT. For instance, try changing the number of layers,
the number of hidden units, or the optimizer/learning rate; add regularization (e.g., dropout);
or modify the preprocessing step. We could also try to implement *self-loops*
(i.e., paper X cites paper X) and/or make the graph *undirected*.
"""
| keras-io/examples/graph/gat_node_classification.py/0 | {
"file_path": "keras-io/examples/graph/gat_node_classification.py",
"repo_id": "keras-io",
"token_count": 5330
} | 77 |
<jupyter_start><jupyter_text>Simple custom layer example: Antirectifier**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2016/01/06**Last modified:** 2023/11/20**Description:** Demonstration of custom layer creation. IntroductionThis example shows how to create custom layers, using the Antirectifier layer (originally proposed as a Keras example script in January 2016), an alternativeto ReLU. Instead of zeroing-out the negative part of the input, it splits the negative and positive parts and returns the concatenation of the absolute valueof both. This avoids loss of information, at the cost of an increase in dimensionality. To fix the dimensionality increase, we linearly combine thefeatures back to a space of the original size. Setup<jupyter_code>import keras
from keras import layers
from keras import ops<jupyter_output><empty_output><jupyter_text>The Antirectifier layerTo implement a custom layer:- Create the state variables via `add_weight()` in `__init__` or `build()`.Similarly, you can also create sublayers.- Implement the `call()` method, taking the layer's input tensor(s) andreturn the output tensor(s).- Optionally, you can also enable serialization by implementing `get_config()`,which returns a configuration dictionary.See also the guide[Making new layers and models via subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/).<jupyter_code>class Antirectifier(layers.Layer):
def __init__(self, initializer="he_normal", **kwargs):
super().__init__(**kwargs)
self.initializer = keras.initializers.get(initializer)
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer=self.initializer,
name="kernel",
trainable=True,
)
def call(self, inputs):
inputs -= ops.mean(inputs, axis=-1, keepdims=True)
pos = ops.relu(inputs)
neg = ops.relu(-inputs)
concatenated = ops.concatenate([pos, neg], axis=-1)
mixed = ops.matmul(concatenated, self.kernel)
return mixed
def get_config(self):
# Implement get_config to enable serialization. This is optional.
base_config = super().get_config()
config = {"initializer": keras.initializers.serialize(self.initializer)}
return dict(list(base_config.items()) + list(config.items()))<jupyter_output><empty_output><jupyter_text>Let's test-drive it on MNIST<jupyter_code># Training parameters
batch_size = 128
num_classes = 10
epochs = 20
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784)
x_test = x_test.reshape(-1, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Build the model
model = keras.Sequential(
[
keras.Input(shape=(784,)),
layers.Dense(256),
Antirectifier(),
layers.Dense(256),
Antirectifier(),
layers.Dropout(0.5),
layers.Dense(10),
]
)
# Compile the model
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.RMSprop(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.15)
# Test the model
model.evaluate(x_test, y_test)<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/antirectifier.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/antirectifier.ipynb",
"repo_id": "keras-io",
"token_count": 1321
} | 78 |
<jupyter_start><jupyter_text>Trainer pattern**Author:** [nkovela1](https://nkovela1.github.io/)**Date created:** 2022/09/19**Last modified:** 2022/09/26**Description:** Guide on how to share a custom training step across multiple Keras models. IntroductionThis example shows how to create a custom training step using the "Trainer pattern",which can then be shared across multiple Keras models. This pattern overrides the`train_step()` method of the `keras.Model` class, allowing for training loopsbeyond plain supervised learning.The Trainer pattern can also easily be adapted to more complex models with largercustom training steps, such as[this end-to-end GAN model](https://keras.io/guides/customizing_what_happens_in_fit/wrapping-up-an-endtoend-gan-example),by putting the custom training step in the Trainer class definition. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
# Load MNIST dataset and standardize the data
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0<jupyter_output><empty_output><jupyter_text>Define the Trainer classA custom training and evaluation step can be created by overridingthe `train_step()` and `test_step()` method of a `Model` subclass:<jupyter_code>class MyTrainer(keras.Model):
def __init__(self, model):
super().__init__()
self.model = model
# Create loss and metrics here.
self.loss_fn = keras.losses.SparseCategoricalCrossentropy()
self.accuracy_metric = keras.metrics.SparseCategoricalAccuracy()
@property
def metrics(self):
# List metrics here.
return [self.accuracy_metric]
def train_step(self, data):
x, y = data
with tf.GradientTape() as tape:
y_pred = self.model(x, training=True) # Forward pass
# Compute loss value
loss = self.loss_fn(y, y_pred)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics
for metric in self.metrics:
metric.update_state(y, y_pred)
# Return a dict mapping metric names to current value.
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):
x, y = data
# Inference step
y_pred = self.model(x, training=False)
# Update metrics
for metric in self.metrics:
metric.update_state(y, y_pred)
return {m.name: m.result() for m in self.metrics}
def call(self, x):
# Equivalent to `call()` of the wrapped keras.Model
x = self.model(x)
return x<jupyter_output><empty_output><jupyter_text>Define multiple models to share the custom training stepLet's define two different models that can share our Trainer class and its custom `train_step()`:<jupyter_code># A model defined using Sequential API
model_a = keras.models.Sequential(
[
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(256, activation="relu"),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation="softmax"),
]
)
# A model defined using Functional API
func_input = keras.Input(shape=(28, 28, 1))
x = keras.layers.Flatten(input_shape=(28, 28))(func_input)
x = keras.layers.Dense(512, activation="relu")(x)
x = keras.layers.Dropout(0.4)(x)
func_output = keras.layers.Dense(10, activation="softmax")(x)
model_b = keras.Model(func_input, func_output)<jupyter_output><empty_output><jupyter_text>Create Trainer class objects from the models<jupyter_code>trainer_1 = MyTrainer(model_a)
trainer_2 = MyTrainer(model_b)<jupyter_output><empty_output><jupyter_text>Compile and fit the models to the MNIST dataset<jupyter_code>trainer_1.compile(optimizer=keras.optimizers.SGD())
trainer_1.fit(
x_train, y_train, epochs=5, batch_size=64, validation_data=(x_test, y_test)
)
trainer_2.compile(optimizer=keras.optimizers.Adam())
trainer_2.fit(
x_train, y_train, epochs=5, batch_size=64, validation_data=(x_test, y_test)
)<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/trainer_pattern.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/trainer_pattern.ipynb",
"repo_id": "keras-io",
"token_count": 1650
} | 79 |
"""
Title: Data Parallel Training with KerasNLP and tf.distribute
Author: Anshuman Mishra
Date created: 2023/07/07
Last modified: 2023/07/07
Description: Data Parallel training with KerasNLP and tf.distribute.
Accelerator: GPU
"""
"""
## Introduction
Distributed training is a technique used to train deep learning models on multiple devices
or machines simultaneously. It helps to reduce training time and allows for training larger
models with more data. KerasNLP is a library that provides tools and utilities for natural
language processing tasks, including distributed training.
In this tutorial, we will use KerasNLP to train a BERT-based masked language model (MLM)
on the wikitext-2 dataset (a 2 million word dataset of Wikipedia articles). The MLM task
involves predicting the masked words in a sentence, which helps the model learn contextual
representations of words.
This guide focuses on data parallelism, in particular synchronous data parallelism, where
each accelerator (a GPU or TPU) holds a complete replica of the model, and sees a
different partial batch of the input data. Partial gradients are computed on each device,
aggregated, and used to compute a global gradient update.
Specifically, this guide teaches you how to use the `tf.distribute` API to train Keras
models on multiple GPUs, with minimal changes to your code, in the following two setups:
- On multiple GPUs (typically 2 to 8) installed on a single machine (single host,
multi-device training). This is the most common setup for researchers and small-scale
industry workflows.
- On a cluster of many machines, each hosting one or multiple GPUs (multi-worker
distributed training). This is a good setup for large-scale industry workflows, e.g.
training high-resolution text summarization models on billion word datasets on 20-100 GPUs.
"""
"""shell
pip install -q --upgrade keras-nlp
pip install -q --upgrade keras # Upgrade to Keras 3.
"""
"""
## Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
import keras_nlp
"""
Before we start any training, let's configure our single GPU to show up as two logical
devices.
When you are training with two or more physical GPUs, this is totally unnecessary. This
is just a trick to show real distributed training on the default Colab GPU runtime,
which has only one GPU available.
"""
"""shell
nvidia-smi --query-gpu=memory.total --format=csv,noheader
"""
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.set_logical_device_configuration(
physical_devices[0],
[
tf.config.LogicalDeviceConfiguration(memory_limit=15360 // 2),
tf.config.LogicalDeviceConfiguration(memory_limit=15360 // 2),
],
)
logical_devices = tf.config.list_logical_devices("GPU")
logical_devices
EPOCHS = 3
"""
To do single-host, multi-device synchronous training with a Keras model, you would use
the `tf.distribute.MirroredStrategy` API. Here's how it works:
- Instantiate a `MirroredStrategy`, optionally configuring which specific devices you
want to use (by default the strategy will use all GPUs available).
- Use the strategy object to open a scope, and within this scope, create all the Keras
objects you need that contain variables. Typically, that means **creating & compiling the
model** inside the distribution scope.
- Train the model via `fit()` as usual.
"""
strategy = tf.distribute.MirroredStrategy()
print(f"Number of devices: {strategy.num_replicas_in_sync}")
"""
Base batch size and learning rate
"""
base_batch_size = 32
base_learning_rate = 1e-4
"""
Calculate scaled batch size and learning rate
"""
scaled_batch_size = base_batch_size * strategy.num_replicas_in_sync
scaled_learning_rate = base_learning_rate * strategy.num_replicas_in_sync
"""
Now, we need to download and preprocess the wikitext-2 dataset. This dataset will be
used for pretraining the BERT model. We will filter out short lines to ensure that the
data has enough context for training.
"""
keras.utils.get_file(
origin="https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip",
extract=True,
)
wiki_dir = os.path.expanduser("~/.keras/datasets/wikitext-2/")
# Load wikitext-2 and filter out short lines.
wiki_train_ds = (
tf.data.TextLineDataset(
wiki_dir + "wiki.train.tokens",
)
.filter(lambda x: tf.strings.length(x) > 100)
.shuffle(buffer_size=500)
.batch(scaled_batch_size)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
wiki_val_ds = (
tf.data.TextLineDataset(wiki_dir + "wiki.valid.tokens")
.filter(lambda x: tf.strings.length(x) > 100)
.shuffle(buffer_size=500)
.batch(scaled_batch_size)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
wiki_test_ds = (
tf.data.TextLineDataset(wiki_dir + "wiki.test.tokens")
.filter(lambda x: tf.strings.length(x) > 100)
.shuffle(buffer_size=500)
.batch(scaled_batch_size)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
"""
In the above code, we download the wikitext-2 dataset and extract it. Then, we define
three datasets: wiki_train_ds, wiki_val_ds, and wiki_test_ds. These datasets are
filtered to remove short lines and are batched for efficient training.
"""
"""
It's a common practice to use a decayed learning rate in NLP training/tuning. We'll
use the `PolynomialDecay` schedule here.
"""
total_training_steps = sum(1 for _ in wiki_train_ds.as_numpy_iterator()) * EPOCHS
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=scaled_learning_rate,
decay_steps=total_training_steps,
end_learning_rate=0.0,
)
class PrintLR(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
print(
f"\nLearning rate for epoch {epoch + 1} is {model_dist.optimizer.learning_rate.numpy()}"
)
"""
Let's also make a callback to TensorBoard. This will enable visualization of different
metrics while we train the model in a later part of this tutorial. We put all the callbacks
together as follows:
"""
callbacks = [
tf.keras.callbacks.TensorBoard(log_dir="./logs"),
PrintLR(),
]
print(tf.config.list_physical_devices("GPU"))
"""
With the datasets prepared, we now initialize and compile our model and optimizer within
the `strategy.scope()`:
"""
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model_dist = keras_nlp.models.BertMaskedLM.from_preset("bert_tiny_en_uncased")
    # This line just sets the pooled_dense layer as non-trainable; we do this to avoid
# warnings of this layer being unused
model_dist.get_layer("bert_backbone").get_layer("pooled_dense").trainable = False
model_dist.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=tf.keras.optimizers.AdamW(learning_rate=scaled_learning_rate),
weighted_metrics=[keras.metrics.SparseCategoricalAccuracy()],
jit_compile=False,
)
model_dist.fit(
wiki_train_ds, validation_data=wiki_val_ds, epochs=EPOCHS, callbacks=callbacks
)
"""
After fitting our model under the scope, we evaluate it normally!
"""
model_dist.evaluate(wiki_test_ds)
"""
For distributed training across multiple machines (as opposed to training that only leverages
multiple devices on a single machine), there are two distribution strategies you
could use: `MultiWorkerMirroredStrategy` and `ParameterServerStrategy`:
- `tf.distribute.MultiWorkerMirroredStrategy` implements a synchronous CPU/GPU
multi-worker solution to work with Keras-style model building and training loop,
using synchronous reduction of gradients across the replicas.
- `tf.distribute.experimental.ParameterServerStrategy` implements an asynchronous CPU/GPU
multi-worker solution, where the parameters are stored on parameter servers, and
workers update the gradients to parameter servers asynchronously.
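As a rough sketch (not executed in this guide), a `MultiWorkerMirroredStrategy` run reuses the
same model-building code as the single-host example above; the main addition is a `TF_CONFIG`
environment variable describing the cluster. The host addresses and the worker index below are
placeholders that each worker would set for itself:
```python
import json
import os
# Describe the cluster; every worker runs the same program with its own `task.index`.
os.environ["TF_CONFIG"] = json.dumps(
    {
        "cluster": {"worker": ["host1:12345", "host2:12345"]},
        "task": {"type": "worker", "index": 0},
    }
)
# Create the strategy early, before other TensorFlow operations.
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
    model_dist = keras_nlp.models.BertMaskedLM.from_preset("bert_tiny_en_uncased")
    model_dist.compile(
        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        optimizer=tf.keras.optimizers.AdamW(learning_rate=scaled_learning_rate),
        weighted_metrics=[keras.metrics.SparseCategoricalAccuracy()],
        jit_compile=False,
    )
# Every worker calls `fit()`; gradients are synchronized across workers at each step.
model_dist.fit(wiki_train_ds, validation_data=wiki_val_ds, epochs=EPOCHS)
```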
### Further reading
1. [TensorFlow distributed training guide](https://www.tensorflow.org/guide/distributed_training)
2. [Tutorial on multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)
3. [MirroredStrategy docs](https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy)
4. [MultiWorkerMirroredStrategy docs](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy)
5. [Distributed training in tf.keras with Weights & Biases](https://towardsdatascience.com/distributed-training-in-tf-keras-with-w-b-ccf021f9322e)
"""
| keras-io/examples/nlp/data_parallel_training_with_keras_nlp.py/0 | {
"file_path": "keras-io/examples/nlp/data_parallel_training_with_keras_nlp.py",
"repo_id": "keras-io",
"token_count": 2718
} | 80 |
<jupyter_start><jupyter_text>Parameter-efficient fine-tuning of GPT-2 with LoRA**Author:** [Abheesht Sharma](https://github.com/abheesht17/), [Matthew Watson](https://github.com/mattdangerw/)**Date created:** 2023/05/27**Last modified:** 2023/05/27**Description:** Use KerasNLP to fine-tune a GPT-2 LLM with LoRA. IntroductionLarge Language Models (LLMs) have been shown to be effective at a variety of NLPtasks. An LLM is first pre-trained on a large corpus of text in aself-supervised fashion. Pre-training helps LLMs learn general-purpose knowledge,such as statistical relationships between words. An LLM can then be fine-tunedon a downstream task of interest (such as sentiment analysis).However, LLMs are extremely large in size, and we don't need to train all theparameters in the model while fine-tuning, especially because datasets on whichthe model is fine-tuned are relatively small. Another way of saying this isthat LLMs are over-parametrized for fine-tuning. This is where[Low-Rank Adaptation (LoRA)](https://arxiv.org/abs/2106.09685) comes in; itsignificantly reduces the number of trainable parameters. This results in adecrease in training time and GPU memory usage, while maintaining the qualityof the outputs.In this example, we will explain LoRA in technical terms, show how the technicalexplanation translates to code, hack KerasNLP's[GPT-2 model](https://keras.io/api/keras_nlp/models/gpt2/) and fine-tuneit on the next token prediction task using LoRA. We will compare LoRA GPT-2with a fully fine-tuned GPT-2 in terms of the quality of the generated text,training time and GPU memory usage.Note: This example runs on the TensorFlow backend purely for the`tf.config.experimental.get_memory_info` API to easily plot memory usage.Outside of the memory usage callback, this example will run on `jax` and `torch`backends. SetupBefore we start implementing the pipeline, let's install and import all thelibraries we need. We'll be using the KerasNLP library.Secondly, let's enable mixed precision training. This will help us reduce thetraining time.<jupyter_code>!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras_nlp
import keras
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import time
keras.mixed_precision.set_global_policy("mixed_float16")<jupyter_output><empty_output><jupyter_text>Let's also define our hyperparameters.<jupyter_code># General hyperparameters
BATCH_SIZE = 32
NUM_BATCHES = 500
EPOCHS = 1 # Can be set to a higher value for better results
MAX_SEQUENCE_LENGTH = 128
MAX_GENERATION_LENGTH = 200
GPT2_PRESET = "gpt2_base_en"
# LoRA-specific hyperparameters
RANK = 4
ALPHA = 32.0<jupyter_output><empty_output><jupyter_text>DatasetLet's load a Reddit dataset. We will fine-tune both the GPT-2 model and theLoRA GPT-2 model on a subset of this dataset. The aim is to produce text similarin style to Reddit posts.<jupyter_code>reddit_ds = tfds.load("reddit_tifu", split="train", as_supervised=True)<jupyter_output><empty_output><jupyter_text>The dataset has two fields: `document` and `title`.<jupyter_code>for document, title in reddit_ds:
print(document.numpy())
print(title.numpy())
break<jupyter_output><empty_output><jupyter_text>We'll now batch the dataset and retain only the `document` field because we arefine-tuning the model on the next word prediction task. Take a subsetof the dataset for the purpose of this example.<jupyter_code>train_ds = (
reddit_ds.map(lambda document, _: document)
.batch(BATCH_SIZE)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
train_ds = train_ds.take(NUM_BATCHES)<jupyter_output><empty_output><jupyter_text>Helper functionsBefore we begin fine-tuning the models, let's define a few helper functions andclasses. Callback for tracking GPU memory usageWe'll define a custom callback function which tracks GPU memory usage. Thecallback function uses TensorFlow's `tf.config.experimental.get_memory_info`API.Here, we assume that we are using a single GPU, `GPU:0`.<jupyter_code>class GPUMemoryCallback(keras.callbacks.Callback):
def __init__(
self,
target_batches,
print_stats=False,
**kwargs,
):
super().__init__(**kwargs)
self.target_batches = target_batches
self.print_stats = print_stats
self.memory_usage = []
self.labels = []
def _compute_memory_usage(self):
memory_stats = tf.config.experimental.get_memory_info("GPU:0")
# Convert bytes to GB and store in list.
peak_usage = round(memory_stats["peak"] / (2**30), 3)
self.memory_usage.append(peak_usage)
def on_epoch_begin(self, epoch, logs=None):
self._compute_memory_usage()
self.labels.append(f"epoch {epoch} start")
def on_train_batch_begin(self, batch, logs=None):
if batch in self.target_batches:
self._compute_memory_usage()
self.labels.append(f"batch {batch}")
def on_epoch_end(self, epoch, logs=None):
self._compute_memory_usage()
self.labels.append(f"epoch {epoch} end")<jupyter_output><empty_output><jupyter_text>Function for text generationHere is a helper function to generate text.<jupyter_code>def generate_text(model, input_text, max_length=200):
start = time.time()
output = model.generate(input_text, max_length=max_length)
print("\nOutput:")
print(output)
end = time.time()
print(f"Total Time Elapsed: {end - start:.2f}s")<jupyter_output><empty_output><jupyter_text>Define optimizer and lossWe will use AdamW optimizer and cross-entropy loss for training both models.<jupyter_code>def get_optimizer_and_loss():
optimizer = keras.optimizers.AdamW(
learning_rate=5e-5,
weight_decay=0.01,
epsilon=1e-6,
global_clipnorm=1.0, # Gradient clipping.
)
# Exclude layernorm and bias terms from weight decay.
optimizer.exclude_from_weight_decay(var_names=["bias"])
optimizer.exclude_from_weight_decay(var_names=["gamma"])
optimizer.exclude_from_weight_decay(var_names=["beta"])
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
return optimizer, loss<jupyter_output><empty_output><jupyter_text>Fine-tune GPT-2Let's load the model and preprocessor first. We use a sequence length of 128instead of 1024 (which is the default sequence length). This will limit ourability to predict long sequences, but will allow us to run this example quicklyon Colab.<jupyter_code>preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=MAX_SEQUENCE_LENGTH,
)
gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en", preprocessor=preprocessor
)
gpt2_lm.summary()<jupyter_output><empty_output><jupyter_text>Initialize the GPU memory tracker callback object, and compile the model. Weuse the Adam optimizer with a linearly decaying learning rate.<jupyter_code>gpu_memory_callback = GPUMemoryCallback(
target_batches=[5, 10, 25, 50, 100, 150, 200, 300, 400, 500],
print_stats=True,
)
optimizer, loss = get_optimizer_and_loss()
gpt2_lm.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)<jupyter_output><empty_output><jupyter_text>We are all set to train the model!<jupyter_code>gpt2_lm.fit(train_ds, epochs=EPOCHS, callbacks=[gpu_memory_callback])
gpt2_lm_memory_usage = gpu_memory_callback.memory_usage<jupyter_output><empty_output><jupyter_text>As a final step, let's generate some text. We will harness the power of XLA. Thefirst call to `generate()` will be slow because of XLA compilation, butsubsequent calls will be super-fast. :)<jupyter_code>generate_text(gpt2_lm, "I like basketball", max_length=MAX_GENERATION_LENGTH)
generate_text(gpt2_lm, "That Italian restaurant is", max_length=MAX_GENERATION_LENGTH)<jupyter_output><empty_output><jupyter_text>LoRA GPT-2In this section, we discuss the technical details of LoRA, build a LoRA GPT-2model, fine-tune it and generate text. What exactly is LoRA?LoRA is a parameter-efficient fine-tuning technique for LLMs. It freezes theweights of the LLM, and injects trainable rank-decomposition matrices. Let'sunderstand this more clearly.Assume we have an `n x n` pre-trained dense layer (or weight matrix), `W0`. Weinitialize two dense layers, `A` and `B`, of shapes `n x rank`, and `rank x n`,respectively. `rank` is much smaller than `n`. In the paper, values between 1and 4 are shown to work well. LoRA equationThe original equation is `output = W0x + b0`, where `x` is the input, `W0` and`b0` are the weight matrix and bias terms of the original dense layer (frozen).The LoRA equation is: `output = W0x + b0 + BAx`, where `A` and `B` are therank-decomposition matrices.LoRA is based on the idea that updates to the weights of the pre-trainedlanguage model have a low "intrinsic rank" since pre-trained language models areover-parametrized. Predictive performance of full fine-tuning can be replicatedeven by constraining `W0`'s updates to low-rank decomposition matrices. Number of trainable parametersLet's do some quick math. Suppose `n` is 768, and `rank` is 4. `W0` has`768 x 768 = 589,824` parameters, whereas the LoRA layers, `A` and `B` togetherhave `768 x 4 + 4 x 768 = 6,144` parameters. So, for the dense layer, we go from`589,824` trainable parameters to `6,144` trainable parameters! Why does LoRA reduce memory footprint?Even though the total number of parameters increase (since we are adding LoRAlayers), the memory footprint reduces, because the number of trainableparameters reduces. Let's dive deeper into this.The memory usage of a model can be split into four parts:- Model memory: This is the memory required to store the model weights. Thiswill be slightly higher for LoRA than GPT-2.- Forward pass memory: This mostly depends on batch size, sequence length, etc.We keep this constant for both models for a fair comparison.- Backward pass memory: This is the memory required to store the gradients.Note that the gradients are computed only for the trainable parameters.- Optimizer memory: This is the memory required to store the optimizer state.For example, the Adam optimizer stores the "1st moment vectors" and"2nd moment vectors" for the trainable parameters.Since, with LoRA, there is a huge reduction in the number of trainableparameters, the optimizer memory and the memory required to store the gradientsfor LoRA is much less than GPT-2. This is where most of the memory savingshappen. Why is LoRA so popular?- Reduces GPU memory usage;- Faster training; and- No additional inference latency. Create LoRA layerAccording to the technical description above, let's create a LoRA layer. Ina transformer model, the LoRA layer is created and injected for the query andvalue projection matrices. In `keras.layers.MultiHeadAttention`, the query/valueprojection layers are `keras.layers.EinsumDense` layers.<jupyter_code>import math
class LoraLayer(keras.layers.Layer):
def __init__(
self,
original_layer,
rank=8,
alpha=32,
trainable=False,
**kwargs,
):
# We want to keep the name of this layer the same as the original
# dense layer.
original_layer_config = original_layer.get_config()
name = original_layer_config["name"]
kwargs.pop("name", None)
super().__init__(name=name, trainable=trainable, **kwargs)
self.rank = rank
self.alpha = alpha
self._scale = alpha / rank
self._num_heads = original_layer_config["output_shape"][-2]
self._hidden_dim = self._num_heads * original_layer_config["output_shape"][-1]
# Layers.
# Original dense layer.
self.original_layer = original_layer
# No matter whether we are training the model or are in inference mode,
# this layer should be frozen.
self.original_layer.trainable = False
# LoRA dense layers.
self.A = keras.layers.Dense(
units=rank,
use_bias=False,
# Note: the original paper mentions that normal distribution was
# used for initialization. However, the official LoRA implementation
# uses "Kaiming/He Initialization".
kernel_initializer=keras.initializers.VarianceScaling(
scale=math.sqrt(5), mode="fan_in", distribution="uniform"
),
trainable=trainable,
            name="lora_A",
)
# B has the same `equation` and `output_shape` as the original layer.
# `equation = abc,cde->abde`, where `a`: batch size, `b`: sequence
# length, `c`: `hidden_dim`, `d`: `num_heads`,
# `e`: `hidden_dim//num_heads`. The only difference is that in layer `B`,
# `c` represents `rank`.
self.B = keras.layers.EinsumDense(
equation=original_layer_config["equation"],
output_shape=original_layer_config["output_shape"],
kernel_initializer="zeros",
trainable=trainable,
            name="lora_B",
)
def call(self, inputs):
original_output = self.original_layer(inputs)
if self.trainable:
# If we are fine-tuning the model, we will add LoRA layers' output
# to the original layer's output.
lora_output = self.B(self.A(inputs)) * self._scale
return original_output + lora_output
# If we are in inference mode, we "merge" the LoRA layers' weights into
# the original layer's weights - more on this in the text generation
# section!
return original_output<jupyter_output><empty_output><jupyter_text>Inject LoRA layer into the modelWe will now hack the original GPT-2 model and inject LoRA layers into it. Let'sdo a couple of things before doing that:- Delete previous model;- Reset "peak" GPU memory usage using `tf.config.experimental.reset_memory_stats`;- Load a new GPT-2 model.<jupyter_code>del gpt2_lm
del optimizer
del loss
# This resets "peak" memory usage to "current" memory usage.
tf.config.experimental.reset_memory_stats("GPU:0")
# Load the original model.
preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset(
"gpt2_base_en",
sequence_length=128,
)
lora_model = keras_nlp.models.GPT2CausalLM.from_preset(
"gpt2_base_en",
preprocessor=preprocessor,
)<jupyter_output><empty_output><jupyter_text>We will now override the original query/value projection matrices with ournew LoRA layers.<jupyter_code>for layer_idx in range(lora_model.backbone.num_layers):
# Change query dense layer.
decoder_layer = lora_model.backbone.get_layer(f"transformer_layer_{layer_idx}")
self_attention_layer = decoder_layer._self_attention_layer
# Allow mutation to Keras layer state.
self_attention_layer._tracker.locked = False
# Change query dense layer.
self_attention_layer._query_dense = LoraLayer(
self_attention_layer._query_dense,
rank=RANK,
alpha=ALPHA,
trainable=True,
)
# Change value dense layer.
self_attention_layer._value_dense = LoraLayer(
self_attention_layer._value_dense,
rank=RANK,
alpha=ALPHA,
trainable=True,
)<jupyter_output><empty_output><jupyter_text>Let's now do a forward pass to make sure we still have a valid chain ofcomputation.<jupyter_code>lora_model(preprocessor(["LoRA is very useful for quick LLM finetuning"])[0])
pass<jupyter_output><empty_output><jupyter_text>Freeze the entire LLM, only the LoRA layers should be trainable.<jupyter_code>for layer in lora_model._flatten_layers():
lst_of_sublayers = list(layer._flatten_layers())
if len(lst_of_sublayers) == 1: # "leaves of the model"
if layer.name in ["lora_A", "lora_B"]:
layer.trainable = True
else:
layer.trainable = False<jupyter_output><empty_output><jupyter_text>Print the model's summary and see if the number of non-trainable parameters andtotal parameters are correct.In a previous section, we had calculated the number of parameters associated withthe LoRA layers to be 6,144. The total trainable parameters in the model shouldbe `num_layers * (query, value) * 6,144 = 12 * 2 * 6,144 = 147,456`. Thenumber of non-trainable parameters should be the same as the total number ofparameters in the original GPT-2 model, which is `124,439,808`.<jupyter_code>lora_model.summary()<jupyter_output><empty_output><jupyter_text>Fine-tune LoRA GPT-2Now that we have hacked and verified the LoRA GPT-2 model, let's train it!<jupyter_code>gpu_memory_callback = GPUMemoryCallback(
target_batches=[5, 10, 25, 50, 100, 150, 200, 300, 400, 500],
print_stats=True,
)
optimizer, loss = get_optimizer_and_loss()
lora_model.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)
lora_model.fit(
train_ds,
epochs=EPOCHS,
callbacks=[gpu_memory_callback],
)
lora_model_memory_usage = gpu_memory_callback.memory_usage<jupyter_output><empty_output><jupyter_text>And we are done fine-tuning the model! Before we generate text, let's comparethe training time and memory usage of the two models. The training time of GPT-2on a 16 GB Tesla T4 (Colab) is 7 minutes, and for LoRA, it is 5 minutes, a 30%decrease. The memory usage of LoRA GPT-2 is roughly 35% times less than GPT-2.<jupyter_code>plt.bar(
["GPT-2", "LoRA GPT-2"],
[max(gpt2_lm_memory_usage), max(lora_model_memory_usage)],
color=["red", "blue"],
)
plt.xlabel("Time")
plt.ylabel("GPU Memory Usage (in GB)")
plt.title("GPU Memory Usage Comparison")
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>Merge weights and generate text! One of the biggest advantages of LoRA over other adapter methods is that it does not incur any additional inference latency. Let's understand why. Recall our LoRA equation: `output = W0x + b0 + BAx`. We can rewrite this as: `output = Wx + b0 = (W0 + BA)x + b0`, where `W = W0 + BA`. This means that if we merge the weights of the original model and the adapter, we will be essentially doing the same computation as the original model!<jupyter_code>for layer_idx in range(lora_model.backbone.num_layers):
self_attention_layer = lora_model.backbone.get_layer(
f"transformer_layer_{layer_idx}"
)._self_attention_layer
# Merge query dense layer.
query_lora_layer = self_attention_layer._query_dense
A_weights = query_lora_layer.A.kernel # (768, 1) (a, b)
B_weights = query_lora_layer.B.kernel # (1, 12, 64) (b, c, d)
increment_weights = tf.einsum("ab,bcd->acd", A_weights, B_weights) * (ALPHA / RANK)
query_lora_layer.original_layer.kernel.assign_add(increment_weights)
# Merge value dense layer.
value_lora_layer = self_attention_layer._value_dense
A_weights = value_lora_layer.A.kernel # (768, 1) (a, b)
B_weights = value_lora_layer.B.kernel # (1, 12, 64) (b, c, d)
increment_weights = tf.einsum("ab,bcd->acd", A_weights, B_weights) * (ALPHA / RANK)
value_lora_layer.original_layer.kernel.assign_add(increment_weights)<jupyter_output><empty_output><jupyter_text>We are now all set to generate text with our LoRA model :).<jupyter_code># Freezing weights not necessary during generation since no weights are updated.
generate_text(lora_model, "I like basketball", max_length=MAX_GENERATION_LENGTH)
generate_text(
lora_model, "That Italian restaurant is", max_length=MAX_GENERATION_LENGTH
)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/parameter_efficient_finetuning_of_gpt2_with_lora.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/parameter_efficient_finetuning_of_gpt2_with_lora.ipynb",
"repo_id": "keras-io",
"token_count": 6936
} | 81 |
# Review Classification using Active Learning
**Author:** [Darshan Deshpande](https://twitter.com/getdarshan)<br>
**Date created:** 2021/10/29<br>
**Last modified:** 2021/10/29<br>
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/active_learning_review_classification.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/active_learning_review_classification.py)
**Description:** Demonstrating the advantages of active learning through review classification.
---
## Introduction
With the growth of data-centric Machine Learning, Active Learning has grown in popularity
amongst businesses and researchers. Active Learning seeks to progressively
train ML models so that the resultant model requires a smaller amount of training data to
achieve competitive scores.
The structure of an Active Learning pipeline involves a classifier and an oracle. The
oracle is an annotator that cleans, selects, and labels the data, and feeds it to the model
when required. It is a trained individual or a group of individuals who
ensure consistency in the labeling of new data.
The process starts with annotating a small subset of the full dataset and training an
initial model. The best model checkpoint is saved and then tested on a balanced test
set. The test set must be carefully sampled because the full training process will be
dependent on it. Once we have the initial evaluation scores, the oracle is tasked with
labeling more samples; the number of data points to be sampled is usually determined by
the business requirements. After that, the newly sampled data is added to the training
set, and the training procedure repeats. This cycle continues until either an
acceptable score is reached or some other business metric is met.
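The cycle above can be summarized with a tiny, self-contained toy loop. Everything here
(the synthetic data, the two-layer classifier, and the round/label counts) is an illustrative
stand-in for the real components built later in this example, and the "oracle" step is
simulated by revealing labels we already have:
```python
import numpy as np
from tensorflow import keras
rng = np.random.default_rng(0)
x_all = rng.normal(size=(2000, 10)).astype("float32")
y_all = (x_all.sum(axis=1) > 0).astype("float32")
labeled_idx = list(range(100))  # the initially annotated subset
pool_idx = list(range(100, 2000))  # the "unlabeled" pool
def make_model():
    model = keras.Sequential(
        [keras.layers.Dense(16, activation="relu"), keras.layers.Dense(1, activation="sigmoid")]
    )
    model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
    return model
for round_idx in range(3):
    model = make_model()
    model.fit(x_all[labeled_idx], y_all[labeled_idx], epochs=5, verbose=0)
    # "Oracle" step: label the most uncertain pool samples (probability near 0.5).
    probs = model.predict(x_all[pool_idx], verbose=0).squeeze()
    uncertainty = 1.0 - np.abs(probs - 0.5) * 2.0
    chosen = set(np.argsort(-uncertainty)[:100].tolist())
    labeled_idx += [idx for i, idx in enumerate(pool_idx) if i in chosen]
    pool_idx = [idx for i, idx in enumerate(pool_idx) if i not in chosen]
```
Here the toy loop stops after a fixed number of sampling rounds; in practice the loop runs
until a target metric or labeling budget is reached.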
This tutorial provides a basic demonstration of how Active Learning works, using
a ratio-based (least confidence) sampling strategy that results in lower
overall false positive and negative rates when compared to a model trained on the entire
dataset. This sampling falls under the domain of *uncertainty sampling*, in which new
datasets are sampled based on the uncertainty that the model outputs for the
corresponding label. In our example, we compare our model's false positive and false
negative rates and annotate the new data based on their ratio.
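The `uncertainty` line in the toy loop above is one such score; here is a small, self-contained
sketch of two common formulations side by side, computed from a binary classifier's predicted
probabilities (the numbers are made up for illustration):
```python
import numpy as np
# Toy predicted probabilities of the positive class for five unlabeled reviews.
probs = np.array([0.51, 0.93, 0.07, 0.65, 0.48])
# Least-confidence score: how far the prediction is from a confident 0 or 1.
least_confidence = 1.0 - np.maximum(probs, 1.0 - probs)
# Entropy score: highest for predictions closest to 0.5.
entropy = -(probs * np.log(probs + 1e-9) + (1.0 - probs) * np.log(1.0 - probs + 1e-9))
# The most uncertain samples are sent to the oracle for labeling first.
print(np.argsort(-least_confidence))
```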
Some other sampling techniques include:
1. [Committee sampling](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):
Using multiple models to vote for the best data points to be sampled
2. [Entropy reduction](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):
Sampling according to an entropy threshold, selecting more of the samples that produce the highest entropy score.
3. [Minimum margin based sampling](https://arxiv.org/abs/1906.00025v1):
Selects data points closest to the decision boundary
---
## Importing required libraries
```python
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import re
import string
tfds.disable_progress_bar()
```
---
## Loading and preprocessing the data
We will be using the IMDB reviews dataset for our experiments. This dataset has 50,000
reviews in total, including training and testing splits. We will merge these splits and
sample our own, balanced training, validation and testing sets.
```python
dataset = tfds.load(
"imdb_reviews",
split="train + test",
as_supervised=True,
batch_size=-1,
shuffle_files=False,
)
reviews, labels = tfds.as_numpy(dataset)
print("Total examples:", reviews.shape[0])
```
<div class="k-default-codeblock">
```
Total examples: 50000
```
</div>
Active learning starts with labeling a subset of data.
For the ratio sampling technique that we will be using, we will need well-balanced training,
validation and testing splits.
```python
val_split = 2500
test_split = 2500
train_split = 7500
# Separating the negative and positive samples for manual stratification
x_positives, y_positives = reviews[labels == 1], labels[labels == 1]
x_negatives, y_negatives = reviews[labels == 0], labels[labels == 0]
# Creating training, validation and testing splits
x_val, y_val = (
tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0),
tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0),
)
x_test, y_test = (
tf.concat(
(
x_positives[val_split : val_split + test_split],
x_negatives[val_split : val_split + test_split],
),
0,
),
tf.concat(
(
y_positives[val_split : val_split + test_split],
y_negatives[val_split : val_split + test_split],
),
0,
),
)
x_train, y_train = (
tf.concat(
(
x_positives[val_split + test_split : val_split + test_split + train_split],
x_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
tf.concat(
(
y_positives[val_split + test_split : val_split + test_split + train_split],
y_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
)
# Remaining pool of samples are stored separately. These are only labeled as and when required
x_pool_positives, y_pool_positives = (
x_positives[val_split + test_split + train_split :],
y_positives[val_split + test_split + train_split :],
)
x_pool_negatives, y_pool_negatives = (
x_negatives[val_split + test_split + train_split :],
y_negatives[val_split + test_split + train_split :],
)
# Creating TF Datasets for faster prefetching and parallelization
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
pool_negatives = tf.data.Dataset.from_tensor_slices(
(x_pool_negatives, y_pool_negatives)
)
pool_positives = tf.data.Dataset.from_tensor_slices(
(x_pool_positives, y_pool_positives)
)
print(f"Initial training set size: {len(train_dataset)}")
print(f"Validation set size: {len(val_dataset)}")
print(f"Testing set size: {len(test_dataset)}")
print(f"Unlabeled negative pool: {len(pool_negatives)}")
print(f"Unlabeled positive pool: {len(pool_positives)}")
```
<div class="k-default-codeblock">
```
Initial training set size: 15000
Validation set size: 5000
Testing set size: 5000
Unlabeled negative pool: 12500
Unlabeled positive pool: 12500
```
</div>
### Fitting the `TextVectorization` layer
Since we are working with text data, we will need to encode the text strings as vectors which
would then be passed through an `Embedding` layer. To make this tokenization process
faster, we use the `map()` function with its parallelization functionality.
```python
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, f"[{re.escape(string.punctuation)}]", ""
)
vectorizer = layers.TextVectorization(
3000, standardize=custom_standardization, output_sequence_length=150
)
# Adapting the dataset
vectorizer.adapt(
train_dataset.map(lambda x, y: x, num_parallel_calls=tf.data.AUTOTUNE).batch(256)
)
def vectorize_text(text, label):
text = vectorizer(text)
return text, label
train_dataset = train_dataset.map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
pool_negatives = pool_negatives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
pool_positives = pool_positives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)
test_dataset = test_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)
```
---
## Creating Helper Functions
```python
# Helper function for merging new history objects with older ones
def append_history(losses, val_losses, accuracy, val_accuracy, history):
losses = losses + history.history["loss"]
val_losses = val_losses + history.history["val_loss"]
accuracy = accuracy + history.history["binary_accuracy"]
val_accuracy = val_accuracy + history.history["val_binary_accuracy"]
return losses, val_losses, accuracy, val_accuracy
# Plotter function
def plot_history(losses, val_losses, accuracies, val_accuracies):
plt.plot(losses)
plt.plot(val_losses)
plt.legend(["train_loss", "val_loss"])
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
plt.plot(accuracies)
plt.plot(val_accuracies)
plt.legend(["train_accuracy", "val_accuracy"])
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.show()
```
---
## Creating the Model
We create a small bidirectional LSTM model. When using Active Learning, you should make sure
that the model architecture is capable of overfitting to the initial data.
Overfitting gives a strong hint that the model will have enough capacity for
future, unseen data.
```python
def create_model():
model = keras.models.Sequential(
[
layers.Input(shape=(150,)),
layers.Embedding(input_dim=3000, output_dim=128),
layers.Bidirectional(layers.LSTM(32, return_sequences=True)),
layers.GlobalMaxPool1D(),
layers.Dense(20, activation="relu"),
layers.Dropout(0.5),
layers.Dense(1, activation="sigmoid"),
]
)
model.summary()
return model
```
---
## Training on the entire dataset
To show the effectiveness of Active Learning, we will first train the model on the entire
dataset containing 40,000 labeled samples. This model will be used for comparison later.
```python
def train_full_model(full_train_dataset, val_dataset, test_dataset):
model = create_model()
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# We will save the best model at every epoch and load the best one for evaluation on the test set
history = model.fit(
full_train_dataset.batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[
keras.callbacks.EarlyStopping(patience=4, verbose=1),
keras.callbacks.ModelCheckpoint(
"FullModelCheckpoint.h5", verbose=1, save_best_only=True
),
],
)
# Plot history
plot_history(
history.history["loss"],
history.history["val_loss"],
history.history["binary_accuracy"],
history.history["val_binary_accuracy"],
)
# Loading the best checkpoint
model = keras.models.load_model("FullModelCheckpoint.h5")
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
# Sampling the full train dataset to train on
full_train_dataset = (
train_dataset.concatenate(pool_positives)
.concatenate(pool_negatives)
.cache()
.shuffle(20000)
)
# Training the full model
full_dataset_model = train_full_model(full_train_dataset, val_dataset, test_dataset)
```
<div class="k-default-codeblock">
```
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, 150, 128) 384000
bidirectional (Bidirectiona (None, 150, 64) 41216
l)
global_max_pooling1d (Globa (None, 64) 0
lMaxPooling1D)
dense (Dense) (None, 20) 1300
dropout (Dropout) (None, 20) 0
dense_1 (Dense) (None, 1) 21
=================================================================
Total params: 426,537
Trainable params: 426,537
Non-trainable params: 0
_________________________________________________________________
Epoch 1/20
156/157 [============================>.] - ETA: 0s - loss: 0.5150 - binary_accuracy: 0.7615 - false_negatives: 3314.0000 - false_positives: 6210.0000
Epoch 00001: val_loss improved from inf to 0.47791, saving model to FullModelCheckpoint.h5
157/157 [==============================] - 25s 103ms/step - loss: 0.5148 - binary_accuracy: 0.7617 - false_negatives: 3316.0000 - false_positives: 6217.0000 - val_loss: 0.4779 - val_binary_accuracy: 0.7858 - val_false_negatives: 970.0000 - val_false_positives: 101.0000
Epoch 2/20
156/157 [============================>.] - ETA: 0s - loss: 0.3659 - binary_accuracy: 0.8500 - false_negatives: 2833.0000 - false_positives: 3158.0000
Epoch 00002: val_loss improved from 0.47791 to 0.35345, saving model to FullModelCheckpoint.h5
157/157 [==============================] - 9s 59ms/step - loss: 0.3656 - binary_accuracy: 0.8501 - false_negatives: 2836.0000 - false_positives: 3159.0000 - val_loss: 0.3535 - val_binary_accuracy: 0.8502 - val_false_negatives: 363.0000 - val_false_positives: 386.0000
Epoch 3/20
156/157 [============================>.] - ETA: 0s - loss: 0.3319 - binary_accuracy: 0.8653 - false_negatives: 2507.0000 - false_positives: 2873.0000
Epoch 00003: val_loss improved from 0.35345 to 0.33150, saving model to FullModelCheckpoint.h5
157/157 [==============================] - 9s 55ms/step - loss: 0.3319 - binary_accuracy: 0.8652 - false_negatives: 2512.0000 - false_positives: 2878.0000 - val_loss: 0.3315 - val_binary_accuracy: 0.8576 - val_false_negatives: 423.0000 - val_false_positives: 289.0000
Epoch 4/20
156/157 [============================>.] - ETA: 0s - loss: 0.3130 - binary_accuracy: 0.8764 - false_negatives: 2398.0000 - false_positives: 2538.0000
Epoch 00004: val_loss did not improve from 0.33150
157/157 [==============================] - 9s 55ms/step - loss: 0.3129 - binary_accuracy: 0.8763 - false_negatives: 2404.0000 - false_positives: 2542.0000 - val_loss: 0.3328 - val_binary_accuracy: 0.8586 - val_false_negatives: 263.0000 - val_false_positives: 444.0000
Epoch 5/20
156/157 [============================>.] - ETA: 0s - loss: 0.2918 - binary_accuracy: 0.8867 - false_negatives: 2141.0000 - false_positives: 2385.0000
Epoch 00005: val_loss did not improve from 0.33150
157/157 [==============================] - 9s 55ms/step - loss: 0.2917 - binary_accuracy: 0.8867 - false_negatives: 2143.0000 - false_positives: 2388.0000 - val_loss: 0.3762 - val_binary_accuracy: 0.8468 - val_false_negatives: 476.0000 - val_false_positives: 290.0000
Epoch 6/20
156/157 [============================>.] - ETA: 0s - loss: 0.2819 - binary_accuracy: 0.8901 - false_negatives: 2112.0000 - false_positives: 2277.0000
Epoch 00006: val_loss did not improve from 0.33150
157/157 [==============================] - 9s 55ms/step - loss: 0.2819 - binary_accuracy: 0.8902 - false_negatives: 2112.0000 - false_positives: 2282.0000 - val_loss: 0.4018 - val_binary_accuracy: 0.8312 - val_false_negatives: 694.0000 - val_false_positives: 150.0000
Epoch 7/20
156/157 [============================>.] - ETA: 0s - loss: 0.2650 - binary_accuracy: 0.8992 - false_negatives: 1902.0000 - false_positives: 2122.0000
Epoch 00007: val_loss improved from 0.33150 to 0.32843, saving model to FullModelCheckpoint.h5
157/157 [==============================] - 9s 55ms/step - loss: 0.2649 - binary_accuracy: 0.8992 - false_negatives: 1908.0000 - false_positives: 2123.0000 - val_loss: 0.3284 - val_binary_accuracy: 0.8578 - val_false_negatives: 274.0000 - val_false_positives: 437.0000
Epoch 8/20
157/157 [==============================] - ETA: 0s - loss: 0.2508 - binary_accuracy: 0.9051 - false_negatives: 1821.0000 - false_positives: 1974.0000
Epoch 00008: val_loss did not improve from 0.32843
157/157 [==============================] - 9s 55ms/step - loss: 0.2508 - binary_accuracy: 0.9051 - false_negatives: 1821.0000 - false_positives: 1974.0000 - val_loss: 0.4806 - val_binary_accuracy: 0.8194 - val_false_negatives: 788.0000 - val_false_positives: 115.0000
Epoch 9/20
156/157 [============================>.] - ETA: 0s - loss: 0.2377 - binary_accuracy: 0.9112 - false_negatives: 1771.0000 - false_positives: 1775.0000
Epoch 00009: val_loss did not improve from 0.32843
157/157 [==============================] - 9s 54ms/step - loss: 0.2378 - binary_accuracy: 0.9112 - false_negatives: 1775.0000 - false_positives: 1777.0000 - val_loss: 0.3378 - val_binary_accuracy: 0.8562 - val_false_negatives: 335.0000 - val_false_positives: 384.0000
Epoch 10/20
156/157 [============================>.] - ETA: 0s - loss: 0.2209 - binary_accuracy: 0.9195 - false_negatives: 1591.0000 - false_positives: 1623.0000
Epoch 00010: val_loss did not improve from 0.32843
157/157 [==============================] - 9s 55ms/step - loss: 0.2211 - binary_accuracy: 0.9195 - false_negatives: 1594.0000 - false_positives: 1627.0000 - val_loss: 0.3475 - val_binary_accuracy: 0.8556 - val_false_negatives: 425.0000 - val_false_positives: 297.0000
Epoch 11/20
156/157 [============================>.] - ETA: 0s - loss: 0.2060 - binary_accuracy: 0.9251 - false_negatives: 1512.0000 - false_positives: 1479.0000
Epoch 00011: val_loss did not improve from 0.32843
157/157 [==============================] - 9s 55ms/step - loss: 0.2061 - binary_accuracy: 0.9251 - false_negatives: 1517.0000 - false_positives: 1479.0000 - val_loss: 0.3823 - val_binary_accuracy: 0.8522 - val_false_negatives: 276.0000 - val_false_positives: 463.0000
Epoch 00011: early stopping
```
</div>


<div class="k-default-codeblock">
```
----------------------------------------------------------------------------------------------------
Test set evaluation: {'loss': 0.34183189272880554, 'binary_accuracy': 0.8579999804496765, 'false_negatives': 295.0, 'false_positives': 415.0}
----------------------------------------------------------------------------------------------------
```
</div>
---
## Training via Active Learning
The general process we follow when performing Active Learning is demonstrated below:

The pipeline can be summarized in five parts (a minimal loop sketch follows the list):
1. Sample and annotate a small, balanced training dataset
2. Train the model on this small subset
3. Evaluate the model on a balanced testing set
4. If the model satisfies the business criteria, deploy it in a real time setting
5. If it doesn't pass the criteria, sample a few more samples according to the ratio of
false positives and negatives, add them to the training set and repeat from step 2 till
the model passes the tests or till all available data is exhausted.
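A minimal, hypothetical skeleton of this loop is sketched below. It is not the actual
implementation (that is `train_active_learning_models()` further down), and
`sample_more_data()` / `passes_business_criteria()` are stand-ins for the ratio-based
sampling and the business criteria.
```python
# Hypothetical skeleton of the Active Learning loop described above.
def active_learning_loop(model, train_dataset, val_dataset, test_dataset, num_iterations):
    for _ in range(num_iterations):
        model.fit(train_dataset.batch(256), validation_data=val_dataset, epochs=20)
        metrics = model.evaluate(test_dataset, verbose=0, return_dict=True)
        if passes_business_criteria(metrics):  # hypothetical stopping check
            break
        # Grow the training set with newly sampled (and labeled) data points,
        # chosen according to the ratio of false positives and false negatives.
        train_dataset = train_dataset.concatenate(sample_more_data(metrics))
    return model
```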
For the code below, we will perform sampling using the following formula:<br/>

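To make the formula concrete, here is a small worked example using the numbers logged in
the first Active Learning iteration further below (665 misclassified zeros, 234
misclassified ones, and `sampling_size=5000`):
```python
# Worked example of the ratio-based sampling formula.
false_negatives, false_positives, sampling_size = 665, 234, 5000
total = false_negatives + false_positives  # 899
sample_ratio_ones = false_positives / total  # ~0.2603
sample_ratio_zeros = false_negatives / total  # ~0.7397
int(sample_ratio_ones * sampling_size)  # 1301 positive samples to add
int(sample_ratio_zeros * sampling_size)  # 3698 negative samples to add
```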
Active Learning techniques use callbacks extensively for progress tracking. We will be
using model checkpointing and early stopping for this example. The `patience` parameter
for Early Stopping can help minimize overfitting and the time required. We have set it
`patience=4` for now but since the model is robust, we can increase the patience level if
desired.
Note: We are not loading the checkpoint after the first training iteration. In my
experience working on Active Learning techniques, this helps the model probe the
newly formed loss landscape. Even if the model fails to improve in the second iteration,
we will still gain insight about the possible future false positive and negative rates.
This will help us sample a better set in the next iteration where the model will have a
greater chance to improve.
```python
def train_active_learning_models(
train_dataset,
pool_negatives,
pool_positives,
val_dataset,
test_dataset,
num_iterations=3,
sampling_size=5000,
):
# Creating lists for storing metrics
losses, val_losses, accuracies, val_accuracies = [], [], [], []
model = create_model()
# We will monitor the false positives and false negatives predicted by our model
# These will decide the subsequent sampling ratio for every Active Learning loop
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# Defining checkpoints.
# The checkpoint callback is reused throughout the training since it only saves the best overall model.
checkpoint = keras.callbacks.ModelCheckpoint(
"AL_Model.h5", save_best_only=True, verbose=1
)
# Here, patience is set to 4. This can be set higher if desired.
early_stopping = keras.callbacks.EarlyStopping(patience=4, verbose=1)
print(f"Starting to train with {len(train_dataset)} samples")
# Initial fit with a small subset of the training set
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[checkpoint, early_stopping],
)
# Appending history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
for iteration in range(num_iterations):
# Getting predictions from previously trained model
predictions = model.predict(test_dataset)
# Generating labels from the output probabilities
rounded = tf.where(tf.greater(predictions, 0.5), 1, 0)
# Evaluating the number of zeros and ones incorrectly classified
_, _, false_negatives, false_positives = model.evaluate(test_dataset, verbose=0)
print("-" * 100)
print(
f"Number of zeros incorrectly classified: {false_negatives}, Number of ones incorrectly classified: {false_positives}"
)
# This technique of Active Learning demonstrates ratio based sampling where
# Number of ones/zeros to sample = Number of ones/zeros incorrectly classified / Total incorrectly classified
if false_negatives != 0 and false_positives != 0:
total = false_negatives + false_positives
sample_ratio_ones, sample_ratio_zeros = (
false_positives / total,
false_negatives / total,
)
# In the case where all samples are correctly predicted, we can sample both classes equally
else:
sample_ratio_ones, sample_ratio_zeros = 0.5, 0.5
print(
f"Sample ratio for positives: {sample_ratio_ones}, Sample ratio for negatives:{sample_ratio_zeros}"
)
# Sample the required number of ones and zeros
sampled_dataset = pool_negatives.take(
int(sample_ratio_zeros * sampling_size)
).concatenate(pool_positives.take(int(sample_ratio_ones * sampling_size)))
# Skip the sampled data points to avoid repetition of samples
pool_negatives = pool_negatives.skip(int(sample_ratio_zeros * sampling_size))
pool_positives = pool_positives.skip(int(sample_ratio_ones * sampling_size))
# Concatenating the train_dataset with the sampled_dataset
train_dataset = train_dataset.concatenate(sampled_dataset).prefetch(
tf.data.AUTOTUNE
)
print(f"Starting training with {len(train_dataset)} samples")
print("-" * 100)
# We recompile the model to reset the optimizer states and retrain the model
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
validation_data=val_dataset,
epochs=20,
callbacks=[
checkpoint,
keras.callbacks.EarlyStopping(patience=4, verbose=1),
],
)
# Appending the history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
# Loading the best model from this training loop
model = keras.models.load_model("AL_Model.h5")
# Plotting the overall history and evaluating the final model
plot_history(losses, val_losses, accuracies, val_accuracies)
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
active_learning_model = train_active_learning_models(
train_dataset, pool_negatives, pool_positives, val_dataset, test_dataset
)
```
<div class="k-default-codeblock">
```
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding_1 (Embedding) (None, 150, 128) 384000
bidirectional_1 (Bidirectio (None, 150, 64) 41216
nal)
global_max_pooling1d_1 (Glo (None, 64) 0
balMaxPooling1D)
dense_2 (Dense) (None, 20) 1300
dropout_1 (Dropout) (None, 20) 0
dense_3 (Dense) (None, 1) 21
=================================================================
Total params: 426,537
Trainable params: 426,537
Non-trainable params: 0
_________________________________________________________________
Starting to train with 15000 samples
Epoch 1/20
59/59 [==============================] - ETA: 0s - loss: 0.6235 - binary_accuracy: 0.6679 - false_negatives_1: 3111.0000 - false_positives_1: 1870.0000
Epoch 00001: val_loss improved from inf to 0.43017, saving model to AL_Model.h5
59/59 [==============================] - 13s 87ms/step - loss: 0.6235 - binary_accuracy: 0.6679 - false_negatives_1: 3111.0000 - false_positives_1: 1870.0000 - val_loss: 0.4302 - val_binary_accuracy: 0.8286 - val_false_negatives_1: 513.0000 - val_false_positives_1: 344.0000
Epoch 2/20
58/59 [============================>.] - ETA: 0s - loss: 0.4381 - binary_accuracy: 0.8232 - false_negatives_1: 1412.0000 - false_positives_1: 1213.0000
Epoch 00002: val_loss improved from 0.43017 to 0.40090, saving model to AL_Model.h5
59/59 [==============================] - 4s 64ms/step - loss: 0.4373 - binary_accuracy: 0.8235 - false_negatives_1: 1423.0000 - false_positives_1: 1225.0000 - val_loss: 0.4009 - val_binary_accuracy: 0.8248 - val_false_negatives_1: 674.0000 - val_false_positives_1: 202.0000
Epoch 3/20
58/59 [============================>.] - ETA: 0s - loss: 0.3810 - binary_accuracy: 0.8544 - false_negatives_1: 1115.0000 - false_positives_1: 1047.0000
Epoch 00003: val_loss improved from 0.40090 to 0.36085, saving model to AL_Model.h5
59/59 [==============================] - 4s 61ms/step - loss: 0.3805 - binary_accuracy: 0.8545 - false_negatives_1: 1123.0000 - false_positives_1: 1060.0000 - val_loss: 0.3608 - val_binary_accuracy: 0.8408 - val_false_negatives_1: 231.0000 - val_false_positives_1: 565.0000
Epoch 4/20
58/59 [============================>.] - ETA: 0s - loss: 0.3436 - binary_accuracy: 0.8647 - false_negatives_1: 995.0000 - false_positives_1: 1014.0000
Epoch 00004: val_loss improved from 0.36085 to 0.35469, saving model to AL_Model.h5
59/59 [==============================] - 4s 61ms/step - loss: 0.3428 - binary_accuracy: 0.8654 - false_negatives_1: 999.0000 - false_positives_1: 1020.0000 - val_loss: 0.3547 - val_binary_accuracy: 0.8452 - val_false_negatives_1: 266.0000 - val_false_positives_1: 508.0000
Epoch 5/20
58/59 [============================>.] - ETA: 0s - loss: 0.3166 - binary_accuracy: 0.8834 - false_negatives_1: 835.0000 - false_positives_1: 897.0000
Epoch 00005: val_loss did not improve from 0.35469
59/59 [==============================] - 4s 60ms/step - loss: 0.3163 - binary_accuracy: 0.8835 - false_negatives_1: 839.0000 - false_positives_1: 908.0000 - val_loss: 0.3554 - val_binary_accuracy: 0.8508 - val_false_negatives_1: 382.0000 - val_false_positives_1: 364.0000
Epoch 6/20
58/59 [============================>.] - ETA: 0s - loss: 0.2935 - binary_accuracy: 0.8944 - false_negatives_1: 757.0000 - false_positives_1: 811.0000
Epoch 00006: val_loss did not improve from 0.35469
59/59 [==============================] - 4s 60ms/step - loss: 0.2938 - binary_accuracy: 0.8945 - false_negatives_1: 765.0000 - false_positives_1: 818.0000 - val_loss: 0.3718 - val_binary_accuracy: 0.8458 - val_false_negatives_1: 345.0000 - val_false_positives_1: 426.0000
Epoch 7/20
58/59 [============================>.] - ETA: 0s - loss: 0.2794 - binary_accuracy: 0.9003 - false_negatives_1: 732.0000 - false_positives_1: 748.0000
Epoch 00007: val_loss did not improve from 0.35469
59/59 [==============================] - 3s 59ms/step - loss: 0.2797 - binary_accuracy: 0.9001 - false_negatives_1: 749.0000 - false_positives_1: 749.0000 - val_loss: 0.3825 - val_binary_accuracy: 0.8406 - val_false_negatives_1: 228.0000 - val_false_positives_1: 569.0000
Epoch 8/20
58/59 [============================>.] - ETA: 0s - loss: 0.2526 - binary_accuracy: 0.9147 - false_negatives_1: 620.0000 - false_positives_1: 647.0000
Epoch 00008: val_loss did not improve from 0.35469
59/59 [==============================] - 4s 60ms/step - loss: 0.2561 - binary_accuracy: 0.9134 - false_negatives_1: 620.0000 - false_positives_1: 679.0000 - val_loss: 0.4109 - val_binary_accuracy: 0.8258 - val_false_negatives_1: 622.0000 - val_false_positives_1: 249.0000
Epoch 00008: early stopping
----------------------------------------------------------------------------------------------------
Number of zeros incorrectly classified: 665.0, Number of ones incorrectly classified: 234.0
Sample ratio for positives: 0.26028921023359286, Sample ratio for negatives:0.7397107897664071
Starting training with 19999 samples
----------------------------------------------------------------------------------------------------
Epoch 1/20
78/79 [============================>.] - ETA: 0s - loss: 0.2955 - binary_accuracy: 0.8902 - false_negatives_2: 1091.0000 - false_positives_2: 1101.0000
Epoch 00001: val_loss did not improve from 0.35469
79/79 [==============================] - 15s 83ms/step - loss: 0.2956 - binary_accuracy: 0.8901 - false_negatives_2: 1095.0000 - false_positives_2: 1102.0000 - val_loss: 0.4136 - val_binary_accuracy: 0.8238 - val_false_negatives_2: 156.0000 - val_false_positives_2: 725.0000
Epoch 2/20
78/79 [============================>.] - ETA: 0s - loss: 0.2657 - binary_accuracy: 0.9047 - false_negatives_2: 953.0000 - false_positives_2: 949.0000
Epoch 00002: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 61ms/step - loss: 0.2659 - binary_accuracy: 0.9047 - false_negatives_2: 954.0000 - false_positives_2: 951.0000 - val_loss: 0.4079 - val_binary_accuracy: 0.8386 - val_false_negatives_2: 510.0000 - val_false_positives_2: 297.0000
Epoch 3/20
78/79 [============================>.] - ETA: 0s - loss: 0.2475 - binary_accuracy: 0.9126 - false_negatives_2: 892.0000 - false_positives_2: 854.0000
Epoch 00003: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 58ms/step - loss: 0.2474 - binary_accuracy: 0.9126 - false_negatives_2: 893.0000 - false_positives_2: 855.0000 - val_loss: 0.4207 - val_binary_accuracy: 0.8364 - val_false_negatives_2: 228.0000 - val_false_positives_2: 590.0000
Epoch 4/20
78/79 [============================>.] - ETA: 0s - loss: 0.2319 - binary_accuracy: 0.9193 - false_negatives_2: 805.0000 - false_positives_2: 807.0000
Epoch 00004: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.2319 - binary_accuracy: 0.9192 - false_negatives_2: 807.0000 - false_positives_2: 808.0000 - val_loss: 0.4080 - val_binary_accuracy: 0.8310 - val_false_negatives_2: 264.0000 - val_false_positives_2: 581.0000
Epoch 5/20
78/79 [============================>.] - ETA: 0s - loss: 0.2133 - binary_accuracy: 0.9260 - false_negatives_2: 728.0000 - false_positives_2: 750.0000
Epoch 00005: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.2133 - binary_accuracy: 0.9259 - false_negatives_2: 729.0000 - false_positives_2: 752.0000 - val_loss: 0.4054 - val_binary_accuracy: 0.8394 - val_false_negatives_2: 371.0000 - val_false_positives_2: 432.0000
Epoch 6/20
78/79 [============================>.] - ETA: 0s - loss: 0.1982 - binary_accuracy: 0.9361 - false_negatives_2: 639.0000 - false_positives_2: 636.0000
Epoch 00006: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.1980 - binary_accuracy: 0.9362 - false_negatives_2: 639.0000 - false_positives_2: 636.0000 - val_loss: 0.5185 - val_binary_accuracy: 0.8284 - val_false_negatives_2: 590.0000 - val_false_positives_2: 268.0000
Epoch 7/20
78/79 [============================>.] - ETA: 0s - loss: 0.1887 - binary_accuracy: 0.9409 - false_negatives_2: 606.0000 - false_positives_2: 575.0000
Epoch 00007: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.1886 - binary_accuracy: 0.9408 - false_negatives_2: 606.0000 - false_positives_2: 577.0000 - val_loss: 0.6881 - val_binary_accuracy: 0.7886 - val_false_negatives_2: 893.0000 - val_false_positives_2: 164.0000
Epoch 8/20
78/79 [============================>.] - ETA: 0s - loss: 0.1778 - binary_accuracy: 0.9443 - false_negatives_2: 575.0000 - false_positives_2: 538.0000
Epoch 00008: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.1776 - binary_accuracy: 0.9443 - false_negatives_2: 575.0000 - false_positives_2: 538.0000 - val_loss: 0.5921 - val_binary_accuracy: 0.8244 - val_false_negatives_2: 634.0000 - val_false_positives_2: 244.0000
Epoch 9/20
78/79 [============================>.] - ETA: 0s - loss: 0.1598 - binary_accuracy: 0.9505 - false_negatives_2: 507.0000 - false_positives_2: 481.0000
Epoch 00009: val_loss did not improve from 0.35469
79/79 [==============================] - 5s 57ms/step - loss: 0.1597 - binary_accuracy: 0.9506 - false_negatives_2: 507.0000 - false_positives_2: 481.0000 - val_loss: 0.5393 - val_binary_accuracy: 0.8214 - val_false_negatives_2: 542.0000 - val_false_positives_2: 351.0000
Epoch 00009: early stopping
----------------------------------------------------------------------------------------------------
Number of zeros incorrectly classified: 270.0, Number of ones incorrectly classified: 498.0
Sample ratio for positives: 0.6484375, Sample ratio for negatives:0.3515625
Starting training with 24998 samples
----------------------------------------------------------------------------------------------------
Epoch 1/20
97/98 [============================>.] - ETA: 0s - loss: 0.3554 - binary_accuracy: 0.8609 - false_negatives_3: 1714.0000 - false_positives_3: 1739.0000
Epoch 00001: val_loss improved from 0.35469 to 0.34182, saving model to AL_Model.h5
98/98 [==============================] - 17s 82ms/step - loss: 0.3548 - binary_accuracy: 0.8613 - false_negatives_3: 1720.0000 - false_positives_3: 1748.0000 - val_loss: 0.3418 - val_binary_accuracy: 0.8528 - val_false_negatives_3: 369.0000 - val_false_positives_3: 367.0000
Epoch 2/20
97/98 [============================>.] - ETA: 0s - loss: 0.3176 - binary_accuracy: 0.8785 - false_negatives_3: 1473.0000 - false_positives_3: 1544.0000
Epoch 00002: val_loss did not improve from 0.34182
98/98 [==============================] - 6s 56ms/step - loss: 0.3179 - binary_accuracy: 0.8784 - false_negatives_3: 1479.0000 - false_positives_3: 1560.0000 - val_loss: 0.4785 - val_binary_accuracy: 0.8102 - val_false_negatives_3: 793.0000 - val_false_positives_3: 156.0000
Epoch 3/20
97/98 [============================>.] - ETA: 0s - loss: 0.2986 - binary_accuracy: 0.8893 - false_negatives_3: 1353.0000 - false_positives_3: 1396.0000
Epoch 00003: val_loss did not improve from 0.34182
98/98 [==============================] - 5s 56ms/step - loss: 0.2985 - binary_accuracy: 0.8893 - false_negatives_3: 1366.0000 - false_positives_3: 1402.0000 - val_loss: 0.3473 - val_binary_accuracy: 0.8542 - val_false_negatives_3: 340.0000 - val_false_positives_3: 389.0000
Epoch 4/20
97/98 [============================>.] - ETA: 0s - loss: 0.2822 - binary_accuracy: 0.8970 - false_negatives_3: 1253.0000 - false_positives_3: 1305.0000
Epoch 00004: val_loss did not improve from 0.34182
98/98 [==============================] - 6s 56ms/step - loss: 0.2820 - binary_accuracy: 0.8971 - false_negatives_3: 1257.0000 - false_positives_3: 1316.0000 - val_loss: 0.3849 - val_binary_accuracy: 0.8386 - val_false_negatives_3: 537.0000 - val_false_positives_3: 270.0000
Epoch 5/20
97/98 [============================>.] - ETA: 0s - loss: 0.2666 - binary_accuracy: 0.9047 - false_negatives_3: 1130.0000 - false_positives_3: 1237.0000
Epoch 00005: val_loss did not improve from 0.34182
98/98 [==============================] - 6s 56ms/step - loss: 0.2666 - binary_accuracy: 0.9048 - false_negatives_3: 1142.0000 - false_positives_3: 1238.0000 - val_loss: 0.3731 - val_binary_accuracy: 0.8444 - val_false_negatives_3: 251.0000 - val_false_positives_3: 527.0000
Epoch 00005: early stopping
----------------------------------------------------------------------------------------------------
Number of zeros incorrectly classified: 392.0, Number of ones incorrectly classified: 356.0
Sample ratio for positives: 0.47593582887700536, Sample ratio for negatives:0.5240641711229946
Starting training with 29997 samples
----------------------------------------------------------------------------------------------------
Epoch 1/20
117/118 [============================>.] - ETA: 0s - loss: 0.3345 - binary_accuracy: 0.8720 - false_negatives_4: 1835.0000 - false_positives_4: 1998.0000
Epoch 00001: val_loss did not improve from 0.34182
118/118 [==============================] - 20s 96ms/step - loss: 0.3343 - binary_accuracy: 0.8722 - false_negatives_4: 1835.0000 - false_positives_4: 1999.0000 - val_loss: 0.3478 - val_binary_accuracy: 0.8488 - val_false_negatives_4: 250.0000 - val_false_positives_4: 506.0000
Epoch 2/20
117/118 [============================>.] - ETA: 0s - loss: 0.3061 - binary_accuracy: 0.8842 - false_negatives_4: 1667.0000 - false_positives_4: 1801.0000
Epoch 00002: val_loss improved from 0.34182 to 0.33779, saving model to AL_Model.h5
118/118 [==============================] - 7s 56ms/step - loss: 0.3059 - binary_accuracy: 0.8843 - false_negatives_4: 1670.0000 - false_positives_4: 1802.0000 - val_loss: 0.3378 - val_binary_accuracy: 0.8534 - val_false_negatives_4: 335.0000 - val_false_positives_4: 398.0000
Epoch 3/20
117/118 [============================>.] - ETA: 0s - loss: 0.2923 - binary_accuracy: 0.8921 - false_negatives_4: 1626.0000 - false_positives_4: 1607.0000
Epoch 00003: val_loss did not improve from 0.33779
118/118 [==============================] - 7s 56ms/step - loss: 0.2923 - binary_accuracy: 0.8921 - false_negatives_4: 1626.0000 - false_positives_4: 1611.0000 - val_loss: 0.3413 - val_binary_accuracy: 0.8486 - val_false_negatives_4: 269.0000 - val_false_positives_4: 488.0000
Epoch 4/20
117/118 [============================>.] - ETA: 0s - loss: 0.2746 - binary_accuracy: 0.8997 - false_negatives_4: 1459.0000 - false_positives_4: 1546.0000
Epoch 00004: val_loss did not improve from 0.33779
118/118 [==============================] - 7s 55ms/step - loss: 0.2746 - binary_accuracy: 0.8996 - false_negatives_4: 1465.0000 - false_positives_4: 1546.0000 - val_loss: 0.3810 - val_binary_accuracy: 0.8326 - val_false_negatives_4: 169.0000 - val_false_positives_4: 668.0000
Epoch 5/20
117/118 [============================>.] - ETA: 0s - loss: 0.2598 - binary_accuracy: 0.9066 - false_negatives_4: 1336.0000 - false_positives_4: 1462.0000
Epoch 00005: val_loss did not improve from 0.33779
118/118 [==============================] - 7s 56ms/step - loss: 0.2597 - binary_accuracy: 0.9066 - false_negatives_4: 1337.0000 - false_positives_4: 1465.0000 - val_loss: 0.4038 - val_binary_accuracy: 0.8332 - val_false_negatives_4: 643.0000 - val_false_positives_4: 191.0000
Epoch 6/20
117/118 [============================>.] - ETA: 0s - loss: 0.2461 - binary_accuracy: 0.9132 - false_negatives_4: 1263.0000 - false_positives_4: 1337.0000
Epoch 00006: val_loss did not improve from 0.33779
118/118 [==============================] - 7s 55ms/step - loss: 0.2462 - binary_accuracy: 0.9132 - false_negatives_4: 1263.0000 - false_positives_4: 1341.0000 - val_loss: 0.3546 - val_binary_accuracy: 0.8500 - val_false_negatives_4: 359.0000 - val_false_positives_4: 391.0000
Epoch 00006: early stopping
```
</div>


<div class="k-default-codeblock">
```
----------------------------------------------------------------------------------------------------
Test set evaluation: {'loss': 0.34248775243759155, 'binary_accuracy': 0.854200005531311, 'false_negatives_4': 348.0, 'false_positives_4': 381.0}
----------------------------------------------------------------------------------------------------
```
</div>
---
## Conclusion
Active Learning is a growing area of research. This example demonstrates the cost-efficiency
benefits of using Active Learning, as it eliminates the need to annotate large amounts of
data, saving resources.
The following are some noteworthy observations from this example:
1. We only require 30,000 samples to reach the same (if not better) scores as the model
trained on the full dataset. This means that in a real-life setting, we save the effort
required for annotating 10,000 samples!
2. The numbers of false negatives and false positives are well balanced at the end of
training, compared to the skewed ratio obtained from training on the full dataset. This
makes the model slightly more useful in real-life scenarios where both labels hold equal
importance.
For further reading about the types of sampling ratios, training techniques or available
open source libraries/implementations, you can refer to the resources below:
1. [Active Learning Literature Survey](http://burrsettles.com/pub/settles.activelearning.pdf) (Burr Settles, 2010).
2. [modAL](https://github.com/modAL-python/modAL): A Modular Active Learning framework.
3. Google's unofficial [Active Learning playground](https://github.com/google/active-learning).
| keras-io/examples/nlp/md/active_learning_review_classification.md/0 | {
"file_path": "keras-io/examples/nlp/md/active_learning_review_classification.md",
"repo_id": "keras-io",
"token_count": 16493
} | 82 |
"""
Title: FeatureSpace advanced use cases
Author: [Dimitre Oliveira](https://www.linkedin.com/in/dimitre-oliveira-7a1a0113a/)
Date created: 2023/07/01
Last modified: 2023/07/01
Description: How to use FeatureSpace for advanced preprocessing use cases.
Accelerator: None
"""
"""
## Introduction
This example is an extension of the
[Structured data classification with FeatureSpace](https://keras.io/examples/structured_data/structured_data_classification_with_feature_space/)
code example, and here we will extend it to cover more complex use
cases of the [`keras.utils.FeatureSpace`](https://keras.io/api/utils/feature_space/)
preprocessing utility, like feature hashing, feature crosses, handling missing values and
integrating [Keras preprocessing layers](https://keras.io/api/layers/preprocessing_layers/)
with FeatureSpace.
The general task still is structured data classification (also known as tabular data
classification) using a data that includes numerical features, integer categorical
features, and string categorical features.
"""
"""
### The dataset
[Our dataset](https://archive.ics.uci.edu/dataset/222/bank+marketing) is provided by a
Portuguese banking institution.
It's a CSV file with 4119 rows. Each row contains information about marketing campaigns
based on phone calls, and each column describes an attribute of the client. We use the
features to predict whether the client subscribed ('yes') or not ('no') to the product
(bank term deposit).
Here's the description of each feature:
Column| Description| Feature Type
------|------------|-------------
Age | Age of the client | Numerical
Job | Type of job | Categorical
Marital | Marital status | Categorical
Education | Education level of the client | Categorical
Default | Has credit in default? | Categorical
Housing | Has housing loan? | Categorical
Loan | Has personal loan? | Categorical
Contact | Contact communication type | Categorical
Month | Last contact month of year | Categorical
Day_of_week | Last contact day of the week | Categorical
Duration | Last contact duration, in seconds | Numerical
Campaign | Number of contacts performed during this campaign and for this client | Numerical
Pdays | Number of days that passed by after the client was last contacted from a previous campaign | Numerical
Previous | Number of contacts performed before this campaign and for this client | Numerical
Poutcome | Outcome of the previous marketing campaign | Categorical
Emp.var.rate | Employment variation rate | Numerical
Cons.price.idx | Consumer price index | Numerical
Cons.conf.idx | Consumer confidence index | Numerical
Euribor3m | Euribor 3 month rate | Numerical
Nr.employed | Number of employees | Numerical
Y | Has the client subscribed a term deposit? | Target
**Important note regarding the feature `duration`**: this attribute highly affects the
output target (e.g., if duration=0 then y='no'). Yet, the duration is not known before a
call is performed. Also, after the end of the call y is obviously known. Thus, this input
should only be included for benchmark purposes and should be discarded if the intention
is to have a realistic predictive model. For this reason we will drop it.
"""
"""
## Setup
"""
import pandas as pd
import tensorflow as tf
from pathlib import Path
from zipfile import ZipFile
from tensorflow.keras.utils import FeatureSpace
"""
## Load the data
Let's download the data and load it into a Pandas dataframe:
"""
data_url = "https://archive.ics.uci.edu/static/public/222/bank+marketing.zip"
data_zipped_path = tf.keras.utils.get_file("bank_marketing.zip", data_url, extract=True)
keras_datasets_path = Path(data_zipped_path).parents[0]
with ZipFile(f"{keras_datasets_path}/bank-additional.zip", "r") as zip:
# Extract files
zip.extractall(path=keras_datasets_path)
dataframe = pd.read_csv(
f"{keras_datasets_path}/bank-additional/bank-additional.csv", sep=";"
)
"""
We will create a new feature `previously_contacted` to be able to demonstrate some useful
preprocessing techniques. This feature is based on `pdays`: according to the dataset
information, `pdays = 999` means that the client was not previously contacted, so
let's create a feature to capture that.
"""
# Dropping `duration` to avoid target leakage
dataframe.drop("duration", axis=1, inplace=True)
# Creating the new feature `previously_contacted`
dataframe["previously_contacted"] = dataframe["pdays"].map(
lambda x: 0 if x == 999 else 1
)
"""
The dataset includes 4119 samples with 21 columns per sample (20 features, plus the
target label), here's a preview of a few samples:
"""
print(f"Dataframe shape: {dataframe.shape}")
print(dataframe.head())
"""
The column, "y", indicates whether the client has subscribed a term deposit or not.
"""
"""
## Train/validation split
Let's split the data into a training and validation set:
"""
valid_dataframe = dataframe.sample(frac=0.2, random_state=0)
train_dataframe = dataframe.drop(valid_dataframe.index)
print(
f"Using {len(train_dataframe)} samples for training and "
f"{len(valid_dataframe)} for validation"
)
"""
## Generating TF datasets
Let's generate
[`tf.data.Dataset`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) objects
for each dataframe, since our target column `y` is a string we also need to encode it as
an integer to be able to train our model with it. To achieve this we will create a
`StringLookup` layer that will map the strings "no" and "yes" into "0" and "1"
respectively.
"""
label_lookup = tf.keras.layers.StringLookup(
# the order here is important since the first index will be encoded as 0
vocabulary=["no", "yes"],
num_oov_indices=0,
)
def encode_label(x, y):
encoded_y = label_lookup(y)
return x, encoded_y
def dataframe_to_dataset(dataframe):
dataframe = dataframe.copy()
labels = dataframe.pop("y")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.map(encode_label, num_parallel_calls=tf.data.AUTOTUNE)
ds = ds.shuffle(buffer_size=len(dataframe))
return ds
train_ds = dataframe_to_dataset(train_dataframe)
valid_ds = dataframe_to_dataset(valid_dataframe)
"""
Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of features
and `target` is the value `0` or `1`:
"""
for x, y in dataframe_to_dataset(train_dataframe).take(1):
print(f"Input: {x}")
print(f"Target: {y}")
"""
## Preprocessing
Usually our data is not in the proper or best format for modeling, which is why most of
the time we need to do some kind of preprocessing on the features to make them compatible
with the model or to extract the most out of them for the task. We need to do this
preprocessing step for training, but at inference we also need to make sure that the
data goes through the same process. This is where a utility like `FeatureSpace` shines: we
can define all the preprocessing once and re-use it at different stages of our system.
Here we will see how to use `FeatureSpace` to perform more complex transformations,
demonstrate its flexibility, and then combine everything together into a single component
to preprocess data for our model.
"""
"""
The `FeatureSpace` utility learns how to process the data by using the `adapt()` function
to learn from it. This requires a dataset containing only features, so let's create one,
together with a utility function to show the preprocessing example in practice:
"""
train_ds_with_no_labels = train_ds.map(lambda x, _: x)
def example_feature_space(dataset, feature_space, feature_names):
feature_space.adapt(dataset)
for x in dataset.take(1):
inputs = {feature_name: x[feature_name] for feature_name in feature_names}
preprocessed_x = feature_space(inputs)
print(f"Input: {[{k:v.numpy()} for k, v in inputs.items()]}")
print(
f"Preprocessed output: {[{k:v.numpy()} for k, v in preprocessed_x.items()]}"
)
"""
### Feature hashing
"""
"""
**Feature hashing** means hashing or encoding a set of values into a defined number of
bins. In this case we have `campaign` (the number of contacts performed during this
campaign and for a client), a numerical feature that can assume a varying range of values,
and we will hash it into 4 bins; this means that any possible value of the original
feature will be placed into one of those 4 bins. The output here can be a
one-hot encoded vector or a single number.
"""
feature_space = FeatureSpace(
features={
"campaign": FeatureSpace.integer_hashed(num_bins=4, output_mode="one_hot")
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["campaign"])
"""
**Feature hashing** can also be used for string features.
"""
feature_space = FeatureSpace(
features={
"education": FeatureSpace.string_hashed(num_bins=3, output_mode="one_hot")
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["education"])
"""
For numerical features we can get a similar behavior by using the `float_discretized`
option. The main difference between this and `integer_hashed` is that with the former we
bin the values while keeping some numerical relationship (close values will likely be
placed in the same bin), while with the latter (hashing) we cannot guarantee that close
numbers will be hashed into the same bin; it depends on the hashing function.
"""
feature_space = FeatureSpace(
features={"age": FeatureSpace.float_discretized(num_bins=3, output_mode="one_hot")},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["age"])
"""
### Feature indexing
"""
"""
**Indexing** a string feature essentially means creating a discrete numerical
representation for it. This is especially important for string features, since most models
only accept numerical features. This transformation will place the string values into
different categories. The output here can be a one-hot encoded vector or a single number.
Note that by specifying `num_oov_indices=1` we leave one spot in our output vector for
OOV (out of vocabulary) values; this is an important tool to handle missing or unseen
values after training (values that were not seen during the `adapt()` step).
"""
feature_space = FeatureSpace(
features={
"default": FeatureSpace.string_categorical(
num_oov_indices=1, output_mode="one_hot"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["default"])
"""
We can also do **feature indexing** for integer features. This can be quite important for
some datasets where categorical features are replaced by numbers, for instance features
like `sex` or `gender` where values like `1` and `0` do not have a numerical relationship
between them; they are just different categories. This behavior can be perfectly captured
by this transformation.
On this dataset we can use the feature that we created, `previously_contacted`. For this
case we want to explicitly set `num_oov_indices=0`; the reason is that we only expect two
possible values for the feature, so anything else would be either wrong input or an issue
with the data creation. For this reason we would probably just want the code to throw an
error so that we can be aware of the issue and fix it.
"""
feature_space = FeatureSpace(
features={
"previously_contacted": FeatureSpace.integer_categorical(
num_oov_indices=0, output_mode="one_hot"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["previously_contacted"])
"""
### Feature crosses (mixing features of diverse types)
With **crosses** we can create feature interactions between an arbitrary number of features
of mixed types, as long as they are categorical features. Think of it this way: instead of
having a feature {'age': 20} and another {'job': 'entrepreneur'}, we can have
{'age_X_job': 20_entrepreneur}, but with `FeatureSpace` and **crosses** we can apply
specific preprocessing to each individual feature and to the feature cross itself. This
option can be very powerful for specific use cases; here it might be a good option since
age combined with job can have different meanings for the banking domain.
We will cross `age` and `job` and hash their combined output into a vector
representation of size 8. The output here can be a one-hot encoded vector or a single
number.
Sometimes the combination of multiple features can result in a very large feature
space; think about crossing someone's ZIP code with their last name, where the
possibilities would be in the thousands. That is why the `crossing_dim` parameter is so
important: it limits the output dimension of the crossed feature.
Note that the combination of the possible values of the 6 bins of `age` and the 12 values
of `job` would be 72, so by choosing `crossing_dim = 8` we are choosing to constrain the
output vector.
"""
feature_space = FeatureSpace(
features={
"age": FeatureSpace.integer_hashed(num_bins=6, output_mode="one_hot"),
"job": FeatureSpace.string_categorical(
num_oov_indices=0, output_mode="one_hot"
),
},
crosses=[
FeatureSpace.cross(
feature_names=("age", "job"),
crossing_dim=8,
output_mode="one_hot",
)
],
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["age", "job"])
"""
### FeatureSpace using a Keras preprocessing layer
To be a really flexible and extensible component, we cannot rely only on those pre-defined
transformations; we must be able to re-use other transformations from the Keras/TensorFlow
ecosystem and customize our own. This is why `FeatureSpace` is also designed to work with
[Keras preprocessing layers](https://keras.io/guides/preprocessing_layers/); this way we
can use sophisticated data transformations provided by the framework, and you can even
create your own custom Keras preprocessing layers and use them in the same way (a minimal
sketch of such a custom layer is shown after the TF-IDF example below).
Here we are going to use the
[`tf.keras.layers.TextVectorization`](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization/#textvectorization-class)
preprocessing layer to create a TF-IDF
feature from our data. Note that this feature is not a really good use case for TF-IDF;
this is just for demonstration purposes.
"""
custom_layer = tf.keras.layers.TextVectorization(output_mode="tf_idf")
feature_space = FeatureSpace(
features={
"education": FeatureSpace.feature(
preprocessor=custom_layer, dtype="string", output_mode="float"
)
},
output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["education"])
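# A minimal sketch (not part of the original example) of a custom preprocessing
# layer: `LogTransform` applies log(1 + x) to a numerical feature. A layer
# defined like this can be plugged into `FeatureSpace.feature()` the same way
# as the `TextVectorization` layer above; the choice of the `campaign` feature
# here is just for illustration.
class LogTransform(tf.keras.layers.Layer):
    def call(self, inputs):
        # Cast to float32 and apply log(1 + x) element-wise.
        return tf.math.log1p(tf.cast(inputs, "float32"))


feature_space = FeatureSpace(
    features={
        "campaign": FeatureSpace.feature(
            preprocessor=LogTransform(), dtype="float32", output_mode="float"
        )
    },
    output_mode="dict",
)
example_feature_space(train_ds_with_no_labels, feature_space, ["campaign"])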
"""
## Configuring the final `FeatureSpace`
Now that we know how to use `FeatureSpace` for more complex use cases, let's pick the ones
that look most useful for this task and create the final `FeatureSpace` component.
To configure how each feature should be preprocessed,
we instantiate a `keras.utils.FeatureSpace`, and we
pass to it a dictionary that maps the name of our features
to the feature transformation function.
"""
feature_space = FeatureSpace(
features={
# Categorical features encoded as integers
"previously_contacted": FeatureSpace.integer_categorical(num_oov_indices=0),
# Categorical features encoded as string
"marital": FeatureSpace.string_categorical(num_oov_indices=0),
"education": FeatureSpace.string_categorical(num_oov_indices=0),
"default": FeatureSpace.string_categorical(num_oov_indices=0),
"housing": FeatureSpace.string_categorical(num_oov_indices=0),
"loan": FeatureSpace.string_categorical(num_oov_indices=0),
"contact": FeatureSpace.string_categorical(num_oov_indices=0),
"month": FeatureSpace.string_categorical(num_oov_indices=0),
"day_of_week": FeatureSpace.string_categorical(num_oov_indices=0),
"poutcome": FeatureSpace.string_categorical(num_oov_indices=0),
# Categorical features to hash and bin
"job": FeatureSpace.string_hashed(num_bins=3),
# Numerical features to hash and bin
"pdays": FeatureSpace.integer_hashed(num_bins=4),
# Numerical features to normalize and bin
"age": FeatureSpace.float_discretized(num_bins=4),
# Numerical features to normalize
"campaign": FeatureSpace.float_normalized(),
"previous": FeatureSpace.float_normalized(),
"emp.var.rate": FeatureSpace.float_normalized(),
"cons.price.idx": FeatureSpace.float_normalized(),
"cons.conf.idx": FeatureSpace.float_normalized(),
"euribor3m": FeatureSpace.float_normalized(),
"nr.employed": FeatureSpace.float_normalized(),
},
# Specify feature cross with a custom crossing dim.
crosses=[
FeatureSpace.cross(feature_names=("age", "job"), crossing_dim=8),
FeatureSpace.cross(
feature_names=("default", "housing", "loan"), crossing_dim=6
),
FeatureSpace.cross(
feature_names=("poutcome", "previously_contacted"), crossing_dim=2
),
],
output_mode="concat",
)
"""
## Adapt the `FeatureSpace` to the training data
Before we start using the `FeatureSpace` to build a model, we have
to adapt it to the training data. During `adapt()`, the `FeatureSpace` will:
- Index the set of possible values for categorical features.
- Compute the mean and variance for numerical features to normalize.
- Compute the value boundaries for the different bins for numerical features to
discretize.
- Any other kind of preprocessing required by custom layers.
Note that `adapt()` should be called on a `tf.data.Dataset` which yields dicts
of feature values -- no labels.
But first let's batch the datasets
"""
train_ds = train_ds.batch(32)
valid_ds = valid_ds.batch(32)
train_ds_with_no_labels = train_ds.map(lambda x, _: x)
feature_space.adapt(train_ds_with_no_labels)
"""
At this point, the `FeatureSpace` can be called on a dict of raw feature values, and
because we set `output_mode="concat"` it will return a single concatenate vector for each
sample, combining encoded features and feature crosses.
"""
for x, _ in train_ds.take(1):
preprocessed_x = feature_space(x)
print(f"preprocessed_x shape: {preprocessed_x.shape}")
print(f"preprocessed_x sample: \n{preprocessed_x[0]}")
"""
## Saving the `FeatureSpace`
At this point we can choose to save our `FeatureSpace` component. This has many
advantages, like re-using it on different experiments that use the same model, saving time
if you need to re-run the preprocessing step, and mainly for model deployment, where by
loading it you can be sure that you will be applying the same preprocessing steps no
matter the device or environment. This is a great way to reduce
[training/serving skew](https://developers.google.com/machine-learning/guides/rules-of-ml#training-serving_skew).
"""
feature_space.save("myfeaturespace.keras")
"""
## Preprocessing with `FeatureSpace` as part of the tf.data pipeline
We will opt to use our component asynchronously by making it part of the tf.data
pipeline, as noted in the
[previous guide](https://keras.io/examples/structured_data/structured_data_classification_with_feature_space/).
This enables asynchronous parallel preprocessing of the data on CPU before it
hits the model. This is usually the right thing to do during training.
Let's create a training and validation dataset of preprocessed batches:
"""
preprocessed_train_ds = train_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
preprocessed_valid_ds = valid_ds.map(
lambda x, y: (feature_space(x), y), num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
"""
## Model
We will take advantage of our `FeatureSpace` component to build the model. As we want the
model to be compatible with our preprocessing function, let's use the `FeatureSpace`
feature map as the input of our model.
"""
encoded_features = feature_space.get_encoded_features()
print(encoded_features)
"""
This model is quite trivial and only for demonstration purposes, so don't pay too much
attention to the architecture.
"""
x = tf.keras.layers.Dense(64, activation="relu")(encoded_features)
x = tf.keras.layers.Dropout(0.5)(x)
output = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs=encoded_features, outputs=output)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
"""
## Training
Let's train our model for 20 epochs. Note that feature preprocessing is happening as part
of the tf.data pipeline, not as part of the model.
"""
model.fit(
preprocessed_train_ds, validation_data=preprocessed_valid_ds, epochs=20, verbose=2
)
"""
## Inference on new data with the end-to-end model
Now, we can build our inference model (which includes the `FeatureSpace`) to make
predictions based on dicts of raw features values, as follows:
"""
"""
### Loading the `FeatureSpace`
First let's load the `FeatureSpace` that we saved a few moments ago. This can be quite
handy if you train a model but want to do inference at a different time, possibly using a
different device or environment.
"""
loaded_feature_space = tf.keras.models.load_model("myfeaturespace.keras")
"""
### Building the inference end-to-end model
To build the inference model we need both the feature input map and the preprocessing
encoded Keras tensors.
"""
dict_inputs = loaded_feature_space.get_inputs()
encoded_features = loaded_feature_space.get_encoded_features()
print(encoded_features)
print(dict_inputs)
outputs = model(encoded_features)
inference_model = tf.keras.Model(inputs=dict_inputs, outputs=outputs)
sample = {
"age": 30,
"job": "blue-collar",
"marital": "married",
"education": "basic.9y",
"default": "no",
"housing": "yes",
"loan": "no",
"contact": "cellular",
"month": "may",
"day_of_week": "fri",
"campaign": 2,
"pdays": 999,
"previous": 0,
"poutcome": "nonexistent",
"emp.var.rate": -1.8,
"cons.price.idx": 92.893,
"cons.conf.idx": -46.2,
"euribor3m": 1.313,
"nr.employed": 5099.1,
"previously_contacted": 0,
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = inference_model.predict(input_dict)
print(
f"This particular client has a {100 * predictions[0][0]:.2f}% probability "
"of subscribing a term deposit, as evaluated by our model."
)
| keras-io/examples/structured_data/feature_space_advanced.py/0 | {
"file_path": "keras-io/examples/structured_data/feature_space_advanced.py",
"repo_id": "keras-io",
"token_count": 7094
} | 83 |
# Structured data classification from scratch
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2020/06/09<br>
**Last modified:** 2020/06/09<br>
**Description:** Binary classification of structured data including numerical and categorical features.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/structured_data_classification_from_scratch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/structured_data_classification_from_scratch.py)
---
## Introduction
This example demonstrates how to do structured data classification, starting from a raw
CSV file. Our data includes both numerical and categorical features. We will use Keras
preprocessing layers to normalize the numerical features and vectorize the categorical
ones.
Note that this example should be run with TensorFlow 2.5 or higher.
### The dataset
[Our dataset](https://archive.ics.uci.edu/ml/datasets/heart+Disease) is provided by the
Cleveland Clinic Foundation for Heart Disease.
It's a CSV file with 303 rows. Each row contains information about a patient (a
**sample**), and each column describes an attribute of the patient (a **feature**). We
use the features to predict whether a patient has a heart disease (**binary
classification**).
Here's the description of each feature:
Column| Description| Feature Type
------------|--------------------|----------------------
Age | Age in years | Numerical
Sex | (1 = male; 0 = female) | Categorical
CP | Chest pain type (0, 1, 2, 3, 4) | Categorical
Trestbpd | Resting blood pressure (in mm Hg on admission) | Numerical
Chol | Serum cholesterol in mg/dl | Numerical
FBS | fasting blood sugar in 120 mg/dl (1 = true; 0 = false) | Categorical
RestECG | Resting electrocardiogram results (0, 1, 2) | Categorical
Thalach | Maximum heart rate achieved | Numerical
Exang | Exercise induced angina (1 = yes; 0 = no) | Categorical
Oldpeak | ST depression induced by exercise relative to rest | Numerical
Slope | Slope of the peak exercise ST segment | Numerical
CA | Number of major vessels (0-3) colored by fluoroscopy | Both numerical & categorical
Thal | 3 = normal; 6 = fixed defect; 7 = reversible defect | Categorical
Target | Diagnosis of heart disease (1 = true; 0 = false) | Target
---
## Setup
```python
import os
# TensorFlow is the only backend that supports string inputs.
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import pandas as pd
import keras
from keras import layers
```
---
## Preparing the data
Let's download the data and load it into a Pandas dataframe:
```python
file_url = "http://storage.googleapis.com/download.tensorflow.org/data/heart.csv"
dataframe = pd.read_csv(file_url)
```
The dataset includes 303 samples with 14 columns per sample (13 features, plus the target
label):
```python
dataframe.shape
```
<div class="k-default-codeblock">
```
(303, 14)
```
</div>
Here's a preview of a few samples:
```python
dataframe.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
<div class="k-default-codeblock">
```
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
```
</div>
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>age</th>
<th>sex</th>
<th>cp</th>
<th>trestbps</th>
<th>chol</th>
<th>fbs</th>
<th>restecg</th>
<th>thalach</th>
<th>exang</th>
<th>oldpeak</th>
<th>slope</th>
<th>ca</th>
<th>thal</th>
<th>target</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>63</td>
<td>1</td>
<td>1</td>
<td>145</td>
<td>233</td>
<td>1</td>
<td>2</td>
<td>150</td>
<td>0</td>
<td>2.3</td>
<td>3</td>
<td>0</td>
<td>fixed</td>
<td>0</td>
</tr>
<tr>
<th>1</th>
<td>67</td>
<td>1</td>
<td>4</td>
<td>160</td>
<td>286</td>
<td>0</td>
<td>2</td>
<td>108</td>
<td>1</td>
<td>1.5</td>
<td>2</td>
<td>3</td>
<td>normal</td>
<td>1</td>
</tr>
<tr>
<th>2</th>
<td>67</td>
<td>1</td>
<td>4</td>
<td>120</td>
<td>229</td>
<td>0</td>
<td>2</td>
<td>129</td>
<td>1</td>
<td>2.6</td>
<td>2</td>
<td>2</td>
<td>reversible</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>37</td>
<td>1</td>
<td>3</td>
<td>130</td>
<td>250</td>
<td>0</td>
<td>0</td>
<td>187</td>
<td>0</td>
<td>3.5</td>
<td>3</td>
<td>0</td>
<td>normal</td>
<td>0</td>
</tr>
<tr>
<th>4</th>
<td>41</td>
<td>0</td>
<td>2</td>
<td>130</td>
<td>204</td>
<td>0</td>
<td>2</td>
<td>172</td>
<td>0</td>
<td>1.4</td>
<td>1</td>
<td>0</td>
<td>normal</td>
<td>0</td>
</tr>
</tbody>
</table>
</div>
The last column, "target", indicates whether the patient has a heart disease (1) or not
(0).
Let's split the data into a training and validation set:
```python
val_dataframe = dataframe.sample(frac=0.2, random_state=1337)
train_dataframe = dataframe.drop(val_dataframe.index)
print(
f"Using {len(train_dataframe)} samples for training "
f"and {len(val_dataframe)} for validation"
)
```
<div class="k-default-codeblock">
```
Using 242 samples for training and 61 for validation
```
</div>
Let's generate `tf.data.Dataset` objects for each dataframe:
```python
def dataframe_to_dataset(dataframe):
dataframe = dataframe.copy()
labels = dataframe.pop("target")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.shuffle(buffer_size=len(dataframe))
return ds
train_ds = dataframe_to_dataset(train_dataframe)
val_ds = dataframe_to_dataset(val_dataframe)
```
Each `Dataset` yields a tuple `(input, target)` where `input` is a dictionary of features
and `target` is the value `0` or `1`:
```python
for x, y in train_ds.take(1):
print("Input:", x)
print("Target:", y)
```
<div class="k-default-codeblock">
```
Input: {'age': <tf.Tensor: shape=(), dtype=int64, numpy=64>, 'sex': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'cp': <tf.Tensor: shape=(), dtype=int64, numpy=4>, 'trestbps': <tf.Tensor: shape=(), dtype=int64, numpy=128>, 'chol': <tf.Tensor: shape=(), dtype=int64, numpy=263>, 'fbs': <tf.Tensor: shape=(), dtype=int64, numpy=0>, 'restecg': <tf.Tensor: shape=(), dtype=int64, numpy=0>, 'thalach': <tf.Tensor: shape=(), dtype=int64, numpy=105>, 'exang': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'oldpeak': <tf.Tensor: shape=(), dtype=float64, numpy=0.2>, 'slope': <tf.Tensor: shape=(), dtype=int64, numpy=2>, 'ca': <tf.Tensor: shape=(), dtype=int64, numpy=1>, 'thal': <tf.Tensor: shape=(), dtype=string, numpy=b'reversible'>}
Target: tf.Tensor(0, shape=(), dtype=int64)
```
</div>
Let's batch the datasets:
```python
train_ds = train_ds.batch(32)
val_ds = val_ds.batch(32)
```
---
## Feature preprocessing with Keras layers
The following features are categorical features encoded as integers:
- `sex`
- `cp`
- `fbs`
- `restecg`
- `exang`
- `ca`
We will encode these features using **one-hot encoding**. We have two options
here:
- Use `CategoryEncoding()`, which requires knowing the range of input values
and will error on input outside the range.
- Use `IntegerLookup()` which will build a lookup table for inputs and reserve
an output index for unknown input values.
For this example, we want a simple solution that will handle out of range inputs
at inference, so we will use `IntegerLookup()`.
We also have a categorical feature encoded as a string: `thal`. We will create an
index of all possible values and one-hot encode the output using the `StringLookup()` layer.
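As a quick illustration (a minimal sketch, not part of the original example, with a made-up
toy vocabulary), this is how a lookup layer reacts to a value that was never seen during
`adapt()`: the unknown value falls into the reserved out-of-vocabulary slot instead of
raising an error.
```python
# Hypothetical toy example: adapt a StringLookup on three known values, then
# encode one known and one unseen value. The unseen value activates the
# reserved out-of-vocabulary slot of the multi-hot ("binary") output.
demo_lookup = layers.StringLookup(output_mode="binary")
demo_lookup.adapt(tf.constant(["normal", "fixed", "reversible"]))
print(demo_lookup(tf.constant([["normal"], ["unseen_value"]])))
```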
Finally, the following features are continuous numerical features:
- `age`
- `trestbps`
- `chol`
- `thalach`
- `oldpeak`
- `slope`
For each of these features, we will use a `Normalization()` layer to make sure the mean
of each feature is 0 and its standard deviation is 1.
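As a small sanity check (a minimal sketch with made-up numbers, not part of the original
example), an adapted `Normalization` layer maps its adaptation data to roughly zero mean and
unit standard deviation:
```python
# Hypothetical toy example: adapt on three values, then normalize those same values.
demo_norm = layers.Normalization()
demo_norm.adapt(tf.constant([[10.0], [20.0], [30.0]]))
print(demo_norm(tf.constant([[10.0], [20.0], [30.0]])))  # roughly [-1.22, 0.0, 1.22]
```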
Below, we define 2 utility functions to do the operations:
- `encode_numerical_feature` to apply featurewise normalization to numerical features.
- `encode_categorical_feature` to one-hot encode categorical features (both string-valued
and integer-valued ones), by first turning the inputs into encoded indices with the
appropriate lookup layer.
```python
def encode_numerical_feature(feature, name, dataset):
# Create a Normalization layer for our feature
normalizer = layers.Normalization()
# Prepare a Dataset that only yields our feature
feature_ds = dataset.map(lambda x, y: x[name])
feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1))
# Learn the statistics of the data
normalizer.adapt(feature_ds)
# Normalize the input feature
encoded_feature = normalizer(feature)
return encoded_feature
def encode_categorical_feature(feature, name, dataset, is_string):
lookup_class = layers.StringLookup if is_string else layers.IntegerLookup
# Create a lookup layer which will turn strings into integer indices
lookup = lookup_class(output_mode="binary")
# Prepare a Dataset that only yields our feature
feature_ds = dataset.map(lambda x, y: x[name])
feature_ds = feature_ds.map(lambda x: tf.expand_dims(x, -1))
# Learn the set of possible string values and assign them a fixed integer index
lookup.adapt(feature_ds)
# Turn the string input into integer indices
encoded_feature = lookup(feature)
return encoded_feature
```
---
## Build a model
With this done, we can create our end-to-end model:
```python
# Categorical features encoded as integers
sex = keras.Input(shape=(1,), name="sex", dtype="int64")
cp = keras.Input(shape=(1,), name="cp", dtype="int64")
fbs = keras.Input(shape=(1,), name="fbs", dtype="int64")
restecg = keras.Input(shape=(1,), name="restecg", dtype="int64")
exang = keras.Input(shape=(1,), name="exang", dtype="int64")
ca = keras.Input(shape=(1,), name="ca", dtype="int64")
# Categorical feature encoded as string
thal = keras.Input(shape=(1,), name="thal", dtype="string")
# Numerical features
age = keras.Input(shape=(1,), name="age")
trestbps = keras.Input(shape=(1,), name="trestbps")
chol = keras.Input(shape=(1,), name="chol")
thalach = keras.Input(shape=(1,), name="thalach")
oldpeak = keras.Input(shape=(1,), name="oldpeak")
slope = keras.Input(shape=(1,), name="slope")
all_inputs = [
sex,
cp,
fbs,
restecg,
exang,
ca,
thal,
age,
trestbps,
chol,
thalach,
oldpeak,
slope,
]
# Integer categorical features
sex_encoded = encode_categorical_feature(sex, "sex", train_ds, False)
cp_encoded = encode_categorical_feature(cp, "cp", train_ds, False)
fbs_encoded = encode_categorical_feature(fbs, "fbs", train_ds, False)
restecg_encoded = encode_categorical_feature(restecg, "restecg", train_ds, False)
exang_encoded = encode_categorical_feature(exang, "exang", train_ds, False)
ca_encoded = encode_categorical_feature(ca, "ca", train_ds, False)
# String categorical features
thal_encoded = encode_categorical_feature(thal, "thal", train_ds, True)
# Numerical features
age_encoded = encode_numerical_feature(age, "age", train_ds)
trestbps_encoded = encode_numerical_feature(trestbps, "trestbps", train_ds)
chol_encoded = encode_numerical_feature(chol, "chol", train_ds)
thalach_encoded = encode_numerical_feature(thalach, "thalach", train_ds)
oldpeak_encoded = encode_numerical_feature(oldpeak, "oldpeak", train_ds)
slope_encoded = encode_numerical_feature(slope, "slope", train_ds)
all_features = layers.concatenate(
[
sex_encoded,
cp_encoded,
fbs_encoded,
restecg_encoded,
exang_encoded,
slope_encoded,
ca_encoded,
thal_encoded,
age_encoded,
trestbps_encoded,
chol_encoded,
thalach_encoded,
oldpeak_encoded,
]
)
x = layers.Dense(32, activation="relu")(all_features)
x = layers.Dropout(0.5)(x)
output = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(all_inputs, output)
model.compile("adam", "binary_crossentropy", metrics=["accuracy"])
```
Let's visualize our connectivity graph:
```python
# `rankdir='LR'` is to make the graph horizontal.
keras.utils.plot_model(model, show_shapes=True, rankdir="LR")
```

---
## Train the model
```python
model.fit(train_ds, epochs=50, validation_data=val_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 5s 46ms/step - accuracy: 0.3932 - loss: 0.8749 - val_accuracy: 0.3303 - val_loss: 0.7814
Epoch 2/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 1s 7ms/step - accuracy: 0.4262 - loss: 0.8375 - val_accuracy: 0.4914 - val_loss: 0.6980
Epoch 3/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.4835 - loss: 0.7350 - val_accuracy: 0.6541 - val_loss: 0.6320
Epoch 4/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.5932 - loss: 0.6665 - val_accuracy: 0.7543 - val_loss: 0.5743
Epoch 5/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.5861 - loss: 0.6600 - val_accuracy: 0.7683 - val_loss: 0.5360
Epoch 6/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.6489 - loss: 0.6020 - val_accuracy: 0.7748 - val_loss: 0.4998
Epoch 7/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.6880 - loss: 0.5668 - val_accuracy: 0.7699 - val_loss: 0.4800
Epoch 8/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7572 - loss: 0.5009 - val_accuracy: 0.7559 - val_loss: 0.4573
Epoch 9/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7492 - loss: 0.5192 - val_accuracy: 0.8060 - val_loss: 0.4414
Epoch 10/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.7212 - loss: 0.4973 - val_accuracy: 0.8077 - val_loss: 0.4259
Epoch 11/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7616 - loss: 0.4704 - val_accuracy: 0.7904 - val_loss: 0.4143
Epoch 12/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8374 - loss: 0.4342 - val_accuracy: 0.7872 - val_loss: 0.4061
Epoch 13/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7863 - loss: 0.4630 - val_accuracy: 0.7888 - val_loss: 0.3980
Epoch 14/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7742 - loss: 0.4492 - val_accuracy: 0.7996 - val_loss: 0.3998
Epoch 15/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8083 - loss: 0.4280 - val_accuracy: 0.8060 - val_loss: 0.3855
Epoch 16/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8058 - loss: 0.4191 - val_accuracy: 0.8217 - val_loss: 0.3819
Epoch 17/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8071 - loss: 0.4111 - val_accuracy: 0.8389 - val_loss: 0.3763
Epoch 18/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8533 - loss: 0.3676 - val_accuracy: 0.8373 - val_loss: 0.3792
Epoch 19/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8170 - loss: 0.3850 - val_accuracy: 0.8357 - val_loss: 0.3744
Epoch 20/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8207 - loss: 0.3767 - val_accuracy: 0.8168 - val_loss: 0.3759
Epoch 21/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8151 - loss: 0.3596 - val_accuracy: 0.8217 - val_loss: 0.3685
Epoch 22/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.7988 - loss: 0.4087 - val_accuracy: 0.8184 - val_loss: 0.3701
Epoch 23/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8180 - loss: 0.3632 - val_accuracy: 0.8217 - val_loss: 0.3614
Epoch 24/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8295 - loss: 0.3504 - val_accuracy: 0.8200 - val_loss: 0.3683
Epoch 25/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8386 - loss: 0.3864 - val_accuracy: 0.8200 - val_loss: 0.3655
Epoch 26/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8482 - loss: 0.3345 - val_accuracy: 0.8044 - val_loss: 0.3639
Epoch 27/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8340 - loss: 0.3470 - val_accuracy: 0.8077 - val_loss: 0.3616
Epoch 28/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8418 - loss: 0.3684 - val_accuracy: 0.8060 - val_loss: 0.3629
Epoch 29/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8309 - loss: 0.3147 - val_accuracy: 0.8060 - val_loss: 0.3637
Epoch 30/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8722 - loss: 0.3151 - val_accuracy: 0.8044 - val_loss: 0.3672
Epoch 31/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8746 - loss: 0.3043 - val_accuracy: 0.8060 - val_loss: 0.3637
Epoch 32/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8794 - loss: 0.3245 - val_accuracy: 0.8200 - val_loss: 0.3685
Epoch 33/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8644 - loss: 0.3541 - val_accuracy: 0.8357 - val_loss: 0.3714
Epoch 34/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8867 - loss: 0.3007 - val_accuracy: 0.8373 - val_loss: 0.3680
Epoch 35/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8737 - loss: 0.3168 - val_accuracy: 0.8357 - val_loss: 0.3695
Epoch 36/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8191 - loss: 0.3298 - val_accuracy: 0.8357 - val_loss: 0.3736
Epoch 37/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8613 - loss: 0.3543 - val_accuracy: 0.8357 - val_loss: 0.3745
Epoch 38/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8835 - loss: 0.2835 - val_accuracy: 0.8357 - val_loss: 0.3707
Epoch 39/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8784 - loss: 0.2893 - val_accuracy: 0.8357 - val_loss: 0.3716
Epoch 40/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8919 - loss: 0.2587 - val_accuracy: 0.8168 - val_loss: 0.3770
Epoch 41/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8882 - loss: 0.2660 - val_accuracy: 0.8217 - val_loss: 0.3674
Epoch 42/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8790 - loss: 0.2931 - val_accuracy: 0.8200 - val_loss: 0.3723
Epoch 43/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8851 - loss: 0.2892 - val_accuracy: 0.8200 - val_loss: 0.3733
Epoch 44/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8504 - loss: 0.3189 - val_accuracy: 0.8200 - val_loss: 0.3755
Epoch 45/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8610 - loss: 0.3116 - val_accuracy: 0.8184 - val_loss: 0.3788
Epoch 46/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8956 - loss: 0.2544 - val_accuracy: 0.8184 - val_loss: 0.3738
Epoch 47/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.9080 - loss: 0.2895 - val_accuracy: 0.8217 - val_loss: 0.3750
Epoch 48/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8706 - loss: 0.2993 - val_accuracy: 0.8217 - val_loss: 0.3757
Epoch 49/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - accuracy: 0.8724 - loss: 0.2979 - val_accuracy: 0.8184 - val_loss: 0.3781
Epoch 50/50
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - accuracy: 0.8609 - loss: 0.2937 - val_accuracy: 0.8217 - val_loss: 0.3791
<keras.src.callbacks.history.History at 0x7efc32e01780>
```
</div>
We quickly get to 80% validation accuracy.
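If you want to double-check that figure (a quick sketch, not part of the original run), you
can evaluate the trained model on the validation dataset:
```python
# `evaluate()` returns the loss and the metrics passed to `compile()`, here accuracy.
val_loss, val_accuracy = model.evaluate(val_ds)
print(f"Validation accuracy: {val_accuracy:.2%}")
```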
---
## Inference on new data
To get a prediction for a new sample, you can simply call `model.predict()`. There are
just two things you need to do:
1. Wrap scalars into a list so as to have a batch dimension (models only process batches
of data, not single samples)
2. Call `convert_to_tensor` on each feature
```python
sample = {
"age": 60,
"sex": 1,
"cp": 1,
"trestbps": 145,
"chol": 233,
"fbs": 1,
"restecg": 2,
"thalach": 150,
"exang": 0,
"oldpeak": 2.3,
"slope": 3,
"ca": 0,
"thal": "fixed",
}
input_dict = {name: tf.convert_to_tensor([value]) for name, value in sample.items()}
predictions = model.predict(input_dict)
print(
f"This particular patient had a {100 * predictions[0][0]:.1f} "
"percent probability of having a heart disease, "
"as evaluated by our model."
)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 252ms/step
This particular patient had a 27.6 percent probability of having a heart disease, as evaluated by our model.
```
</div> | keras-io/examples/structured_data/md/structured_data_classification_from_scratch.md/0 | {
"file_path": "keras-io/examples/structured_data/md/structured_data_classification_from_scratch.md",
"repo_id": "keras-io",
"token_count": 9241
} | 84 |
# Timeseries forecasting for weather prediction
**Authors:** [Prabhanshu Attri](https://prabhanshu.com/github), [Yashika Sharma](https://github.com/yashika51), [Kristi Takach](https://github.com/ktakattack), [Falak Shah](https://github.com/falaktheoptimist)<br>
**Date created:** 2020/06/23<br>
**Last modified:** 2023/11/22<br>
**Description:** This notebook demonstrates how to do timeseries forecasting using a LSTM model.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/timeseries_weather_forecasting.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/timeseries_weather_forecasting.py)
---
## Setup
```python
import pandas as pd
import matplotlib.pyplot as plt
import keras
```
---
## Climate Data Time-Series
We will be using the Jena Climate dataset recorded by the
[Max Planck Institute for Biogeochemistry](https://www.bgc-jena.mpg.de/wetter/).
The dataset consists of 14 features such as temperature, pressure, and humidity, recorded
once every 10 minutes.
**Location**: Weather Station, Max Planck Institute for Biogeochemistry
in Jena, Germany
**Time-frame Considered**: Jan 10, 2009 - December 31, 2016
The table below shows the column names, their value formats, and their description.
Index| Features |Format |Description
-----|---------------|-------------------|-----------------------
1 |Date Time |01.01.2009 00:10:00|Date-time reference
2    |p (mbar)       |996.52             |Atmospheric pressure. The pascal is the SI derived unit of pressure, but meteorological reports typically state atmospheric pressure in millibars.
3 |T (degC) |-8.02 |Temperature in Celsius
4 |Tpot (K) |265.4 |Temperature in Kelvin
5    |Tdew (degC)    |-8.9               |Dew point temperature in Celsius. The dew point is a measure of the absolute amount of water in the air: it is the temperature at which the air can no longer hold all of its moisture and water condenses.
6    |rh (%)         |93.3               |Relative humidity, a measure of how saturated the air is with water vapor.
7 |VPmax (mbar) |3.33 |Saturation vapor pressure
8 |VPact (mbar) |3.11 |Vapor pressure
9 |VPdef (mbar) |0.22 |Vapor pressure deficit
10 |sh (g/kg) |1.94 |Specific humidity
11 |H2OC (mmol/mol)|3.12 |Water vapor concentration
12   |rho (g/m ** 3) |1307.75            |Air density (labeled "Airtight" in the code below)
13 |wv (m/s) |1.03 |Wind speed
14 |max. wv (m/s) |1.75 |Maximum wind speed
15 |wd (deg) |152.3 |Wind direction in degrees
```python
from zipfile import ZipFile
uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
zip_path = keras.utils.get_file(origin=uri, fname="jena_climate_2009_2016.csv.zip")
zip_file = ZipFile(zip_path)
zip_file.extractall()
csv_path = "jena_climate_2009_2016.csv"
df = pd.read_csv(csv_path)
```
---
## Raw Data Visualization
To give us a sense of the data we are working with, each feature has been plotted below.
This shows the distinct pattern of each feature over the time period from 2009 to 2016.
It also shows where anomalies are present, which will be addressed during normalization.
```python
titles = [
"Pressure",
"Temperature",
"Temperature in Kelvin",
"Temperature (dew point)",
"Relative Humidity",
"Saturation vapor pressure",
"Vapor pressure",
"Vapor pressure deficit",
"Specific humidity",
"Water vapor concentration",
"Airtight",
"Wind speed",
"Maximum wind speed",
"Wind direction in degrees",
]
feature_keys = [
"p (mbar)",
"T (degC)",
"Tpot (K)",
"Tdew (degC)",
"rh (%)",
"VPmax (mbar)",
"VPact (mbar)",
"VPdef (mbar)",
"sh (g/kg)",
"H2OC (mmol/mol)",
"rho (g/m**3)",
"wv (m/s)",
"max. wv (m/s)",
"wd (deg)",
]
colors = [
"blue",
"orange",
"green",
"red",
"purple",
"brown",
"pink",
"gray",
"olive",
"cyan",
]
date_time_key = "Date Time"
def show_raw_visualization(data):
time_data = data[date_time_key]
fig, axes = plt.subplots(
nrows=7, ncols=2, figsize=(15, 20), dpi=80, facecolor="w", edgecolor="k"
)
for i in range(len(feature_keys)):
key = feature_keys[i]
c = colors[i % (len(colors))]
t_data = data[key]
t_data.index = time_data
t_data.head()
ax = t_data.plot(
ax=axes[i // 2, i % 2],
color=c,
title="{} - {}".format(titles[i], key),
rot=25,
)
ax.legend([titles[i]])
plt.tight_layout()
show_raw_visualization(df)
```

---
## Data Preprocessing
Here we are picking ~300,000 data points for training. Observations are recorded every
10 minutes, i.e. 6 times per hour. We will resample to one point per hour, since no
drastic change is expected within 60 minutes. We do this via the `sampling_rate`
argument of the `timeseries_dataset_from_array` utility.
We are tracking data from the past 720 timestamps (720 / 6 = 120 hours). This data will be
used to predict the temperature 72 timestamps (72 / 6 = 12 hours) into the future.
Since every feature has values in a different range, we normalize the features before
training a neural network: we subtract the mean and divide by the standard deviation of each
feature, computed on the training split, so that each feature has roughly zero mean and unit
variance.
71.5% of the data will be used to train the model, i.e. 300,693 rows. `split_fraction` can
be changed to alter this percentage.
The model is shown data from the first 5 days, i.e. a window of 720 observations that is
sampled at one point per hour. The temperature 72 observations (12 hours * 6 observations
per hour) later will be used as the label.
```python
split_fraction = 0.715
train_split = int(split_fraction * int(df.shape[0]))
step = 6
past = 720
future = 72
learning_rate = 0.001
batch_size = 256
epochs = 10
def normalize(data, train_split):
data_mean = data[:train_split].mean(axis=0)
data_std = data[:train_split].std(axis=0)
return (data - data_mean) / data_std
```
Looking at the correlations between features, we can see that some of them, like Relative
Humidity and Specific Humidity, are redundant. Hence we will be using a subset of the
features, not all of them.
```python
print(
"The selected parameters are:",
", ".join([titles[i] for i in [0, 1, 5, 7, 8, 10, 11]]),
)
selected_features = [feature_keys[i] for i in [0, 1, 5, 7, 8, 10, 11]]
features = df[selected_features]
features.index = df[date_time_key]
features.head()
features = normalize(features.values, train_split)
features = pd.DataFrame(features)
features.head()
train_data = features.loc[0 : train_split - 1]
val_data = features.loc[train_split:]
```
<div class="k-default-codeblock">
```
The selected parameters are: Pressure, Temperature, Saturation vapor pressure, Vapor pressure deficit, Specific humidity, Airtight, Wind speed
```
</div>
---
## Training dataset
The training dataset labels start from the 792nd observation (720 + 72).
```python
start = past + future
end = start + train_split
x_train = train_data[[i for i in range(7)]].values
y_train = features.iloc[start:end][[1]]
sequence_length = int(past / step)
```
The `timeseries_dataset_from_array` function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as length of the
sequences/windows, spacing between two sequences/windows, etc., to produce batches of
sub-timeseries inputs and targets sampled from the main timeseries.
```python
dataset_train = keras.preprocessing.timeseries_dataset_from_array(
x_train,
y_train,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
```
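To build intuition for how `sequence_length` and `sampling_rate` interact (a minimal sketch
with a made-up toy series, not part of the original example), note that each window gathers
`sequence_length` points spaced `sampling_rate` steps apart:
```python
# Hypothetical toy series of 20 steps: windows of 4 points, keeping every 2nd observation.
import numpy as np

toy_series = np.arange(20).reshape(-1, 1)
toy_ds = keras.preprocessing.timeseries_dataset_from_array(
    toy_series, targets=None, sequence_length=4, sampling_rate=2, batch_size=1
)
for window in toy_ds.take(2):
    print(window.numpy().squeeze())  # first windows cover indices [0, 2, 4, 6] and [1, 3, 5, 7]
```
This mirrors the real setup above, where `past=720` raw timestamps sampled with `step=6`
yield input windows of `720 / 6 = 120` hourly observations.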
---
## Validation dataset
The validation dataset must not contain the last 792 rows, as we won't have label data for
those records; hence 792 must be subtracted from the end of the data.
The validation labels must start 792 rows after `train_split`, hence we must add
`past + future` (792) to `label_start`.
```python
x_end = len(val_data) - past - future
label_start = train_split + past + future
x_val = val_data.iloc[:x_end][[i for i in range(7)]].values
y_val = features.iloc[label_start:][[1]]
dataset_val = keras.preprocessing.timeseries_dataset_from_array(
x_val,
y_val,
sequence_length=sequence_length,
sampling_rate=step,
batch_size=batch_size,
)
for batch in dataset_train.take(1):
inputs, targets = batch
print("Input shape:", inputs.numpy().shape)
print("Target shape:", targets.numpy().shape)
```
<div class="k-default-codeblock">
```
Input shape: (256, 120, 7)
Target shape: (256, 1)
```
</div>
---
## Training
```python
inputs = keras.layers.Input(shape=(inputs.shape[1], inputs.shape[2]))
lstm_out = keras.layers.LSTM(32)(inputs)
outputs = keras.layers.Dense(1)(lstm_out)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse")
model.summary()
```
<div class="k-default-codeblock">
```
CUDA backend failed to initialize: Found cuSOLVER version 11405, but JAX was built against version 11502, which is newer. The copy of cuSOLVER that is installed must be at least as new as the version against which JAX was built. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">120</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ lstm (<span style="color: #0087ff; text-decoration-color: #0087ff">LSTM</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5,120</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">33</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">5,153</span> (20.13 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">5,153</span> (20.13 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
We'll use the `ModelCheckpoint` callback to regularly save checkpoints, and
the `EarlyStopping` callback to interrupt training when the validation loss
is no longer improving.
```python
path_checkpoint = "model_checkpoint.weights.h5"
es_callback = keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=5)
modelckpt_callback = keras.callbacks.ModelCheckpoint(
monitor="val_loss",
filepath=path_checkpoint,
verbose=1,
save_weights_only=True,
save_best_only=True,
)
history = model.fit(
dataset_train,
epochs=epochs,
validation_data=dataset_val,
callbacks=[es_callback, modelckpt_callback],
)
```
<div class="k-default-codeblock">
```
Epoch 1/10
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 0s 70ms/step - loss: 0.3008
Epoch 1: val_loss improved from inf to 0.15039, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 104s 88ms/step - loss: 0.3007 - val_loss: 0.1504
Epoch 2/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 66ms/step - loss: 0.1397
Epoch 2: val_loss improved from 0.15039 to 0.14231, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 97s 83ms/step - loss: 0.1396 - val_loss: 0.1423
Epoch 3/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 69ms/step - loss: 0.1242
Epoch 3: val_loss did not improve from 0.14231
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 101s 86ms/step - loss: 0.1242 - val_loss: 0.1513
Epoch 4/10
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 0s 68ms/step - loss: 0.1182
Epoch 4: val_loss did not improve from 0.14231
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 102s 87ms/step - loss: 0.1182 - val_loss: 0.1503
Epoch 5/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 67ms/step - loss: 0.1160
Epoch 5: val_loss did not improve from 0.14231
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 100s 85ms/step - loss: 0.1160 - val_loss: 0.1500
Epoch 6/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 69ms/step - loss: 0.1130
Epoch 6: val_loss did not improve from 0.14231
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 100s 86ms/step - loss: 0.1130 - val_loss: 0.1469
Epoch 7/10
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 0s 70ms/step - loss: 0.1106
Epoch 7: val_loss improved from 0.14231 to 0.13916, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 104s 89ms/step - loss: 0.1106 - val_loss: 0.1392
Epoch 8/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 66ms/step - loss: 0.1097
Epoch 8: val_loss improved from 0.13916 to 0.13257, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 98s 84ms/step - loss: 0.1097 - val_loss: 0.1326
Epoch 9/10
1171/1172 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 68ms/step - loss: 0.1075
Epoch 9: val_loss improved from 0.13257 to 0.13057, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 100s 85ms/step - loss: 0.1075 - val_loss: 0.1306
Epoch 10/10
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 0s 66ms/step - loss: 0.1065
Epoch 10: val_loss improved from 0.13057 to 0.12671, saving model to model_checkpoint.weights.h5
1172/1172 ━━━━━━━━━━━━━━━━━━━━ 98s 84ms/step - loss: 0.1065 - val_loss: 0.1267
```
</div>
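Since `save_best_only=True`, the checkpoint file holds the weights from the best epoch
(lowest validation loss) rather than the last one. If you want to run the predictions below
with that best checkpoint, you could reload it first (a short sketch, not part of the
original run):
```python
# Restore the best weights saved by the ModelCheckpoint callback.
model.load_weights(path_checkpoint)
```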
We can visualize the loss with the function below. After a certain point, the loss stops
decreasing.
```python
def visualize_loss(history, title):
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, "b", label="Training loss")
plt.plot(epochs, val_loss, "r", label="Validation loss")
plt.title(title)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
visualize_loss(history, "Training and Validation Loss")
```

---
## Prediction
The trained model above is now able to make predictions for 5 sets of values from
the validation set.
```python
def show_plot(plot_data, delta, title):
labels = ["History", "True Future", "Model Prediction"]
marker = [".-", "rx", "go"]
time_steps = list(range(-(plot_data[0].shape[0]), 0))
if delta:
future = delta
else:
future = 0
plt.title(title)
for i, val in enumerate(plot_data):
if i:
plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i])
else:
plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
plt.legend()
plt.xlim([time_steps[0], (future + 5) * 2])
plt.xlabel("Time-Step")
plt.show()
return
for x, y in dataset_val.take(5):
show_plot(
[x[0][:, 1].numpy(), y[0].numpy(), model.predict(x)[0]],
12,
"Single Step Prediction",
)
```
<div class="k-default-codeblock">
```
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step
```
</div>

<div class="k-default-codeblock">
```
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step
```
</div>

<div class="k-default-codeblock">
```
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step
```
</div>

<div class="k-default-codeblock">
```
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step
```
</div>

<div class="k-default-codeblock">
```
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step
```
</div>

| keras-io/examples/timeseries/md/timeseries_weather_forecasting.md/0 | {
"file_path": "keras-io/examples/timeseries/md/timeseries_weather_forecasting.md",
"repo_id": "keras-io",
"token_count": 7605
} | 85 |
<jupyter_start><jupyter_text>Highly accurate boundaries segmentation using BASNet**Author:** [Hamid Ali](https://github.com/hamidriasat)**Date created:** 2023/05/30**Last modified:** 2023/07/13**Description:** Boundaries aware segmentation model trained on the DUTS dataset. IntroductionDeep semantic segmentation algorithms have improved a lot recently, but still fails to correctlypredict pixels around object boundaries. In this example we implement**Boundary-Aware Segmentation Network (BASNet)**, using two stage predict and refinearchitecture, and a hybrid loss it can predict highly accurate boundaries and fine structuresfor image segmentation. References:- [Boundary-Aware Segmentation Network for Mobile and Web Applications](https://arxiv.org/abs/2101.04704)- [BASNet Keras Implementation](https://github.com/hamidriasat/BASNet/tree/basnet_keras)- [Learning to Detect Salient Objects with Image-level Supervision](https://openaccess.thecvf.com/content_cvpr_2017/html/Wang_Learning_to_Detect_CVPR_2017_paper.html) Download the DataWe will use the [DUTS-TE](http://saliencydetection.net/duts/) dataset for training. It has 5,019images but we will use 140 for training and validation to save notebook running time. DUTS isrelatively large salient object segmentation dataset. which contain diversified textures andstructures common to real-world images in both foreground and background.<jupyter_code>!wget http://saliencydetection.net/duts/download/DUTS-TE.zip
!unzip -q DUTS-TE.zip
import os
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import keras_cv
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, backend<jupyter_output><empty_output><jupyter_text>Define Hyperparameters<jupyter_code>IMAGE_SIZE = 288
BATCH_SIZE = 4
OUT_CLASSES = 1
TRAIN_SPLIT_RATIO = 0.90
DATA_DIR = "./DUTS-TE/"<jupyter_output><empty_output><jupyter_text>Create TensorFlow DatasetWe will use `load_paths()` to load and split 140 paths into train and validation set, and`load_dataset()` to convert paths into `tf.data.Dataset` object.<jupyter_code>def load_paths(path, split_ratio):
images = sorted(glob(os.path.join(path, "DUTS-TE-Image/*")))[:140]
masks = sorted(glob(os.path.join(path, "DUTS-TE-Mask/*")))[:140]
len_ = int(len(images) * split_ratio)
return (images[:len_], masks[:len_]), (images[len_:], masks[len_:])
def read_image(path, size, mode):
x = keras.utils.load_img(path, target_size=size, color_mode=mode)
x = keras.utils.img_to_array(x)
x = (x / 255.0).astype(np.float32)
return x
def preprocess(x_batch, y_batch, img_size, out_classes):
def f(_x, _y):
_x, _y = _x.decode(), _y.decode()
_x = read_image(_x, (img_size, img_size), mode="rgb") # image
_y = read_image(_y, (img_size, img_size), mode="grayscale") # mask
return _x, _y
images, masks = tf.numpy_function(f, [x_batch, y_batch], [tf.float32, tf.float32])
images.set_shape([img_size, img_size, 3])
masks.set_shape([img_size, img_size, out_classes])
return images, masks
def load_dataset(image_paths, mask_paths, img_size, out_classes, batch, shuffle=True):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, mask_paths))
if shuffle:
dataset = dataset.cache().shuffle(buffer_size=1000)
dataset = dataset.map(
lambda x, y: preprocess(x, y, img_size, out_classes),
num_parallel_calls=tf.data.AUTOTUNE,
)
dataset = dataset.batch(batch)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
train_paths, val_paths = load_paths(DATA_DIR, TRAIN_SPLIT_RATIO)
train_dataset = load_dataset(
train_paths[0], train_paths[1], IMAGE_SIZE, OUT_CLASSES, BATCH_SIZE, shuffle=True
)
val_dataset = load_dataset(
val_paths[0], val_paths[1], IMAGE_SIZE, OUT_CLASSES, BATCH_SIZE, shuffle=False
)
print(f"Train Dataset: {train_dataset}")
print(f"Validation Dataset: {val_dataset}")<jupyter_output><empty_output><jupyter_text>Visualize Data<jupyter_code>def display(display_list):
title = ["Input Image", "True Mask", "Predicted Mask"]
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(keras.utils.array_to_img(display_list[i]), cmap="gray")
plt.axis("off")
plt.show()
for image, mask in val_dataset.take(1):
    display([image[0], mask[0]])<jupyter_output><empty_output><jupyter_text>Analyze Mask Let's print the unique values of the mask displayed above. You can see that, despite belonging to one class, its intensity varies between low (0) and high (255). This variation in intensity makes it hard for the network to generate a good segmentation map for **salient or camouflaged object segmentation**. Because of its Residual Refinement Modules (RMs), BASNet is good at generating highly accurate boundaries and fine structures.<jupyter_code>print(f"Unique values count: {len(np.unique((mask[0] * 255)))}")
print("Unique values:")
print(np.unique((mask[0] * 255)).astype(int))<jupyter_output><empty_output><jupyter_text>Building the BASNet ModelBASNet comprises of a predict-refine architecture and a hybrid loss. The predict-refinearchitecture consists of a densely supervised encoder-decoder network and a residual refinementmodule, which are respectively used to predict and refine a segmentation probability map.<jupyter_code>def basic_block(x_input, filters, stride=1, down_sample=None, activation=None):
"""Creates a residual(identity) block with two 3*3 convolutions."""
residual = x_input
x = layers.Conv2D(filters, (3, 3), strides=stride, padding="same", use_bias=False)(
x_input
)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2D(filters, (3, 3), strides=(1, 1), padding="same", use_bias=False)(
x
)
x = layers.BatchNormalization()(x)
if down_sample is not None:
residual = down_sample
x = layers.Add()([x, residual])
if activation is not None:
x = layers.Activation(activation)(x)
return x
def convolution_block(x_input, filters, dilation=1):
"""Apply convolution + batch normalization + relu layer."""
x = layers.Conv2D(filters, (3, 3), padding="same", dilation_rate=dilation)(x_input)
x = layers.BatchNormalization()(x)
return layers.Activation("relu")(x)
def segmentation_head(x_input, out_classes, final_size):
"""Map each decoder stage output to model output classes."""
x = layers.Conv2D(out_classes, kernel_size=(3, 3), padding="same")(x_input)
if final_size is not None:
x = layers.Resizing(final_size[0], final_size[1])(x)
return x
def get_resnet_block(_resnet, block_num):
"""Extract and return ResNet-34 block."""
resnet_layers = [3, 4, 6, 3] # ResNet-34 layer sizes at different block.
return keras.models.Model(
inputs=_resnet.get_layer(f"v2_stack_{block_num}_block1_1_conv").input,
outputs=_resnet.get_layer(
f"v2_stack_{block_num}_block{resnet_layers[block_num]}_add"
).output,
name=f"resnet34_block{block_num + 1}",
)<jupyter_output><empty_output><jupyter_text>Prediction ModulePrediction module is a heavy encoder decoder structure like U-Net. The encoder includes an inputconvolutional layer and six stages. First four are adopted from ResNet-34 and rest are basicres-blocks. Since first convolution and pooling layer of ResNet-34 is skipped so we will use`get_resnet_block()` to extract first four blocks. Both bridge and decoder uses threeconvolutional layers with side outputs. The module produces seven segmentation probabilitymaps during training, with the last one considered the final output.<jupyter_code>def basnet_predict(input_shape, out_classes):
"""BASNet Prediction Module, it outputs coarse label map."""
filters = 64
num_stages = 6
x_input = layers.Input(input_shape)
# -------------Encoder--------------
x = layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x_input)
resnet = keras_cv.models.ResNet34Backbone(
include_rescaling=False,
)
encoder_blocks = []
for i in range(num_stages):
if i < 4: # First four stages are adopted from ResNet-34 blocks.
x = get_resnet_block(resnet, i)(x)
encoder_blocks.append(x)
x = layers.Activation("relu")(x)
else: # Last 2 stages consist of three basic resnet blocks.
x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
x = basic_block(x, filters=filters * 8, activation="relu")
x = basic_block(x, filters=filters * 8, activation="relu")
x = basic_block(x, filters=filters * 8, activation="relu")
encoder_blocks.append(x)
# -------------Bridge-------------
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
encoder_blocks.append(x)
# -------------Decoder-------------
decoder_blocks = []
for i in reversed(range(num_stages)):
if i != (num_stages - 1): # Except first, scale other decoder stages.
shape = keras.backend.int_shape(x)
x = layers.Resizing(shape[1] * 2, shape[2] * 2)(x)
x = layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
decoder_blocks.append(x)
decoder_blocks.reverse() # Change order from last to first decoder stage.
decoder_blocks.append(encoder_blocks[-1]) # Copy bridge to decoder.
# -------------Side Outputs--------------
decoder_blocks = [
segmentation_head(decoder_block, out_classes, input_shape[:2])
for decoder_block in decoder_blocks
]
    return keras.models.Model(inputs=[x_input], outputs=decoder_blocks)<jupyter_output><empty_output><jupyter_text>Residual Refinement Module Refinement Modules (RMs), designed as residual blocks, aim to refine the coarse (blurry and noisy boundaries) segmentation maps generated by the prediction module. Similar to the prediction module, it is also an encoder-decoder structure, but a lightweight one with 4 stages, each containing one `convolution_block()`. At the end it adds the coarse and residual outputs to generate the refined output.<jupyter_code>def basnet_rrm(base_model, out_classes):
"""BASNet Residual Refinement Module(RRM) module, output fine label map."""
num_stages = 4
filters = 64
x_input = base_model.output[0]
# -------------Encoder--------------
x = layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x_input)
encoder_blocks = []
for _ in range(num_stages):
x = convolution_block(x, filters=filters)
encoder_blocks.append(x)
x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
# -------------Bridge--------------
x = convolution_block(x, filters=filters)
# -------------Decoder--------------
for i in reversed(range(num_stages)):
shape = keras.backend.int_shape(x)
x = layers.Resizing(shape[1] * 2, shape[2] * 2)(x)
x = layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters)
x = segmentation_head(x, out_classes, None) # Segmentation head.
# ------------- refined = coarse + residual
x = layers.Add()([x_input, x]) # Add prediction + refinement output
return keras.models.Model(inputs=[base_model.input], outputs=[x])<jupyter_output><empty_output><jupyter_text>Combine Predict and Refinement Module<jupyter_code>def basnet(input_shape, out_classes):
"""BASNet, it's a combination of two modules
Prediction Module and Residual Refinement Module(RRM)."""
# Prediction model.
predict_model = basnet_predict(input_shape, out_classes)
# Refinement model.
refine_model = basnet_rrm(predict_model, out_classes)
output = [refine_model.output] # Combine outputs.
output.extend(predict_model.output)
output = [layers.Activation("sigmoid")(_) for _ in output] # Activations.
return keras.models.Model(inputs=[predict_model.input], outputs=output)<jupyter_output><empty_output><jupyter_text>Hybrid LossAnother important feature of BASNet is its hybrid loss function, which is a combination ofbinary cross entropy, structural similarity and intersection-over-union losses, which guidethe network to learn three-level (i.e., pixel, patch and map level) hierarchy representations.<jupyter_code>class BasnetLoss(keras.losses.Loss):
"""BASNet hybrid loss."""
def __init__(self, **kwargs):
super().__init__(name="basnet_loss", **kwargs)
self.smooth = 1.0e-9
# Binary Cross Entropy loss.
self.cross_entropy_loss = keras.losses.BinaryCrossentropy()
# Structural Similarity Index value.
self.ssim_value = tf.image.ssim
# Jaccard / IoU loss.
self.iou_value = self.calculate_iou
def calculate_iou(
self,
y_true,
y_pred,
):
"""Calculate intersection over union (IoU) between images."""
intersection = backend.sum(backend.abs(y_true * y_pred), axis=[1, 2, 3])
union = backend.sum(y_true, [1, 2, 3]) + backend.sum(y_pred, [1, 2, 3])
union = union - intersection
return backend.mean(
(intersection + self.smooth) / (union + self.smooth), axis=0
)
def call(self, y_true, y_pred):
cross_entropy_loss = self.cross_entropy_loss(y_true, y_pred)
ssim_value = self.ssim_value(y_true, y_pred, max_val=1)
ssim_loss = backend.mean(1 - ssim_value + self.smooth, axis=0)
iou_value = self.iou_value(y_true, y_pred)
iou_loss = 1 - iou_value
# Add all three losses.
return cross_entropy_loss + ssim_loss + iou_loss
basnet_model = basnet(
input_shape=[IMAGE_SIZE, IMAGE_SIZE, 3], out_classes=OUT_CLASSES
) # Create model.
basnet_model.summary() # Show model summary.
optimizer = keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-8)
# Compile model.
basnet_model.compile(
loss=BasnetLoss(),
optimizer=optimizer,
metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
)<jupyter_output><empty_output><jupyter_text>Train the Model<jupyter_code>basnet_model.fit(train_dataset, validation_data=val_dataset, epochs=1)<jupyter_output><empty_output><jupyter_text>Visualize PredictionsIn paper BASNet was trained on DUTS-TR dataset, which has 10553 images. Model was trained for 400kiterations with a batch size of eight and without a validation dataset. After training model wasevaluated on DUTS-TE dataset and achieved a mean absolute error of `0.042`.Since BASNet is a deep model and cannot be trained in a short amount of time which is arequirement for keras example notebook, so we will load pretrained weights from [here](https://github.com/hamidriasat/BASNet/tree/basnet_keras)to show model prediction. Due to computer power limitation this model was trained for 120kiterations but it still demonstrates its capabilities. For further details abouttrainings parameters please check given link.<jupyter_code>!!gdown 1OWKouuAQ7XpXZbWA3mmxDPrFGW71Axrg
def normalize_output(prediction):
max_value = np.max(prediction)
min_value = np.min(prediction)
return (prediction - min_value) / (max_value - min_value)
# Load weights.
basnet_model.load_weights("./basnet_weights.h5")<jupyter_output><empty_output><jupyter_text>Make Predictions<jupyter_code>for image, mask in val_dataset.take(1):
pred_mask = basnet_model.predict(image)
display([image[0], mask[0], normalize_output(pred_mask[0][0])])<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/basnet_segmentation.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/basnet_segmentation.ipynb",
"repo_id": "keras-io",
"token_count": 5868
} | 86 |
<jupyter_start><jupyter_text>Using the Forward-Forward Algorithm for Image Classification**Author:** [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)**Date created:** 2023/01/08**Last modified:** 2023/01/08**Description:** Training a Dense-layer model using the Forward-Forward algorithm. IntroductionThe following example explores how to use the Forward-Forward algorithm to performtraining instead of the traditionally-used method of backpropagation, as proposed byHinton in[The Forward-Forward Algorithm: Some Preliminary Investigations](https://www.cs.toronto.edu/~hinton/FFA13.pdf)(2022).The concept was inspired by the understanding behind[Boltzmann Machines](http://www.cs.toronto.edu/~fritz/absps/dbm.pdf). Backpropagationinvolves calculating the difference between actual and predicted output via a costfunction to adjust network weights. On the other hand, the FF Algorithm suggests theanalogy of neurons which get "excited" based on looking at a certain recognizedcombination of an image and its correct corresponding label.This method takes certain inspiration from the biological learning process that occurs inthe cortex. A significant advantage that this method brings is the fact thatbackpropagation through the network does not need to be performed anymore, and thatweight updates are local to the layer itself.As this is yet still an experimental method, it does not yield state-of-the-art results.But with proper tuning, it is supposed to come close to the same.Through this example, we will examine a process that allows us to implement theForward-Forward algorithm within the layers themselves, instead of the traditional methodof relying on the global loss functions and optimizers.The tutorial is structured as follows:- Perform necessary imports- Load the [MNIST dataset](http://yann.lecun.com/exdb/mnist/)- Visualize Random samples from the MNIST dataset- Define a `FFDense` Layer to override `call` and implement a custom `forwardforward`method which performs weight updates.- Define a `FFNetwork` Layer to override `train_step`, `predict` and implement 2 customfunctions for per-sample prediction and overlaying labels- Convert MNIST from `NumPy` arrays to `tf.data.Dataset`- Fit the network- Visualize results- Perform inference on test samplesAs this example requires the customization of certain core functions with`keras.layers.Layer` and `keras.models.Model`, refer to the following resources fora primer on how to do so:- [Customizing what happens in `model.fit()`](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)- [Making new Layers and Models via subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models) Setup imports<jupyter_code>import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import random
from tensorflow.compiler.tf2xla.python import xla<jupyter_output><empty_output><jupyter_text>Load the dataset and visualize the dataWe use the `keras.datasets.mnist.load_data()` utility to directly pull the MNIST datasetin the form of `NumPy` arrays. We then arrange it in the form of the train and testsplits.Following loading the dataset, we select 4 random samples from within the training setand visualize them using `matplotlib.pyplot`.<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
print("4 Random Training samples and labels")
idx1, idx2, idx3, idx4 = random.sample(range(0, x_train.shape[0]), 4)
img1 = (x_train[idx1], y_train[idx1])
img2 = (x_train[idx2], y_train[idx2])
img3 = (x_train[idx3], y_train[idx3])
img4 = (x_train[idx4], y_train[idx4])
imgs = [img1, img2, img3, img4]
plt.figure(figsize=(10, 10))
for idx, item in enumerate(imgs):
image, label = item[0], item[1]
plt.subplot(2, 2, idx + 1)
plt.imshow(image, cmap="gray")
plt.title(f"Label : {label}")
plt.show()<jupyter_output><empty_output><jupyter_text>Define `FFDense` custom layerIn this custom layer, we have a base `keras.layers.Dense` object which acts as thebase `Dense` layer within. Since weight updates will happen within the layer itself, weadd an `keras.optimizers.Optimizer` object that is accepted from the user. Here, weuse `Adam` as our optimizer with a rather higher learning rate of `0.03`.Following the algorithm's specifics, we must set a `threshold` parameter that will beused to make the positive-negative decision in each prediction. This is set to a defaultof 2.0.As the epochs are localized to the layer itself, we also set a `num_epochs` parameter(defaults to 50).We override the `call` method in order to perform a normalization over the completeinput space followed by running it through the base `Dense` layer as would happen in anormal `Dense` layer call.We implement the Forward-Forward algorithm which accepts 2 kinds of input tensors, eachrepresenting the positive and negative samples respectively. We write a custom trainingloop here with the use of `tf.GradientTape()`, within which we calculate a loss persample by taking the distance of the prediction from the threshold to understand theerror and taking its mean to get a `mean_loss` metric.With the help of `tf.GradientTape()` we calculate the gradient updates for the trainablebase `Dense` layer and apply them using the layer's local optimizer.Finally, we return the `call` result as the `Dense` results of the positive and negativesamples while also returning the last `mean_loss` metric and all the loss values over acertain all-epoch run.<jupyter_code>class FFDense(keras.layers.Layer):
"""
A custom ForwardForward-enabled Dense layer. It has an implementation of the
Forward-Forward network internally for use.
This layer must be used in conjunction with the `FFNetwork` model.
"""
def __init__(
self,
units,
optimizer,
loss_metric,
num_epochs=50,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs,
):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=units,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.relu = keras.layers.ReLU()
self.optimizer = optimizer
self.loss_metric = loss_metric
self.threshold = 1.5
self.num_epochs = num_epochs
# We perform a normalization step before we run the input through the Dense
# layer.
def call(self, x):
x_norm = tf.norm(x, ord=2, axis=1, keepdims=True)
x_norm = x_norm + 1e-4
x_dir = x / x_norm
res = self.dense(x_dir)
return self.relu(res)
# The Forward-Forward algorithm is below. We first perform the Dense-layer
# operation and then get a Mean Square value for all positive and negative
# samples respectively.
# The custom loss function finds the distance between the Mean-squared
# result and the threshold value we set (a hyperparameter) that will define
# whether the prediction is positive or negative in nature. Once the loss is
# calculated, we get a mean across the entire batch combined and perform a
# gradient calculation and optimization step. This does not technically
# qualify as backpropagation since there is no gradient being
# sent to any previous layer and is completely local in nature.
def forward_forward(self, x_pos, x_neg):
for i in range(self.num_epochs):
with tf.GradientTape() as tape:
g_pos = tf.math.reduce_mean(tf.math.pow(self.call(x_pos), 2), 1)
g_neg = tf.math.reduce_mean(tf.math.pow(self.call(x_neg), 2), 1)
loss = tf.math.log(
1
+ tf.math.exp(
tf.concat([-g_pos + self.threshold, g_neg - self.threshold], 0)
)
)
mean_loss = tf.cast(tf.math.reduce_mean(loss), tf.float32)
self.loss_metric.update_state([mean_loss])
gradients = tape.gradient(mean_loss, self.dense.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.dense.trainable_weights))
return (
tf.stop_gradient(self.call(x_pos)),
tf.stop_gradient(self.call(x_neg)),
self.loss_metric.result(),
)<jupyter_output><empty_output><jupyter_text>Define the `FFNetwork` Custom ModelWith our custom layer defined, we also need to override the `train_step` method anddefine a custom `keras.models.Model` that works with our `FFDense` layer.For this algorithm, we must 'embed' the labels onto the original image. To do so, weexploit the structure of MNIST images where the top-left 10 pixels are always zeros. Weuse that as a label space in order to visually one-hot-encode the labels within the imageitself. This action is performed by the `overlay_y_on_x` function.We break down the prediction function with a per-sample prediction function which is thencalled over the entire test set by the overriden `predict()` function. The prediction isperformed here with the help of measuring the `excitation` of the neurons per layer foreach image. This is then summed over all layers to calculate a network-wide 'goodnessscore'. The label with the highest 'goodness score' is then chosen as the sampleprediction.The `train_step` function is overriden to act as the main controlling loop for runningtraining on each layer as per the number of epochs per layer.<jupyter_code>class FFNetwork(keras.Model):
"""
A `keras.Model` that supports a `FFDense` network creation. This model
can work for any kind of classification task. It has an internal
implementation with some details specific to the MNIST dataset which can be
changed as per the use-case.
"""
# Since each layer runs gradient-calculation and optimization locally, each
# layer has its own optimizer that we pass. As a standard choice, we pass
# the `Adam` optimizer with a default learning rate of 0.03 as that was
# found to be the best rate after experimentation.
# Loss is tracked using `loss_var` and `loss_count` variables.
# Use legacy optimizer for Layer Optimizer to fix issue
# https://github.com/keras-team/keras-io/issues/1241
def __init__(
self,
dims,
layer_optimizer=keras.optimizers.legacy.Adam(learning_rate=0.03),
**kwargs,
):
super().__init__(**kwargs)
self.layer_optimizer = layer_optimizer
self.loss_var = tf.Variable(0.0, trainable=False, dtype=tf.float32)
self.loss_count = tf.Variable(0.0, trainable=False, dtype=tf.float32)
self.layer_list = [keras.Input(shape=(dims[0],))]
for d in range(len(dims) - 1):
self.layer_list += [
FFDense(
dims[d + 1],
optimizer=self.layer_optimizer,
loss_metric=keras.metrics.Mean(),
)
]
# This function makes a dynamic change to the image wherein the labels are
# put on top of the original image (for this example, as MNIST has 10
# unique labels, we take the top-left corner's first 10 pixels). This
# function returns the original data tensor with the first 10 pixels being
# a pixel-based one-hot representation of the labels.
@tf.function(reduce_retracing=True)
def overlay_y_on_x(self, data):
X_sample, y_sample = data
max_sample = tf.reduce_max(X_sample, axis=0, keepdims=True)
max_sample = tf.cast(max_sample, dtype=tf.float64)
X_zeros = tf.zeros([10], dtype=tf.float64)
X_update = xla.dynamic_update_slice(X_zeros, max_sample, [y_sample])
X_sample = xla.dynamic_update_slice(X_sample, X_update, [0])
return X_sample, y_sample
# A custom `predict_one_sample` performs predictions by passing the images
# through the network, measures the results produced by each layer (i.e.
# how high/low the output values are with respect to the set threshold for
# each label) and then simply finding the label with the highest values.
# In such a case, the images are tested for their 'goodness' with all
# labels.
@tf.function(reduce_retracing=True)
def predict_one_sample(self, x):
goodness_per_label = []
x = tf.reshape(x, [tf.shape(x)[0] * tf.shape(x)[1]])
for label in range(10):
h, label = self.overlay_y_on_x(data=(x, label))
h = tf.reshape(h, [-1, tf.shape(h)[0]])
goodness = []
for layer_idx in range(1, len(self.layer_list)):
layer = self.layer_list[layer_idx]
h = layer(h)
goodness += [tf.math.reduce_mean(tf.math.pow(h, 2), 1)]
goodness_per_label += [
tf.expand_dims(tf.reduce_sum(goodness, keepdims=True), 1)
]
goodness_per_label = tf.concat(goodness_per_label, 1)
return tf.cast(tf.argmax(goodness_per_label, 1), tf.float64)
def predict(self, data):
x = data
        preds = tf.map_fn(fn=self.predict_one_sample, elems=x)
return np.asarray(preds, dtype=int)
# This custom `train_step` function overrides the internal `train_step`
# implementation. We take all the input image tensors, flatten them and
# subsequently produce positive and negative samples on the images.
# A positive sample is an image that has the right label encoded on it with
# the `overlay_y_on_x` function. A negative sample is an image that has an
# erroneous label present on it.
# With the samples ready, we pass them through each `FFLayer` and perform
# the Forward-Forward computation on it. The returned loss is the final
# loss value over all the layers.
@tf.function(jit_compile=True)
def train_step(self, data):
x, y = data
# Flatten op
x = tf.reshape(x, [-1, tf.shape(x)[1] * tf.shape(x)[2]])
x_pos, y = tf.map_fn(fn=self.overlay_y_on_x, elems=(x, y))
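        # Negative samples: shuffle the labels so that most images get a wrong
        # label overlaid on them.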
random_y = tf.random.shuffle(y)
x_neg, y = tf.map_fn(fn=self.overlay_y_on_x, elems=(x, random_y))
h_pos, h_neg = x_pos, x_neg
for idx, layer in enumerate(self.layers):
if isinstance(layer, FFDense):
print(f"Training layer {idx+1} now : ")
h_pos, h_neg, loss = layer.forward_forward(h_pos, h_neg)
self.loss_var.assign_add(loss)
self.loss_count.assign_add(1.0)
else:
print(f"Passing layer {idx+1} now : ")
x = layer(x)
mean_res = tf.math.divide(self.loss_var, self.loss_count)
return {"FinalLoss": mean_res}<jupyter_output><empty_output><jupyter_text>Convert MNIST `NumPy` arrays to `tf.data.Dataset`We now perform some preliminary processing on the `NumPy` arrays and then convert theminto the `tf.data.Dataset` format which allows for optimized loading.<jupyter_code>x_train = x_train.astype(float) / 255
x_test = x_test.astype(float) / 255
y_train = y_train.astype(int)
y_test = y_test.astype(int)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.batch(60000)
test_dataset = test_dataset.batch(10000)<jupyter_output><empty_output><jupyter_text>Fit the network and visualize resultsHaving performed all the previous set-up, we now run `model.fit()` for 250 model epochs, which will perform 50*250 epochs on each layer. We get to see the plotted loss curve as each layer is trained.<jupyter_code>model = FFNetwork(dims=[784, 500, 500])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.03),
loss="mse",
jit_compile=True,
metrics=[keras.metrics.Mean()],
)
epochs = 250
history = model.fit(train_dataset, epochs=epochs)<jupyter_output><empty_output><jupyter_text>Perform inference and testingHaving trained the model to a large extent, we now see how it performs on thetest set. We calculate the Accuracy Score to understand the results closely.<jupyter_code>preds = model.predict(tf.convert_to_tensor(x_test))
preds = preds.reshape((preds.shape[0], preds.shape[1]))
results = accuracy_score(preds, y_test)
print(f"Test Accuracy score : {results*100}%")
plt.plot(range(len(history.history["FinalLoss"])), history.history["FinalLoss"])
plt.title("Loss over training")
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/forwardforward.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/forwardforward.ipynb",
"repo_id": "keras-io",
"token_count": 6098
} | 87 |
<jupyter_start><jupyter_text>Metric learning for image similarity search**Author:** [Mat Kelcey](https://twitter.com/mat_kelcey)**Date created:** 2020/06/05**Last modified:** 2020/06/09**Description:** Example of using similarity metric learning on CIFAR-10 images. OverviewMetric learning aims to train models that can embed inputs into a high-dimensional spacesuch that "similar" inputs, as defined by the training scheme, are located close to eachother. These models once trained can produce embeddings for downstream systems where suchsimilarity is useful; examples include as a ranking signal for search or as a form ofpretrained embedding model for another supervised problem.For a more detailed overview of metric learning see:* [What is metric learning?](http://contrib.scikit-learn.org/metric-learn/introduction.html)* ["Using crossentropy for metric learning" tutorial](https://www.youtube.com/watch?v=Jb4Ewl5RzkI) SetupSet Keras backend to tensorflow.<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import random
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from collections import defaultdict
from PIL import Image
from sklearn.metrics import ConfusionMatrixDisplay
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>DatasetFor this example we will be using the[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.<jupyter_code>from keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype("float32") / 255.0
y_train = np.squeeze(y_train)
x_test = x_test.astype("float32") / 255.0
y_test = np.squeeze(y_test)<jupyter_output><empty_output><jupyter_text>To get a sense of the dataset we can visualise a grid of 25 random examples.<jupyter_code>height_width = 32
def show_collage(examples):
box_size = height_width + 2
num_rows, num_cols = examples.shape[:2]
collage = Image.new(
mode="RGB",
size=(num_cols * box_size, num_rows * box_size),
color=(250, 250, 250),
)
for row_idx in range(num_rows):
for col_idx in range(num_cols):
array = (np.array(examples[row_idx, col_idx]) * 255).astype(np.uint8)
collage.paste(
Image.fromarray(array), (col_idx * box_size, row_idx * box_size)
)
# Double size for visualisation.
collage = collage.resize((2 * num_cols * box_size, 2 * num_rows * box_size))
return collage
# Show a collage of 5x5 random images.
sample_idxs = np.random.randint(0, 50000, size=(5, 5))
examples = x_train[sample_idxs]
show_collage(examples)<jupyter_output><empty_output><jupyter_text>Metric learning provides training data not as explicit `(X, y)` pairs but instead usesmultiple instances that are related in the way we want to express similarity. In ourexample we will use instances of the same class to represent similarity; a singletraining instance will not be one image, but a pair of images of the same class. Whenreferring to the images in this pair we'll use the common metric learning names of the`anchor` (a randomly chosen image) and the `positive` (another randomly chosen image ofthe same class).To facilitate this we need to build a form of lookup that maps from classes to theinstances of that class. When generating data for training we will sample from thislookup.<jupyter_code>class_idx_to_train_idxs = defaultdict(list)
for y_train_idx, y in enumerate(y_train):
class_idx_to_train_idxs[y].append(y_train_idx)
class_idx_to_test_idxs = defaultdict(list)
for y_test_idx, y in enumerate(y_test):
class_idx_to_test_idxs[y].append(y_test_idx)<jupyter_output><empty_output><jupyter_text>For this example we are using the simplest approach to training; a batch will consist of`(anchor, positive)` pairs spread across the classes. The goal of learning will be tomove the anchor and positive pairs closer together and further away from other instancesin the batch. In this case the batch size will be dictated by the number of classes; forCIFAR-10 this is 10.<jupyter_code>num_classes = 10
class AnchorPositivePairs(keras.utils.Sequence):
def __init__(self, num_batches):
super().__init__()
self.num_batches = num_batches
def __len__(self):
return self.num_batches
def __getitem__(self, _idx):
x = np.empty((2, num_classes, height_width, height_width, 3), dtype=np.float32)
for class_idx in range(num_classes):
examples_for_class = class_idx_to_train_idxs[class_idx]
anchor_idx = random.choice(examples_for_class)
positive_idx = random.choice(examples_for_class)
while positive_idx == anchor_idx:
positive_idx = random.choice(examples_for_class)
x[0, class_idx] = x_train[anchor_idx]
x[1, class_idx] = x_train[positive_idx]
return x<jupyter_output><empty_output><jupyter_text>We can visualise a batch in another collage. The top row shows randomly chosen anchorsfrom the 10 classes, the bottom row shows the corresponding 10 positives.<jupyter_code>examples = next(iter(AnchorPositivePairs(num_batches=1)))
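# Illustrative sanity check (not part of the original example): each batch
# holds one anchor row and one positive row per class, so the expected shape
# is (2, num_classes, height_width, height_width, 3) == (2, 10, 32, 32, 3).
print("Batch shape:", examples.shape)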
show_collage(examples)<jupyter_output><empty_output><jupyter_text>Embedding modelWe define a custom model with a `train_step` that first embeds both anchors and positivesand then uses their pairwise dot products as logits for a softmax.<jupyter_code>class EmbeddingModel(keras.Model):
def train_step(self, data):
# Note: Workaround for open issue, to be removed.
if isinstance(data, tuple):
data = data[0]
anchors, positives = data[0], data[1]
with tf.GradientTape() as tape:
# Run both anchors and positives through model.
anchor_embeddings = self(anchors, training=True)
positive_embeddings = self(positives, training=True)
# Calculate cosine similarity between anchors and positives. As they have
            # been normalised, this is just the pairwise dot products.
similarities = keras.ops.einsum(
"ae,pe->ap", anchor_embeddings, positive_embeddings
)
# Since we intend to use these as logits we scale them by a temperature.
# This value would normally be chosen as a hyper parameter.
temperature = 0.2
similarities /= temperature
# We use these similarities as logits for a softmax. The labels for
            # this call are just the sequence [0, 1, 2, ..., num_classes - 1] since we
# want the main diagonal values, which correspond to the anchor/positive
# pairs, to be high. This loss will move embeddings for the
# anchor/positive pairs together and move all other pairs apart.
sparse_labels = keras.ops.arange(num_classes)
loss = self.compute_loss(y=sparse_labels, y_pred=similarities)
# Calculate gradients and apply via optimizer.
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
# Update and return metrics (specifically the one for the loss value).
for metric in self.metrics:
# Calling `self.compile` will by default add a `keras.metrics.Mean` loss
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(sparse_labels, similarities)
return {m.name: m.result() for m in self.metrics}<jupyter_output><empty_output><jupyter_text>Next we describe the architecture that maps from an image to an embedding. This modelsimply consists of a sequence of 2d convolutions followed by global pooling with a finallinear projection to an embedding space. As is common in metric learning we normalise theembeddings so that we can use simple dot products to measure similarity. For simplicitythis model is intentionally small.<jupyter_code>inputs = layers.Input(shape=(height_width, height_width, 3))
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, activation="relu")(inputs)
x = layers.Conv2D(filters=64, kernel_size=3, strides=2, activation="relu")(x)
x = layers.Conv2D(filters=128, kernel_size=3, strides=2, activation="relu")(x)
x = layers.GlobalAveragePooling2D()(x)
embeddings = layers.Dense(units=8, activation=None)(x)
embeddings = layers.UnitNormalization()(embeddings)
model = EmbeddingModel(inputs, embeddings)<jupyter_output><empty_output><jupyter_text>Finally we run the training. On a Google Colab GPU instance this takes about a minute.<jupyter_code>model.compile(
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
history = model.fit(AnchorPositivePairs(num_batches=1000), epochs=20)
plt.plot(history.history["loss"])
plt.show()<jupyter_output><empty_output><jupyter_text>TestingWe can review the quality of this model by applying it to the test set and consideringnear neighbours in the embedding space.First we embed the test set and calculate all near neighbours. Recall that since theembeddings are unit length we can calculate cosine similarity via dot products.<jupyter_code>near_neighbours_per_example = 10
embeddings = model.predict(x_test)
gram_matrix = np.einsum("ae,be->ab", embeddings, embeddings)
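# `np.argsort` sorts in ascending order, so the last columns hold the most
# similar examples. We keep one extra neighbour because every embedding is most
# similar to itself; that self-match is dropped later during visualisation.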
near_neighbours = np.argsort(gram_matrix.T)[:, -(near_neighbours_per_example + 1) :]<jupyter_output><empty_output><jupyter_text>As a visual check of these embeddings we can build a collage of the near neighbours for 5random examples. The first column of the image below is a randomly selected image, thefollowing 10 columns show the nearest neighbours in order of similarity.<jupyter_code>num_collage_examples = 5
examples = np.empty(
(
num_collage_examples,
near_neighbours_per_example + 1,
height_width,
height_width,
3,
),
dtype=np.float32,
)
for row_idx in range(num_collage_examples):
examples[row_idx, 0] = x_test[row_idx]
anchor_near_neighbours = reversed(near_neighbours[row_idx][:-1])
for col_idx, nn_idx in enumerate(anchor_near_neighbours):
examples[row_idx, col_idx + 1] = x_test[nn_idx]
show_collage(examples)<jupyter_output><empty_output><jupyter_text>We can also get a quantified view of the performance by considering the correctness ofnear neighbours in terms of a confusion matrix.Let us sample 10 examples from each of the 10 classes and consider their near neighboursas a form of prediction; that is, does the example and its near neighbours share the sameclass?We observe that each animal class does generally well, and is confused the most with theother animal classes. The vehicle classes follow the same pattern.<jupyter_code>confusion_matrix = np.zeros((num_classes, num_classes))
# For each class.
for class_idx in range(num_classes):
# Consider 10 examples.
example_idxs = class_idx_to_test_idxs[class_idx][:10]
for y_test_idx in example_idxs:
# And count the classes of its near neighbours.
for nn_idx in near_neighbours[y_test_idx][:-1]:
nn_class_idx = y_test[nn_idx]
confusion_matrix[class_idx, nn_class_idx] += 1
# Display a confusion matrix.
labels = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
disp = ConfusionMatrixDisplay(confusion_matrix=confusion_matrix, display_labels=labels)
disp.plot(include_values=True, cmap="viridis", ax=None, xticks_rotation="vertical")
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/metric_learning.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/metric_learning.ipynb",
"repo_id": "keras-io",
"token_count": 4215
} | 88 |
<jupyter_start><jupyter_text>Point cloud segmentation with PointNet**Author:** [Soumik Rakshit](https://github.com/soumik12345), [Sayak Paul](https://github.com/sayakpaul)**Date created:** 2020/10/23**Last modified:** 2020/10/24**Description:** Implementation of a PointNet-based model for segmenting point clouds. IntroductionA "point cloud" is an important type of data structure for storing geometric shape data.Due to its irregular format, it's often transformed intoregular 3D voxel grids or collections of images before being used in deep learning applications,a step which makes the data unnecessarily large.The PointNet family of models solves this problem by directly consuming point clouds, respectingthe permutation-invariance property of the point data. The PointNet family ofmodels provides a simple, unified architecturefor applications ranging from **object classification**, **part segmentation**, to**scene semantic parsing**.In this example, we demonstrate the implementation of the PointNet architecturefor shape segmentation. References- [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593)- [Point cloud classification with PointNet](https://keras.io/examples/vision/pointnet/)- [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025) Imports<jupyter_code>import os
import json
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
import tensorflow as tf # For tf.data
import keras
from keras import layers
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Downloading DatasetThe [ShapeNet dataset](https://shapenet.org/) is an ongoing effort to establish a richly-annotated,large-scale dataset of 3D shapes. **ShapeNetCore** is a subset of the full ShapeNetdataset with clean single 3D models and manually verified category and alignmentannotations. It covers 55 common object categories, with about 51,300 unique 3D models.For this example, we use one of the 12 object categories of[PASCAL 3D+](http://cvgl.stanford.edu/projects/pascal3d.html),included as part of the ShapenetCore dataset.<jupyter_code>dataset_url = "https://git.io/JiY4i"
dataset_path = keras.utils.get_file(
fname="shapenet.zip",
origin=dataset_url,
cache_subdir="datasets",
hash_algorithm="auto",
extract=True,
archive_format="auto",
cache_dir="datasets",
)<jupyter_output><empty_output><jupyter_text>Loading the datasetWe parse the dataset metadata in order to easily map model categories to theirrespective directories and segmentation classes to colors for the purpose ofvisualization.<jupyter_code>with open("/tmp/.keras/datasets/PartAnnotation/metadata.json") as json_file:
metadata = json.load(json_file)
print(metadata)<jupyter_output><empty_output><jupyter_text>In this example, we train PointNet to segment the parts of an `Airplane` model.<jupyter_code>points_dir = "/tmp/.keras/datasets/PartAnnotation/{}/points".format(
metadata["Airplane"]["directory"]
)
labels_dir = "/tmp/.keras/datasets/PartAnnotation/{}/points_label".format(
metadata["Airplane"]["directory"]
)
LABELS = metadata["Airplane"]["lables"]
COLORS = metadata["Airplane"]["colors"]
VAL_SPLIT = 0.2
NUM_SAMPLE_POINTS = 1024
BATCH_SIZE = 32
EPOCHS = 60
INITIAL_LR = 1e-3<jupyter_output><empty_output><jupyter_text>Structuring the datasetWe generate the following in-memory data structures from the Airplane point clouds and their labels:- `point_clouds` is a list of `np.array` objects that represent the point cloud data in the form of x, y and z coordinates. Axis 0 represents the number of points in the point cloud, while axis 1 represents the coordinates. `all_labels` is the list that represents the label of each coordinate as a string (needed mainly for visualization purposes).- `test_point_clouds` is in the same format as `point_clouds`, but doesn't have the corresponding labels of the point clouds.- `all_labels` is a list of `np.array` objects that represent the point cloud labels for each coordinate, corresponding to the `point_clouds` list.- `point_cloud_labels` is a list of `np.array` objects that represent the point cloud labels for each coordinate in one-hot encoded form, corresponding to the `point_clouds` list.<jupyter_code>point_clouds, test_point_clouds = [], []
point_cloud_labels, all_labels = [], []
points_files = glob(os.path.join(points_dir, "*.pts"))
for point_file in tqdm(points_files):
point_cloud = np.loadtxt(point_file)
if point_cloud.shape[0] < NUM_SAMPLE_POINTS:
continue
# Get the file-id of the current point cloud for parsing its
# labels.
file_id = point_file.split("/")[-1].split(".")[0]
label_data, num_labels = {}, 0
for label in LABELS:
label_file = os.path.join(labels_dir, label, file_id + ".seg")
if os.path.exists(label_file):
label_data[label] = np.loadtxt(label_file).astype("float32")
num_labels = len(label_data[label])
# Point clouds having labels will be our training samples.
try:
label_map = ["none"] * num_labels
for label in LABELS:
for i, data in enumerate(label_data[label]):
label_map[i] = label if data == 1 else label_map[i]
label_data = [
LABELS.index(label) if label != "none" else len(LABELS)
for label in label_map
]
# Apply one-hot encoding to the dense label representation.
label_data = keras.utils.to_categorical(label_data, num_classes=len(LABELS) + 1)
point_clouds.append(point_cloud)
point_cloud_labels.append(label_data)
all_labels.append(label_map)
except KeyError:
test_point_clouds.append(point_cloud)<jupyter_output><empty_output><jupyter_text>Next, we take a look at some samples from the in-memory arrays we just generated:<jupyter_code>for _ in range(5):
i = random.randint(0, len(point_clouds) - 1)
print(f"point_clouds[{i}].shape:", point_clouds[0].shape)
print(f"point_cloud_labels[{i}].shape:", point_cloud_labels[0].shape)
for j in range(5):
print(
f"all_labels[{i}][{j}]:",
all_labels[i][j],
f"\tpoint_cloud_labels[{i}][{j}]:",
point_cloud_labels[i][j],
"\n",
)<jupyter_output><empty_output><jupyter_text>Now, let's visualize some of the point clouds along with their labels.<jupyter_code>def visualize_data(point_cloud, labels):
df = pd.DataFrame(
data={
"x": point_cloud[:, 0],
"y": point_cloud[:, 1],
"z": point_cloud[:, 2],
"label": labels,
}
)
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
for index, label in enumerate(LABELS):
c_df = df[df["label"] == label]
try:
ax.scatter(
c_df["x"], c_df["y"], c_df["z"], label=label, alpha=0.5, c=COLORS[index]
)
except IndexError:
pass
ax.legend()
plt.show()
visualize_data(point_clouds[0], all_labels[0])
visualize_data(point_clouds[300], all_labels[300])<jupyter_output><empty_output><jupyter_text>PreprocessingNote that all the point clouds that we have loaded consist of a variable number of points,which makes it difficult for us to batch them together. In order to overcome this problem, werandomly sample a fixed number of points from each point cloud. We also normalize thepoint clouds in order to make the data scale-invariant.<jupyter_code>for index in tqdm(range(len(point_clouds))):
current_point_cloud = point_clouds[index]
current_label_cloud = point_cloud_labels[index]
current_labels = all_labels[index]
num_points = len(current_point_cloud)
# Randomly sampling respective indices.
sampled_indices = random.sample(list(range(num_points)), NUM_SAMPLE_POINTS)
# Sampling points corresponding to sampled indices.
sampled_point_cloud = np.array([current_point_cloud[i] for i in sampled_indices])
# Sampling corresponding one-hot encoded labels.
sampled_label_cloud = np.array([current_label_cloud[i] for i in sampled_indices])
# Sampling corresponding labels for visualization.
sampled_labels = np.array([current_labels[i] for i in sampled_indices])
# Normalizing sampled point cloud.
norm_point_cloud = sampled_point_cloud - np.mean(sampled_point_cloud, axis=0)
norm_point_cloud /= np.max(np.linalg.norm(norm_point_cloud, axis=1))
point_clouds[index] = norm_point_cloud
point_cloud_labels[index] = sampled_label_cloud
all_labels[index] = sampled_labels<jupyter_output><empty_output><jupyter_text>Let's visualize the sampled and normalized point clouds along with their correspondinglabels.<jupyter_code>visualize_data(point_clouds[0], all_labels[0])
visualize_data(point_clouds[300], all_labels[300])<jupyter_output><empty_output><jupyter_text>Creating TensorFlow datasetsWe create `tf.data.Dataset` objects for the training and validation data.We also augment the training point clouds by applying random jitter to them.<jupyter_code>def load_data(point_cloud_batch, label_cloud_batch):
point_cloud_batch.set_shape([NUM_SAMPLE_POINTS, 3])
label_cloud_batch.set_shape([NUM_SAMPLE_POINTS, len(LABELS) + 1])
return point_cloud_batch, label_cloud_batch
def augment(point_cloud_batch, label_cloud_batch):
noise = tf.random.uniform(
tf.shape(label_cloud_batch), -0.001, 0.001, dtype=tf.float64
)
point_cloud_batch += noise[:, :, :3]
return point_cloud_batch, label_cloud_batch
def generate_dataset(point_clouds, label_clouds, is_training=True):
dataset = tf.data.Dataset.from_tensor_slices((point_clouds, label_clouds))
dataset = dataset.shuffle(BATCH_SIZE * 100) if is_training else dataset
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size=BATCH_SIZE)
dataset = (
dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
if is_training
else dataset
)
return dataset
split_index = int(len(point_clouds) * (1 - VAL_SPLIT))
train_point_clouds = point_clouds[:split_index]
train_label_cloud = point_cloud_labels[:split_index]
total_training_examples = len(train_point_clouds)
val_point_clouds = point_clouds[split_index:]
val_label_cloud = point_cloud_labels[split_index:]
print("Num train point clouds:", len(train_point_clouds))
print("Num train point cloud labels:", len(train_label_cloud))
print("Num val point clouds:", len(val_point_clouds))
print("Num val point cloud labels:", len(val_label_cloud))
train_dataset = generate_dataset(train_point_clouds, train_label_cloud)
val_dataset = generate_dataset(val_point_clouds, val_label_cloud, is_training=False)
print("Train Dataset:", train_dataset)
print("Validation Dataset:", val_dataset)<jupyter_output><empty_output><jupyter_text>PointNet modelThe figure below depicts the internals of the PointNet model family:Given that PointNet is meant to consume an ***unordered set*** of coordinates as its input data,its architecture needs to match the following characteristic propertiesof point cloud data: Permutation invarianceGiven the unstructured nature of point cloud data, a scan made up of `n` points has `n!`permutations. The subsequent data processing must be invariant to the differentrepresentations. In order to make PointNet invariant to input permutations, we use asymmetric function (such as max-pooling) once the `n` input points are mapped tohigher-dimensional space. The result is a **global feature vector** that aims to capturean aggregate signature of the `n` input points. The global feature vector is used alongsidelocal point features for segmentation. Transformation invarianceSegmentation outputs should be unchanged if the object undergoes certain transformations,such as translation or scaling. For a given input point cloud, we apply an appropriaterigid or affine transformation to achieve pose normalization. Because each of the `n` inputpoints are represented as a vector and are mapped to the embedding spaces independently,applying a geometric transformation simply amounts to matrix multiplying each point witha transformation matrix. This is motivated by the concept of[Spatial Transformer Networks](https://arxiv.org/abs/1506.02025).The operations comprising the T-Net are motivated by the higher-level architecture ofPointNet. MLPs (or fully-connected layers) are used to map the input points independentlyand identically to a higher-dimensional space; max-pooling is used to encode a globalfeature vector whose dimensionality is then reduced with fully-connected layers. Theinput-dependent features at the final fully-connected layer are then combined withglobally trainable weights and biases, resulting in a 3-by-3 transformation matrix. Point interactionsThe interaction between neighboring points often carries useful information (i.e., asingle point should not be treated in isolation). Whereas classification need only makeuse of global features, segmentation must be able to leverage local point features alongwith global point features.**Note**: The figures presented in this section have been taken from the[original paper](https://arxiv.org/abs/1612.00593). Now that we know the pieces that compose the PointNet model, we can implement the model.We start by implementing the basic blocks i.e., the convolutional block and the multi-layerperceptron block.<jupyter_code>def conv_block(x, filters, name):
x = layers.Conv1D(filters, kernel_size=1, padding="valid", name=f"{name}_conv")(x)
x = layers.BatchNormalization(name=f"{name}_batch_norm")(x)
return layers.Activation("relu", name=f"{name}_relu")(x)
def mlp_block(x, filters, name):
x = layers.Dense(filters, name=f"{name}_dense")(x)
x = layers.BatchNormalization(name=f"{name}_batch_norm")(x)
return layers.Activation("relu", name=f"{name}_relu")(x)<jupyter_output><empty_output><jupyter_text>We implement a regularizer (taken from[this example](https://keras.io/examples/vision/pointnet/build-a-model))to enforce orthogonality in the feature space. This is needed to ensurethat the magnitudes of the transformed features do not vary too much.<jupyter_code>class OrthogonalRegularizer(keras.regularizers.Regularizer):
"""Reference: https://keras.io/examples/vision/pointnet/#build-a-model"""
def __init__(self, num_features, l2reg=0.001):
self.num_features = num_features
self.l2reg = l2reg
self.identity = keras.ops.eye(num_features)
def __call__(self, x):
x = keras.ops.reshape(x, (-1, self.num_features, self.num_features))
xxt = keras.ops.tensordot(x, x, axes=(2, 2))
xxt = keras.ops.reshape(xxt, (-1, self.num_features, self.num_features))
return keras.ops.sum(self.l2reg * keras.ops.square(xxt - self.identity))
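    # Note: for a perfectly orthogonal transform, x @ x.T equals the identity
    # matrix, so this penalty is zero; it grows as the predicted transform
    # drifts away from orthogonality.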
def get_config(self):
config = super().get_config()
config.update({"num_features": self.num_features, "l2reg_strength": self.l2reg})
return config<jupyter_output><empty_output><jupyter_text>The next piece is the transformation network which we explained earlier.<jupyter_code>def transformation_net(inputs, num_features, name):
"""
Reference: https://keras.io/examples/vision/pointnet/#build-a-model.
The `filters` values come from the original paper:
https://arxiv.org/abs/1612.00593.
"""
x = conv_block(inputs, filters=64, name=f"{name}_1")
x = conv_block(x, filters=128, name=f"{name}_2")
x = conv_block(x, filters=1024, name=f"{name}_3")
x = layers.GlobalMaxPooling1D()(x)
x = mlp_block(x, filters=512, name=f"{name}_1_1")
x = mlp_block(x, filters=256, name=f"{name}_2_1")
return layers.Dense(
num_features * num_features,
kernel_initializer="zeros",
bias_initializer=keras.initializers.Constant(np.eye(num_features).flatten()),
activity_regularizer=OrthogonalRegularizer(num_features),
name=f"{name}_final",
)(x)
def transformation_block(inputs, num_features, name):
transformed_features = transformation_net(inputs, num_features, name=name)
transformed_features = layers.Reshape((num_features, num_features))(
transformed_features
)
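    # Batched matrix multiplication: each (num_points, num_features) input is
    # multiplied by its predicted (num_features, num_features) transform.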
return layers.Dot(axes=(2, 1), name=f"{name}_mm")([inputs, transformed_features])<jupyter_output><empty_output><jupyter_text>Finally, we piece the above blocks together and implement the segmentation model.<jupyter_code>def get_shape_segmentation_model(num_points, num_classes):
input_points = keras.Input(shape=(None, 3))
# PointNet Classification Network.
transformed_inputs = transformation_block(
input_points, num_features=3, name="input_transformation_block"
)
features_64 = conv_block(transformed_inputs, filters=64, name="features_64")
features_128_1 = conv_block(features_64, filters=128, name="features_128_1")
features_128_2 = conv_block(features_128_1, filters=128, name="features_128_2")
transformed_features = transformation_block(
features_128_2, num_features=128, name="transformed_features"
)
features_512 = conv_block(transformed_features, filters=512, name="features_512")
features_2048 = conv_block(features_512, filters=2048, name="pre_maxpool_block")
global_features = layers.MaxPool1D(pool_size=num_points, name="global_features")(
features_2048
)
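    # Broadcast the single global feature vector back to every point so that it
    # can be concatenated with the per-point (local) features below.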
global_features = keras.ops.tile(global_features, [1, num_points, 1])
# Segmentation head.
segmentation_input = layers.Concatenate(name="segmentation_input")(
[
features_64,
features_128_1,
features_128_2,
transformed_features,
features_512,
global_features,
]
)
segmentation_features = conv_block(
segmentation_input, filters=128, name="segmentation_features"
)
outputs = layers.Conv1D(
num_classes, kernel_size=1, activation="softmax", name="segmentation_head"
)(segmentation_features)
return keras.Model(input_points, outputs)<jupyter_output><empty_output><jupyter_text>Instantiate the model<jupyter_code>x, y = next(iter(train_dataset))
num_points = x.shape[1]
num_classes = y.shape[-1]
segmentation_model = get_shape_segmentation_model(num_points, num_classes)
segmentation_model.summary()<jupyter_output><empty_output><jupyter_text>TrainingFor training, the authors recommend using a learning rate schedule that decays the initial learning rate by half every 20 epochs. In this example, we decay it every 5 epochs instead.<jupyter_code>steps_per_epoch = total_training_examples // BATCH_SIZE
total_training_steps = steps_per_epoch * EPOCHS
print(f"Steps per epoch: {steps_per_epoch}.")
print(f"Total training steps: {total_training_steps}.")
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.003,
decay_steps=steps_per_epoch * 5,
decay_rate=0.5,
staircase=True,
)
steps = range(total_training_steps)
lrs = [lr_schedule(step) for step in steps]
plt.plot(lrs)
plt.xlabel("Steps")
plt.ylabel("Learning Rate")
plt.show()<jupyter_output><empty_output><jupyter_text>Finally, we implement a utility for running our experiments and launch model training.<jupyter_code>def run_experiment(epochs):
segmentation_model = get_shape_segmentation_model(num_points, num_classes)
segmentation_model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy"],
)
checkpoint_filepath = "checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
history = segmentation_model.fit(
train_dataset,
validation_data=val_dataset,
epochs=epochs,
callbacks=[checkpoint_callback],
)
segmentation_model.load_weights(checkpoint_filepath)
return segmentation_model, history
segmentation_model, history = run_experiment(epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>Visualize the training landscape<jupyter_code>def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("loss")
plot_result("accuracy")<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code>validation_batch = next(iter(val_dataset))
val_predictions = segmentation_model.predict(validation_batch[0])
print(f"Validation prediction shape: {val_predictions.shape}")
def visualize_single_point_cloud(point_clouds, label_clouds, idx):
label_map = LABELS + ["none"]
point_cloud = point_clouds[idx]
label_cloud = label_clouds[idx]
visualize_data(point_cloud, [label_map[np.argmax(label)] for label in label_cloud])
idx = np.random.choice(len(validation_batch[0]))
print(f"Index selected: {idx}")
# Plotting with ground-truth.
visualize_single_point_cloud(validation_batch[0], validation_batch[1], idx)
# Plotting with predicted labels.
visualize_single_point_cloud(validation_batch[0], val_predictions, idx)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/pointnet_segmentation.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/pointnet_segmentation.ipynb",
"repo_id": "keras-io",
"token_count": 7553
} | 89 |
# Enhanced Deep Residual Networks for single-image super-resolution
**Author:** Gitesh Chawda<br>
**Date created:** 2022/04/07<br>
**Last modified:** 2022/04/07<br>
**Description:** Training an EDSR model on the DIV2K Dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/edsr.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/edsr.py)
---
## Introduction
In this example, we implement
[Enhanced Deep Residual Networks for Single Image Super-Resolution (EDSR)](https://arxiv.org/abs/1707.02921)
by Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee.
The EDSR architecture is based on the SRResNet architecture and consists of multiple
residual blocks. It uses constant scaling layers instead of batch normalization layers to
produce consistent results (input and output have similar distributions, thus
normalizing intermediate features may not be desirable). Instead of using an L2 loss (mean squared error),
the authors employed an L1 loss (mean absolute error), which performs better empirically.
Our implementation only includes 16 residual blocks with 64 channels.
Alternatively, as shown in the Keras example
[Image Super-Resolution using an Efficient Sub-Pixel CNN](https://keras.io/examples/vision/super_resolution_sub_pixel/#image-superresolution-using-an-efficient-subpixel-cnn),
you can do super-resolution using an ESPCN Model. According to the survey paper below, EDSR is one of the top-five
best-performing super-resolution methods based on PSNR scores. However, it has more
parameters and requires more computational power than other approaches.
It reaches a PSNR of roughly 34 dB, slightly higher than ESPCN's roughly 32 dB.
Paper:
[A comprehensive review of deep learning based single image super-resolution](https://arxiv.org/abs/2102.09351)
Comparison Graph:
<img src="https://dfzljdn9uc3pi.cloudfront.net/2021/cs-621/1/fig-11-2x.jpg" width="500" />
---
## Imports
```python
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
AUTOTUNE = tf.data.AUTOTUNE
```
---
## Download the training dataset
We use the DIV2K Dataset, a prominent single-image super-resolution dataset with 1,000
images of scenes with various sorts of degradations,
divided into 800 images for training, 100 images for validation, and 100
images for testing. We use 4x bicubic downsampled images as our "low quality" reference.
```python
# Download DIV2K from TF Datasets
# Using bicubic 4x degradation type
div2k_data = tfds.image.Div2k(config="bicubic_x4")
div2k_data.download_and_prepare()
# Taking train data from div2k_data object
train = div2k_data.as_dataset(split="train", as_supervised=True)
train_cache = train.cache()
# Validation data
val = div2k_data.as_dataset(split="validation", as_supervised=True)
val_cache = val.cache()
```
---
## Flip, crop and resize images
```python
def flip_left_right(lowres_img, highres_img):
"""Flips Images to left and right."""
# Outputs random values from a uniform distribution in between 0 to 1
rn = tf.random.uniform(shape=(), maxval=1)
# If rn is less than 0.5 it returns original lowres_img and highres_img
# If rn is greater than 0.5 it returns flipped image
return tf.cond(
rn < 0.5,
lambda: (lowres_img, highres_img),
lambda: (
tf.image.flip_left_right(lowres_img),
tf.image.flip_left_right(highres_img),
),
)
def random_rotate(lowres_img, highres_img):
"""Rotates Images by 90 degrees."""
# Outputs random values from uniform distribution in between 0 to 4
rn = tf.random.uniform(shape=(), maxval=4, dtype=tf.int32)
# Here rn signifies number of times the image(s) are rotated by 90 degrees
return tf.image.rot90(lowres_img, rn), tf.image.rot90(highres_img, rn)
def random_crop(lowres_img, highres_img, hr_crop_size=96, scale=4):
"""Crop images.
low resolution images: 24x24
high resolution images: 96x96
"""
lowres_crop_size = hr_crop_size // scale # 96//4=24
lowres_img_shape = tf.shape(lowres_img)[:2] # (height,width)
lowres_width = tf.random.uniform(
shape=(), maxval=lowres_img_shape[1] - lowres_crop_size + 1, dtype=tf.int32
)
lowres_height = tf.random.uniform(
shape=(), maxval=lowres_img_shape[0] - lowres_crop_size + 1, dtype=tf.int32
)
highres_width = lowres_width * scale
highres_height = lowres_height * scale
lowres_img_cropped = lowres_img[
lowres_height : lowres_height + lowres_crop_size,
lowres_width : lowres_width + lowres_crop_size,
] # 24x24
highres_img_cropped = highres_img[
highres_height : highres_height + hr_crop_size,
highres_width : highres_width + hr_crop_size,
] # 96x96
return lowres_img_cropped, highres_img_cropped
```
---
## Prepare a `tf.data.Dataset` object
We augment the training data with random horizontal flips and 90° rotations.
As low resolution images, we use 24x24 RGB input patches.
```python
def dataset_object(dataset_cache, training=True):
ds = dataset_cache
ds = ds.map(
lambda lowres, highres: random_crop(lowres, highres, scale=4),
num_parallel_calls=AUTOTUNE,
)
if training:
ds = ds.map(random_rotate, num_parallel_calls=AUTOTUNE)
ds = ds.map(flip_left_right, num_parallel_calls=AUTOTUNE)
# Batching Data
ds = ds.batch(16)
if training:
        # Repeat the data so that the cardinality of the dataset becomes infinite
ds = ds.repeat()
# prefetching allows later images to be prepared while the current image is being processed
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
train_ds = dataset_object(train_cache, training=True)
val_ds = dataset_object(val_cache, training=False)
```
---
## Visualize the data
Let's visualize a few sample images:
```python
lowres, highres = next(iter(train_ds))
# High Resolution Images
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(highres[i].numpy().astype("uint8"))
plt.title(highres[i].shape)
plt.axis("off")
# Low Resolution Images
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(lowres[i].numpy().astype("uint8"))
plt.title(lowres[i].shape)
plt.axis("off")
def PSNR(super_resolution, high_resolution):
"""Compute the peak signal-to-noise ratio, measures quality of image."""
# Max value of pixel is 255
psnr_value = tf.image.psnr(high_resolution, super_resolution, max_val=255)[0]
return psnr_value
```


---
## Build the model
In the paper, the authors train three models: EDSR, MDSR, and a baseline model. In this code example,
we only train the baseline model.
### Comparison of residual block designs
The residual block design of EDSR differs from that of ResNet. Batch normalization
layers have been removed (together with the final ReLU activation): since batch normalization
layers normalize the features, they hurt output value range flexibility.
It is thus better to remove them. Removing them also reduces the
amount of GPU RAM required by the model, since batch normalization layers consume the same amount of
memory as the preceding convolutional layers.
<img src="https://miro.medium.com/max/1050/1*EPviXGqlGWotVtV2gqVvNg.png" width="500" />
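For contrast, a conventional SRResNet-style residual block keeps batch normalization.
A minimal, purely illustrative sketch (the helper `resblock_with_bn` below is not used
anywhere in this example) would look like this:

```python
# Illustrative only: a residual block *with* batch normalization, shown to
# contrast with the BN-free EDSR block defined below.
def resblock_with_bn(inputs, filters=64):
    x = layers.Conv2D(filters, 3, padding="same")(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)
    x = layers.Conv2D(filters, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    return layers.Add()([inputs, x])
```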
```python
class EDSRModel(tf.keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def predict_step(self, x):
# Adding dummy dimension using tf.expand_dims and converting to float32 using tf.cast
x = tf.cast(tf.expand_dims(x, axis=0), tf.float32)
# Passing low resolution image to model
super_resolution_img = self(x, training=False)
# Clips the tensor from min(0) to max(255)
super_resolution_img = tf.clip_by_value(super_resolution_img, 0, 255)
# Rounds the values of a tensor to the nearest integer
super_resolution_img = tf.round(super_resolution_img)
# Removes dimensions of size 1 from the shape of a tensor and converting to uint8
super_resolution_img = tf.squeeze(
tf.cast(super_resolution_img, tf.uint8), axis=0
)
return super_resolution_img
# Residual Block
def ResBlock(inputs):
x = layers.Conv2D(64, 3, padding="same", activation="relu")(inputs)
x = layers.Conv2D(64, 3, padding="same")(x)
x = layers.Add()([inputs, x])
return x
# Upsampling Block
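# Each Conv2D expands the channels by a factor of `factor ** 2`, and
# `tf.nn.depth_to_space` (pixel shuffle) rearranges those channels into a
# spatial grid that is `factor` times larger, so two x2 stages yield the
# overall x4 upscaling.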
def Upsampling(inputs, factor=2, **kwargs):
x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(inputs)
x = tf.nn.depth_to_space(x, block_size=factor)
x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(x)
x = tf.nn.depth_to_space(x, block_size=factor)
return x
def make_model(num_filters, num_of_residual_blocks):
    # Flexible input: the height and width of input_layer can be arbitrary
input_layer = layers.Input(shape=(None, None, 3))
# Scaling Pixel Values
x = layers.Rescaling(scale=1.0 / 255)(input_layer)
x = x_new = layers.Conv2D(num_filters, 3, padding="same")(x)
# 16 residual blocks
for _ in range(num_of_residual_blocks):
x_new = ResBlock(x_new)
x_new = layers.Conv2D(num_filters, 3, padding="same")(x_new)
x = layers.Add()([x, x_new])
x = Upsampling(x)
x = layers.Conv2D(3, 3, padding="same")(x)
output_layer = layers.Rescaling(scale=255)(x)
return EDSRModel(input_layer, output_layer)
model = make_model(num_filters=64, num_of_residual_blocks=16)
```
---
## Train the model
```python
# Using adam optimizer with initial learning rate as 1e-4, changing learning rate after 5000 steps to 5e-5
optim_edsr = keras.optimizers.Adam(
learning_rate=keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5000], values=[1e-4, 5e-5]
)
)
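# PiecewiseConstantDecay returns values[0] (1e-4) while the step is <= 5000 and
# values[1] (5e-5) once the step exceeds that boundary.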
# Compiling model with loss as mean absolute error(L1 Loss) and metric as psnr
model.compile(optimizer=optim_edsr, loss="mae", metrics=[PSNR])
# Training for more epochs will improve results
model.fit(train_ds, epochs=100, steps_per_epoch=200, validation_data=val_ds)
```
<div class="k-default-codeblock">
```
Epoch 1/100
200/200 [==============================] - 78s 322ms/step - loss: 27.7075 - PSNR: 19.4656 - val_loss: 14.7192 - val_PSNR: 22.5129
Epoch 2/100
200/200 [==============================] - 6s 28ms/step - loss: 12.6842 - PSNR: 24.7269 - val_loss: 12.4348 - val_PSNR: 22.8793
Epoch 3/100
200/200 [==============================] - 6s 28ms/step - loss: 10.7646 - PSNR: 27.3775 - val_loss: 10.6830 - val_PSNR: 24.9075
Epoch 4/100
200/200 [==============================] - 6s 28ms/step - loss: 9.8356 - PSNR: 27.4924 - val_loss: 9.2714 - val_PSNR: 27.7680
Epoch 5/100
200/200 [==============================] - 5s 27ms/step - loss: 9.1752 - PSNR: 29.4013 - val_loss: 8.7747 - val_PSNR: 27.6017
Epoch 6/100
200/200 [==============================] - 6s 30ms/step - loss: 8.8630 - PSNR: 27.8686 - val_loss: 8.7710 - val_PSNR: 30.2381
Epoch 7/100
200/200 [==============================] - 5s 27ms/step - loss: 8.7107 - PSNR: 28.4887 - val_loss: 8.3186 - val_PSNR: 29.4744
Epoch 8/100
200/200 [==============================] - 6s 32ms/step - loss: 8.5374 - PSNR: 28.9546 - val_loss: 8.4716 - val_PSNR: 28.8873
Epoch 9/100
200/200 [==============================] - 5s 27ms/step - loss: 8.4111 - PSNR: 30.2234 - val_loss: 8.1969 - val_PSNR: 28.9538
Epoch 10/100
200/200 [==============================] - 6s 28ms/step - loss: 8.3835 - PSNR: 29.7066 - val_loss: 8.9434 - val_PSNR: 31.9213
Epoch 11/100
200/200 [==============================] - 5s 27ms/step - loss: 8.1713 - PSNR: 30.7191 - val_loss: 8.2816 - val_PSNR: 30.7049
Epoch 12/100
200/200 [==============================] - 6s 30ms/step - loss: 7.9129 - PSNR: 30.3964 - val_loss: 8.9365 - val_PSNR: 26.2667
Epoch 13/100
200/200 [==============================] - 5s 27ms/step - loss: 8.2504 - PSNR: 30.1612 - val_loss: 7.8384 - val_PSNR: 28.4159
Epoch 14/100
200/200 [==============================] - 6s 31ms/step - loss: 8.0114 - PSNR: 30.2370 - val_loss: 7.2658 - val_PSNR: 29.4454
Epoch 15/100
200/200 [==============================] - 5s 27ms/step - loss: 8.0059 - PSNR: 30.7665 - val_loss: 7.6692 - val_PSNR: 31.8294
Epoch 16/100
200/200 [==============================] - 6s 28ms/step - loss: 7.9388 - PSNR: 30.5297 - val_loss: 7.7625 - val_PSNR: 28.6685
Epoch 17/100
200/200 [==============================] - 5s 27ms/step - loss: 7.8627 - PSNR: 30.8213 - val_loss: 8.1984 - val_PSNR: 30.9864
Epoch 18/100
200/200 [==============================] - 6s 30ms/step - loss: 7.8956 - PSNR: 30.4661 - val_loss: 8.2664 - val_PSNR: 34.1168
Epoch 19/100
200/200 [==============================] - 5s 27ms/step - loss: 7.7800 - PSNR: 30.3071 - val_loss: 7.9547 - val_PSNR: 30.9254
Epoch 20/100
200/200 [==============================] - 6s 31ms/step - loss: 7.7402 - PSNR: 30.7251 - val_loss: 7.9632 - val_PSNR: 31.7438
Epoch 21/100
200/200 [==============================] - 5s 27ms/step - loss: 7.7372 - PSNR: 31.3348 - val_loss: 8.0512 - val_PSNR: 29.4988
Epoch 22/100
200/200 [==============================] - 6s 28ms/step - loss: 7.7207 - PSNR: 31.1984 - val_loss: 7.6072 - val_PSNR: 32.6720
Epoch 23/100
200/200 [==============================] - 6s 29ms/step - loss: 7.5955 - PSNR: 31.3128 - val_loss: 6.8593 - val_PSNR: 28.1123
Epoch 24/100
200/200 [==============================] - 6s 28ms/step - loss: 7.6341 - PSNR: 31.6670 - val_loss: 7.4485 - val_PSNR: 30.0567
Epoch 25/100
200/200 [==============================] - 6s 28ms/step - loss: 7.5404 - PSNR: 31.5332 - val_loss: 6.8795 - val_PSNR: 33.6179
Epoch 26/100
200/200 [==============================] - 6s 31ms/step - loss: 7.4429 - PSNR: 32.3681 - val_loss: 7.5937 - val_PSNR: 32.5076
Epoch 27/100
200/200 [==============================] - 6s 28ms/step - loss: 7.4243 - PSNR: 31.2899 - val_loss: 7.0982 - val_PSNR: 37.4561
Epoch 28/100
200/200 [==============================] - 5s 27ms/step - loss: 7.3542 - PSNR: 31.3620 - val_loss: 7.5735 - val_PSNR: 29.3892
Epoch 29/100
200/200 [==============================] - 6s 31ms/step - loss: 7.2648 - PSNR: 32.0806 - val_loss: 7.7589 - val_PSNR: 28.5829
Epoch 30/100
200/200 [==============================] - 5s 27ms/step - loss: 7.2954 - PSNR: 32.3495 - val_loss: 7.1625 - val_PSNR: 32.0560
Epoch 31/100
200/200 [==============================] - 6s 31ms/step - loss: 7.4815 - PSNR: 32.3662 - val_loss: 7.8601 - val_PSNR: 35.0962
Epoch 32/100
200/200 [==============================] - 6s 29ms/step - loss: 7.3957 - PSNR: 30.4455 - val_loss: 7.4800 - val_PSNR: 31.9397
Epoch 33/100
200/200 [==============================] - 6s 29ms/step - loss: 7.3849 - PSNR: 32.0058 - val_loss: 7.2225 - val_PSNR: 35.5276
Epoch 34/100
200/200 [==============================] - 6s 28ms/step - loss: 7.4285 - PSNR: 31.6806 - val_loss: 7.3937 - val_PSNR: 30.4433
Epoch 35/100
200/200 [==============================] - 6s 30ms/step - loss: 7.3841 - PSNR: 32.1425 - val_loss: 7.6458 - val_PSNR: 30.7912
Epoch 36/100
200/200 [==============================] - 5s 27ms/step - loss: 7.3049 - PSNR: 31.7272 - val_loss: 7.5190 - val_PSNR: 33.2980
Epoch 37/100
200/200 [==============================] - 6s 31ms/step - loss: 7.3098 - PSNR: 31.7727 - val_loss: 8.0041 - val_PSNR: 26.8507
Epoch 38/100
200/200 [==============================] - 6s 28ms/step - loss: 7.4027 - PSNR: 31.1814 - val_loss: 7.7334 - val_PSNR: 29.2905
Epoch 39/100
200/200 [==============================] - 6s 29ms/step - loss: 7.2470 - PSNR: 31.3636 - val_loss: 7.1275 - val_PSNR: 33.1772
Epoch 40/100
200/200 [==============================] - 6s 28ms/step - loss: 7.1907 - PSNR: 32.7381 - val_loss: 7.3437 - val_PSNR: 33.7216
Epoch 41/100
200/200 [==============================] - 6s 29ms/step - loss: 7.3383 - PSNR: 31.6409 - val_loss: 6.8769 - val_PSNR: 29.9654
Epoch 42/100
200/200 [==============================] - 5s 27ms/step - loss: 7.3393 - PSNR: 31.4941 - val_loss: 6.1088 - val_PSNR: 35.7083
Epoch 43/100
200/200 [==============================] - 6s 32ms/step - loss: 7.2272 - PSNR: 32.2356 - val_loss: 7.4534 - val_PSNR: 29.5734
Epoch 44/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1773 - PSNR: 32.0016 - val_loss: 7.4676 - val_PSNR: 33.0795
Epoch 45/100
200/200 [==============================] - 6s 28ms/step - loss: 7.4677 - PSNR: 32.3508 - val_loss: 7.2459 - val_PSNR: 31.6806
Epoch 46/100
200/200 [==============================] - 6s 30ms/step - loss: 7.2347 - PSNR: 33.3392 - val_loss: 7.0098 - val_PSNR: 27.1658
Epoch 47/100
200/200 [==============================] - 6s 28ms/step - loss: 7.4494 - PSNR: 32.1602 - val_loss: 8.0211 - val_PSNR: 29.9740
Epoch 48/100
200/200 [==============================] - 6s 28ms/step - loss: 7.1128 - PSNR: 32.1696 - val_loss: 7.0101 - val_PSNR: 32.8874
Epoch 49/100
200/200 [==============================] - 6s 31ms/step - loss: 7.1698 - PSNR: 32.0733 - val_loss: 7.5813 - val_PSNR: 26.1697
Epoch 50/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1904 - PSNR: 31.9198 - val_loss: 6.3655 - val_PSNR: 33.4935
Epoch 51/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0957 - PSNR: 32.3727 - val_loss: 7.2626 - val_PSNR: 28.8388
Epoch 52/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1436 - PSNR: 32.2141 - val_loss: 7.6012 - val_PSNR: 31.2261
Epoch 53/100
200/200 [==============================] - 6s 28ms/step - loss: 7.2270 - PSNR: 32.2675 - val_loss: 6.9826 - val_PSNR: 27.6408
Epoch 54/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0638 - PSNR: 32.5191 - val_loss: 6.6046 - val_PSNR: 32.3862
Epoch 55/100
200/200 [==============================] - 6s 31ms/step - loss: 7.1609 - PSNR: 31.6787 - val_loss: 7.3563 - val_PSNR: 28.3834
Epoch 56/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1953 - PSNR: 31.9948 - val_loss: 6.5111 - val_PSNR: 34.0409
Epoch 57/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1168 - PSNR: 32.3288 - val_loss: 6.7979 - val_PSNR: 31.8126
Epoch 58/100
200/200 [==============================] - 6s 29ms/step - loss: 7.0578 - PSNR: 33.1605 - val_loss: 6.8349 - val_PSNR: 32.0840
Epoch 59/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0890 - PSNR: 32.7020 - val_loss: 7.4109 - val_PSNR: 31.8377
Epoch 60/100
200/200 [==============================] - 6s 29ms/step - loss: 7.1357 - PSNR: 32.9600 - val_loss: 7.7647 - val_PSNR: 30.2965
Epoch 61/100
200/200 [==============================] - 6s 32ms/step - loss: 7.2003 - PSNR: 32.0152 - val_loss: 7.8508 - val_PSNR: 27.8501
Epoch 62/100
200/200 [==============================] - 6s 30ms/step - loss: 7.0474 - PSNR: 32.4485 - val_loss: 7.3319 - val_PSNR: 28.4571
Epoch 63/100
200/200 [==============================] - 6s 30ms/step - loss: 7.1315 - PSNR: 32.6996 - val_loss: 7.0695 - val_PSNR: 34.9915
Epoch 64/100
200/200 [==============================] - 6s 28ms/step - loss: 7.1181 - PSNR: 32.9488 - val_loss: 6.2144 - val_PSNR: 33.9663
Epoch 65/100
200/200 [==============================] - 6s 29ms/step - loss: 7.1262 - PSNR: 32.0699 - val_loss: 7.1910 - val_PSNR: 34.1321
Epoch 66/100
200/200 [==============================] - 6s 28ms/step - loss: 7.2891 - PSNR: 32.5745 - val_loss: 6.9004 - val_PSNR: 34.5732
Epoch 67/100
200/200 [==============================] - 6s 31ms/step - loss: 6.8185 - PSNR: 32.2085 - val_loss: 6.8353 - val_PSNR: 27.2619
Epoch 68/100
200/200 [==============================] - 7s 33ms/step - loss: 6.9238 - PSNR: 33.3510 - val_loss: 7.3350 - val_PSNR: 28.2281
Epoch 69/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0037 - PSNR: 31.6955 - val_loss: 6.5887 - val_PSNR: 30.3138
Epoch 70/100
200/200 [==============================] - 5s 27ms/step - loss: 7.0239 - PSNR: 32.6923 - val_loss: 6.6467 - val_PSNR: 36.0194
Epoch 71/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0828 - PSNR: 32.0297 - val_loss: 6.5626 - val_PSNR: 34.4241
Epoch 72/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0717 - PSNR: 32.5201 - val_loss: 7.5056 - val_PSNR: 31.4176
Epoch 73/100
200/200 [==============================] - 6s 29ms/step - loss: 7.0943 - PSNR: 32.4469 - val_loss: 7.0981 - val_PSNR: 33.2052
Epoch 74/100
200/200 [==============================] - 8s 38ms/step - loss: 7.0288 - PSNR: 32.2301 - val_loss: 6.9661 - val_PSNR: 34.0108
Epoch 75/100
200/200 [==============================] - 6s 28ms/step - loss: 7.1122 - PSNR: 32.1658 - val_loss: 6.9569 - val_PSNR: 30.8972
Epoch 76/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0108 - PSNR: 31.5408 - val_loss: 7.1185 - val_PSNR: 26.8445
Epoch 77/100
200/200 [==============================] - 6s 28ms/step - loss: 6.7812 - PSNR: 32.4927 - val_loss: 7.0030 - val_PSNR: 31.6901
Epoch 78/100
200/200 [==============================] - 6s 29ms/step - loss: 6.9885 - PSNR: 31.9727 - val_loss: 7.1126 - val_PSNR: 29.0163
Epoch 79/100
200/200 [==============================] - 6s 30ms/step - loss: 7.0738 - PSNR: 32.4997 - val_loss: 6.7849 - val_PSNR: 31.0740
Epoch 80/100
200/200 [==============================] - 6s 29ms/step - loss: 7.0899 - PSNR: 31.7940 - val_loss: 6.9975 - val_PSNR: 33.6309
Epoch 81/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0215 - PSNR: 32.6563 - val_loss: 6.5724 - val_PSNR: 35.1765
Epoch 82/100
200/200 [==============================] - 6s 28ms/step - loss: 6.9076 - PSNR: 32.9912 - val_loss: 6.8611 - val_PSNR: 31.8409
Epoch 83/100
200/200 [==============================] - 6s 28ms/step - loss: 6.9978 - PSNR: 32.7159 - val_loss: 6.4787 - val_PSNR: 31.5799
Epoch 84/100
200/200 [==============================] - 6s 29ms/step - loss: 7.1276 - PSNR: 32.8232 - val_loss: 7.9006 - val_PSNR: 27.5171
Epoch 85/100
200/200 [==============================] - 7s 33ms/step - loss: 7.0276 - PSNR: 32.3290 - val_loss: 8.5374 - val_PSNR: 25.2824
Epoch 86/100
200/200 [==============================] - 7s 33ms/step - loss: 7.0434 - PSNR: 31.4983 - val_loss: 6.9392 - val_PSNR: 35.9229
Epoch 87/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0703 - PSNR: 32.2641 - val_loss: 7.8662 - val_PSNR: 28.1676
Epoch 88/100
200/200 [==============================] - 6s 28ms/step - loss: 7.1311 - PSNR: 32.2141 - val_loss: 7.2089 - val_PSNR: 27.3218
Epoch 89/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0730 - PSNR: 33.3360 - val_loss: 6.7915 - val_PSNR: 29.1367
Epoch 90/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0177 - PSNR: 32.6117 - val_loss: 8.3779 - val_PSNR: 31.9831
Epoch 91/100
200/200 [==============================] - 6s 31ms/step - loss: 6.9638 - PSNR: 32.2765 - val_loss: 6.6582 - val_PSNR: 37.5391
Epoch 92/100
200/200 [==============================] - 6s 28ms/step - loss: 6.9623 - PSNR: 32.8864 - val_loss: 7.7435 - val_PSNR: 29.8939
Epoch 93/100
200/200 [==============================] - 6s 29ms/step - loss: 6.8474 - PSNR: 32.5345 - val_loss: 6.8181 - val_PSNR: 28.1166
Epoch 94/100
200/200 [==============================] - 6s 28ms/step - loss: 6.9059 - PSNR: 32.0613 - val_loss: 7.0014 - val_PSNR: 33.2055
Epoch 95/100
200/200 [==============================] - 6s 29ms/step - loss: 7.0418 - PSNR: 32.2906 - val_loss: 6.9686 - val_PSNR: 28.8045
Epoch 96/100
200/200 [==============================] - 6s 30ms/step - loss: 6.8624 - PSNR: 32.5043 - val_loss: 7.2015 - val_PSNR: 33.2103
Epoch 97/100
200/200 [==============================] - 7s 33ms/step - loss: 6.9632 - PSNR: 33.0834 - val_loss: 7.0972 - val_PSNR: 30.3407
Epoch 98/100
200/200 [==============================] - 6s 31ms/step - loss: 6.9307 - PSNR: 31.9062 - val_loss: 7.3421 - val_PSNR: 31.5380
Epoch 99/100
200/200 [==============================] - 6s 28ms/step - loss: 7.0685 - PSNR: 31.9839 - val_loss: 7.9828 - val_PSNR: 33.0619
Epoch 100/100
200/200 [==============================] - 6s 28ms/step - loss: 6.9233 - PSNR: 31.8346 - val_loss: 6.3802 - val_PSNR: 38.4415
<keras.callbacks.History at 0x7fe3682f2c90>
```
</div>
---
## Run inference on new images and plot the results
```python
def plot_results(lowres, preds):
"""
    Displays the low-resolution image next to the super-resolution prediction
"""
plt.figure(figsize=(24, 14))
plt.subplot(132), plt.imshow(lowres), plt.title("Low resolution")
plt.subplot(133), plt.imshow(preds), plt.title("Prediction")
plt.show()
for lowres, highres in val.take(10):
lowres = tf.image.random_crop(lowres, (150, 150, 3))
preds = model.predict_step(lowres)
plot_results(lowres, preds)
```










---
## Final remarks
In this example, we implemented the EDSR model (Enhanced Deep Residual Networks for Single Image
Super-Resolution). You could improve the model accuracy by training the model for more epochs, as well as
training the model with a wider variety of inputs with mixed downgrading factors, so as to
be able to handle a greater range of real-world images.
You could also improve on the given baseline EDSR model by implementing EDSR+,
or MDSR (Multi-Scale Super-Resolution) and MDSR+,
which were proposed in the same paper.
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/EDSR) | [](https://huggingface.co/spaces/keras-io/EDSR) | | keras-io/examples/vision/md/edsr.md/0 | {
"file_path": "keras-io/examples/vision/md/edsr.md",
"repo_id": "keras-io",
"token_count": 10769
} | 90 |
# Knowledge Distillation
**Author:** [Kenneth Borup](https://twitter.com/Kennethborup)<br>
**Date created:** 2020/09/01<br>
**Last modified:** 2020/09/01<br>
**Description:** Implementation of classical Knowledge Distillation.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/knowledge_distillation.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/knowledge_distillation.py)
---
## Introduction to Knowledge Distillation
Knowledge Distillation is a procedure for model
compression, in which a small (student) model is trained to match a large pre-trained
(teacher) model. Knowledge is transferred from the teacher model to the student
by minimizing a loss function, aimed at matching softened teacher logits as well as
ground-truth labels.
The logits are softened by applying a "temperature" scaling function in the softmax,
effectively smoothing out the probability distribution and revealing
inter-class relationships learned by the teacher.
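As a quick standalone illustration (NumPy only, with made-up logits; not part of the distillation code below), a higher temperature flattens the teacher's output distribution:
```python
import numpy as np
def softened_softmax(logits, temperature):
    # Scale the logits by the temperature before applying the softmax.
    z = logits / temperature
    e = np.exp(z - z.max())  # subtract the max for numerical stability
    return e / e.sum()
logits = np.array([8.0, 2.0, 1.0])  # made-up teacher logits for 3 classes
print(softened_softmax(logits, temperature=1))  # ~[0.997, 0.002, 0.001] -> nearly one-hot
print(softened_softmax(logits, temperature=10))  # ~[0.49, 0.27, 0.24] -> reveals class similarities
```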
**Reference:**
- [Hinton et al. (2015)](https://arxiv.org/abs/1503.02531)
---
## Setup
```python
import os
import keras
from keras import layers
from keras import ops
import numpy as np
```
---
## Construct `Distiller()` class
The custom `Distiller()` class, overrides the `Model` methods `compile`, `compute_loss`,
and `call`. In order to use the distiller, we need:
- A trained teacher model
- A student model to train
- A student loss function on the difference between student predictions and ground-truth
- A distillation loss function, along with a `temperature`, on the difference between the
soft student predictions and the soft teacher labels
- An `alpha` factor to weight the student and distillation loss
- An optimizer for the student and (optional) metrics to evaluate performance
In the `compute_loss` method, we perform a forward pass of the teacher, compute the
`student_loss` and the `distillation_loss`, and weight them by `alpha` and `1 - alpha`,
respectively. Following Hinton et al. (2015), the distillation term is additionally scaled by
`temperature**2`, which keeps its gradient magnitude comparable as the temperature changes.
Note: only the student weights are updated.
```python
class Distiller(keras.Model):
def __init__(self, student, teacher):
super().__init__()
self.teacher = teacher
self.student = student
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
alpha=0.1,
temperature=3,
):
"""Configure the distiller.
Args:
optimizer: Keras optimizer for the student weights
metrics: Keras metrics for evaluation
student_loss_fn: Loss function of difference between student
predictions and ground-truth
distillation_loss_fn: Loss function of difference between soft
student predictions and soft teacher predictions
alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn
temperature: Temperature for softening probability distributions.
Larger temperature gives softer distributions.
"""
super().compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.alpha = alpha
self.temperature = temperature
def compute_loss(
self, x=None, y=None, y_pred=None, sample_weight=None, allow_empty=False
):
teacher_pred = self.teacher(x, training=False)
student_loss = self.student_loss_fn(y, y_pred)
distillation_loss = self.distillation_loss_fn(
ops.softmax(teacher_pred / self.temperature, axis=1),
ops.softmax(y_pred / self.temperature, axis=1),
) * (self.temperature**2)
loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
return loss
def call(self, x):
return self.student(x)
```
---
## Create student and teacher models
Initially, we create a teacher model and a smaller student model. Both models are
convolutional neural networks and created using `Sequential()`,
but could be any Keras model.
```python
# Create the teacher
teacher = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="teacher",
)
# Create the student
student = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="student",
)
# Clone student for later comparison
student_scratch = keras.models.clone_model(student)
```
---
## Prepare the dataset
The dataset used for training the teacher and distilling the teacher is
[MNIST](https://keras.io/api/datasets/mnist/), and the procedure would be equivalent for
any other
dataset, e.g. [CIFAR-10](https://keras.io/api/datasets/cifar10/), with a suitable choice
of models. Both the student and teacher are trained on the training set and evaluated on
the test set.
```python
# Prepare the train and test dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize data
x_train = x_train.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = x_test.astype("float32") / 255.0
x_test = np.reshape(x_test, (-1, 28, 28, 1))
```
---
## Train the teacher
In knowledge distillation we assume that the teacher is trained and fixed. Thus, we start
by training the teacher model on the training set in the usual way.
```python
# Train teacher as usual
teacher.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate teacher on data.
teacher.fit(x_train, y_train, epochs=5)
teacher.evaluate(x_test, y_test)
```
<div class="k-default-codeblock">
```
Epoch 1/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 8s 3ms/step - loss: 0.2408 - sparse_categorical_accuracy: 0.9259
Epoch 2/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - loss: 0.0912 - sparse_categorical_accuracy: 0.9726
Epoch 3/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 7s 4ms/step - loss: 0.0758 - sparse_categorical_accuracy: 0.9777
Epoch 4/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - loss: 0.0690 - sparse_categorical_accuracy: 0.9797
Epoch 5/5
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 5s 3ms/step - loss: 0.0582 - sparse_categorical_accuracy: 0.9825
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0931 - sparse_categorical_accuracy: 0.9760
[0.09044107794761658, 0.978100061416626]
```
</div>
---
## Distill teacher to student
We have already trained the teacher model, and we only need to initialize a
`Distiller(student, teacher)` instance, `compile()` it with the desired losses,
hyperparameters and optimizer, and distill the teacher to the student.
```python
# Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=keras.losses.KLDivergence(),
alpha=0.1,
temperature=10,
)
# Distill teacher to student
distiller.fit(x_train, y_train, epochs=3)
# Evaluate student on test dataset
distiller.evaluate(x_test, y_test)
```
<div class="k-default-codeblock">
```
Epoch 1/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 8s 3ms/step - loss: 1.8752 - sparse_categorical_accuracy: 0.7357
Epoch 2/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - loss: 0.0333 - sparse_categorical_accuracy: 0.9475
Epoch 3/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 6s 3ms/step - loss: 0.0223 - sparse_categorical_accuracy: 0.9621
313/313 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 0.0189 - sparse_categorical_accuracy: 0.9629
[0.017046602442860603, 0.969200074672699]
```
</div>
---
## Train student from scratch for comparison
We can also train an equivalent student model from scratch without the teacher, in order
to evaluate the performance gain obtained by knowledge distillation.
```python
# Train student as usual
student_scratch.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate student trained from scratch.
student_scratch.fit(x_train, y_train, epochs=3)
student_scratch.evaluate(x_test, y_test)
```
<div class="k-default-codeblock">
```
Epoch 1/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 4s 1ms/step - loss: 0.5111 - sparse_categorical_accuracy: 0.8460
Epoch 2/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - loss: 0.1039 - sparse_categorical_accuracy: 0.9687
Epoch 3/3
1875/1875 ━━━━━━━━━━━━━━━━━━━━ 3s 1ms/step - loss: 0.0748 - sparse_categorical_accuracy: 0.9780
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 0.0744 - sparse_categorical_accuracy: 0.9737
[0.0629437193274498, 0.9778000712394714]
```
</div>
If the teacher is trained for 5 full epochs and the student is distilled on this teacher
for 3 full epochs, you should in this example experience a performance boost compared to
training the same student model from scratch, and even compared to the teacher itself.
You should expect the teacher to have accuracy around 97.6%, the student trained from
scratch should be around 97.6%, and the distilled student should be around 98.1%. Try out
different random seeds to see how the weight initialization affects these results.
| keras-io/examples/vision/md/knowledge_distillation.md/0 | {
"file_path": "keras-io/examples/vision/md/knowledge_distillation.md",
"repo_id": "keras-io",
"token_count": 3875
} | 91 |
# Augmenting convnets with aggregated attention
**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)<br>
**Date created:** 2022/01/22<br>
**Last modified:** 2022/01/22<br>
**Description:** Building a patch-convnet architecture and visualizing its attention maps.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/patch_convnet.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/patch_convnet.py)
---
## Introduction
Vision transformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929))
have emerged as a powerful alternative to Convolutional Neural Networks.
ViTs process the images in a patch-based manner. The image information
is then aggregated into a `CLASS` token. This token correlates to the
most important patches of the image for a particular classification decision.
The interaction between the `CLASS` token and the patches can be visualized
to help explain a classification decision. In the academic paper
[Augmenting convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692)
by Touvron et al., the authors propose to set up an equivalent visualization for
convnets. They propose to substitute the global average pooling layer
of a convnet with a Transformer layer. The self-attention layer of the
Transformer would produce attention maps that correspond to the
most attended patches of the image for the classification decision.
In this example, we minimally implement the ideas of
[Augmenting Convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692).
The main goal of this example is to cover the following ideas, with
minor modifications (to adjust the implementation for CIFAR-10):
- The simple design for the attention-based pooling layer, such that
it explicitly provides the weights (importance) of the different
patches.
- The novel convnet architecture, called **PatchConvNet**, which deviates from the
age-old pyramidal architecture.
---
## Setup and Imports
```python
import math
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras import ops
from tensorflow import data as tf_data
# Set seed for reproducibiltiy
SEED = 42
keras.utils.set_random_seed(SEED)
```
---
## Hyperparameters
```python
# DATA
BATCH_SIZE = 128
BUFFER_SIZE = BATCH_SIZE * 2
AUTO = tf_data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10 # for CIFAR 10
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
# ARCHITECTURE
DIMENSIONS = 256
SE_RATIO = 8
TRUNK_DEPTH = 2
# OPTIMIZER
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 50
```
---
## Load the CIFAR10 dataset
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
train_ds = tf_data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf_data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
```
<div class="k-default-codeblock">
```
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170500096/170498071 [==============================] - 16s 0us/step
170508288/170498071 [==============================] - 16s 0us/step
Training samples: 40000
Validation samples: 10000
Testing samples: 10000
```
</div>
---
## Augmentation layers
```python
def get_preprocessing():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="preprocessing",
)
return model
def get_train_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
return model
```
---
## Convolutional stem
The stem of the model is a lightweight preprocessing module that
maps image pixels to a set of vectors (patches).
```python
def build_convolutional_stem(dimensions):
"""Build the convolutional stem.
Args:
dimensions: The embedding dimension of the patches (d in paper).
    Returns:
        The convolutional stem as a Keras Sequential
        model.
"""
config = {
"kernel_size": (3, 3),
"strides": (2, 2),
"activation": ops.gelu,
"padding": "same",
}
convolutional_stem = keras.Sequential(
[
layers.Conv2D(filters=dimensions // 2, **config),
layers.Conv2D(filters=dimensions, **config),
],
name="convolutional_stem",
)
return convolutional_stem
```
---
## Convolutional trunk
The trunk of the model is the most compute-intensive part. It consists
of `N` stacked residual convolutional blocks.
```python
class SqueezeExcite(layers.Layer):
"""Applies squeeze and excitation to input feature maps as seen in
https://arxiv.org/abs/1709.01507.
Args:
ratio: The ratio with which the feature map needs to be reduced in
the reduction phase.
Inputs:
Convolutional features.
Outputs:
Attention modified feature maps.
"""
def __init__(self, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
def get_config(self):
config = super().get_config()
config.update({"ratio": self.ratio})
return config
def build(self, input_shape):
filters = input_shape[-1]
self.squeeze = layers.GlobalAveragePooling2D(keepdims=True)
self.reduction = layers.Dense(
units=filters // self.ratio,
activation="relu",
use_bias=False,
)
self.excite = layers.Dense(units=filters, activation="sigmoid", use_bias=False)
self.multiply = layers.Multiply()
def call(self, x):
shortcut = x
x = self.squeeze(x)
x = self.reduction(x)
x = self.excite(x)
x = self.multiply([shortcut, x])
return x
class Trunk(layers.Layer):
"""Convolutional residual trunk as in the https://arxiv.org/abs/2112.13692
Args:
depth: Number of trunk residual blocks
        dimensions: Dimension of the model (denoted by d in the paper)
ratio: The Squeeze-Excitation ratio
Inputs:
Convolutional features extracted from the conv stem.
Outputs:
Flattened patches.
"""
def __init__(self, depth, dimensions, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
self.dimensions = dimensions
self.depth = depth
def get_config(self):
config = super().get_config()
config.update(
{
"ratio": self.ratio,
"dimensions": self.dimensions,
"depth": self.depth,
}
)
return config
def build(self, input_shape):
config = {
"filters": self.dimensions,
"activation": ops.gelu,
"padding": "same",
}
trunk_block = [
layers.LayerNormalization(epsilon=1e-6),
layers.Conv2D(kernel_size=(1, 1), **config),
layers.Conv2D(kernel_size=(3, 3), **config),
SqueezeExcite(ratio=self.ratio),
layers.Conv2D(kernel_size=(1, 1), filters=self.dimensions, padding="same"),
]
self.trunk_blocks = [keras.Sequential(trunk_block) for _ in range(self.depth)]
self.add = layers.Add()
self.flatten_spatial = layers.Reshape((-1, self.dimensions))
def call(self, x):
# Remember the input.
shortcut = x
for trunk_block in self.trunk_blocks:
output = trunk_block(x)
shortcut = self.add([output, shortcut])
x = shortcut
# Flatten the patches.
x = self.flatten_spatial(x)
return x
```
---
## Attention Pooling
The output of the convolutional trunk is attended with a trainable
_query_ class token. The resulting attention map is the weight of
every patch of the image for a classification decision.
```python
class AttentionPooling(layers.Layer):
"""Applies attention to the patches extracted form the
trunk with the CLS token.
Args:
dimensions: The dimension of the whole architecture.
num_classes: The number of classes in the dataset.
Inputs:
Flattened patches from the trunk.
Outputs:
        The logits and the attention weights of the patches.
"""
def __init__(self, dimensions, num_classes, **kwargs):
super().__init__(**kwargs)
self.dimensions = dimensions
self.num_classes = num_classes
self.cls = keras.Variable(ops.zeros((1, 1, dimensions)))
def get_config(self):
config = super().get_config()
config.update(
{
"dimensions": self.dimensions,
"num_classes": self.num_classes,
"cls": self.cls.numpy(),
}
)
return config
def build(self, input_shape):
self.attention = layers.MultiHeadAttention(
num_heads=1,
key_dim=self.dimensions,
dropout=0.2,
)
self.layer_norm1 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm2 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm3 = layers.LayerNormalization(epsilon=1e-6)
self.mlp = keras.Sequential(
[
layers.Dense(units=self.dimensions, activation=ops.gelu),
layers.Dropout(0.2),
layers.Dense(units=self.dimensions, activation=ops.gelu),
]
)
self.dense = layers.Dense(units=self.num_classes)
self.flatten = layers.Flatten()
def call(self, x):
batch_size = ops.shape(x)[0]
# Expand the class token batch number of times.
class_token = ops.repeat(self.cls, repeats=batch_size, axis=0)
# Concat the input with the trainable class token.
x = ops.concatenate([class_token, x], axis=1)
# Apply attention to x.
x = self.layer_norm1(x)
x, viz_weights = self.attention(
query=x[:, 0:1], key=x, value=x, return_attention_scores=True
)
class_token = class_token + x
class_token = self.layer_norm2(class_token)
class_token = self.flatten(class_token)
class_token = self.layer_norm3(class_token)
class_token = class_token + self.mlp(class_token)
# Build the logits
logits = self.dense(class_token)
return logits, ops.squeeze(viz_weights)[..., 1:]
```
---
## Patch convnet
The patch-convnet is shown in the figure below.
|  |
| :--: |
| [Source](https://arxiv.org/abs/2112.13692) |
All the modules of the architecture were built in the earlier sections.
In this section, we stack all of the different modules together.
```python
class PatchConvNet(keras.Model):
def __init__(
self,
stem,
trunk,
attention_pooling,
preprocessing_model,
train_augmentation_model,
**kwargs,
):
super().__init__(**kwargs)
self.stem = stem
self.trunk = trunk
self.attention_pooling = attention_pooling
self.train_augmentation_model = train_augmentation_model
self.preprocessing_model = preprocessing_model
def get_config(self):
config = super().get_config()
config.update(
{
"stem": self.stem,
"trunk": self.trunk,
"attention_pooling": self.attention_pooling,
"train_augmentation_model": self.train_augmentation_model,
"preprocessing_model": self.preprocessing_model,
}
)
return config
def _calculate_loss(self, inputs, test=False):
images, labels = inputs
# Augment the input images.
if test:
augmented_images = self.preprocessing_model(images)
else:
augmented_images = self.train_augmentation_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, _ = self.attention_pooling(x)
# Compute the total loss.
total_loss = self.compiled_loss(labels, logits)
return total_loss, logits
def train_step(self, inputs):
with tf.GradientTape() as tape:
total_loss, logits = self._calculate_loss(inputs)
# Apply gradients.
train_vars = [
self.stem.trainable_variables,
self.trunk.trainable_variables,
self.attention_pooling.trainable_variables,
]
grads = tape.gradient(total_loss, train_vars)
trainable_variable_list = []
for grad, var in zip(grads, train_vars):
for g, v in zip(grad, var):
trainable_variable_list.append((g, v))
self.optimizer.apply_gradients(trainable_variable_list)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
total_loss, logits = self._calculate_loss(inputs, test=True)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def call(self, images):
# Augment the input images.
augmented_images = self.preprocessing_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, viz_weights = self.attention_pooling(x)
return logits, viz_weights
```
---
## Callbacks
This callback will plot the image and the attention map overlaid on
the image.
```python
# Taking a batch of test inputs to measure model's progress.
test_images, test_labels = next(iter(test_ds))
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 4:
test_augmented_images = self.model.preprocessing_model(test_images)
# Pass through the stem.
test_x = self.model.stem(test_augmented_images)
# Pass through the trunk.
test_x = self.model.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = self.model.attention_pooling(test_x)
            # Reshape the visualization weights
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
# Take a random image and its attention weights.
index = np.random.randint(low=0, high=ops.shape(test_augmented_images)[0])
selected_image = test_augmented_images[index]
selected_weight = test_viz_weights[index]
            # Plot the images and the overlaid attention map.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
ax[0].set_title(f"Original: {epoch:03d}")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(
selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent()
)
ax[1].set_title(f"Attended: {epoch:03d}")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()
```
---
## Learning rate schedule
```python
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = np.pi
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = ops.cos(
self.pi
* (ops.cast(step, "float32") - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * ops.cast(step, "float32") + self.warmup_learning_rate
learning_rate = ops.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return ops.where(
step > self.total_steps,
0.0,
learning_rate,
)
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
```
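As an optional sanity check (not required for training), you can evaluate the schedule at each step and plot it to verify the linear warmup followed by the cosine decay:
```python
lrs = [float(scheduled_lrs(step)) for step in range(total_steps)]
plt.plot(lrs)
plt.xlabel("Step")
plt.ylabel("Learning rate")
plt.show()
```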
---
## Training
We build the model, compile it, and train it.
```python
train_augmentation_model = get_train_augmentation_model()
preprocessing_model = get_preprocessing()
conv_stem = build_convolutional_stem(dimensions=DIMENSIONS)
conv_trunk = Trunk(depth=TRUNK_DEPTH, dimensions=DIMENSIONS, ratio=SE_RATIO)
attention_pooling = AttentionPooling(dimensions=DIMENSIONS, num_classes=NUM_CLASSES)
patch_conv_net = PatchConvNet(
stem=conv_stem,
trunk=conv_trunk,
attention_pooling=attention_pooling,
train_augmentation_model=train_augmentation_model,
preprocessing_model=preprocessing_model,
)
# Assemble the callbacks.
train_callbacks = [TrainMonitor(epoch_interval=5)]
# Get the optimizer.
optimizer = keras.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY
)
# Compile and pretrain the model.
patch_conv_net.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = patch_conv_net.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=train_callbacks,
)
# Evaluate the model with the test dataset.
loss, acc_top1, acc_top5 = patch_conv_net.evaluate(test_ds)
print(f"Loss: {loss:0.2f}")
print(f"Top 1 test accuracy: {acc_top1*100:0.2f}%")
print(f"Top 5 test accuracy: {acc_top5*100:0.2f}%")
```
<div class="k-default-codeblock">
```
Epoch 1/50
313/313 [==============================] - 14s 27ms/step - loss: 1.9639 - accuracy: 0.2635 - top-5-accuracy: 0.7792 - val_loss: 1.7219 - val_accuracy: 0.3778 - val_top-5-accuracy: 0.8514
Epoch 2/50
313/313 [==============================] - 8s 26ms/step - loss: 1.5475 - accuracy: 0.4214 - top-5-accuracy: 0.9099 - val_loss: 1.4351 - val_accuracy: 0.4592 - val_top-5-accuracy: 0.9298
Epoch 3/50
313/313 [==============================] - 8s 25ms/step - loss: 1.3328 - accuracy: 0.5135 - top-5-accuracy: 0.9368 - val_loss: 1.3763 - val_accuracy: 0.5077 - val_top-5-accuracy: 0.9268
Epoch 4/50
313/313 [==============================] - 8s 25ms/step - loss: 1.1653 - accuracy: 0.5807 - top-5-accuracy: 0.9554 - val_loss: 1.0892 - val_accuracy: 0.6146 - val_top-5-accuracy: 0.9560
Epoch 5/50
313/313 [==============================] - ETA: 0s - loss: 1.0235 - accuracy: 0.6345 - top-5-accuracy: 0.9660
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 1.0235 - accuracy: 0.6345 - top-5-accuracy: 0.9660 - val_loss: 1.0085 - val_accuracy: 0.6424 - val_top-5-accuracy: 0.9640
Epoch 6/50
313/313 [==============================] - 8s 25ms/step - loss: 0.9190 - accuracy: 0.6729 - top-5-accuracy: 0.9741 - val_loss: 0.9066 - val_accuracy: 0.6850 - val_top-5-accuracy: 0.9751
Epoch 7/50
313/313 [==============================] - 8s 25ms/step - loss: 0.8331 - accuracy: 0.7056 - top-5-accuracy: 0.9783 - val_loss: 0.8844 - val_accuracy: 0.6903 - val_top-5-accuracy: 0.9779
Epoch 8/50
313/313 [==============================] - 8s 25ms/step - loss: 0.7526 - accuracy: 0.7376 - top-5-accuracy: 0.9823 - val_loss: 0.8200 - val_accuracy: 0.7114 - val_top-5-accuracy: 0.9793
Epoch 9/50
313/313 [==============================] - 8s 25ms/step - loss: 0.6853 - accuracy: 0.7636 - top-5-accuracy: 0.9856 - val_loss: 0.7216 - val_accuracy: 0.7584 - val_top-5-accuracy: 0.9823
Epoch 10/50
313/313 [==============================] - ETA: 0s - loss: 0.6260 - accuracy: 0.7849 - top-5-accuracy: 0.9877
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.6260 - accuracy: 0.7849 - top-5-accuracy: 0.9877 - val_loss: 0.6985 - val_accuracy: 0.7624 - val_top-5-accuracy: 0.9847
Epoch 11/50
313/313 [==============================] - 8s 25ms/step - loss: 0.5877 - accuracy: 0.7978 - top-5-accuracy: 0.9897 - val_loss: 0.7357 - val_accuracy: 0.7595 - val_top-5-accuracy: 0.9816
Epoch 12/50
313/313 [==============================] - 8s 25ms/step - loss: 0.5615 - accuracy: 0.8066 - top-5-accuracy: 0.9905 - val_loss: 0.6554 - val_accuracy: 0.7806 - val_top-5-accuracy: 0.9841
Epoch 13/50
313/313 [==============================] - 8s 25ms/step - loss: 0.5287 - accuracy: 0.8174 - top-5-accuracy: 0.9915 - val_loss: 0.5867 - val_accuracy: 0.8051 - val_top-5-accuracy: 0.9869
Epoch 14/50
313/313 [==============================] - 8s 25ms/step - loss: 0.4976 - accuracy: 0.8286 - top-5-accuracy: 0.9921 - val_loss: 0.5707 - val_accuracy: 0.8047 - val_top-5-accuracy: 0.9899
Epoch 15/50
313/313 [==============================] - ETA: 0s - loss: 0.4735 - accuracy: 0.8348 - top-5-accuracy: 0.9939
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.4735 - accuracy: 0.8348 - top-5-accuracy: 0.9939 - val_loss: 0.5945 - val_accuracy: 0.8040 - val_top-5-accuracy: 0.9883
Epoch 16/50
313/313 [==============================] - 8s 25ms/step - loss: 0.4660 - accuracy: 0.8364 - top-5-accuracy: 0.9936 - val_loss: 0.5629 - val_accuracy: 0.8125 - val_top-5-accuracy: 0.9906
Epoch 17/50
313/313 [==============================] - 8s 25ms/step - loss: 0.4416 - accuracy: 0.8462 - top-5-accuracy: 0.9946 - val_loss: 0.5747 - val_accuracy: 0.8013 - val_top-5-accuracy: 0.9888
Epoch 18/50
313/313 [==============================] - 8s 25ms/step - loss: 0.4175 - accuracy: 0.8560 - top-5-accuracy: 0.9949 - val_loss: 0.5672 - val_accuracy: 0.8088 - val_top-5-accuracy: 0.9903
Epoch 19/50
313/313 [==============================] - 8s 25ms/step - loss: 0.3912 - accuracy: 0.8650 - top-5-accuracy: 0.9957 - val_loss: 0.5454 - val_accuracy: 0.8136 - val_top-5-accuracy: 0.9907
Epoch 20/50
311/313 [============================>.] - ETA: 0s - loss: 0.3800 - accuracy: 0.8676 - top-5-accuracy: 0.9956
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.3801 - accuracy: 0.8676 - top-5-accuracy: 0.9956 - val_loss: 0.5274 - val_accuracy: 0.8222 - val_top-5-accuracy: 0.9915
Epoch 21/50
313/313 [==============================] - 8s 25ms/step - loss: 0.3641 - accuracy: 0.8734 - top-5-accuracy: 0.9962 - val_loss: 0.5032 - val_accuracy: 0.8315 - val_top-5-accuracy: 0.9921
Epoch 22/50
313/313 [==============================] - 8s 25ms/step - loss: 0.3474 - accuracy: 0.8805 - top-5-accuracy: 0.9970 - val_loss: 0.5251 - val_accuracy: 0.8302 - val_top-5-accuracy: 0.9917
Epoch 23/50
313/313 [==============================] - 8s 25ms/step - loss: 0.3327 - accuracy: 0.8833 - top-5-accuracy: 0.9976 - val_loss: 0.5158 - val_accuracy: 0.8321 - val_top-5-accuracy: 0.9903
Epoch 24/50
313/313 [==============================] - 8s 25ms/step - loss: 0.3158 - accuracy: 0.8897 - top-5-accuracy: 0.9977 - val_loss: 0.5098 - val_accuracy: 0.8355 - val_top-5-accuracy: 0.9912
Epoch 25/50
312/313 [============================>.] - ETA: 0s - loss: 0.2985 - accuracy: 0.8976 - top-5-accuracy: 0.9976
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.2986 - accuracy: 0.8976 - top-5-accuracy: 0.9976 - val_loss: 0.5302 - val_accuracy: 0.8276 - val_top-5-accuracy: 0.9922
Epoch 26/50
313/313 [==============================] - 8s 25ms/step - loss: 0.2819 - accuracy: 0.9021 - top-5-accuracy: 0.9977 - val_loss: 0.5130 - val_accuracy: 0.8358 - val_top-5-accuracy: 0.9923
Epoch 27/50
313/313 [==============================] - 8s 25ms/step - loss: 0.2696 - accuracy: 0.9065 - top-5-accuracy: 0.9983 - val_loss: 0.5096 - val_accuracy: 0.8389 - val_top-5-accuracy: 0.9926
Epoch 28/50
313/313 [==============================] - 8s 25ms/step - loss: 0.2526 - accuracy: 0.9115 - top-5-accuracy: 0.9983 - val_loss: 0.4988 - val_accuracy: 0.8403 - val_top-5-accuracy: 0.9921
Epoch 29/50
313/313 [==============================] - 8s 25ms/step - loss: 0.2322 - accuracy: 0.9190 - top-5-accuracy: 0.9987 - val_loss: 0.5234 - val_accuracy: 0.8395 - val_top-5-accuracy: 0.9915
Epoch 30/50
313/313 [==============================] - ETA: 0s - loss: 0.2180 - accuracy: 0.9235 - top-5-accuracy: 0.9988
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 26ms/step - loss: 0.2180 - accuracy: 0.9235 - top-5-accuracy: 0.9988 - val_loss: 0.5175 - val_accuracy: 0.8407 - val_top-5-accuracy: 0.9925
Epoch 31/50
313/313 [==============================] - 8s 25ms/step - loss: 0.2108 - accuracy: 0.9267 - top-5-accuracy: 0.9990 - val_loss: 0.5046 - val_accuracy: 0.8476 - val_top-5-accuracy: 0.9937
Epoch 32/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1929 - accuracy: 0.9337 - top-5-accuracy: 0.9991 - val_loss: 0.5096 - val_accuracy: 0.8516 - val_top-5-accuracy: 0.9914
Epoch 33/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1787 - accuracy: 0.9370 - top-5-accuracy: 0.9992 - val_loss: 0.4963 - val_accuracy: 0.8541 - val_top-5-accuracy: 0.9917
Epoch 34/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1653 - accuracy: 0.9428 - top-5-accuracy: 0.9994 - val_loss: 0.5092 - val_accuracy: 0.8547 - val_top-5-accuracy: 0.9921
Epoch 35/50
313/313 [==============================] - ETA: 0s - loss: 0.1544 - accuracy: 0.9464 - top-5-accuracy: 0.9995
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 7s 24ms/step - loss: 0.1544 - accuracy: 0.9464 - top-5-accuracy: 0.9995 - val_loss: 0.5137 - val_accuracy: 0.8513 - val_top-5-accuracy: 0.9928
Epoch 36/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1418 - accuracy: 0.9507 - top-5-accuracy: 0.9997 - val_loss: 0.5267 - val_accuracy: 0.8560 - val_top-5-accuracy: 0.9913
Epoch 37/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1259 - accuracy: 0.9561 - top-5-accuracy: 0.9997 - val_loss: 0.5283 - val_accuracy: 0.8584 - val_top-5-accuracy: 0.9923
Epoch 38/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1166 - accuracy: 0.9599 - top-5-accuracy: 0.9997 - val_loss: 0.5541 - val_accuracy: 0.8549 - val_top-5-accuracy: 0.9919
Epoch 39/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1111 - accuracy: 0.9624 - top-5-accuracy: 0.9997 - val_loss: 0.5543 - val_accuracy: 0.8575 - val_top-5-accuracy: 0.9917
Epoch 40/50
312/313 [============================>.] - ETA: 0s - loss: 0.1017 - accuracy: 0.9653 - top-5-accuracy: 0.9997
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.1016 - accuracy: 0.9653 - top-5-accuracy: 0.9997 - val_loss: 0.5357 - val_accuracy: 0.8614 - val_top-5-accuracy: 0.9923
Epoch 41/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0925 - accuracy: 0.9687 - top-5-accuracy: 0.9998 - val_loss: 0.5248 - val_accuracy: 0.8615 - val_top-5-accuracy: 0.9924
Epoch 42/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0848 - accuracy: 0.9726 - top-5-accuracy: 0.9997 - val_loss: 0.5182 - val_accuracy: 0.8654 - val_top-5-accuracy: 0.9939
Epoch 43/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0823 - accuracy: 0.9724 - top-5-accuracy: 0.9999 - val_loss: 0.5010 - val_accuracy: 0.8679 - val_top-5-accuracy: 0.9931
Epoch 44/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0762 - accuracy: 0.9752 - top-5-accuracy: 0.9998 - val_loss: 0.5088 - val_accuracy: 0.8686 - val_top-5-accuracy: 0.9939
Epoch 45/50
312/313 [============================>.] - ETA: 0s - loss: 0.0752 - accuracy: 0.9763 - top-5-accuracy: 0.9999
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 26ms/step - loss: 0.0752 - accuracy: 0.9764 - top-5-accuracy: 0.9999 - val_loss: 0.4844 - val_accuracy: 0.8679 - val_top-5-accuracy: 0.9938
Epoch 46/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0789 - accuracy: 0.9745 - top-5-accuracy: 0.9997 - val_loss: 0.4774 - val_accuracy: 0.8702 - val_top-5-accuracy: 0.9937
Epoch 47/50
313/313 [==============================] - 8s 25ms/step - loss: 0.0866 - accuracy: 0.9726 - top-5-accuracy: 0.9998 - val_loss: 0.4644 - val_accuracy: 0.8666 - val_top-5-accuracy: 0.9936
Epoch 48/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1000 - accuracy: 0.9697 - top-5-accuracy: 0.9999 - val_loss: 0.4471 - val_accuracy: 0.8636 - val_top-5-accuracy: 0.9933
Epoch 49/50
313/313 [==============================] - 8s 25ms/step - loss: 0.1315 - accuracy: 0.9592 - top-5-accuracy: 0.9997 - val_loss: 0.4411 - val_accuracy: 0.8603 - val_top-5-accuracy: 0.9926
Epoch 50/50
313/313 [==============================] - ETA: 0s - loss: 0.1828 - accuracy: 0.9447 - top-5-accuracy: 0.9995
```
</div>

<div class="k-default-codeblock">
```
313/313 [==============================] - 8s 25ms/step - loss: 0.1828 - accuracy: 0.9447 - top-5-accuracy: 0.9995 - val_loss: 0.4614 - val_accuracy: 0.8480 - val_top-5-accuracy: 0.9920
79/79 [==============================] - 1s 8ms/step - loss: 0.4696 - accuracy: 0.8459 - top-5-accuracy: 0.9921
Loss: 0.47
Top 1 test accuracy: 84.59%
Top 5 test accuracy: 99.21%
```
</div>
---
## Inference
Here, we use the trained model to plot the attention map.
```python
def plot_attention(image):
"""Plots the attention map on top of the image.
Args:
image: A numpy image of arbitrary size.
"""
# Resize the image to a (32, 32) dim.
image = ops.image.resize(image, (32, 32))
image = image[np.newaxis, ...]
test_augmented_images = patch_conv_net.preprocessing_model(image)
# Pass through the stem.
test_x = patch_conv_net.stem(test_augmented_images)
# Pass through the trunk.
test_x = patch_conv_net.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = patch_conv_net.attention_pooling(test_x)
test_viz_weights = test_viz_weights[np.newaxis, ...]
    # Reshape the visualization weights.
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
selected_image = test_augmented_images[0]
selected_weight = test_viz_weights[0]
# Plot the images.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
ax[0].set_title(f"Original")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent())
ax[1].set_title(f"Attended")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()
url = "http://farm9.staticflickr.com/8017/7140384795_385b1f48df_z.jpg"
image_name = keras.utils.get_file(fname="image.jpg", origin=url)
image = keras.utils.load_img(image_name)
image = keras.utils.img_to_array(image)
plot_attention(image)
```

---
## Conclusions
The attention map corresponding to the trainable `CLASS`
token and the patches of the image helps explain the classification
decision. One should also note that the attention maps gradually get
better. In the initial training regime, the attention is scattered all
around while at a later stage, it focuses more on the objects of the
image.
The non-pyramidal convnet achieves an accuracy of ~84-85% top-1 test
accuracy.
I would like to thank [JarvisLabs.ai](https://jarvislabs.ai/) for
providing GPU credits for this project.
| keras-io/examples/vision/md/patch_convnet.md/0 | {
"file_path": "keras-io/examples/vision/md/patch_convnet.md",
"repo_id": "keras-io",
"token_count": 14745
} | 92 |
# Image Super-Resolution using an Efficient Sub-Pixel CNN
**Author:** [Xingyu Long](https://github.com/xingyu-long)<br>
**Date created:** 2020/07/28<br>
**Last modified:** 2020/08/27<br>
**Description:** Implementing Super-Resolution using Efficient sub-pixel model on BSDS500.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/super_resolution_sub_pixel.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/super_resolution_sub_pixel.py)
---
## Introduction
ESPCN (Efficient Sub-Pixel CNN), proposed by [Shi, 2016](https://arxiv.org/abs/1609.05158)
is a model that reconstructs a high-resolution version of an image given a low-resolution
version.
It leverages efficient "sub-pixel convolution" layers, which learn an array of
image upscaling filters.
In this code example, we will implement the model from the paper and train it on a small
dataset,
[BSDS500](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html).
---
## Setup
```python
import keras
from keras import layers
from keras import ops
from keras.utils import load_img
from keras.utils import array_to_img
from keras.utils import img_to_array
from keras.preprocessing import image_dataset_from_directory
import tensorflow as tf # only for data preprocessing
import os
import math
import numpy as np
from IPython.display import display
```
---
## Load data: BSDS500 dataset
### Download dataset
We use the built-in `keras.utils.get_file` utility to retrieve the dataset.
```python
dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True)
root_dir = os.path.join(data_dir, "BSDS500/data")
```
We create training and validation datasets via `image_dataset_from_directory`.
```python
crop_size = 300
upscale_factor = 3
input_size = crop_size // upscale_factor
batch_size = 8
train_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="training",
seed=1337,
label_mode=None,
)
valid_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="validation",
seed=1337,
label_mode=None,
)
```
<div class="k-default-codeblock">
```
Found 500 files.
Using 400 files for training.
Found 500 files.
Using 100 files for validation.
```
</div>
We rescale the images to take values in the range [0, 1].
```python
def scaling(input_image):
input_image = input_image / 255.0
return input_image
# Scale from (0, 255) to (0, 1)
train_ds = train_ds.map(scaling)
valid_ds = valid_ds.map(scaling)
```
Let's visualize a few sample images:
```python
for batch in train_ds.take(1):
for img in batch:
display(array_to_img(img))
```








We prepare a dataset of test image paths that we will use for
visual evaluation at the end of this example.
```python
dataset = os.path.join(root_dir, "images")
test_path = os.path.join(dataset, "test")
test_img_paths = sorted(
[
os.path.join(test_path, fname)
for fname in os.listdir(test_path)
if fname.endswith(".jpg")
]
)
```
---
## Crop and resize images
Let's process image data.
First, we convert our images from the RGB color space to the
[YUV colour space](https://en.wikipedia.org/wiki/YUV).
For the input data (low-resolution images),
we crop the image, retrieve the `y` channel (luminance),
and resize it with the `area` method (use `BICUBIC` if you use PIL).
We only consider the luminance channel
in the YUV color space because humans are more sensitive to
luminance change.
For the target data (high-resolution images), we just crop the image
and retrieve the `y` channel.
```python
# Use TF Ops to process.
def process_input(input, input_size, upscale_factor):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return tf.image.resize(y, [input_size, input_size], method="area")
def process_target(input):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return y
train_ds = train_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
train_ds = train_ds.prefetch(buffer_size=32)
valid_ds = valid_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
valid_ds = valid_ds.prefetch(buffer_size=32)
```
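After the mapping, each dataset element is an `(input, target)` pair of single-channel luminance images. A quick, optional shape check should print something like `(8, 100, 100, 1)` for the inputs and `(8, 300, 300, 1)` for the targets:
```python
for lowres_batch, highres_batch in train_ds.take(1):
    print(lowres_batch.shape, highres_batch.shape)
```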
Let's take a look at the input and target data.
```python
for batch in train_ds.take(1):
for img in batch[0]:
display(array_to_img(img))
for img in batch[1]:
display(array_to_img(img))
```
















---
## Build a model
Compared to the paper, we add one more layer and we use the `relu` activation function
instead of `tanh`.
It achieves better performance even though we train the model for fewer epochs.
```python
class DepthToSpace(layers.Layer):
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
def call(self, input):
batch, height, width, depth = ops.shape(input)
depth = depth // (self.block_size**2)
x = ops.reshape(
input, [batch, height, width, self.block_size, self.block_size, depth]
)
x = ops.transpose(x, [0, 1, 3, 2, 4, 5])
x = ops.reshape(
x, [batch, height * self.block_size, width * self.block_size, depth]
)
return x
def get_model(upscale_factor=3, channels=1):
conv_args = {
"activation": "relu",
"kernel_initializer": "orthogonal",
"padding": "same",
}
inputs = keras.Input(shape=(None, None, channels))
x = layers.Conv2D(64, 5, **conv_args)(inputs)
x = layers.Conv2D(64, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(channels * (upscale_factor**2), 3, **conv_args)(x)
outputs = DepthToSpace(upscale_factor)(x)
return keras.Model(inputs, outputs)
```
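To see what the sub-pixel rearrangement in `DepthToSpace` does, here is a minimal shape check on random data (illustrative only; only the shapes matter):
```python
# channels * upscale_factor**2 = 1 * 9 feature channels per low-resolution pixel.
dummy = np.random.rand(4, 25, 25, 9).astype("float32")
upscaled = DepthToSpace(block_size=3)(dummy)
print(upscaled.shape)  # (4, 75, 75, 1): each group of 9 channels becomes a 3x3 spatial patch
```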
---
## Define utility functions
We need to define several utility functions to monitor our results:
- `plot_results` to plot and save an image.
- `get_lowres_image` to convert an image to its low-resolution version.
- `upscale_image` to turn a low-resolution image into
a high-resolution version reconstructed by the model.
In this function, we use the `y` channel from the YUV color space
as input to the model and then combine the output with the
other channels to obtain an RGB image.
```python
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import PIL
def plot_results(img, prefix, title):
"""Plot the result with zoom-in area."""
img_array = img_to_array(img)
img_array = img_array.astype("float32") / 255.0
# Create a new figure with a default 111 subplot.
fig, ax = plt.subplots()
im = ax.imshow(img_array[::-1], origin="lower")
plt.title(title)
# zoom-factor: 2.0, location: upper-left
axins = zoomed_inset_axes(ax, 2, loc=2)
axins.imshow(img_array[::-1], origin="lower")
# Specify the limits.
x1, x2, y1, y2 = 200, 300, 100, 200
# Apply the x-limits.
axins.set_xlim(x1, x2)
# Apply the y-limits.
axins.set_ylim(y1, y2)
plt.yticks(visible=False)
plt.xticks(visible=False)
# Make the line.
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue")
plt.savefig(str(prefix) + "-" + title + ".png")
plt.show()
def get_lowres_image(img, upscale_factor):
"""Return low-resolution image to use as model input."""
return img.resize(
(img.size[0] // upscale_factor, img.size[1] // upscale_factor),
PIL.Image.BICUBIC,
)
def upscale_image(model, img):
"""Predict the result based on input image and restore the image as RGB."""
ycbcr = img.convert("YCbCr")
y, cb, cr = ycbcr.split()
y = img_to_array(y)
y = y.astype("float32") / 255.0
input = np.expand_dims(y, axis=0)
out = model.predict(input)
out_img_y = out[0]
out_img_y *= 255.0
# Restore the image in RGB color space.
out_img_y = out_img_y.clip(0, 255)
out_img_y = out_img_y.reshape((np.shape(out_img_y)[0], np.shape(out_img_y)[1]))
out_img_y = PIL.Image.fromarray(np.uint8(out_img_y), mode="L")
out_img_cb = cb.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img = PIL.Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert(
"RGB"
)
return out_img
```
---
## Define callbacks to monitor training
The `ESPCNCallback` object will compute and display
the [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) metric.
This is the main metric we use to evaluate super-resolution performance.
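PSNR is derived from the mean squared error. Since the images here are scaled to `[0, 1]` (maximum pixel value of 1), it reduces to `10 * log10(1 / mse)`, which is exactly what the callback below applies to the validation loss. A small standalone helper, for illustration only (it is not used by the training code):
```python
def psnr_from_mse(mse, max_value=1.0):
    # PSNR = 10 * log10(MAX**2 / MSE); with images in [0, 1], MAX = 1.
    return 10 * math.log10((max_value**2) / mse)
print(psnr_from_mse(0.001))  # ~30 dB
```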
```python
class ESPCNCallback(keras.callbacks.Callback):
def __init__(self):
super().__init__()
self.test_img = get_lowres_image(load_img(test_img_paths[0]), upscale_factor)
# Store PSNR value in each epoch.
def on_epoch_begin(self, epoch, logs=None):
self.psnr = []
def on_epoch_end(self, epoch, logs=None):
print("Mean PSNR for epoch: %.2f" % (np.mean(self.psnr)))
if epoch % 20 == 0:
prediction = upscale_image(self.model, self.test_img)
plot_results(prediction, "epoch-" + str(epoch), "prediction")
def on_test_batch_end(self, batch, logs=None):
self.psnr.append(10 * math.log10(1 / logs["loss"]))
```
Define `ModelCheckpoint` and `EarlyStopping` callbacks.
```python
early_stopping_callback = keras.callbacks.EarlyStopping(monitor="loss", patience=10)
checkpoint_filepath = "/tmp/checkpoint.keras"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor="loss",
mode="min",
save_best_only=True,
)
model = get_model(upscale_factor=upscale_factor, channels=1)
model.summary()
callbacks = [ESPCNCallback(), early_stopping_callback, model_checkpoint_callback]
loss_fn = keras.losses.MeanSquaredError()
optimizer = keras.optimizers.Adam(learning_rate=0.001)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,664</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,464</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">9</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,601</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ depth_to_space (<span style="color: #0087ff; text-decoration-color: #0087ff">DepthToSpace</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">59,657</span> (233.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">59,657</span> (233.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Train the model
```python
epochs = 100
model.compile(
optimizer=optimizer,
loss=loss_fn,
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=valid_ds, verbose=2
)
# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)
```
<div class="k-default-codeblock">
```
Epoch 1/100
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699478222.454735 357563 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
Mean PSNR for epoch: 22.51
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 684ms/step
```
</div>

<div class="k-default-codeblock">
```
50/50 - 8s - 158ms/step - loss: 0.0284 - val_loss: 0.0057
Epoch 2/100
Mean PSNR for epoch: 24.82
50/50 - 1s - 11ms/step - loss: 0.0049 - val_loss: 0.0033
Epoch 3/100
Mean PSNR for epoch: 24.84
50/50 - 1s - 11ms/step - loss: 0.0034 - val_loss: 0.0031
Epoch 4/100
Mean PSNR for epoch: 25.44
50/50 - 1s - 11ms/step - loss: 0.0032 - val_loss: 0.0027
Epoch 5/100
Mean PSNR for epoch: 25.64
50/50 - 1s - 11ms/step - loss: 0.0030 - val_loss: 0.0027
Epoch 6/100
Mean PSNR for epoch: 26.20
50/50 - 1s - 11ms/step - loss: 0.0029 - val_loss: 0.0026
Epoch 7/100
Mean PSNR for epoch: 26.42
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0025
Epoch 8/100
Mean PSNR for epoch: 26.58
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0025
Epoch 9/100
Mean PSNR for epoch: 26.25
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0024
Epoch 10/100
Mean PSNR for epoch: 26.25
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0024
Epoch 11/100
Mean PSNR for epoch: 26.43
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0024
Epoch 12/100
Mean PSNR for epoch: 26.43
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 13/100
Mean PSNR for epoch: 26.17
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 14/100
Mean PSNR for epoch: 26.45
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0024
Epoch 15/100
Mean PSNR for epoch: 26.23
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0024
Epoch 16/100
Mean PSNR for epoch: 26.40
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 17/100
Mean PSNR for epoch: 26.49
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 18/100
Mean PSNR for epoch: 26.17
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0026
Epoch 19/100
Mean PSNR for epoch: 26.61
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0023
Epoch 20/100
Mean PSNR for epoch: 26.38
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 21/100
Mean PSNR for epoch: 26.52
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
```
</div>

<div class="k-default-codeblock">
```
50/50 - 1s - 24ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 22/100
Mean PSNR for epoch: 26.46
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0024
Epoch 23/100
Mean PSNR for epoch: 26.71
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 24/100
Mean PSNR for epoch: 26.20
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0024
Epoch 25/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 26/100
Mean PSNR for epoch: 26.41
50/50 - 1s - 10ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 27/100
Mean PSNR for epoch: 26.48
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0025
Epoch 28/100
Mean PSNR for epoch: 26.27
50/50 - 1s - 10ms/step - loss: 0.0028 - val_loss: 0.0025
Epoch 29/100
Mean PSNR for epoch: 26.52
50/50 - 1s - 10ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 30/100
Mean PSNR for epoch: 26.62
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 31/100
Mean PSNR for epoch: 26.67
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 32/100
Mean PSNR for epoch: 26.57
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 33/100
Mean PSNR for epoch: 26.78
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0024
Epoch 34/100
Mean PSNR for epoch: 26.02
50/50 - 1s - 12ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 35/100
Mean PSNR for epoch: 26.07
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0025
Epoch 36/100
Mean PSNR for epoch: 26.49
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0024
Epoch 37/100
Mean PSNR for epoch: 26.35
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0022
Epoch 38/100
Mean PSNR for epoch: 26.92
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 39/100
Mean PSNR for epoch: 26.84
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 40/100
Mean PSNR for epoch: 26.08
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0027
Epoch 41/100
Mean PSNR for epoch: 26.37
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
```
</div>

<div class="k-default-codeblock">
```
50/50 - 1s - 23ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 42/100
Mean PSNR for epoch: 26.17
50/50 - 1s - 10ms/step - loss: 0.0027 - val_loss: 0.0026
Epoch 43/100
Mean PSNR for epoch: 26.68
50/50 - 1s - 11ms/step - loss: 0.0028 - val_loss: 0.0023
Epoch 44/100
Mean PSNR for epoch: 26.34
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 45/100
Mean PSNR for epoch: 26.87
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 46/100
Mean PSNR for epoch: 26.73
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 47/100
Mean PSNR for epoch: 26.63
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 48/100
Mean PSNR for epoch: 26.79
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 49/100
Mean PSNR for epoch: 26.59
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 50/100
Mean PSNR for epoch: 27.11
50/50 - 1s - 10ms/step - loss: 0.0025 - val_loss: 0.0024
Epoch 51/100
Mean PSNR for epoch: 26.76
50/50 - 1s - 12ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 52/100
Mean PSNR for epoch: 26.41
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 53/100
Mean PSNR for epoch: 26.28
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 54/100
Mean PSNR for epoch: 27.25
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 55/100
Mean PSNR for epoch: 26.41
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 56/100
Mean PSNR for epoch: 26.64
50/50 - 1s - 12ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 57/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 58/100
Mean PSNR for epoch: 26.72
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 59/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 60/100
Mean PSNR for epoch: 26.55
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 61/100
Mean PSNR for epoch: 26.52
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 18ms/step
```
</div>

<div class="k-default-codeblock">
```
50/50 - 1s - 23ms/step - loss: 0.0027 - val_loss: 0.0023
Epoch 62/100
Mean PSNR for epoch: 26.16
50/50 - 1s - 11ms/step - loss: 0.0027 - val_loss: 0.0023
Epoch 63/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 64/100
Mean PSNR for epoch: 26.61
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 65/100
Mean PSNR for epoch: 26.97
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 66/100
Mean PSNR for epoch: 27.02
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 67/100
Mean PSNR for epoch: 26.79
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 68/100
Mean PSNR for epoch: 26.59
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 69/100
Mean PSNR for epoch: 26.69
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0024
Epoch 70/100
Mean PSNR for epoch: 26.75
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 71/100
Mean PSNR for epoch: 26.79
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 72/100
Mean PSNR for epoch: 26.94
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 73/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 74/100
Mean PSNR for epoch: 26.67
50/50 - 1s - 10ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 75/100
Mean PSNR for epoch: 26.97
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 76/100
Mean PSNR for epoch: 26.83
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 77/100
Mean PSNR for epoch: 26.09
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 78/100
Mean PSNR for epoch: 26.76
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 79/100
Mean PSNR for epoch: 26.82
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 80/100
Mean PSNR for epoch: 26.48
50/50 - 1s - 12ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 81/100
Mean PSNR for epoch: 26.49
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
```
</div>

<div class="k-default-codeblock">
```
50/50 - 1s - 23ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 82/100
Mean PSNR for epoch: 26.49
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 83/100
Mean PSNR for epoch: 26.68
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0024
Epoch 84/100
Mean PSNR for epoch: 26.75
50/50 - 1s - 11ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 85/100
Mean PSNR for epoch: 26.52
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 86/100
Mean PSNR for epoch: 26.92
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 87/100
Mean PSNR for epoch: 26.57
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 88/100
Mean PSNR for epoch: 26.96
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 89/100
Mean PSNR for epoch: 26.82
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 90/100
Mean PSNR for epoch: 26.54
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 91/100
Mean PSNR for epoch: 26.48
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 92/100
Mean PSNR for epoch: 26.36
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 93/100
Mean PSNR for epoch: 26.81
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 94/100
Mean PSNR for epoch: 26.66
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 95/100
Mean PSNR for epoch: 26.87
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 96/100
Mean PSNR for epoch: 26.43
50/50 - 1s - 10ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 97/100
Mean PSNR for epoch: 26.52
50/50 - 1s - 10ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 98/100
Mean PSNR for epoch: 26.57
50/50 - 1s - 11ms/step - loss: 0.0025 - val_loss: 0.0023
Epoch 99/100
Mean PSNR for epoch: 26.33
50/50 - 1s - 12ms/step - loss: 0.0025 - val_loss: 0.0022
Epoch 100/100
Mean PSNR for epoch: 26.50
50/50 - 1s - 13ms/step - loss: 0.0025 - val_loss: 0.0022
```
</div>
---
## Run model prediction and plot the results
Let's compute the reconstructed version of a few images and save the results.
```python
total_bicubic_psnr = 0.0
total_test_psnr = 0.0
for index, test_img_path in enumerate(test_img_paths[50:60]):
img = load_img(test_img_path)
lowres_input = get_lowres_image(img, upscale_factor)
w = lowres_input.size[0] * upscale_factor
h = lowres_input.size[1] * upscale_factor
highres_img = img.resize((w, h))
prediction = upscale_image(model, lowres_input)
lowres_img = lowres_input.resize((w, h))
lowres_img_arr = img_to_array(lowres_img)
highres_img_arr = img_to_array(highres_img)
predict_img_arr = img_to_array(prediction)
bicubic_psnr = tf.image.psnr(lowres_img_arr, highres_img_arr, max_val=255)
test_psnr = tf.image.psnr(predict_img_arr, highres_img_arr, max_val=255)
total_bicubic_psnr += bicubic_psnr
total_test_psnr += test_psnr
print(
"PSNR of low resolution image and high resolution image is %.4f" % bicubic_psnr
)
print("PSNR of predict and high resolution is %.4f" % test_psnr)
plot_results(lowres_img, index, "lowres")
plot_results(highres_img, index, "highres")
plot_results(prediction, index, "prediction")
print("Avg. PSNR of lowres images is %.4f" % (total_bicubic_psnr / 10))
print("Avg. PSNR of reconstructions is %.4f" % (total_test_psnr / 10))
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 19ms/step
PSNR of low resolution image and high resolution image is 30.0157
PSNR of predict and high resolution is 30.5336
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
PSNR of low resolution image and high resolution image is 25.1103
PSNR of predict and high resolution is 26.0954
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 693ms/step
PSNR of low resolution image and high resolution image is 27.7789
PSNR of predict and high resolution is 28.3920
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 18ms/step
PSNR of low resolution image and high resolution image is 28.0321
PSNR of predict and high resolution is 28.2747
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 18ms/step
PSNR of low resolution image and high resolution image is 25.7853
PSNR of predict and high resolution is 26.3532
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
PSNR of low resolution image and high resolution image is 25.9181
PSNR of predict and high resolution is 26.7292
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
PSNR of low resolution image and high resolution image is 26.2389
PSNR of predict and high resolution is 27.1362
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
PSNR of low resolution image and high resolution image is 23.3281
PSNR of predict and high resolution is 24.6649
```
</div>



<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 17ms/step
PSNR of low resolution image and high resolution image is 29.9008
PSNR of predict and high resolution is 30.0894
```
</div>

| keras-io/examples/vision/md/super_resolution_sub_pixel.md/0 | {
"file_path": "keras-io/examples/vision/md/super_resolution_sub_pixel.md",
"repo_id": "keras-io",
"token_count": 15029
} | 93 |
<jupyter_start><jupyter_text>Tune hyperparameters in your custom training loop**Authors:** Tom O'Malley, Haifeng Jin**Date created:** 2019/10/28**Last modified:** 2022/01/12**Description:** Use `HyperModel.fit()` to tune training hyperparameters (such as batch size).<jupyter_code>!pip install keras-tuner -q<jupyter_output><empty_output><jupyter_text>IntroductionThe `HyperModel` class in KerasTuner provides a convenient way to define yoursearch space in a reusable object. You can override `HyperModel.build()` todefine and hypertune the model itself. To hypertune the training process (e.g.by selecting the proper batch size, number of training epochs, or dataaugmentation setup), you can override `HyperModel.fit()`, where you can access:- The `hp` object, which is an instance of `keras_tuner.HyperParameters`- The model built by `HyperModel.build()`A basic example is shown in the "tune model training" section of[Getting Started with KerasTuner](https://keras.io/guides/keras_tuner/getting_started/tune-model-training). Tuning the custom training loopIn this guide, we will subclass the `HyperModel` class and write a customtraining loop by overriding `HyperModel.fit()`. For how to write a customtraining loop with Keras, you can refer to the guide[Writing a training loop from scratch](https://keras.io/guides/writing_a_training_loop_from_scratch/).First, we import the libraries we need, and we create datasets for training andvalidation. Here, we just use some random data for demonstration purposes.<jupyter_code>import keras_tuner
import tensorflow as tf
import keras
import numpy as np
x_train = np.random.rand(1000, 28, 28, 1)
y_train = np.random.randint(0, 10, (1000, 1))
x_val = np.random.rand(1000, 28, 28, 1)
y_val = np.random.randint(0, 10, (1000, 1))<jupyter_output><empty_output><jupyter_text>Then, we subclass the `HyperModel` class as `MyHyperModel`. In`MyHyperModel.build()`, we build a simple Keras model to do imageclassification for 10 different classes. `MyHyperModel.fit()` accepts severalarguments. Its signature is shown below:```pythondef fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs):```* The `hp` argument is for defining the hyperparameters.* The `model` argument is the model returned by `MyHyperModel.build()`.* `x`, `y`, and `validation_data` are all custom-defined arguments. We willpass our data to them by calling `tuner.search(x=x, y=y,validation_data=(x_val, y_val))` later. You can define any number of them andgive custom names.* The `callbacks` argument was intended to be used with `model.fit()`.KerasTuner put some helpful Keras callbacks in it, for example, the callbackfor checkpointing the model at its best epoch.We will manually call the callbacks in the custom training loop. Before wecan call them, we need to assign our model to them with the following code sothat they have access to the model for checkpointing.```pyfor callback in callbacks: callback.model = model```In this example, we only called the `on_epoch_end()` method of the callbacksto help us checkpoint the model. You may also call other callback methodsif needed. If you don't need to save the model, you don't need to use thecallbacks.In the custom training loop, we tune the batch size of the dataset as we wrapthe NumPy data into a `tf.data.Dataset`. Note that you can tune anypreprocessing steps here as well. We also tune the learning rate of theoptimizer.We will use the validation loss as the evaluation metric for the model. Tocompute the mean validation loss, we will use `keras.metrics.Mean()`, whichaverages the validation loss across the batches. We need to return thevalidation loss for the tuner to make a record.<jupyter_code>class MyHyperModel(keras_tuner.HyperModel):
def build(self, hp):
"""Builds a convolutional model."""
inputs = keras.Input(shape=(28, 28, 1))
x = keras.layers.Flatten()(inputs)
x = keras.layers.Dense(
units=hp.Choice("units", [32, 64, 128]), activation="relu"
)(x)
outputs = keras.layers.Dense(10)(x)
return keras.Model(inputs=inputs, outputs=outputs)
def fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs):
# Convert the datasets to tf.data.Dataset.
batch_size = hp.Int("batch_size", 32, 128, step=32, default=64)
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(
batch_size
)
validation_data = tf.data.Dataset.from_tensor_slices(validation_data).batch(
batch_size
)
# Define the optimizer.
optimizer = keras.optimizers.Adam(
hp.Float("learning_rate", 1e-4, 1e-2, sampling="log", default=1e-3)
)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# The metric to track validation loss.
epoch_loss_metric = keras.metrics.Mean()
# Function to run the train step.
@tf.function
def run_train_step(images, labels):
with tf.GradientTape() as tape:
logits = model(images)
loss = loss_fn(labels, logits)
# Add any regularization losses.
if model.losses:
loss += tf.math.add_n(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Function to run the validation step.
@tf.function
def run_val_step(images, labels):
logits = model(images)
loss = loss_fn(labels, logits)
# Update the metric.
epoch_loss_metric.update_state(loss)
# Assign the model to the callbacks.
for callback in callbacks:
callback.set_model(model)
# Record the best validation loss value
best_epoch_loss = float("inf")
# The custom training loop.
for epoch in range(2):
print(f"Epoch: {epoch}")
# Iterate the training data to run the training step.
for images, labels in train_ds:
run_train_step(images, labels)
# Iterate the validation data to run the validation step.
for images, labels in validation_data:
run_val_step(images, labels)
# Calling the callbacks after epoch.
epoch_loss = float(epoch_loss_metric.result().numpy())
for callback in callbacks:
# The "my_metric" is the objective passed to the tuner.
callback.on_epoch_end(epoch, logs={"my_metric": epoch_loss})
epoch_loss_metric.reset_state()
print(f"Epoch loss: {epoch_loss}")
best_epoch_loss = min(best_epoch_loss, epoch_loss)
# Return the evaluation metric value.
return best_epoch_loss<jupyter_output><empty_output><jupyter_text>Now, we can initialize the tuner. Here, we use `Objective("my_metric", "min")`as our metric to be minimized. The objective name should be consistent with theone you use as the key in the `logs` passed to the 'on_epoch_end()' method ofthe callbacks. The callbacks need to use this value in the `logs` to find thebest epoch to checkpoint the model.<jupyter_code>tuner = keras_tuner.RandomSearch(
objective=keras_tuner.Objective("my_metric", "min"),
max_trials=2,
hypermodel=MyHyperModel(),
directory="results",
project_name="custom_training",
overwrite=True,
)<jupyter_output><empty_output><jupyter_text>We start the search by passing the arguments we defined in the signature of`MyHyperModel.fit()` to `tuner.search()`.<jupyter_code>tuner.search(x=x_train, y=y_train, validation_data=(x_val, y_val))<jupyter_output><empty_output><jupyter_text>Finally, we can retrieve the results.<jupyter_code>best_hps = tuner.get_best_hyperparameters()[0]
print(best_hps.values)
best_model = tuner.get_best_models()[0]
best_model.summary()<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_tuner/custom_tuner.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_tuner/custom_tuner.ipynb",
"repo_id": "keras-io",
"token_count": 2925
} | 94 |
# Distributed training with Keras 3
**Author:** [Qianli Zhu](https://github.com/qlzh727)<br>
**Date created:** 2023/11/07<br>
**Last modified:** 2023/11/07<br>
**Description:** Complete guide to the distribution API for multi-backend Keras.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/distribution.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/distribution.py)
---
## Introduction
The Keras distribution API is a new interface designed to facilitate
distributed deep learning across a variety of backends like JAX, TensorFlow and
PyTorch. This powerful API introduces a suite of tools enabling data and model
parallelism, allowing for efficient scaling of deep learning models on multiple
accelerators and hosts. Whether leveraging the power of GPUs or TPUs, the API
provides a streamlined approach to initializing distributed environments,
defining device meshes, and orchestrating the layout of tensors across
computational resources. Through classes like `DataParallel` and
`ModelParallel`, it abstracts the complexity involved in parallel computation,
making it easier for developers to accelerate their machine learning
workflows.
---
## How it works
The Keras distribution API provides a global programming model that allows
developers to compose applications that operate on tensors in a global context
(as if working with a single device) while
automatically managing distribution across many devices. The API leverages the
underlying framework (e.g. JAX) to distribute the program and tensors according to the
sharding directives through a procedure called single program, multiple data
(SPMD) expansion.
By decoupling the application from sharding directives, the API enables running
the same application on a single device, multiple devices, or even multiple
clients, while preserving its global semantics.
---
## Setup
```python
import os
# The distribution API is only implemented for the JAX backend for now.
os.environ["KERAS_BACKEND"] = "jax"
import keras
from keras import layers
import jax
import numpy as np
from tensorflow import data as tf_data # For dataset input.
```
---
## `DeviceMesh` and `TensorLayout`
The `keras.distribution.DeviceMesh` class in Keras distribution API represents a cluster of
computational devices configured for distributed computation. It aligns with
similar concepts in [`jax.sharding.Mesh`](https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh) and
[`tf.dtensor.Mesh`](https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh),
where it's used to map the physical devices to a logical mesh structure.
The `TensorLayout` class then specifies how tensors are distributed across the
`DeviceMesh`, detailing the sharding of tensors along specified axes that
correspond to the names of the axes in the `DeviceMesh`.
You can find more detailed concept explainers in the
[TensorFlow DTensor guide](https://www.tensorflow.org/guide/dtensor_overview#dtensors_model_of_distributed_tensors).
```python
# Retrieve the local available gpu devices.
devices = jax.devices("gpu") # Assume it has 8 local GPUs.
# Define a 2x4 device mesh with data and model parallel axes
mesh = keras.distribution.DeviceMesh(
shape=(2, 4), axis_names=["data", "model"], devices=devices
)
# A 2D layout, which describes how a tensor is distributed across the
# mesh. The layout can be visualized as a 2D grid with "model" as rows and
# "data" as columns, and it is a [4, 2] grid when it mapped to the physical
# devices on the mesh.
layout_2d = keras.distribution.TensorLayout(axes=("model", "data"), device_mesh=mesh)
# A 4D layout which could be used for data parallel of a image input.
replicated_layout_4d = keras.distribution.TensorLayout(
axes=("data", None, None, None), device_mesh=mesh
)
```
---
## Distribution
The `Distribution` class in Keras serves as a foundational abstract class designed
for developing custom distribution strategies. It encapsulates the core logic
needed to distribute a model's variables, input data, and intermediate
computations across a device mesh. As an end user, you won't have to interact
directly with this class, but its subclasses like `DataParallel` or
`ModelParallel`.
---
## DataParallel
The `DataParallel` class in the Keras distribution API is designed for the
data parallelism strategy in distributed training, where the model weights are
replicated across all devices in the `DeviceMesh`, and each device processes a
portion of the input data.
Here is a sample usage of this class.
```python
# Create DataParallel with list of devices.
# As a shortcut, the devices can be skipped,
# and Keras will detect all local available devices.
# E.g. data_parallel = DataParallel()
data_parallel = keras.distribution.DataParallel(devices=devices)
# Or you can choose to create DataParallel with a 1D `DeviceMesh`.
mesh_1d = keras.distribution.DeviceMesh(
shape=(8,), axis_names=["data"], devices=devices
)
data_parallel = keras.distribution.DataParallel(device_mesh=mesh_1d)
inputs = np.random.normal(size=(128, 28, 28, 1))
labels = np.random.normal(size=(128, 10))
dataset = tf_data.Dataset.from_tensor_slices((inputs, labels)).batch(16)
# Set the global distribution.
keras.distribution.set_distribution(data_parallel)
# Note that all the model weights from here on are replicated to
# all the devices of the `DeviceMesh`. This includes the RNG
# state, optimizer states, metrics, etc. The dataset fed into `model.fit` or
# `model.evaluate` will be split evenly on the batch dimension, and sent to
# all the devices. You don't have to do any manual aggregation of losses,
# since all the computation happens in a global context.
inputs = layers.Input(shape=(28, 28, 1))
y = layers.Flatten()(inputs)
y = layers.Dense(units=200, use_bias=False, activation="relu")(y)
y = layers.Dropout(0.4)(y)
y = layers.Dense(units=10, activation="softmax")(y)
model = keras.Model(inputs=inputs, outputs=y)
model.compile(loss="mse")
model.fit(dataset, epochs=3)
model.evaluate(dataset)
```
<div class="k-default-codeblock">
```
Epoch 1/3
8/8 ━━━━━━━━━━━━━━━━━━━━ 8s 30ms/step - loss: 1.0116
Epoch 2/3
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.9237
Epoch 3/3
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 4ms/step - loss: 0.8736
8/8 ━━━━━━━━━━━━━━━━━━━━ 5s 5ms/step - loss: 0.8349
0.842325747013092
```
</div>
---
## `ModelParallel` and `LayoutMap`
`ModelParallel` will be mostly useful when model weights are too large to fit
on a single accelerator. This setting allows you to split your model weights or
activation tensors across all the devices on the `DeviceMesh`, enabling
horizontal scaling for large models.
Unlike the `DataParallel` model where all weights are fully replicated,
the weights layout under `ModelParallel` usually needs some customization for
best performance. We introduce `LayoutMap` to let you specify the
`TensorLayout` for any weights and intermediate tensors from a global perspective.
`LayoutMap` is a dict-like object that maps a string to `TensorLayout`
instances. It behaves differently from a normal Python dict in that the string
key is treated as a regex when retrieving the value. The class allows you to
define the naming schema of `TensorLayout` and then retrieve the corresponding
`TensorLayout` instance. Typically, the key used to query
is the `variable.path` attribute, which is the identifier of the variable.
As a shortcut, a tuple or list of axis
names is also allowed when inserting a value, and it will be converted to
`TensorLayout`.
The `LayoutMap` can also optionally contain a `DeviceMesh` to populate the
`TensorLayout.device_mesh` if it is not set. When retrieving a layout with a
key, and if there isn't an exact match, all existing keys in the layout map will
be treated as regex and matched against the input key again. If there are
multiple matches, a `ValueError` is raised. If no matches are found, `None` is
returned.
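To make the retrieval semantics concrete, here is a tiny sketch using the `mesh`
defined earlier (the variable paths are made up for illustration):
```python
illustration_map = keras.distribution.LayoutMap(mesh)
# The tuple is converted to a TensorLayout on insertion.
illustration_map["d1/kernel"] = (None, "model")
# Stored keys are treated as regexes at retrieval time.
print(illustration_map["d1/kernel"])  # Matches -> a TensorLayout instance
print(illustration_map["d3/kernel"])  # No match -> None
```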
```python
mesh_2d = keras.distribution.DeviceMesh(
shape=(2, 4), axis_names=["data", "model"], devices=devices
)
layout_map = keras.distribution.LayoutMap(mesh_2d)
# The rule below means that for any weights that match with d1/kernel, it
# will be sharded with model dimensions (4 devices), same for the d1/bias.
# All other weights will be fully replicated.
layout_map["d1/kernel"] = (None, "model")
layout_map["d1/bias"] = ("model",)
# You can also set the layout for the layer output like
layout_map["d2/output"] = ("data", None)
model_parallel = keras.distribution.ModelParallel(
mesh_2d, layout_map, batch_dim_name="data"
)
keras.distribution.set_distribution(model_parallel)
inputs = layers.Input(shape=(28, 28, 1))
y = layers.Flatten()(inputs)
y = layers.Dense(units=200, use_bias=False, activation="relu", name="d1")(y)
y = layers.Dropout(0.4)(y)
y = layers.Dense(units=10, activation="softmax", name="d2")(y)
model = keras.Model(inputs=inputs, outputs=y)
# The data will be sharded across the "data" dimension of the mesh, which
# has 2 devices.
model.compile(loss="mse")
model.fit(dataset, epochs=3)
model.evaluate(dataset)
```
<div class="k-default-codeblock">
```
Epoch 1/3
/opt/conda/envs/keras-jax/lib/python3.10/site-packages/jax/_src/interpreters/mlir.py:761: UserWarning: Some donated buffers were not usable: ShapedArray(float32[784,50]).
See an explanation at https://jax.readthedocs.io/en/latest/faq.html#buffer-donation.
warnings.warn("Some donated buffers were not usable:"
8/8 ━━━━━━━━━━━━━━━━━━━━ 5s 8ms/step - loss: 1.0266
Epoch 2/3
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.9181
Epoch 3/3
8/8 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.8725
8/8 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 0.8381
0.8502610325813293
```
</div>
It is also easy to change the mesh structure to tune the balance between
data parallelism and model parallelism. You can do this by adjusting the shape of
the mesh, and no changes are needed to any other code.
```python
full_data_parallel_mesh = keras.distribution.DeviceMesh(
shape=(8, 1), axis_names=["data", "model"], devices=devices
)
more_data_parallel_mesh = keras.distribution.DeviceMesh(
shape=(4, 2), axis_names=["data", "model"], devices=devices
)
more_model_parallel_mesh = keras.distribution.DeviceMesh(
shape=(2, 4), axis_names=["data", "model"], devices=devices
)
full_model_parallel_mesh = keras.distribution.DeviceMesh(
shape=(1, 8), axis_names=["data", "model"], devices=devices
)
```
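For example, to shift toward more data parallelism, you would build the
distribution on the 4x2 mesh instead; the model and training code above stay
exactly the same (a sketch reusing the names defined above):
```python
layout_map = keras.distribution.LayoutMap(more_data_parallel_mesh)
layout_map["d1/kernel"] = (None, "model")
layout_map["d1/bias"] = ("model",)
model_parallel = keras.distribution.ModelParallel(
    more_data_parallel_mesh, layout_map, batch_dim_name="data"
)
keras.distribution.set_distribution(model_parallel)
```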
### Further reading
1. [JAX Distributed arrays and automatic parallelization](https://jax.readthedocs.io/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html)
2. [JAX sharding module](https://jax.readthedocs.io/en/latest/jax.sharding.html)
3. [TensorFlow Distributed training with DTensors](https://www.tensorflow.org/tutorials/distribute/dtensor_ml_tutorial)
4. [TensorFlow DTensor concepts](https://www.tensorflow.org/guide/dtensor_overview)
5. [Using DTensors with tf.keras](https://www.tensorflow.org/tutorials/distribute/dtensor_keras_tutorial)
| keras-io/guides/md/distribution.md/0 | {
"file_path": "keras-io/guides/md/distribution.md",
"repo_id": "keras-io",
"token_count": 3587
} | 95 |
# Distributed hyperparameter tuning
**Authors:** Tom O'Malley, Haifeng Jin<br>
**Date created:** 2019/10/24<br>
**Last modified:** 2021/06/02<br>
**Description:** Tuning the hyperparameters of the models with multiple GPUs and multiple machines.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_tuner/distributed_tuning.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_tuner/distributed_tuning.py)
```python
!pip install keras-tuner -q
```
---
## Introduction
KerasTuner makes it easy to perform distributed hyperparameter search. No
changes to your code are needed to scale up from running single-threaded
locally to running on dozens or hundreds of workers in parallel. Distributed
KerasTuner uses a chief-worker model. The chief runs a service to which the
workers report results and query for the hyperparameters to try next. The chief
should be run on a single-threaded CPU instance (or alternatively as a separate
process on one of the workers).
### Configuring distributed mode
Configuring distributed mode for KerasTuner only requires setting three
environment variables:
**KERASTUNER_TUNER_ID**: This should be set to "chief" for the chief process.
Other workers should be passed a unique ID (by convention, "tuner0", "tuner1",
etc).
**KERASTUNER_ORACLE_IP**: The IP address or hostname that the chief service
should run on. All workers should be able to resolve and access this address.
**KERASTUNER_ORACLE_PORT**: The port that the chief service should run on. This
can be freely chosen, but must be a port that is accessible to the other
workers. Instances communicate via the [gRPC](https://www.grpc.io) protocol.
The same code can be run on all workers. Additional considerations for
distributed mode are:
- All workers should have access to a centralized file system to which they can
write their results.
- All workers should be able to access the necessary training and validation
data needed for tuning.
- To support fault-tolerance, `overwrite` should be kept as `False` in
`Tuner.__init__` (`False` is the default).
Example bash script for chief service (sample code for `run_tuning.py` at
bottom of page):
```
export KERASTUNER_TUNER_ID="chief"
export KERASTUNER_ORACLE_IP="127.0.0.1"
export KERASTUNER_ORACLE_PORT="8000"
python run_tuning.py
```
Example bash script for worker:
```
export KERASTUNER_TUNER_ID="tuner0"
export KERASTUNER_ORACLE_IP="127.0.0.1"
export KERASTUNER_ORACLE_PORT="8000"
python run_tuning.py
```
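If you prefer to keep everything in Python, the same three variables can also be
set via `os.environ` (a sketch — they just need to be set before KerasTuner reads
them, so put this at the very top of the script and give each process its own ID):
```python
import os
# Use "chief" on the chief process, "tuner0", "tuner1", ... on workers.
os.environ["KERASTUNER_TUNER_ID"] = "tuner0"
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
os.environ["KERASTUNER_ORACLE_PORT"] = "8000"
```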
### Data parallelism with `tf.distribute`
KerasTuner also supports data parallelism via
[tf.distribute](https://www.tensorflow.org/tutorials/distribute/keras). Data
parallelism and distributed tuning can be combined. For example, if you have 10
workers with 4 GPUs on each worker, you can run 10 parallel trials with each
trial training on 4 GPUs by using
[tf.distribute.MirroredStrategy](
https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy).
You can also run each trial on TPUs via
[tf.distribute.TPUStrategy](
https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/TPUStrategy).
Currently
[tf.distribute.MultiWorkerMirroredStrategy](
https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy)
is not supported, but support for this is on the roadmap.
### Example code
When the environment variables described above are set, the example below will
run distributed tuning and use data parallelism within each trial via
`tf.distribute`. The example loads MNIST from `tensorflow_datasets` and uses
[Hyperband](https://arxiv.org/abs/1603.06560) for the hyperparameter
search.
```python
import keras
import keras_tuner
import tensorflow as tf
import numpy as np
def build_model(hp):
"""Builds a convolutional model."""
inputs = keras.Input(shape=(28, 28, 1))
x = inputs
for i in range(hp.Int("conv_layers", 1, 3, default=3)):
x = keras.layers.Conv2D(
filters=hp.Int("filters_" + str(i), 4, 32, step=4, default=8),
kernel_size=hp.Int("kernel_size_" + str(i), 3, 5),
activation="relu",
padding="same",
)(x)
if hp.Choice("pooling" + str(i), ["max", "avg"]) == "max":
x = keras.layers.MaxPooling2D()(x)
else:
x = keras.layers.AveragePooling2D()(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.ReLU()(x)
if hp.Choice("global_pooling", ["max", "avg"]) == "max":
x = keras.layers.GlobalMaxPooling2D()(x)
else:
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
optimizer = hp.Choice("optimizer", ["adam", "sgd"])
model.compile(
optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
return model
tuner = keras_tuner.Hyperband(
hypermodel=build_model,
objective="val_accuracy",
max_epochs=2,
factor=3,
hyperband_iterations=1,
distribution_strategy=tf.distribute.MirroredStrategy(),
directory="results_dir",
project_name="mnist",
overwrite=True,
)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Reshape the images to have the channel dimension.
x_train = (x_train.reshape(x_train.shape + (1,)) / 255.0)[:1000]
y_train = y_train.astype(np.int64)[:1000]
x_test = (x_test.reshape(x_test.shape + (1,)) / 255.0)[:100]
y_test = y_test.astype(np.int64)[:100]
tuner.search(
x_train,
y_train,
steps_per_epoch=600,
validation_data=(x_test, y_test),
validation_steps=100,
callbacks=[keras.callbacks.EarlyStopping("val_accuracy")],
)
```
<div class="k-default-codeblock">
```
Trial 2 Complete [00h 00m 18s]
val_accuracy: 0.07000000029802322
```
</div>
<div class="k-default-codeblock">
```
Best val_accuracy So Far: 0.07000000029802322
Total elapsed time: 00h 00m 26s
```
</div> | keras-io/guides/md/keras_tuner/distributed_tuning.md/0 | {
"file_path": "keras-io/guides/md/keras_tuner/distributed_tuning.md",
"repo_id": "keras-io",
"token_count": 2254
} | 96 |
# Writing a training loop from scratch in PyTorch
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2023/06/25<br>
**Last modified:** 2023/06/25<br>
**Description:** Writing low-level training & evaluation loops in PyTorch.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/writing_a_custom_training_loop_in_torch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/writing_a_custom_training_loop_in_torch.py)
---
## Setup
```python
import os
# This guide can only be run with the torch backend.
os.environ["KERAS_BACKEND"] = "torch"
import torch
import keras
import numpy as np
```
---
## Introduction
Keras provides default training and evaluation loops, `fit()` and `evaluate()`.
Their usage is covered in the guide
[Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/).
If you want to customize the learning algorithm of your model while still leveraging
the convenience of `fit()`
(for instance, to train a GAN using `fit()`), you can subclass the `Model` class and
implement your own `train_step()` method, which
is called repeatedly during `fit()`.
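For reference, a minimal sketch of what such a `train_step()` override can look
like with the torch backend (assuming a model compiled with an optimizer, loss,
and metrics):
```python
class CustomModel(keras.Model):
    def train_step(self, data):
        x, y = data
        # Clear leftover gradients, run the forward pass, compute the loss.
        self.zero_grad()
        y_pred = self(x, training=True)
        loss = self.compute_loss(y=y, y_pred=y_pred)
        # Backward pass, then apply the gradients with the compiled optimizer.
        loss.backward()
        trainable_weights = [v for v in self.trainable_weights]
        gradients = [v.value.grad for v in trainable_weights]
        with torch.no_grad():
            self.optimizer.apply(gradients, trainable_weights)
        # Update the compiled metrics (including the loss tracker).
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(loss)
            else:
                metric.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
```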
Now, if you want very low-level control over training & evaluation, you should write
your own training & evaluation loops from scratch. This is what this guide is about.
---
## A first end-to-end example
To write a custom training loop, we need the following ingredients:
- A model to train, of course.
- An optimizer. You could either use a `keras.optimizers` optimizer,
or a native PyTorch optimizer from `torch.optim`.
- A loss function. You could either use a `keras.losses` loss,
or a native PyTorch loss from `torch.nn`.
- A dataset. You could use any format: a `tf.data.Dataset`,
a PyTorch `DataLoader`, a Python generator, etc.
Let's line them up. We'll use torch-native objects in each case --
except, of course, for the Keras model.
First, let's get the model and the MNIST dataset:
```python
# Let's consider a simple MNIST model
def get_model():
inputs = keras.Input(shape=(784,), name="digits")
x1 = keras.layers.Dense(64, activation="relu")(inputs)
x2 = keras.layers.Dense(64, activation="relu")(x1)
outputs = keras.layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
# Load up the MNIST dataset and put it in a torch DataLoader
# Prepare the training dataset.
batch_size = 32
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)).astype("float32")
x_test = np.reshape(x_test, (-1, 784)).astype("float32")
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Create torch Datasets
train_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_val), torch.from_numpy(y_val)
)
# Create DataLoaders for the Datasets
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_dataset, batch_size=batch_size, shuffle=False
)
```
Next, here's our PyTorch optimizer and our PyTorch loss function:
```python
# Instantiate a torch optimizer
model = get_model()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Instantiate a torch loss function
loss_fn = torch.nn.CrossEntropyLoss()
```
Let's train our model using mini-batch gradient descent with a custom training loop.
Calling `loss.backward()` on a loss tensor triggers backpropagation.
Once that's done, your optimizer is magically aware of the gradients for each variable
and can update its variables, which is done via `optimizer.step()`.
Tensors, variables, optimizers are all interconnected to one another via hidden global state.
Also, don't forget to call `model.zero_grad()` before `loss.backward()`, or you won't
get the right gradients for your variables.
Here's our training loop, step by step:
- We open a `for` loop that iterates over epochs
- For each epoch, we open a `for` loop that iterates over the dataset, in batches
- For each batch, we call the model on the input data to retrieve the predictions,
then we use them to compute a loss value
- We call `loss.backward()` to compute the gradients of the weights
of the model with regard to the loss
- Finally, we use the optimizer to update the weights of the model based on the
gradients
```python
epochs = 3
for epoch in range(epochs):
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(logits, targets)
# Backward pass
model.zero_grad()
loss.backward()
# Optimizer variable updates
optimizer.step()
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
```
<div class="k-default-codeblock">
```
Training loss (for 1 batch) at step 0: 110.9115
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 2.9493
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 2.7383
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 1.6616
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 1.5927
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 1.0992
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.5425
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.3308
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.8231
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.5570
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.6321
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.4962
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 1.0833
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 1.3607
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 1.1250
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 1.2562
Seen so far: 48032 samples
Training loss (for 1 batch) at step 0: 0.5181
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.3939
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.3406
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.1122
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.2015
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.1184
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 1.0702
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.4062
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.4570
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 1.2490
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.0714
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.3677
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.8291
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.8320
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.1179
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.5390
Seen so far: 48032 samples
Training loss (for 1 batch) at step 0: 0.1309
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.4061
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.2734
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.2972
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.4282
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.3504
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.3556
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.7834
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.2522
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.2056
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.3259
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.5215
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.8051
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.4423
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.0473
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.1419
Seen so far: 48032 samples
```
</div>
As an alternative, let's look at what the loop looks like when using a Keras optimizer
and a Keras loss function.
Important differences:
- You retrieve the gradients for the variables via `v.value.grad`,
called on each trainable variable.
- You update your variables via `optimizer.apply()`, which must be
called in a `torch.no_grad()` scope.
**Also, a big gotcha:** while all NumPy/TensorFlow/JAX/Keras APIs
as well as Python `unittest` APIs use the argument order convention
`fn(y_true, y_pred)` (reference values first, predicted values second),
PyTorch actually uses `fn(y_pred, y_true)` for its losses.
So make sure to invert the order of `logits` and `targets`.
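To make that gotcha concrete, a throwaway sketch with random data (not part of
the training loop below):
```python
preds = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
torch_loss = torch.nn.CrossEntropyLoss()(preds, labels)  # fn(y_pred, y_true)
keras_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)(
    labels, preds
)  # fn(y_true, y_pred)
```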
```python
model = get_model()
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for 1 batch) at step 0: 98.9569
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 5.3304
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.3246
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 1.6745
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 1.0936
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 1.4159
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.2796
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 2.3532
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.7533
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 1.0432
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.3959
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.4722
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.3851
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.8599
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.1237
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.4919
Seen so far: 48032 samples
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for 1 batch) at step 0: 0.8972
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.5844
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.1285
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.0671
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.4296
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.1483
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.0230
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.1368
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.1531
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.0472
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.2343
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.4449
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.3942
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.3236
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.0717
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.9288
Seen so far: 48032 samples
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 2
Training loss (for 1 batch) at step 0: 0.9393
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.2383
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.1116
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.6736
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.6713
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.3394
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.2385
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.4248
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.0200
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.1259
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.7566
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.0594
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.2821
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.2088
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.5654
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.0512
Seen so far: 48032 samples
```
</div>
---
## Low-level handling of metrics
Let's add metrics monitoring to this basic training loop.
You can readily reuse built-in Keras metrics (or custom ones you wrote) in such training
loops written from scratch. Here's the flow:
- Instantiate the metric at the start of the loop
- Call `metric.update_state()` after each batch
- Call `metric.result()` when you need to display the current value of the metric
- Call `metric.reset_state()` when you need to clear the state of the metric
(typically at the end of an epoch)
Let's use this knowledge to compute `CategoricalAccuracy` on training and
validation data at the end of each epoch:
```python
# Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
```
Here's our training & evaluation loop:
```python
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for 1 batch) at step 0: 59.2206
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 8.9801
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 5.2990
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 3.6978
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 1.9965
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 2.1896
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 1.2416
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.9403
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.1838
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.5884
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.7836
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.7015
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.3335
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.2763
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.4787
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.2562
Seen so far: 48032 samples
Training acc over epoch: 0.8411
Validation acc: 0.8963
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for 1 batch) at step 0: 0.3417
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 1.1465
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.7274
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.1273
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.6500
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.2008
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.7483
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.5821
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.5696
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.3112
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.1761
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.1811
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.2736
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.3848
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.4627
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.3934
Seen so far: 48032 samples
Training acc over epoch: 0.9053
Validation acc: 0.9221
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 2
Training loss (for 1 batch) at step 0: 0.5743
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.4448
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.9880
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.2268
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.5607
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.1178
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.4305
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.1712
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.3109
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.1548
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.1090
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.5169
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.3791
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.6963
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.6204
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.1111
Seen so far: 48032 samples
Training acc over epoch: 0.9216
Validation acc: 0.9356
```
</div>
---
## Low-level handling of losses tracked by the model
Layers & models recursively track any losses created during the forward pass
by layers that call `self.add_loss(value)`. The resulting list of scalar loss
values is available via the property `model.losses`
at the end of the forward pass.
If you want to use these loss components, you should sum them
and add them to the main loss in your training step.
Consider this layer, which creates an activity regularization loss:
```python
class ActivityRegularizationLayer(keras.layers.Layer):
def call(self, inputs):
self.add_loss(1e-2 * torch.sum(inputs))
return inputs
```
Let's build a really simple model that uses it:
```python
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
```
Here's what our training loop should look like now:
```python
# Get a fresh model
model = get_model()
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
# Prepare the metrics.
train_acc_metric = keras.metrics.CategoricalAccuracy()
val_acc_metric = keras.metrics.CategoricalAccuracy()
for epoch in range(epochs):
print(f"\nStart of epoch {epoch}")
for step, (inputs, targets) in enumerate(train_dataloader):
# Forward pass
logits = model(inputs)
loss = loss_fn(targets, logits)
if model.losses:
loss = loss + torch.sum(*model.losses)
# Backward pass
model.zero_grad()
trainable_weights = [v for v in model.trainable_weights]
# Call torch.Tensor.backward() on the loss to compute gradients
# for the weights.
loss.backward()
gradients = [v.value.grad for v in trainable_weights]
# Update weights
with torch.no_grad():
optimizer.apply(gradients, trainable_weights)
# Update training metric.
train_acc_metric.update_state(targets, logits)
# Log every 100 batches.
if step % 100 == 0:
print(
f"Training loss (for 1 batch) at step {step}: {loss.detach().numpy():.4f}"
)
print(f"Seen so far: {(step + 1) * batch_size} samples")
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print(f"Training acc over epoch: {float(train_acc):.4f}")
# Reset training metrics at the end of each epoch
train_acc_metric.reset_state()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataloader:
val_logits = model(x_batch_val, training=False)
# Update val metrics
val_acc_metric.update_state(y_batch_val, val_logits)
val_acc = val_acc_metric.result()
val_acc_metric.reset_state()
print(f"Validation acc: {float(val_acc):.4f}")
```
<div class="k-default-codeblock">
```
Start of epoch 0
Training loss (for 1 batch) at step 0: 138.7979
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 4.4268
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 1.0779
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 1.7229
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.5801
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.4298
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.4717
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 1.3369
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 1.3239
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.5972
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.1983
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.5228
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 1.0025
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.3424
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.5196
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.4287
Seen so far: 48032 samples
Training acc over epoch: 0.8089
Validation acc: 0.8947
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 1
Training loss (for 1 batch) at step 0: 0.2903
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.4118
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 0.6533
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.0402
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.3638
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.3313
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.5119
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.1628
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.4793
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.2726
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.5721
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.5783
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.2533
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.2218
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.1232
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.6805
Seen so far: 48032 samples
Training acc over epoch: 0.8970
Validation acc: 0.9097
```
</div>
<div class="k-default-codeblock">
```
Start of epoch 2
Training loss (for 1 batch) at step 0: 0.4553
Seen so far: 32 samples
Training loss (for 1 batch) at step 100: 0.3975
Seen so far: 3232 samples
Training loss (for 1 batch) at step 200: 1.2382
Seen so far: 6432 samples
Training loss (for 1 batch) at step 300: 0.0927
Seen so far: 9632 samples
Training loss (for 1 batch) at step 400: 0.3530
Seen so far: 12832 samples
Training loss (for 1 batch) at step 500: 0.3842
Seen so far: 16032 samples
Training loss (for 1 batch) at step 600: 0.6423
Seen so far: 19232 samples
Training loss (for 1 batch) at step 700: 0.1751
Seen so far: 22432 samples
Training loss (for 1 batch) at step 800: 0.4769
Seen so far: 25632 samples
Training loss (for 1 batch) at step 900: 0.1854
Seen so far: 28832 samples
Training loss (for 1 batch) at step 1000: 0.3130
Seen so far: 32032 samples
Training loss (for 1 batch) at step 1100: 0.1633
Seen so far: 35232 samples
Training loss (for 1 batch) at step 1200: 0.1446
Seen so far: 38432 samples
Training loss (for 1 batch) at step 1300: 0.4661
Seen so far: 41632 samples
Training loss (for 1 batch) at step 1400: 0.9977
Seen so far: 44832 samples
Training loss (for 1 batch) at step 1500: 0.3392
Seen so far: 48032 samples
Training acc over epoch: 0.9182
Validation acc: 0.9200
```
</div>
That's it!
| keras-io/guides/md/writing_a_custom_training_loop_in_torch.md/0 | {
"file_path": "keras-io/guides/md/writing_a_custom_training_loop_in_torch.md",
"repo_id": "keras-io",
"token_count": 10048
} | 97 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/preprocessing_layers/masked_lm_mask_generator/'" />
| keras-io/redirects/api/keras_nlp/layers/mlm_mask_generator/index.html/0 | {
"file_path": "keras-io/redirects/api/keras_nlp/layers/mlm_mask_generator/index.html",
"repo_id": "keras-io",
"token_count": 52
} | 98 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/constraints/'" />
| keras-io/redirects/constraints/index.html/0 | {
"file_path": "keras-io/redirects/constraints/index.html",
"repo_id": "keras-io",
"token_count": 36
} | 99 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/core_layers/embedding/'" />
| keras-io/redirects/layers/embeddings/index.html/0 | {
"file_path": "keras-io/redirects/layers/embeddings/index.html",
"repo_id": "keras-io",
"token_count": 40
} | 100 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/data_loading/'" />
| keras-io/redirects/preprocessing/index.html/0 | {
"file_path": "keras-io/redirects/preprocessing/index.html",
"repo_id": "keras-io",
"token_count": 32
} | 101 |
CV_GUIDES_MASTER = {
"path": "keras_cv/",
"title": "KerasCV",
"toc": True,
"children": [
{
"path": "object_detection_keras_cv",
"title": "Use KerasCV to assemble object detection pipelines",
},
{
"path": "classification_with_keras_cv",
"title": "Use KerasCV to train powerful image classifiers.",
},
{
"path": "cut_mix_mix_up_and_rand_augment",
"title": "CutMix, MixUp, and RandAugment image augmentation with KerasCV",
},
{
"path": "generate_images_with_stable_diffusion",
"title": "High-performance image generation using Stable Diffusion in KerasCV",
},
{
"path": "custom_image_augmentations",
"title": "Custom Image Augmentations with BaseImageAugmentationLayer",
},
{
"path": "semantic_segmentation_deeplab_v3_plus",
"title": "Semantic Segmentation with KerasCV",
},
{
"path": "segment_anything_in_keras_cv",
"title": "Segment Anything in KerasCV",
},
],
}
NLP_GUIDES_MASTER = {
"path": "keras_nlp/",
"title": "KerasNLP",
"toc": True,
"children": [
{
"path": "getting_started",
"title": "Getting Started with KerasNLP",
},
{
"path": "transformer_pretraining",
"title": "Pretraining a Transformer from scratch with KerasNLP",
},
],
}
KT_GUIDES_MASTER = {
"path": "keras_tuner/",
"title": "Hyperparameter Tuning",
"toc": True,
"children": [
{
"path": "getting_started",
"title": "Getting started with KerasTuner",
},
{
"path": "distributed_tuning",
"title": "Distributed hyperparameter tuning with KerasTuner",
},
{
"path": "custom_tuner",
"title": "Tune hyperparameters in your custom training loop",
},
{
"path": "visualize_tuning",
"title": "Visualize the hyperparameter tuning process",
},
{
"path": "failed_trials",
"title": "Handling failed trials in KerasTuner",
},
{
"path": "tailor_the_search_space",
"title": "Tailor the search space",
},
],
}
GUIDES_MASTER = {
"path": "guides/",
"title": "Developer guides",
"toc": True,
"children": [
{
"path": "functional_api",
"title": "The Functional API",
},
{
"path": "sequential_model",
"title": "The Sequential model",
},
{
"path": "making_new_layers_and_models_via_subclassing",
"title": "Making new layers & models via subclassing",
},
{
"path": "training_with_built_in_methods",
"title": "Training & evaluation with the built-in methods",
},
{
"path": "custom_train_step_in_jax",
"title": "Customizing `fit()` with JAX",
},
{
"path": "custom_train_step_in_tensorflow",
"title": "Customizing `fit()` with TensorFlow",
},
{
"path": "custom_train_step_in_torch",
"title": "Customizing `fit()` with PyTorch",
},
{
"path": "writing_a_custom_training_loop_in_jax",
"title": "Writing a custom training loop in JAX",
},
{
"path": "writing_a_custom_training_loop_in_tensorflow",
"title": "Writing a custom training loop in TensorFlow",
},
{
"path": "writing_a_custom_training_loop_in_torch",
"title": "Writing a custom training loop in PyTorch",
},
{
"path": "serialization_and_saving",
"title": "Serialization & saving",
},
{
"path": "customizing_saving_and_serialization",
"title": "Customizing saving & serialization",
},
{
"path": "writing_your_own_callbacks",
"title": "Writing your own callbacks",
},
{
"path": "transfer_learning",
"title": "Transfer learning & fine-tuning",
},
{
"path": "distributed_training_with_jax",
"title": "Distributed training with JAX",
},
{
"path": "distributed_training_with_tensorflow",
"title": "Distributed training with TensorFlow",
},
{
"path": "distributed_training_with_torch",
"title": "Distributed training with PyTorch",
},
{
"path": "distribution",
"title": "Distributed training with Keras 3",
},
{
"path": "migrating_to_keras_3",
"title": "Migrating Keras 2 code to Keras 3",
},
# {
# "path": "preprocessing_layers",
# "title": "Working with preprocessing layers",
# },
# {
# "path": "working_with_rnns",
# "title": "Working with recurrent neural networks",
# },
# {
# "path": "understanding_masking_and_padding",
# "title": "Understanding masking & padding",
# },
# {
# 'path': 'writing_your_own_metrics',
# 'title': 'Writing your own Metrics',
# },
# {
# 'path': 'writing_your_own_losses',
# 'title': 'Writing your own Losses',
# },
# {
# 'path': 'tpu_training',
# 'title': 'Training Keras models on TPU',
# },
# {
# 'path': 'hyperparameter_optimization',
# 'title': 'Hyperparameter optimization',
# },
# {
# 'path': 'mixed_precision',
# 'title': 'Mixed precision training',
# },
KT_GUIDES_MASTER,
CV_GUIDES_MASTER,
NLP_GUIDES_MASTER,
],
}
| keras-io/scripts/guides_master.py/0 | {
"file_path": "keras-io/scripts/guides_master.py",
"repo_id": "keras-io",
"token_count": 3183
} | 102 |
# KerasCV Layers
KerasCV layers are `keras.layers.Layer` subclasses for computer vision-specific use cases.
{{toc}}
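As a quick, illustrative sketch (the random `images` batch below is a placeholder assumption, not part of the API reference):
```python
import numpy as np
import keras_cv
# Placeholder batch of images in the [0, 255] range.
images = np.random.uniform(0, 255, size=(8, 224, 224, 3)).astype("float32")
# KerasCV layers are called like any other Keras layer.
augmenter = keras_cv.layers.RandAugment(value_range=(0, 255))
augmented = augmenter(images)
```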
| keras-io/templates/api/keras_cv/layers/index.md/0 | {
"file_path": "keras-io/templates/api/keras_cv/layers/index.md",
"repo_id": "keras-io",
"token_count": 38
} | 103 |
# KerasTuner API
The **Hyperparameters** class is used to specify a set of hyperparameters
and their values, to be used in the model building function.
The **Tuner** subclasses corresponding to different tuning algorithms are
called directly by the user to start the search or to get the best models.
The **Oracle** subclasses are the core search algorithms, receiving model evaluation
results from the Tuner and providing new hyperparameter values.
The **HyperModel** subclasses are predefined search spaces for certain model
families like ResNet and XceptionNet.
{{toc}}
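For quick orientation, here is a minimal, illustrative sketch of how these pieces fit together (the model, placeholder data, and trial counts are assumptions for demonstration, not part of the API reference):
```python
import numpy as np
import keras
import keras_tuner
def build_model(hp):
    # `hp` (a HyperParameters instance) defines the search space inline.
    model = keras.Sequential(
        [
            keras.layers.Dense(
                hp.Int("units", min_value=32, max_value=512, step=32),
                activation="relu",
            ),
            keras.layers.Dense(10, activation="softmax"),
        ]
    )
    model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
# Placeholder data, just to make the sketch runnable.
x_train = np.random.rand(128, 64).astype("float32")
y_train = np.random.randint(0, 10, size=(128,))
x_val = np.random.rand(32, 64).astype("float32")
y_val = np.random.randint(0, 10, size=(32,))
# The Tuner runs the search; its Oracle proposes new hyperparameter values.
tuner = keras_tuner.RandomSearch(
    build_model, objective="val_accuracy", max_trials=3, overwrite=True
)
tuner.search(x_train, y_train, validation_data=(x_val, y_val), epochs=1)
best_model = tuner.get_best_models(num_models=1)[0]
```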
| keras-io/templates/api/keras_tuner/index.md/0 | {
"file_path": "keras-io/templates/api/keras_tuner/index.md",
"repo_id": "keras-io",
"token_count": 136
} | 104 |
# On Github Issues and Pull Requests
Found a bug? Have a new feature to suggest?
Want to add a new code example to keras.io, or to contribute changes to the codebase?
Make sure to read this first.
---
## Bug reporting
Your code doesn't work, and you have determined that the issue lies with Keras? Follow these steps to report a bug.
1. Your bug may already be fixed. Make sure to update to the current Keras nightly release (`pip install keras-nightly --upgrade`) and test whether your bug is still occurring.
2. Search for similar issues among the [keras-team/keras Github issues](https://github.com/keras-team/keras/issues). Make sure to delete `is:open` on the issue search to find solved tickets as well. It's possible somebody has encountered this bug already. Also remember to check out Keras [FAQ](http://keras.io/getting_started/faq/). Still having a problem? Open an issue on the Keras Github to let us know.
3. Make sure you provide us with useful information about your configuration: What backend are you using? Are you running on GPU?
4. Provide us with a standalone script to reproduce the issue. This script **should be runnable as-is** and should not require external data download (use randomly generated data if you need to run a model on some test data). We recommend that you use GitHub Gists or Colab to post your code. Any issue that cannot be reproduced is likely to be closed.
5. If possible, take a stab at fixing the bug yourself -- if you can!
The more information you provide, the easier it is for us to validate that there is a bug and the faster we'll be able to take action. If you want your issue to be resolved quickly, following the steps above is crucial.
---
## Requesting a Feature
You can use [keras-team/keras Github issues](https://github.com/keras-team/keras/issues) to request features you would like to see in Keras, or changes in the Keras API.
1. Provide a clear and detailed explanation of the feature you want and why it's important to add. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on library for Keras. It is crucial for Keras to avoid bloating the API and codebase.
2. Provide code snippets demonstrating the API you have in mind and illustrating the use cases of your feature. Of course, you don't need to write any real code at this point!
---
## Submitting a Pull Request
You can open a PR at [keras-team/keras](https://github.com/keras-team/keras/pulls).
Please note that PRs that are primarily about **code style** (as opposed to fixing bugs, improving docs, or adding new functionality) will likely be rejected.
---
## Adding new examples
Even if you don't contribute to the Keras source code, if you have an application of Keras that is concise and powerful,
please consider adding it to our collection of examples, featured on [keras.io](https://keras.io).
[Follow these steps](/examples/#adding-a-new-code-example) to submit a new code example.
| keras-io/templates/contributing.md/0 | {
"file_path": "keras-io/templates/contributing.md",
"repo_id": "keras-io",
"token_count": 779
} | 105 |
After five months of extensive public beta testing,
we're excited to announce the official release of Keras 3.0.
Keras 3 is a full rewrite of Keras that enables you to
run your Keras workflows on top of either JAX, TensorFlow, or PyTorch, and that
unlocks brand new large-scale model training and deployment capabilities.
You can pick the framework that suits you best,
and switch from one to another based on your current goals.
You can also use Keras as a low-level cross-framework language
to develop custom components such as layers, models, or metrics
that can be used in native workflows in JAX, TensorFlow, or PyTorch — with one codebase.
---
## Welcome to multi-framework machine learning.
You're already familiar with the benefits of using Keras — it enables
high-velocity development via an obsessive focus on great UX, API design,
and debuggability. It's also a battle-tested framework that has been chosen
by over 2.5M developers and that powers some of the most sophisticated,
largest-scale ML systems in the world,
such as the Waymo self-driving fleet and the YouTube recommendation engine.
But what are the additional benefits of using the new multi-backend Keras 3?
- **Always get the best performance for your models.** In our benchmarks,
we found that JAX typically delivers the best training and inference performance
on GPU, TPU, and CPU — but results vary from model to model, as non-XLA
TensorFlow is occasionally faster on GPU. The ability to dynamically select
the backend that will deliver the best performance for your model
*without having to change anything to your code* means you're guaranteed
to train and serve with the highest achievable efficiency.
- **Unlock ecosystem optionality for your models.** Any Keras 3
model can be instantiated as a PyTorch `Module`, can be exported as a TensorFlow
`SavedModel`, or can be instantiated as a stateless JAX function. That means
that you can use your Keras 3 models with PyTorch ecosystem packages,
with the full range of TensorFlow deployment & production tools
(like TF-Serving, TF.js and TFLite), and with JAX large-scale
TPU training infrastructure. Write one `model.py` using
Keras 3 APIs, and get access to everything the ML world has to offer.
- **Leverage large-scale model parallelism & data parallelism with JAX.** Keras 3 includes
a brand new distribution API, the `keras.distribution` namespace,
currently implemented for the JAX backend (coming soon to the TensorFlow and PyTorch backends).
It makes it easy to do model parallelism, data parallelism, and combinations of both —
at arbitrary model scales and cluster scales.
Because it keeps the model definition, training logic,
and sharding configuration all separate from each other,
it makes your distribution workflow easy to develop and easy to maintain.
See our [starter guide](/guides/distribution/).
- **Maximize reach for your open-source model releases.** Want to
release a pretrained model? Want as many people as possible
to be able to use it? If you implement it in pure TensorFlow or PyTorch,
it will be usable by roughly half of the community.
If you implement it in Keras 3, it is instantly usable by anyone regardless
of their framework of choice (even if they're not Keras users themselves).
Twice the impact at no added development cost.
- **Use data pipelines from any source.** The Keras 3
`fit()`/`evaluate()`/`predict()` routines are compatible with `tf.data.Dataset` objects,
with PyTorch `DataLoader` objects, with NumPy arrays, Pandas dataframes —
regardless of the backend you're using. You can train a Keras 3 + TensorFlow
model on a PyTorch `DataLoader` or train a Keras 3 + PyTorch model on a
`tf.data.Dataset`.
---
## The full Keras API, available for JAX, TensorFlow, and PyTorch.
Keras 3 implements the full Keras API and makes it available
with TensorFlow, JAX, and PyTorch — over a hundred layers, dozens of metrics,
loss functions, optimizers, and callbacks, the Keras training and evaluation
loops, and the Keras saving & serialization infrastructure. All the APIs you
know and love are here.
Any Keras model that only uses built-in layers will immediately work with
all supported backends. In fact, your existing `tf.keras` models
that only use built-in layers can start running in JAX and PyTorch *right away*!
That's right, your codebase just gained a whole new set of capabilities.
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras_3/cross_framework_keras_3.jpg" />
---
## Author multi-framework layers, models, metrics...
Keras 3 enables you to create components
(like arbitrary custom layers or pretrained models) that will work the same
in any framework. In particular, Keras 3 gives you access
to the `keras.ops` namespace that works across all backends. It contains:
- **A full implementation of the NumPy API.**
Not something "NumPy-like" — just literally the
NumPy API, with the same functions and the same arguments.
You get `ops.matmul`, `ops.sum`, `ops.stack`, `ops.einsum`, etc.
- **A set of neural network-specific functions** that are absent from NumPy,
such as `ops.softmax`, `ops.binary_crossentropy`, `ops.conv`, etc.
As long as you only use ops from `keras.ops`, your custom layers,
custom losses, custom metrics, and custom optimizers
**will work with JAX, PyTorch, and TensorFlow — with the same code**.
That means that you can maintain only one
component implementation (e.g. a single `model.py`
together with a single checkpoint file), and you can use it in all frameworks,
with the exact same numerics.
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras_3/custom_component_authoring_keras_3.jpg" />
---
## ...that works seamlessly with any JAX, TensorFlow, and PyTorch workflow.
Keras 3 is not just intended for Keras-centric workflows
where you define a Keras model, a Keras optimizer, a Keras loss and metrics,
and you call `fit()`, `evaluate()`, and `predict()`.
It's also meant to work seamlessly with low-level backend-native workflows:
you can take a Keras model (or any other component, such as a loss or metric)
and start using it in a JAX training loop, a TensorFlow training loop,
or a PyTorch training loop, or as part of a JAX or PyTorch model,
with zero friction. Keras 3 provides exactly
the same degree of low-level implementation flexibility in JAX and PyTorch
as `tf.keras` previously did in TensorFlow.
You can:
- Write a low-level JAX training loop to train a Keras model
using an `optax` optimizer, `jax.grad`, `jax.jit`, `jax.pmap`.
- Write a low-level TensorFlow training loop to train a Keras model
using `tf.GradientTape` and `tf.distribute`.
- Write a low-level PyTorch training loop to train a Keras model
using a `torch.optim` optimizer, a `torch` loss function,
and the `torch.nn.parallel.DistributedDataParallel` wrapper.
- Use Keras layers in a PyTorch `Module` (because they are `Module` instances too!), as sketched below.
- Use any PyTorch `Module` in a Keras model as if it were a Keras layer.
- etc.
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras-core/custom_training_loops.jpg" />
---
## A new distribution API for large-scale data parallelism and model parallelism.
The models we've been working with have been getting larger and larger, so we wanted
to provide a Kerasic solution to the multi-device model sharding problem. The API we designed
keeps the model definition, the training logic, and the sharding configuration entirely separate from each
other, meaning that your models can be written as if they were going to run on a single device. You
can then add arbitrary sharding configurations to arbitrary models when it's time to train them.
Data parallelism (replicating a small model identically on multiple devices) can be handled in just two lines:
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras_3/keras_3_data_parallel.jpg" />
Model parallelism lets you specify sharding layouts for model variables and intermediate output tensors,
along multiple named dimensions. In the typical case, you would organize available devices as a 2D grid
(called a *device mesh*), where the first dimension is used for data parallelism and the second dimension
is used for model parallelism. You would then configure your model to be sharded along the model dimension
and replicated along the data dimension.
The API lets you configure the layout of every variable and every output tensor via regular expressions.
This makes it easy to quickly specify the same layout for entire categories of variables.
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras_3/keras_3_model_parallel.jpg" />
The new distribution API is intended to be multi-backend, but is only available for the JAX backend for the time
being. TensorFlow and PyTorch support is coming soon. Get started with [this guide](/guides/distribution/)!
---
## Pretrained models.
There's a wide range of pretrained models that
you can start using today with Keras 3.
All 40 Keras Applications models (the `keras.applications` namespace)
are available in all backends.
The vast array of pretrained models in [KerasCV](https://keras.io/api/keras_cv/)
and [KerasNLP](https://keras.io/api/keras_nlp/) also work with all backends. This includes:
- BERT
- OPT
- Whisper
- T5
- StableDiffusion
- YOLOv8
- SegmentAnything
- etc.
---
## Support for cross-framework data pipelines with all backends.
Multi-framework ML also means multi-framework data loading and preprocessing.
Keras 3 models can be trained using a wide range of
data pipelines — regardless of whether you're using the JAX, PyTorch, or
TensorFlow backends. It just works.
- `tf.data.Dataset` pipelines: the reference for scalable production ML.
- `torch.utils.data.DataLoader` objects.
- NumPy arrays and Pandas dataframes.
- Keras's own `keras.utils.PyDataset` objects.
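For example, a minimal sketch of training a Keras model directly on a PyTorch `DataLoader` (with randomly generated placeholder data) might look like this:
```python
import numpy as np
import torch
import keras
# Placeholder data.
features = np.random.rand(256, 16).astype("float32")
labels = np.random.randint(0, 2, size=(256,))
dataset = torch.utils.data.TensorDataset(
    torch.from_numpy(features), torch.from_numpy(labels)
)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
model = keras.Sequential(
    [keras.layers.Dense(32, activation="relu"), keras.layers.Dense(2)]
)
model.compile(
    optimizer="adam",
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
# Works whether the active backend is JAX, TensorFlow, or PyTorch.
model.fit(loader, epochs=1)
```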
---
## Progressive disclosure of complexity.
*Progressive disclosure of complexity* is the design principle at the heart
of the Keras API. Keras doesn't force you to follow
a single "true" way of building and training models. Instead, it enables
a wide range of different workflows, from the very high-level to the very
low-level, corresponding to different user profiles.
That means that you can start out with simple workflows — such as using
`Sequential` and `Functional` models and training them with `fit()` — and when
you need more flexibility, you can easily customize different components while
reusing most of your prior code. As your needs become more specific,
you don't suddenly fall off a complexity cliff and you don't need to switch
to a different set of tools.
We've brought this principle to all of our backends. For instance,
you can customize what happens in your training loop while still
leveraging the power of `fit()`, without having to write your own training loop
from scratch — just by overriding the `train_step` method.
Here's how it works in PyTorch and TensorFlow:
<img class="irasto" src="https://s3.amazonaws.com/keras.io/img/keras-core/customizing_fit.jpg" />
And [here's the link](http://keras.io/guides/custom_train_step_in_jax/) to the JAX version.
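In text form, a hedged sketch of the TensorFlow variant (the JAX and PyTorch versions differ mainly in how gradients are computed and applied; the toy model and data are illustrative):
```python
import numpy as np
import tensorflow as tf
import keras
class CustomModel(keras.Model):
    def train_step(self, data):
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compute_loss(y=y, y_pred=y_pred)
        # Compute and apply gradients.
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply(gradients, self.trainable_variables)
        # Update metrics (includes the metric that tracks the loss).
        for metric in self.metrics:
            if metric.name == "loss":
                metric.update_state(loss)
            else:
                metric.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
inputs = keras.Input(shape=(8,))
outputs = keras.layers.Dense(1)(inputs)
model = CustomModel(inputs, outputs)
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.random.rand(64, 8), np.random.rand(64, 1), epochs=1)
```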
---
## A new stateless API for layers, models, metrics, and optimizers.
Do you enjoy [functional programming](https://en.wikipedia.org/wiki/Functional_programming)?
You're in for a treat.
All stateful objects in Keras (i.e. objects that own numerical variables that
get updated during training or evaluation) now have a stateless API, making it
possible to use them in JAX functions (which are required to be fully stateless):
- All layers and models have a `stateless_call()` method which mirrors `__call__()`.
- All optimizers have a `stateless_apply()` method which mirrors `apply()`.
- All metrics have a `stateless_update_state()` method which mirrors `update_state()`
and a `stateless_result()` method which mirrors `result()`.
These methods have no side-effects whatsoever: they take as input the current value
of the state variables of the target object, and return the update values as part
of their outputs, e.g.:
```python
outputs, updated_non_trainable_variables = layer.stateless_call(
trainable_variables,
non_trainable_variables,
inputs,
)
```
You never have to implement these methods yourself — they're automatically available
as long as you've implemented the stateful version (e.g. `call()` or `update_state()`).
---
## Moving from Keras 2 to Keras 3
Keras 3 is highly backwards compatible with Keras 2:
it implements the full public API surface of Keras 2,
with a limited number of exceptions, listed [here](https://github.com/keras-team/keras/issues/18467).
Most users will not have to make any code change
to start running their Keras scripts on Keras 3.
Larger codebases are likely to require some code changes,
since they are more likely to run into one of the exceptions listed above,
and are more likely to have been using private APIs or deprecated APIs
(`tf.compat.v1.keras` namespace, `experimental` namespace, `keras.src` private namespace).
To help you move to Keras 3, we are releasing a complete [migration guide](/guides/migrating_to_keras_3/)
with quick fixes for all issues you might encounter.
You also have the option to ignore the changes in Keras 3 and just keep using Keras 2 with TensorFlow —
this can be a good option for projects that are not actively developed
but need to keep running with updated dependencies.
You have two possibilities:
1. If you were accessing `keras` as a standalone package,
just switch to using the Python package `tf_keras` instead,
which you can install via `pip install tf_keras`.
The code and API are wholly unchanged — it's Keras 2.15 with a different package name.
We will keep fixing bugs in `tf_keras` and we will keep regularly releasing new versions.
However, no new features or performance improvements will be added,
since the package is now in maintenance mode.
2. If you were accessing `keras` via `tf.keras`,
there are no immediate changes until TensorFlow 2.16.
TensorFlow 2.16+ will use Keras 3 by default.
In TensorFlow 2.16+, to keep using Keras 2, you can first install `tf_keras`,
and then export the environment variable `TF_USE_LEGACY_KERAS=1`.
This will direct TensorFlow 2.16+ to resolve tf.keras to the locally-installed `tf_keras` package.
Note that this may affect more than your own code, however:
it will affect any package importing `tf.keras` in your Python process.
To make sure your changes only affect your own code, you should use the `tf_keras` package.
---
## Enjoy the library!
We're excited for you to try out the new Keras and improve your workflows by leveraging multi-framework ML.
Let us know how it goes: issues, points of friction, feature requests, or success stories —
we're eager to hear from you!
---
## FAQ
### Q: Is Keras 3 compatible with legacy Keras 2?
Code developed with `tf.keras` can generally be run as-is with Keras 3
(with the TensorFlow backend). There's a limited number of incompatibilities you should be mindful
of, all addressed in [this migration guide](/guides/migrating_to_keras_3/).
When it comes to using APIs from `tf.keras` and Keras 3 side by side,
that is **not** possible — they're different packages, running on entirely separate engines.
### Q: Do pretrained models developed in legacy Keras 2 work with Keras 3?
Generally, yes. Any `tf.keras` model should work out of the box with Keras 3
with the TensorFlow backend (make sure to save it in the `.keras` v3 format).
In addition, if the model only
uses built-in Keras layers, then it will also work out of the box
with Keras 3 with the JAX and PyTorch backends.
If the model contains custom layers written using TensorFlow APIs,
it is usually easy to convert the code to be backend-agnostic.
For instance, it only took us a few hours to convert all 40
legacy `tf.keras` models from Keras Applications to be backend-agnostic.
### Q: Can I save a Keras 3 model in one backend and reload it in another backend?
Yes, you can. There is no backend specialization in saved `.keras` files whatsoever.
Your saved Keras models are framework-agnostic and can be reloaded with any backend.
However, note that reloading a model that contains custom components
with a different backend requires your custom components to be implemented
using backend-agnostic APIs, e.g. `keras.ops`.
### Q: Can I use Keras 3 components inside `tf.data` pipelines?
With the TensorFlow backend, Keras 3 is fully compatible with `tf.data`
(e.g. you can `.map()` a `Sequential` model into a `tf.data` pipeline).
With a different backend, Keras 3 has limited support for `tf.data`.
You won't be able to `.map()` arbitrary layers or models into a `tf.data`
pipeline. However, you will be able to use specific Keras 3
preprocessing layers with `tf.data`, such as `IntegerLookup` or
`CategoryEncoding`.
When it comes to using a `tf.data` pipeline (that does not use Keras)
to feed your call to `.fit()`, `.evaluate()` or `.predict()` —
that works out of the box with all backends.
### Q: Do Keras 3 models behave the same when run with different backends?
Yes, numerics are identical across backends.
However, keep in mind the following caveats:
- RNG behavior is different across different backends
(even after seeding — your results will be deterministic in each backend
but will differ across backends). So random weight initialization
values and dropout values will differ across backends.
- Due to the nature of floating-point implementations,
results are only identical up to `1e-7` precision in float32,
per function execution. So when training a model for a long time,
small numerical differences will accumulate and may end up resulting
in noticeable numerical differences.
- Due to lack of support for average pooling with asymmetric padding
in PyTorch, average pooling layers with `padding="same"`
may result in different numerics on border rows/columns.
This doesn't happen very often in practice —
out of 40 Keras Applications vision models, only one was affected.
### Q: Does Keras 3 support distributed training?
Data-parallel distribution is supported out of the box in JAX, TensorFlow,
and PyTorch. Model parallel distribution is supported out of the box for JAX
with the `keras.distribution` API.
**With TensorFlow:**
Keras 3 is compatible with `tf.distribute` —
just open a Distribution Strategy scope and create / train your model within it.
[Here's an example](http://keras.io/guides/distributed_training_with_tensorflow/).
**With PyTorch:**
Keras 3 is compatible with PyTorch's `DistributedDataParallel` utility.
[Here's an example](http://keras.io/guides/distributed_training_with_torch/).
**With JAX:**
You can do both data parallel and model parallel distribution in JAX using the `keras.distribution` API.
For instance, to do data parallel distribution, you only need the following code snippet:
```python
distribution = keras.distribution.DataParallel(devices=keras.distribution.list_devices())
keras.distribution.set_distribution(distribution)
```
For model parallel distribution, see [the following guide](/guides/distribution/).
You can also distribute training yourself via JAX APIs such as
`jax.sharding`. [Here's an example](http://keras.io/guides/distributed_training_with_jax/).
### Q: Can my custom Keras layers be used in native PyTorch `Modules` or with Flax `Modules`?
If they are only written using Keras APIs (e.g. the `keras.ops` namespace), then yes, your
Keras layers will work out of the box with native PyTorch and JAX code.
In PyTorch, just use your Keras layer like any other PyTorch `Module`.
In JAX, make sure to use the stateless layer API, i.e. `layer.stateless_call()`.
### Q: Will you add more backends in the future? What about framework XYZ?
We're open to adding new backends as long as the target framework has a large user base
or otherwise has some unique technical benefits to bring to the table.
However, adding and maintaining a new backend is a large burden,
so we're going to carefully consider each new backend candidate on a case by case basis,
and we're not likely to add many new backends. We will not add any new frameworks
that aren't yet well-established.
We are now potentially considering adding a backend written in [Mojo](https://www.modular.com/mojo).
If that's something you might find useful, please let the Mojo team know.
| keras-io/templates/keras_3/keras_3_announcement.md/0 | {
"file_path": "keras-io/templates/keras_3/keras_3_announcement.md",
"repo_id": "keras-io",
"token_count": 5515
} | 106 |
build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh"
action {
define_artifacts {
regex: "**/sponge_log.log"
regex: "**/sponge_log.xml"
}
}
env_vars: {
key: "KERAS_BACKEND"
value: "jax"
}
# Set timeout to 60 mins from default 180 mins
timeout_mins: 60 | keras-nlp/.kokoro/github/ubuntu/gpu/jax/continuous.cfg/0 | {
"file_path": "keras-nlp/.kokoro/github/ubuntu/gpu/jax/continuous.cfg",
"repo_id": "keras-nlp",
"token_count": 120
} | 107 |
# Roadmap
## What KerasNLP is
- **A high-quality library of modular building blocks.** KerasNLP components
follow an established Keras interface (e.g. `keras.layers.Layer`,
`keras.metrics.Metric`, or `keras_nlp.tokenizers.Tokenizer`), and make it easy
to assemble state-of-the-art NLP workflows.
- **A collection of guides and examples.** This effort is split between two
locations. On [keras.io](keras.io/keras_nlp), we host a collection of
small-scale, easily accessible guides showing end-to-end workflows using
KerasNLP. In this repository, we host a collection of
[examples](https://github.com/keras-team/keras-nlp/tree/master/examples) on
how to train large-scale, state-of-the-art models from scratch. This is not
part of the library itself, but rather a way to vet our components and show
best practices.
- **A community of NLP practitioners.** KerasNLP is an actively growing project,
and we welcome contributors on all fronts of our development. We hope that our
guides and examples can be both a valuable resource to experienced
practitioners and an accessible entry point to newcomers to the field.
## What KerasNLP is not
- **KerasNLP is not a research library.** Researchers may use it, but we do not
consider researchers to be our target audience. Our target audience is
applied NLP engineers with experimentation and production needs. KerasNLP
should make it possible to quickly re-implement industry-strength versions of
the latest generation of architectures produced by researchers, but we don't
expect the research effort itself to be built on top of KerasNLP. This enables
us to focus on usability and API standardization, and produce objects that
have a longer lifespan than the average research project.
- **KerasNLP is not a repository of blackbox end-to-end solutions.**
KerasNLP is focused on modular and reusable building blocks. In the process
of developing these building blocks, we will by necessity implement
end-to-end workflows, but they're intended purely for demonstration and
grounding purposes, they're not our main deliverable.
- **KerasNLP is not a repository of low-level string ops, like `tf.text`.**
KerasNLP is fundamentally an extension of the Keras API: it hosts Keras
objects, like layers, metrics, or callbacks. Low-level C++ ops should go
directly to [Tensorflow Text](https://www.tensorflow.org/text) or
core Tensorflow.
- **KerasNLP is not a Transformer only library.**
Transformer based models are a key offering for KerasNLP, and they should be
easy to train and use within the library. However, we need to support other
types of models, such as n-gram or word2vec approaches that might run more
easily on limited hardware. We will always want the most practical tool for
the task, regardless of the architecture.
## Focus areas for 2022
At this point in our development cycle, we are primarily interested in providing
building blocks for a short list of "greatest hits" NLP models (such as BERT,
GPT-2, word2vec). Given a popular model architecture (e.g. a
sequence-to-sequence transformer like T5) and an end-to-end task (e.g.
summarization), we should have a clear code example in mind and a list of
components to use.
Below, we describe our areas of focus for the year in more detail.
### Easy-to-use and feature-complete tokenization
KerasNLP should be the "go-to" tokenization solution for Keras model training
and deployment by the end of 2022.
The major tasks within this effort:
- Work with Tensorflow Text to continue to support a growing range of
tokenization options and popular vocabulary formats. For example, we would
like to add support for byte-level BPE tokenization (the RoBERTa and GPT
tokenizer) within the Tensorflow graph.
- Pre-trained sub-word tokenizers for any language. Training a tokenizer can
add a lot of friction to a project, particularly when working in a
language where examples are less readily available. We would like to support
a pre-trained tokenization offering that allows a user to choose a tokenizer,
language, and vocabulary size and then download an off the shelf vocabulary.
- A standardized way to train tokenizer vocabularies. As another way to
reduce the friction of training a tokenizer, we should offer a standardized
experience for training new vocabularies.
### Scalable examples of popular model architectures using KerasNLP
We would like our
[examples](https://github.com/keras-team/keras-nlp/tree/master/examples)
directory to contain scalable implementations of popular model
architectures easily runnable on Google Cloud. Note that these will not be
shipped with the library itself.
These examples will serve two purposes—a demonstration to the community of how
models can be built using KerasNLP, and a way to vet the performance and
accuracy of our library components on both TPUs and GPUs at scale.
At this moment in time, our focus is on polishing our BERT example. We would
like it to run entirely on KerasNLP components for both training and
preprocessing, and come with easy recipes for running multi-worker training
jobs. Once this is done, we would like to extend our examples directory to other
popular architectures (e.g. RoBERTa and ELECTRA).
As we move forward with KerasNLP as a whole, we expect development for new
components (say, a new attention mechanism) to happen in tandem with an
example demonstrating the component in an end-to-end architecture.
By the end of 2022, we should have an actively growing collection of examples
models, with a standardized set of training scripts, that match expected
performance as reported in publications.
On the scalability front, we should have at least one example demonstrating both
data parallel and model parallel training, in a multi-worker GPU and TPU
setting, leveraging
[DTensor](https://www.tensorflow.org/guide/dtensor_overview) for distributed
support.
### Tools for data preprocessing and postprocessing for end-to-end workflows
It should be easy to take a trained Keras language model and use it for a wide
range of real world NLP tasks. We should support classification, text
generation, summarization, translation, name-entity recognition, and question
answering. We should have a guide for each of these tasks using KerasNLP by
the end of 2022.
We are looking to develop simple, modular components that make it easy to build
end-to-end workflows for each of these tasks.
Currently, projects in this area include:
- Utilities for generating sequences of text using greedy or beam search.
- Metrics for evaluating the quality of generated sequences, such a ROUGE and
BLEU.
- Data augmentation preprocessing layers for domains with limited data. These
layers will allow easily defining `tf.data` pipelines that augment input
example sentences on the fly.
### Accessible guides and examples on keras.io
For all of the above focus areas, we would like to ensure we have an
industry-leading collection of easy-to-use guides and examples.
These examples should be easy to follow, run within a colab notebook, and
provide a practical starting place for solving most real-world NLP problems.
Given the scale of modern NLP models, this will often involve scaling down the
model or data size for a particular task while preserving the core of what we
are trying to explain to the reader.
This will continue to be a key investment area for the library. If you have an
idea for a guide or example, please open an issue to discuss.
By the end of 2022, most new NLP examples on keras.io should use
KerasNLP library.
## Citation bar
At this moment in time, we have no set citation bar for development, but due to
the newness of the library we want to focus our efforts on a small subset of the
best known and most effective NLP techniques.
Proposed components should usually either be part of a very well known
architecture or contribute in some meaningful way to the usability of an
end-to-end workflow.
## Pretrained modeling workflows
Pretraining many modern NLP models is prohibitively expensive and
time-consuming for an average user. A key goal with for the KerasNLP project is
to have KerasNLP components available in a pretrained model offering of some
form.
We are working with the rest of the TensorFlow ecosystem to provide a coherent
plan for accessing pretrained models. We will continue to share updates as they
are available.
| keras-nlp/ROADMAP.md/0 | {
"file_path": "keras-nlp/ROADMAP.md",
"repo_id": "keras-nlp",
"token_count": 2111
} | 108 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app
from absl import flags
from tensorflow import keras
import keras_nlp
FLAGS = flags.FLAGS
flags.DEFINE_string(
"task_name",
"mrpc",
"The name of the GLUE task to finetune on.",
)
flags.DEFINE_integer(
"batch_size",
32,
"Batch size of data.",
)
flags.DEFINE_integer(
"epochs",
2,
"Number of epochs to run finetuning.",
)
flags.DEFINE_float(
"learning_rate",
5e-5,
"Learning rate",
)
flags.DEFINE_string(
"tpu_name",
None,
"The name of TPU to connect to. If None, no TPU will be used. If you only "
"have one TPU, use `local`",
)
flags.DEFINE_string(
"submission_directory",
None,
"The directory to save the glue submission file.",
)
flags.DEFINE_string(
"load_finetuning_model",
None,
"The path to load the finetuning model. If None, the model is trained.",
)
flags.DEFINE_string(
"save_finetuning_model",
None,
"The path to save the finetuning model. If None, the model is not saved.",
)
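# Example invocation (the script name and flag values below are illustrative
# assumptions; see the accompanying README for the authoritative usage):
#   python glue.py --task_name=mrpc --batch_size=32 --epochs=2 \
#       --learning_rate=5e-5 --submission_directory=glue_submissions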
def load_data(task_name):
"""
Load GLUE dataset.
Load GLUE dataset, and convert the dictionary format to (features, label),
where features is a tuple of all input sentences.
"""
if task_name in ("cola", "sst2"):
feature_names = ("sentence",)
elif task_name in ("mrpc", "stsb", "rte", "wnli"):
feature_names = ("sentence1", "sentence2")
elif task_name in ("mnli", "mnli_matched", "mnli_mismatched", "ax"):
feature_names = ("premise", "hypothesis")
elif task_name in "qnli":
feature_names = ("question", "sentence")
elif task_name in "qqp":
feature_names = ("question1", "question2")
else:
raise ValueError(f"Unknown task_name {task_name}.")
test_suffix = ""
if task_name in ("mnli", "mnli_matched"):
# For "mnli", just run default to "mnli_matched".
task_name = "mnli"
test_suffix = "_matched"
elif task_name in ("mnli_mismatched"):
task_name = "mnli"
test_suffix = "_mismatched"
def split_features(x):
        # GLUE comes with dictionary data; we convert it to a uniform format
# (features, label), where features is a tuple consisting of all
# features.
features = tuple([x[name] for name in feature_names])
label = x["label"]
return (features, label)
if task_name == "ax":
# AX is trained and evaluated on MNLI, and has its own test split.
train_ds, validation_ds = tfds.load(
"glue/mnli",
split=["train", "validation_matched"],
)
test_ds = tfds.load(
"glue/ax",
split="test",
)
else:
train_ds, test_ds, validation_ds = tfds.load(
f"glue/{task_name}",
split=["train", "test" + test_suffix, "validation" + test_suffix],
)
# Extract out the index order of test dataset.
idx_order = test_ds.map(lambda data: data["idx"])
train_ds = train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)
validation_ds = validation_ds.map(
split_features, num_parallel_calls=tf.data.AUTOTUNE
)
return train_ds, test_ds, validation_ds, idx_order
def preprocess_data(preprocess_fn, dataset):
"""Run `proprocess_fn` on input dataset then batch & prefetch."""
return (
dataset.map(preprocess_fn)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
def generate_submission_files(finetuning_model, test_ds, idx_order):
"""Generate GLUE leaderboard submission files."""
filenames = {
"cola": "CoLA.tsv",
"sst2": "SST-2.tsv",
"mrpc": "MRPC.tsv",
"qqp": "QQP.tsv",
"stsb": "STS-B.tsv",
"mnli_matched": "MNLI-m.tsv",
"mnli_mismatched": "MNLI-mm.tsv",
"qnli": "QNLI.tsv",
"rte": "RTE.tsv",
"wnli": "WNLI.tsv",
"ax": "AX.tsv",
}
labelnames = {
"mnli_matched": ["entailment", "neutral", "contradiction"],
"mnli_mismatched": ["entailment", "neutral", "contradiction"],
"ax": ["entailment", "neutral", "contradiction"],
"qnli": ["entailment", "not_entailment"],
"rte": ["entailment", "not_entailment"],
}
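    # File format sketch (illustrative): each submission file is a TSV with a
    # header row, e.g. for "rte":
    #
    #   index   prediction
    #   0       entailment
    #   1       not_entailment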
if not os.path.exists(FLAGS.submission_directory):
os.makedirs(FLAGS.submission_directory)
filename = FLAGS.submission_directory + "/" + filenames[FLAGS.task_name]
labelname = labelnames.get(FLAGS.task_name)
predictions = finetuning_model.predict(test_ds)
if FLAGS.task_name == "stsb":
predictions = np.squeeze(predictions)
else:
predictions = np.argmax(predictions, -1)
# Map the predictions to the right index order.
idx_order = list(idx_order.as_numpy_iterator())
contents = ["" for _ in idx_order]
for idx, pred in zip(idx_order, predictions):
if labelname:
pred_value = labelname[int(pred)]
else:
pred_value = pred
if FLAGS.task_name == "stsb":
pred_value = min(pred_value, 5)
pred_value = max(pred_value, 0)
pred_value = f"{pred_value:.3f}"
contents[idx] = pred_value
with tf.io.gfile.GFile(filename, "w") as f:
# GLUE requires a format of index + tab + prediction.
writer = csv.writer(f, delimiter="\t")
# Write the required headline for GLUE.
writer.writerow(["index", "prediction"])
for idx, value in enumerate(contents):
writer.writerow([idx, value])
def connect_to_tpu(tpu_name):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver.connect(
tpu=tpu_name
)
return tf.distribute.TPUStrategy(resolver)
def main(_):
if FLAGS.tpu_name:
strategy = connect_to_tpu(FLAGS.tpu_name)
policy = keras.mixed_precision.Policy("mixed_bfloat16")
else:
# Use default strategy if not using TPU.
strategy = tf.distribute.get_strategy()
policy = keras.mixed_precision.Policy("mixed_float16")
keras.mixed_precision.set_global_policy(policy)
train_ds, test_ds, val_ds, idx_order = load_data(FLAGS.task_name)
# ----- Custom code block starts -----
bert_preprocessor = keras_nlp.models.BertPreprocessor.from_preset(
"bert_base_en_uncased"
)
# Users should change this function to implement the preprocessing required
# by the model.
def preprocess_fn(feature, label):
return bert_preprocessor(feature), label
# ----- Custom code block ends -----
train_ds = preprocess_data(preprocess_fn, train_ds)
val_ds = preprocess_data(preprocess_fn, val_ds)
test_ds = preprocess_data(preprocess_fn, test_ds)
if FLAGS.load_finetuning_model:
with strategy.scope():
finetuning_model = tf.keras.models.load_model(
FLAGS.load_finetuning_model
)
else:
with strategy.scope():
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = [keras.metrics.SparseCategoricalAccuracy()]
if FLAGS.task_name == "stsb":
num_classes = 1
loss = keras.losses.MeanSquaredError()
metrics = [keras.metrics.MeanSquaredError()]
elif FLAGS.task_name in (
"mnli",
"mnli_mismatched",
"mnli_matched",
"ax",
):
num_classes = 3
else:
num_classes = 2
# ----- Custom code block starts -----
# Users should change this `BertClassifier` to your own classifier.
# Commonly the classifier is simply your model + several dense layers,
# please refer to "Make the Finetuning Model" section in README for
# detailed instructions.
bert_model = keras_nlp.models.BertBackbone.from_preset(
"bert_base_en_uncased"
)
finetuning_model = keras_nlp.models.BertClassifier(
backbone=bert_model,
num_classes=num_classes,
)
# ----- Custom code block ends -----
lr = tf.keras.optimizers.schedules.PolynomialDecay(
FLAGS.learning_rate,
decay_steps=train_ds.cardinality() * FLAGS.epochs,
end_learning_rate=0.0,
)
optimizer = tf.keras.optimizers.experimental.AdamW(
lr, weight_decay=0.01, global_clipnorm=1.0
)
optimizer.exclude_from_weight_decay(
var_names=["LayerNorm", "layer_norm", "bias"]
)
finetuning_model.compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
)
finetuning_model.fit(
train_ds,
validation_data=val_ds,
epochs=FLAGS.epochs,
)
with strategy.scope():
if FLAGS.submission_directory:
generate_submission_files(finetuning_model, test_ds, idx_order)
if FLAGS.save_finetuning_model:
# Don't need to save the optimizer.
finetuning_model.optimizer = None
finetuning_model.save(FLAGS.save_finetuning_model)
if __name__ == "__main__":
app.run(main)
| keras-nlp/examples/glue_benchmark/glue.py/0 | {
"file_path": "keras-nlp/examples/glue_benchmark/glue.py",
"repo_id": "keras-nlp",
"token_count": 4580
} | 109 |
# Copyright 2021 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from tensorflow import keras
import keras_nlp
class BasicUsageTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("jit_compile_false", False), ("jit_compile_true", True)
)
def test_quick_start(self, jit_compile):
"""This matches the quick start example in our base README."""
# Tokenize some inputs with a binary label.
vocab = ["[UNK]", "the", "qu", "##ick", "br", "##own", "fox", "."]
sentences = ["The quick brown fox jumped.", "The fox slept."]
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab,
sequence_length=10,
)
x, y = tokenizer(sentences), tf.constant([1, 0])
# Create a tiny transformer.
inputs = keras.Input(shape=(None,), dtype="int32")
outputs = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=len(vocab),
sequence_length=10,
embedding_dim=16,
)(inputs)
outputs = keras_nlp.layers.TransformerEncoder(
num_heads=4,
intermediate_dim=32,
)(outputs)
outputs = keras.layers.GlobalAveragePooling1D()(outputs)
outputs = keras.layers.Dense(1, activation="sigmoid")(outputs)
model = keras.Model(inputs, outputs)
# Run a single batch of gradient descent.
model.compile(loss="binary_crossentropy", jit_compile=jit_compile)
loss = model.train_on_batch(x, y)
# Make sure we have a valid loss.
self.assertGreater(loss, 0)
| keras-nlp/integration_tests/basic_usage_test.py/0 | {
"file_path": "keras-nlp/integration_tests/basic_usage_test.py",
"repo_id": "keras-nlp",
"token_count": 865
} | 110 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.utils.keras_utils import clone_initializer
@keras_nlp_export("keras_nlp.layers.FNetEncoder")
class FNetEncoder(keras.layers.Layer):
"""FNet encoder.
    This class follows the architecture of the FNet encoder layer in the
    [FNet paper](https://arxiv.org/abs/2105.03824). Users can instantiate
    multiple instances of this class to stack up the encoder.
    Note on masking: In the official FNet code, padding tokens are added to
    the input. However, the padding masks are deleted, i.e., mixing of
    all tokens is done. This is because certain frequencies would be zeroed
    out if we applied padding masks in every encoder layer. Hence, we don't
    take a padding mask as input in the `call()` function.
Args:
intermediate_dim: int. The hidden size of feedforward network.
dropout: float. The dropout value, applied in the
feedforward network. Defaults to `0.`.
activation: string or `keras.activations`. The
activation function of feedforward network.
Defaults to `"relu"`.
layer_norm_epsilon: float. The epsilon value in layer
normalization components. Defaults to `1e-5`.
kernel_initializer: `str` or `keras.initializers` initializer.
The kernel initializer for the dense layers.
Defaults to `"glorot_uniform"`.
bias_initializer: "string" or `keras.initializers` initializer.
The bias initializer for the dense layers.
Defaults to `"zeros"`.
name: string. The name of the layer. Defaults to `None`.
**kwargs: other keyword arguments.
Examples:
```python
# Create a single FNet encoder layer.
encoder = keras_nlp.layers.FNetEncoder(
intermediate_dim=64)
# Create a simple model containing the encoder.
input = keras.Input(shape=(10, 64))
output = encoder(input)
model = keras.Model(inputs=input, outputs=output)
# Call encoder on the inputs.
input_data = np.random.uniform(size=(1, 10, 64))
output = model(input_data)
```
References:
- [Lee-Thorp et al., 2021](https://arxiv.org/abs/2105.03824)
"""
def __init__(
self,
intermediate_dim,
dropout=0,
activation="relu",
layer_norm_epsilon=1e-5,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
name=None,
**kwargs
):
super().__init__(name=name, **kwargs)
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.activation = keras.activations.get(activation)
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
def build(self, inputs_shape):
# Create layers based on input shape.
feature_size = inputs_shape[-1]
# Layer Norm layers.
self._mixing_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="mixing_layer_norm",
)
self._mixing_layer_norm.build(inputs_shape)
self._output_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="output_layer_norm",
)
self._output_layer_norm.build(inputs_shape)
# Feedforward layers.
self._intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="intermediate_dense",
)
self._intermediate_dense.build(inputs_shape)
self._output_dense = keras.layers.Dense(
feature_size,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="output_dense",
)
self._output_dense.build(
self._intermediate_dense.compute_output_shape(inputs_shape)
)
self._output_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="output_dropout",
)
self.built = True
def call(self, inputs):
"""Forward pass of the FNetEncoder.
Args:
            inputs: a Tensor. The input data to FNetEncoder, should be
                of shape [batch_size, sequence_length, feature_dim].
Returns:
A Tensor of the same shape as the `inputs`.
"""
def fourier_transform(input):
# Apply FFT on the input and take the real part.
input_dtype = input.dtype
# FFT transforms do not support float16.
input = ops.cast(input, "float32")
real_in, imaginary_in = (input, ops.zeros_like(input))
real_out, _ = ops.fft2((real_in, imaginary_in))
return ops.cast(real_out, input_dtype)
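        # Shape note (sketch): for an input of shape (batch, seq_len, dim),
        # `ops.fft2` above transforms over the last two axes and only the real
        # component is kept, so the mixing output has the same shape as its
        # input.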
def add_and_norm(input1, input2, norm_layer):
return norm_layer(input1 + input2)
def feed_forward(input):
x = self._intermediate_dense(input)
x = self._output_dense(x)
return self._output_dropout(x)
mixing_output = fourier_transform(inputs)
mixing_output = add_and_norm(
inputs, mixing_output, self._mixing_layer_norm
)
feed_forward_output = feed_forward(mixing_output)
x = add_and_norm(
mixing_output, feed_forward_output, self._output_layer_norm
)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"activation": keras.activations.serialize(self.activation),
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
}
)
return config
def compute_output_shape(self, inputs_shape):
return inputs_shape
| keras-nlp/keras_nlp/layers/modeling/f_net_encoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/f_net_encoder.py",
"repo_id": "keras-nlp",
"token_count": 3170
} | 111 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.utils.keras_utils import clone_initializer
from keras_nlp.layers.modeling.transformer_layer_utils import ( # isort:skip
merge_padding_and_attention_mask,
)
@keras_nlp_export("keras_nlp.layers.TransformerEncoder")
class TransformerEncoder(keras.layers.Layer):
"""Transformer encoder.
This class follows the architecture of the transformer encoder layer in the
paper [Attention is All You Need](https://arxiv.org/abs/1706.03762). Users
can instantiate multiple instances of this class to stack up an encoder.
This layer will correctly compute an attention mask from an implicit
Keras padding mask (for example, by passing `mask_zero=True` to a
`keras.layers.Embedding` layer). See the Masking and Padding
[guide](https://keras.io/guides/understanding_masking_and_padding/)
for more details.
Args:
intermediate_dim: int, the hidden size of feedforward network.
num_heads: int, the number of heads in the
`keras.layers.MultiHeadAttention` layer.
        dropout: float. The dropout value, shared by
            `keras.layers.MultiHeadAttention` and the feedforward network.
            Defaults to `0.`.
        activation: string or `keras.activations`. The
            activation function of the feedforward network.
            Defaults to `"relu"`.
layer_norm_epsilon: float. The epsilon value in layer
normalization components. Defaults to `1e-5`.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense and multiheaded
attention layers. Defaults to `"glorot_uniform"`.
bias_initializer: string or `keras.initializers` initializer.
The bias initializer for the dense and multiheaded
attention layers. Defaults to `"zeros"`.
normalize_first: bool. If True, the inputs to the
attention layer and the intermediate dense layer are normalized
(similar to GPT-2). If set to False, outputs of attention layer and
intermediate dense layer are normalized (similar to BERT).
Defaults to `False`.
name: string. The name of the layer. Defaults to `None`.
**kwargs: other keyword arguments.
Examples:
```python
# Create a single transformer encoder layer.
encoder = keras_nlp.layers.TransformerEncoder(
intermediate_dim=64, num_heads=8)
# Create a simple model containing the encoder.
input = keras.Input(shape=(10, 64))
output = encoder(input)
model = keras.Model(inputs=input, outputs=output)
# Call encoder on the inputs.
input_data = np.random.uniform(size=(2, 10, 64))
output = model(input_data)
```
References:
- [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
"""
def __init__(
self,
intermediate_dim,
num_heads,
dropout=0,
activation="relu",
layer_norm_epsilon=1e-05,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
normalize_first=False,
**kwargs,
):
super().__init__(**kwargs)
self.intermediate_dim = intermediate_dim
self.num_heads = num_heads
self.dropout = dropout
self.activation = keras.activations.get(activation)
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.normalize_first = normalize_first
self.supports_masking = True
def build(self, inputs_shape):
# Infer the dimension of our hidden feature size from the build shape.
hidden_dim = inputs_shape[-1]
# Attention head size is `hidden_dim` over the number of heads.
key_dim = int(hidden_dim // self.num_heads)
if key_dim == 0:
raise ValueError(
"Attention `key_dim` computed cannot be zero. "
f"The `hidden_dim` value of {hidden_dim} has to be equal to "
f"or greater than `num_heads` value of {self.num_heads}."
)
# Self attention layers.
self._self_attention_layer = keras.layers.MultiHeadAttention(
num_heads=self.num_heads,
key_dim=key_dim,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="self_attention_layer",
)
if hasattr(self._self_attention_layer, "_build_from_signature"):
self._self_attention_layer._build_from_signature(
query=inputs_shape,
value=inputs_shape,
)
else:
self._self_attention_layer.build(
query_shape=inputs_shape,
value_shape=inputs_shape,
)
self._self_attention_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="self_attention_layer_norm",
)
self._self_attention_layer_norm.build(inputs_shape)
self._self_attention_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="self_attention_dropout",
)
# Feedforward layers.
self._feedforward_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="feedforward_layer_norm",
)
self._feedforward_layer_norm.build(inputs_shape)
self._feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_intermediate_dense",
)
self._feedforward_intermediate_dense.build(inputs_shape)
self._feedforward_output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_output_dense",
)
intermediate_shape = list(inputs_shape)
intermediate_shape[-1] = self.intermediate_dim
self._feedforward_output_dense.build(tuple(intermediate_shape))
self._feedforward_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="feedforward_dropout",
)
self.built = True
def call(self, inputs, padding_mask=None, attention_mask=None):
"""Forward pass of the TransformerEncoder.
Args:
inputs: a Tensor. The input data to TransformerEncoder, should be
of shape [batch_size, sequence_length, hidden_dim].
padding_mask: a boolean Tensor. It indicates if the token should be
masked because the token is introduced due to padding.
`padding_mask` should have shape [batch_size, sequence_length].
attention_mask: a boolean Tensor. Customized mask used to mask out
certain tokens. `attention_mask` should have shape
[batch_size, sequence_length, sequence_length].
Returns:
A Tensor of the same shape as the `inputs`.
"""
x = inputs # Intermediate result.
# Compute self attention mask.
self_attention_mask = merge_padding_and_attention_mask(
inputs, padding_mask, attention_mask
)
# Self attention block.
residual = x
if self.normalize_first:
x = self._self_attention_layer_norm(x)
x = self._self_attention_layer(
query=x,
value=x,
attention_mask=self_attention_mask,
)
x = self._self_attention_dropout(x)
x = x + residual
if not self.normalize_first:
x = self._self_attention_layer_norm(x)
# Feedforward block.
residual = x
if self.normalize_first:
x = self._feedforward_layer_norm(x)
x = self._feedforward_intermediate_dense(x)
x = self._feedforward_output_dense(x)
x = self._feedforward_dropout(x)
x = x + residual
if not self.normalize_first:
x = self._feedforward_layer_norm(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"intermediate_dim": self.intermediate_dim,
"num_heads": self.num_heads,
"dropout": self.dropout,
"activation": keras.activations.serialize(self.activation),
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
"normalize_first": self.normalize_first,
}
)
return config
def compute_output_shape(self, inputs_shape):
return inputs_shape
| keras-nlp/keras_nlp/layers/modeling/transformer_encoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/transformer_encoder.py",
"repo_id": "keras-nlp",
"token_count": 4480
} | 112 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
from keras_nlp.models.bart.bart_preprocessor import BartPreprocessor
from keras_nlp.models.bart.bart_tokenizer import BartTokenizer
from keras_nlp.tests.test_case import TestCase
class BartPreprocessorTest(TestCase):
def setUp(self):
self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
self.vocab += ["port", "<mask>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.tokenizer = BartTokenizer(
vocabulary=self.vocab, merges=self.merges
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"encoder_sequence_length": 5,
"decoder_sequence_length": 8,
}
self.input_data = (
{
"encoder_text": [" airplane at airport"],
"decoder_text": [" airplane airport"],
},
[1], # Pass through labels.
[1.0], # Pass through sample_weights.
)
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=BartPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"encoder_token_ids": [[0, 4, 5, 6, 2]],
"encoder_padding_mask": [[1, 1, 1, 1, 1]],
"decoder_token_ids": [[2, 0, 4, 5, 4, 7, 2, 1]],
"decoder_padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
},
[1], # Pass through labels.
[1.0], # Pass through sample_weights.
),
token_id_key="decoder_token_ids",
)
def test_error_multi_segment_input(self):
preprocessor = BartPreprocessor(**self.init_kwargs)
input_data = {
"encoder_text": (
tf.constant([" airplane at airport"] * 2),
tf.constant([" airplane"] * 2),
),
"decoder_text": (
tf.constant([" kohli is the best"] * 2),
tf.constant([" kohli"] * 2),
),
}
with self.assertRaises(ValueError):
preprocessor(input_data)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in BartPreprocessor.presets:
self.run_preset_test(
cls=BartPreprocessor,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/bart/bart_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bart/bart_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1604
} | 113 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.bert.bert_backbone import BertBackbone
from keras_nlp.models.bert.bert_masked_lm import BertMaskedLM
from keras_nlp.models.bert.bert_masked_lm_preprocessor import (
BertMaskedLMPreprocessor,
)
from keras_nlp.models.bert.bert_tokenizer import BertTokenizer
from keras_nlp.tests.test_case import TestCase
class BertMaskedLMTest(TestCase):
def setUp(self):
# Setup model.
self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
self.vocab += ["the", "quick", "brown", "fox", "."]
self.preprocessor = BertMaskedLMPreprocessor(
BertTokenizer(vocabulary=self.vocab),
# Simplify our testing by masking every available token.
mask_selection_rate=1.0,
mask_token_rate=1.0,
random_token_rate=0.0,
mask_selection_length=5,
sequence_length=5,
)
self.backbone = BertBackbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_heads=2,
hidden_dim=2,
intermediate_dim=4,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = (
["the quick brown fox.", "the slow brown fox."], # Features.
)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_masked_lm_basics(self):
self.run_task_test(
cls=BertMaskedLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 5, 10),
)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=BertMaskedLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in BertMaskedLM.presets:
self.run_preset_test(
cls=BertMaskedLM,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/bert/bert_masked_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bert/bert_masked_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 1266
} | 114 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.models.deberta_v3.disentangled_self_attention import (
DisentangledSelfAttention,
)
from keras_nlp.utils.keras_utils import clone_initializer
from keras_nlp.layers.modeling.transformer_layer_utils import ( # isort:skip
merge_padding_and_attention_mask,
)
class DisentangledAttentionEncoder(keras.layers.Layer):
"""Disentangled attention encoder.
This class follows the architecture of the disentangled attention encoder
layer in the paper
["DeBERTaV3: Improving DeBERTa using ELECTRA-Style Pre-Training with Gradient-Disentangled Embedding Sharing"](https://arxiv.org/abs/2111.09543).
    Users can instantiate multiple instances of this class to stack up
    an encoder model which has disentangled self-attention.
`DisentangledAttentionEncoder` is similar to
`keras_nlp.layers.TransformerEncoder`, except for the attention layer - it
uses disentangled self-attention instead of multi-head attention.
Args:
intermediate_dim: int, the hidden size of feedforward network.
num_heads: int, the number of heads in the attention layer.
max_position_embeddings: int. The maximum input
sequence length. Defaults to `512`.
bucket_size: int. The size of the relative position
buckets. Generally equal to `max_sequence_length // 2`.
Defaults to `256`.
dropout: float. The dropout value, shared by
the attention layer and feedforward network.
Defaults to `0.0`.
        activation: string or `keras.activations`. The
            activation function of the feedforward network.
            Defaults to `"relu"`.
layer_norm_epsilon: float. The epsilon value in layer
normalization components. Defaults to `1e-5`.
kernel_initializer: string or `keras.initializers` initializer.
The kernel initializer for the dense and disentangled
self-attention layers. Defaults to `"glorot_uniform"`.
bias_initializer: string or `keras.initializers` initializer.
The bias initializer for the dense and disentangled
self-attention layers. Defaults to `"zeros"`.
"""
def __init__(
self,
intermediate_dim,
num_heads,
max_position_embeddings=512,
bucket_size=256,
dropout=0,
activation="relu",
layer_norm_epsilon=1e-05,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs
):
super().__init__(**kwargs)
self.intermediate_dim = intermediate_dim
self.num_heads = num_heads
self.max_position_embeddings = max_position_embeddings
self.bucket_size = bucket_size
self.dropout = dropout
self.activation = keras.activations.get(activation)
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self._built = False
self.supports_masking = True
def build(self, inputs_shape):
# Infer the dimension of our hidden feature size from the build shape.
hidden_dim = inputs_shape[-1]
# Self attention layers.
self._self_attention_layer = DisentangledSelfAttention(
num_heads=self.num_heads,
hidden_dim=hidden_dim,
max_position_embeddings=self.max_position_embeddings,
bucket_size=self.bucket_size,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="self_attention_layer",
)
self._self_attention_layer.build(inputs_shape)
self._self_attention_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="self_attention_layer_norm",
)
self._self_attention_layer_norm.build(inputs_shape)
self._self_attention_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="self_attention_dropout",
)
# Feedforward layers.
self._feedforward_layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="feedforward_layer_norm",
)
self._feedforward_layer_norm.build(inputs_shape)
self._feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
activation=self.activation,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_intermediate_dense",
)
self._feedforward_intermediate_dense.build(inputs_shape)
self._feedforward_output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="feedforward_output_dense",
)
intermediate_shape = list(inputs_shape)
intermediate_shape[-1] = self.intermediate_dim
self._feedforward_output_dense.build(tuple(intermediate_shape))
self._feedforward_dropout = keras.layers.Dropout(
rate=self.dropout,
dtype=self.dtype_policy,
name="feedforward_dropout",
)
self.built = True
def call(
self,
inputs,
rel_embeddings,
padding_mask=None,
attention_mask=None,
):
"""Forward pass of `DisentangledAttentionEncoder`.
Args:
inputs: a Tensor. The input data to `DisentangledAttentionEncoder`, should be
of shape [batch_size, sequence_length, hidden_dim].
rel_embeddings: a Tensor. The relative position embedding matrix,
should be of shape `[batch_size, 2 * bucket_size, hidden_dim]`.
padding_mask: a boolean Tensor. It indicates if the token should be
masked because the token is introduced due to padding.
                `padding_mask` should have shape [batch_size, sequence_length].
                A `False` entry means the corresponding token is masked out.
attention_mask: a boolean Tensor. Customized mask used to mask out
certain tokens. `attention_mask` should have shape
[batch_size, sequence_length, sequence_length].
Returns:
A Tensor of the same shape as the `inputs`.
"""
x = inputs
# Compute self attention mask.
self_attention_mask = merge_padding_and_attention_mask(
inputs, padding_mask, attention_mask
)
# Self attention block.
residual = x
x = self._self_attention_layer(
x,
rel_embeddings=rel_embeddings,
attention_mask=self_attention_mask,
)
x = self._self_attention_dropout(x)
x = x + residual
x = self._self_attention_layer_norm(x)
# Feedforward block.
residual = x
x = self._feedforward_intermediate_dense(x)
x = self._feedforward_output_dense(x)
x = self._feedforward_dropout(x)
x = x + residual
x = self._feedforward_layer_norm(x)
return x
def get_config(self):
config = super().get_config()
config.update(
{
"intermediate_dim": self.intermediate_dim,
"num_heads": self.num_heads,
"max_position_embeddings": self.max_position_embeddings,
"bucket_size": self.bucket_size,
"dropout": self.dropout,
"activation": keras.activations.serialize(self.activation),
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
}
)
return config
def compute_output_shape(self, inputs_shape):
return inputs_shape
| keras-nlp/keras_nlp/models/deberta_v3/disentangled_attention_encoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/deberta_v3/disentangled_attention_encoder.py",
"repo_id": "keras-nlp",
"token_count": 3977
} | 115 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gemma model preset configurations."""
# Metadata for loading pretrained model weights.
backbone_presets = {
"gemma_2b_en": {
"metadata": {
"description": (
"18-layer Gemma model (Gemma with 2B parameters). "
),
"params": 2506172416,
"official_name": "Gemma",
"path": "gemma",
"model_card": "https://www.kaggle.com/models/google/gemma",
},
"kaggle_handle": "kaggle://keras/gemma/keras/gemma_2b_en/2",
},
"gemma_instruct_2b_en": {
"metadata": {
"description": (
"18-layer Gemma model (Gemma with 2B parameters). "
),
"params": 2506172416,
"official_name": "Gemma",
"path": "gemma",
"model_card": "https://www.kaggle.com/models/google/gemma",
},
"kaggle_handle": "kaggle://keras/gemma/keras/gemma_instruct_2b_en/2",
},
"gemma_7b_en": {
"metadata": {
"description": (
"28-layer Gemma model (Gemma with 7B parameters). "
),
"params": 8537680896,
"official_name": "Gemma",
"path": "gemma",
"model_card": "https://www.kaggle.com/models/google/gemma",
},
"kaggle_handle": "kaggle://keras/gemma/keras/gemma_7b_en/2",
},
"gemma_instruct_7b_en": {
"metadata": {
"description": (
"28-layer Gemma model (Gemma with 7B parameters). "
),
"params": 8537680896,
"official_name": "Gemma",
"path": "gemma",
"model_card": "https://www.kaggle.com/models/google/gemma",
},
"kaggle_handle": "kaggle://keras/gemma/keras/gemma_instruct_7b_en/2",
},
}
| keras-nlp/keras_nlp/models/gemma/gemma_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_presets.py",
"repo_id": "keras-nlp",
"token_count": 1144
} | 116 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
from keras_nlp.tests.test_case import TestCase
class GPT2TokenizerTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<|endoftext|>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges}
self.input_data = [
" airplane at airport<|endoftext|>",
" airplane airport",
]
def test_tokenizer_basics(self):
self.run_preprocessing_layer_test(
cls=GPT2Tokenizer,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=[[2, 3, 4, 2, 5, 6], [2, 3, 2, 5]],
)
def test_errors_missing_special_tokens(self):
with self.assertRaises(ValueError):
GPT2Tokenizer(vocabulary=["a", "b", "c"], merges=[])
@pytest.mark.large
def test_smallest_preset(self):
self.run_preset_test(
cls=GPT2Tokenizer,
preset="gpt2_base_en",
input_data=["The quick brown fox."],
expected_output=[[464, 2068, 7586, 21831, 13]],
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in GPT2Tokenizer.presets:
self.run_preset_test(
cls=GPT2Tokenizer,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/gpt2/gpt2_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt2/gpt2_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 1042
} | 117 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.llama.llama_decoder import LlamaDecoder
from keras_nlp.models.llama.llama_layernorm import LlamaLayerNorm
def _llama_kernel_initializer(stddev=0.02):
return keras.initializers.RandomNormal(stddev=stddev)
@keras_nlp_export("keras_nlp.models.LlamaBackbone")
class LlamaBackbone(Backbone):
"""
LLaMA core network with hyperparameters.
This network implements a Transformer-based decoder network,
    LLaMA, as described in ["LLaMA: Open and Efficient Foundation Language Models"](https://arxiv.org/abs/2302.13971).
The default constructor gives a fully customizable, randomly initialized
LLaMA model with any number of layers, heads, and embedding
dimensions. This backbone also supports LLaMA2 checkpoints.
Args:
vocabulary_size: int. The size of the token vocabulary.
num_layers: int. The number of transformer layers.
num_query_heads: int. The number of attention heads for each transformer.
The hidden size must be divisible by the number of attention heads.
hidden_dim: int. The size of the transformer encoding and pooler layers.
intermediate_dim: int. The output dimension of the first Dense layer in
a two-layer feedforward network for each transformer.
        num_key_value_heads: int. The number of key/value heads used to
            implement Grouped Query Attention (GQA). If
            `num_key_value_heads == num_query_heads`, the model uses Multi-Head
            Attention (MHA); if `num_key_value_heads == 1`, it uses Multi-Query
            Attention (MQA).
        rope_scaling_factor: float. The scaling factor used to compute the
            rotary embedding.
rope_max_wavelength: int. The maximum angular wavelength of the
sine/cosine curves, for rotary embeddings.
layer_norm_epsilon: float. a value added to the denominator for
numerical stability.
max_sequence_length: int. The maximum sequence length that this encoder
can consume. If `None`, `max_sequence_length` uses the value from
sequence length. This determines the variable shape for positional
embeddings.
dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
for model computations and weights. Note that some computations,
such as softmax and layer normalization, will always be done at
float32 precision regardless of dtype.
"""
def __init__(
self,
vocabulary_size,
num_layers,
num_query_heads,
hidden_dim,
intermediate_dim,
num_key_value_heads,
rope_scaling_factor=1.0,
rope_max_wavelength=10000,
layer_norm_epsilon=1e-5,
max_sequence_length=4096,
dtype=None,
**kwargs,
):
# === Layers ===
self.token_embedding = ReversibleEmbedding(
input_dim=vocabulary_size,
output_dim=hidden_dim,
embeddings_initializer=_llama_kernel_initializer(stddev=0.01),
tie_weights=False,
dtype=dtype,
name="token_embedding",
)
self.transformer_layers = []
for i in range(num_layers):
layer = LlamaDecoder(
intermediate_dim=intermediate_dim,
num_query_heads=num_query_heads,
num_key_value_heads=num_key_value_heads,
rope_scaling_factor=rope_scaling_factor,
max_sequence_length=max_sequence_length,
rope_max_wavelength=rope_max_wavelength,
layer_norm_epsilon=layer_norm_epsilon,
activation=ops.silu,
kernel_initializer=_llama_kernel_initializer(stddev=0.02),
dtype=dtype,
name=f"transformer_layer_{i}",
)
self.transformer_layers.append(layer)
self.layer_norm = LlamaLayerNorm(
dtype=dtype,
epsilon=layer_norm_epsilon,
name="layer_norm",
)
# === Functional Model ===
token_id_input = keras.Input(
shape=(None,), dtype="int32", name="token_ids"
)
padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="padding_mask"
)
x = self.token_embedding(token_id_input)
for transformer_layer in self.transformer_layers:
x = transformer_layer(x, decoder_padding_mask=padding_mask_input)
sequence_output = self.layer_norm(x)
super().__init__(
inputs={
"token_ids": token_id_input,
"padding_mask": padding_mask_input,
},
outputs=sequence_output,
**kwargs,
)
# === Config ===
self.vocabulary_size = vocabulary_size
self.num_layers = num_layers
self.num_query_heads = num_query_heads
self.hidden_dim = hidden_dim
self.intermediate_dim = intermediate_dim
self.rope_max_wavelength = rope_max_wavelength
self.num_key_value_heads = num_key_value_heads
self.rope_scaling_factor = rope_scaling_factor
self.max_sequence_length = max_sequence_length
self.layer_norm_epsilon = layer_norm_epsilon
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"num_layers": self.num_layers,
"num_query_heads": self.num_query_heads,
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"rope_max_wavelength": self.rope_max_wavelength,
"rope_scaling_factor": self.rope_scaling_factor,
"num_key_value_heads": self.num_key_value_heads,
"max_sequence_length": self.max_sequence_length,
"layer_norm_epsilon": self.layer_norm_epsilon,
}
)
return config
| keras-nlp/keras_nlp/models/llama/llama_backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/llama/llama_backbone.py",
"repo_id": "keras-nlp",
"token_count": 2971
} | 118 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Whisper Cached Multi-Head Attention layer."""
import collections
import string
import keras_nlp
from keras_nlp.backend import keras
def _index_to_einsum_variable(i):
"""Converts an index to a einsum variable name.
We simply map indices to lowercase characters, e.g. 0 -> 'a', 1 -> 'b'.
"""
return string.ascii_lowercase[i]
def _build_proj_equation(free_dims, bound_dims, output_dims):
"""Builds an einsum equation for projections inside multi-head attention."""
input_str = ""
kernel_str = ""
output_str = ""
bias_axes = ""
letter_offset = 0
for i in range(free_dims):
char = _index_to_einsum_variable(i + letter_offset)
input_str += char
output_str += char
letter_offset += free_dims
for i in range(bound_dims):
char = _index_to_einsum_variable(i + letter_offset)
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _index_to_einsum_variable(i + letter_offset)
kernel_str += char
output_str += char
bias_axes += char
equation = f"{input_str},{kernel_str}->{output_str}"
return equation, bias_axes, len(output_str)
def _get_output_shape(output_rank, known_last_dims):
return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
@keras.saving.register_keras_serializable(package="keras_nlp")
class WhisperCachedMultiHeadAttention(
keras_nlp.layers.CachedMultiHeadAttention
):
"""Whisper Cached Multi-Head Attention layer.
    Inherits from `keras_nlp.layers.CachedMultiHeadAttention`, and overrides
    the `build` method so that the Q and V projection layers have a bias term,
    whereas the K projection layer does not.
"""
def build(
self,
query_shape,
value_shape,
key_shape=None,
):
key_shape = value_shape if key_shape is None else key_shape
query_rank = len(query_shape)
value_rank = len(value_shape)
key_rank = len(key_shape)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
query_rank - 1, bound_dims=1, output_dims=2
)
self._query_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._key_dim]
),
bias_axes=bias_axes if self._use_bias else None,
name="query",
**self._get_common_kwargs_for_sublayer(),
)
self._query_dense.build(query_shape)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
key_rank - 1, bound_dims=1, output_dims=2
)
self._key_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._key_dim]
),
bias_axes=None,
name="key",
**self._get_common_kwargs_for_sublayer(),
)
self._key_dense.build(key_shape)
einsum_equation, bias_axes, output_rank = _build_proj_equation(
value_rank - 1, bound_dims=1, output_dims=2
)
self._value_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(
output_rank - 1, [self._num_heads, self._value_dim]
),
bias_axes=bias_axes if self._use_bias else None,
name="value",
**self._get_common_kwargs_for_sublayer(),
)
self._value_dense.build(value_shape)
# Builds the attention computations for multi-head dot product
# attention. These computations could be wrapped into the keras
# attention layer once it supports multi-head einsum computations.
self._build_attention(output_rank)
if self._output_shape:
if not isinstance(self._output_shape, collections.abc.Sized):
output_shape = [self._output_shape]
else:
output_shape = self._output_shape
else:
output_shape = [query_shape[-1]]
einsum_equation, bias_axes, output_rank = _build_proj_equation(
query_rank - 1, bound_dims=2, output_dims=len(output_shape)
)
self._output_dense = keras.layers.EinsumDense(
einsum_equation,
output_shape=_get_output_shape(output_rank - 1, output_shape),
bias_axes=bias_axes if self._use_bias else None,
name="attention_output",
**self._get_common_kwargs_for_sublayer(),
)
output_dense_input_shape = list(
self._query_dense.compute_output_shape(query_shape)
)
output_dense_input_shape[-1] = self._value_dim
self._output_dense.build(tuple(output_dense_input_shape))
self.built = True
def _build_from_signature(self, query, value, key=None):
pass
| keras-nlp/keras_nlp/models/whisper/whisper_cached_multi_head_attention.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_cached_multi_head_attention.py",
"repo_id": "keras-nlp",
"token_count": 2481
} | 119 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_nlp.backend import ops
from keras_nlp.samplers.contrastive_sampler import ContrastiveSampler
from keras_nlp.tests.test_case import TestCase
class ContrastiveSamplerTest(TestCase):
def setUp(self):
super().setUp()
# Use a simple alphabet of lowercase characters to [0, 26).
self.int_lookup = {i: chr(i + ord("a")) for i in range(26)}
self.char_lookup = {v: k for k, v in self.int_lookup.items()}
self.batch_size = 1
self.length = 12
self.hidden_dim = 3
self.vocab_size = len(self.int_lookup)
self.hidden_states = ops.ones(
[
self.batch_size,
self.length,
self.hidden_dim,
]
)
def next(prompt, cache, index):
batch_size = ops.shape(prompt)[0]
# Return a distribution favoring the next char in cache.
logits = ops.one_hot(cache[:, index], self.vocab_size) * 1e9
hidden_states = ops.ones([batch_size, self.hidden_dim])
return logits, hidden_states, cache
self.next = next
self.sampler = ContrastiveSampler(k=5, alpha=0.2, temperature=1.0)
def join_as_string(self, x):
x = ops.convert_to_numpy(x)
return ["".join([self.int_lookup[i] for i in s]) for s in x]
def test_stateless_call(self):
def next(prompt, cache, index):
# Return a distribution favoring the first token in the vocab.
batch_size = ops.shape(prompt)[0]
logits = (
ops.one_hot(
ops.zeros(batch_size, dtype="int32"),
self.vocab_size,
)
* 1e9
)
hidden_states = ops.ones([batch_size, self.hidden_dim])
return logits, hidden_states, cache
prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
output = self.sampler(
next=next,
prompt=prompt,
index=5,
hidden_states=self.hidden_states,
)
self.assertEqual(self.join_as_string(output), ["zzzzzaaaaaaa"])
def test_stateful_call(self):
cache_chars = list("sequentiallyy")
cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
prompt = ops.full((self.batch_size, self.length), self.char_lookup["s"])
output = self.sampler(
next=self.next,
prompt=prompt,
cache=cache,
index=1,
hidden_states=self.hidden_states,
)
self.assertEqual(self.join_as_string(output), ["sequentially"])
def test_early_stopping(self):
cache_chars = list("sequentiallyy")
cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
prompt = ops.full((self.batch_size, self.length), self.char_lookup["s"])
output = self.sampler(
next=self.next,
prompt=prompt,
cache=cache,
end_token_id=self.char_lookup["t"],
index=0,
hidden_states=self.hidden_states,
)
self.assertEqual(self.join_as_string(output), ["sequentsssss"])
def test_outputs_in_top_k(self):
def next(prompt, cache, index):
batch_size = ops.shape(prompt)[0]
# Return a distribution where each id is progressively less likely.
logits = ops.arange(self.vocab_size, 0, -1, dtype="float32")
logits = ops.repeat(logits[None, :], batch_size, axis=0)
hidden_states = ops.ones([batch_size, self.hidden_dim])
return logits, hidden_states, cache
prompt = ops.full((self.batch_size, self.length), self.char_lookup["z"])
output = self.sampler(
next=next,
prompt=prompt,
index=1,
hidden_states=self.hidden_states,
)
output_ids = set(ops.convert_to_numpy(output[0, 1:]))
self.assertContainsSubset(output_ids, range(5))
def test_alpha_penalty(self):
def next(prompt, cache, index):
batch_size = ops.shape(prompt)[0]
best_token_id = self.char_lookup["h"]
logits = ops.ones([batch_size, self.vocab_size])
# Favoring `best_token_id` in the logits.
logits += (
ops.one_hot(
ops.zeros(self.batch_size, dtype="int32") + best_token_id,
self.vocab_size,
)
* 1e9
)
            # Set the hidden states for `best_token_id` to [1, 1, ..., 1], so
            # it gets the max similarity penalty score.
mask_of_best_token = prompt[:, index - 1] == best_token_id
random_states = ops.convert_to_tensor(
np.random.uniform(size=[batch_size, self.hidden_dim]),
dtype="float32",
) * (1 - ops.cast(mask_of_best_token, dtype="float32")[:, None])
hidden_states = (
ops.ones([batch_size, self.hidden_dim])
* ops.cast(mask_of_best_token, dtype="float32")[:, None]
)
hidden_states = hidden_states + random_states
return logits, hidden_states, cache
prompt = ops.full((1, self.length), self.char_lookup["z"])
hidden_states = ops.ones([1, self.length, self.hidden_dim]) + 1e-5
output = self.sampler(
next=next,
prompt=prompt,
index=5,
hidden_states=hidden_states,
)
self.assertEqual(self.join_as_string(output), ["zzzzzhhhhhhh"])
sampler = ContrastiveSampler(k=5, alpha=1.0)
output = sampler(
next=next,
prompt=prompt,
index=5,
hidden_states=hidden_states,
)
self.assertTrue("h" not in self.join_as_string(output))
@parameterized.named_parameters(
("jit_compile_false", False), ("jit_compile_true", True)
)
@pytest.mark.tf_only
def test_compilation(self, jit_compile):
cache_chars = list("sequentiallyy")
cache = ops.array([[self.char_lookup[c] for c in cache_chars]])
prompt = ops.full((self.batch_size, self.length), self.char_lookup["s"])
@tf.function(jit_compile=jit_compile)
def generate(prompt, cache):
return self.sampler(
self.next,
prompt=prompt,
cache=cache,
index=1,
hidden_states=self.hidden_states,
)
output = generate(prompt, cache)
self.assertEqual(self.join_as_string(output), ["sequentially"])
| keras-nlp/keras_nlp/samplers/contrastive_sampler_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/samplers/contrastive_sampler_test.py",
"repo_id": "keras-nlp",
"token_count": 3536
} | 120 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import doctest
import re
import textwrap
from typing import List
try:
import astor
except:
astor = None
class FencedCellParser(doctest.DocTestParser):
"""Implements test parsing for ``` fenced cells.
https://docs.python.org/3/library/doctest.html#doctestparser-objects
The `get_examples` method receives a string and returns an
iterable of `doctest.Example` objects.
Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docs/fenced_doctest_lib.py.
"""
patched = False
def __init__(self, fence_label="python"):
super().__init__()
if not self.patched:
# The default doctest compiles in "single" mode. The fenced block may
# contain multiple statements. The `_patch_compile` function fixes the
# compile mode.
doctest.compile = _patch_compile
print(
textwrap.dedent(
"""
*********************************************************************
                    * Caution: `fenced_doctest` patches `doctest.compile`; don't use
                    * this in the same binary as any other doctests.
*********************************************************************
"""
)
)
type(self).patched = True
# Match anything, except if the look-behind sees a closing fence.
no_fence = "(.(?<!```))*?"
self.fence_cell_re = re.compile(
rf"""
^( # After a newline
\s*```\s*({fence_label})\n # Open a labeled ``` fence
(?P<doctest>{no_fence}) # Match anything except a closing fence
\n\s*```\s*(\n|$) # Close the fence.
)
( # Optional!
[\s\n]* # Any number of blank lines.
```\s*\n # Open ```
(?P<output>{no_fence}) # Anything except a closing fence
\n\s*``` # Close the fence.
)?
""",
# Multiline so ^ matches after a newline
re.MULTILINE |
# Dotall so `.` matches newlines.
re.DOTALL |
# Verbose to allow comments/ignore-whitespace.
re.VERBOSE,
)
def get_examples(
self, string: str, name: str = "<string>"
) -> List[doctest.Example]:
tests = []
for match in self.fence_cell_re.finditer(string):
if re.search("doctest.*skip", match.group(0), re.IGNORECASE):
continue
# Do not test any docstring with our format string markers.
# These will not run until formatted.
if re.search("{{", match.group(0)):
continue
groups = match.groupdict()
source = textwrap.dedent(groups["doctest"])
want = groups["output"]
if want is not None:
want = textwrap.dedent(want)
tests.append(
doctest.Example(
lineno=string[: match.start()].count("\n") + 1,
source=source,
want=want,
)
)
return tests
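# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The markdown string below is made up for demonstration. ---
def _example_parse_fenced_cells():
    """Extract the fenced `python` cells from markdown as `doctest.Example`s."""
    markdown = textwrap.dedent(
        """
        ```python
        print(1 + 1)
        ```
        ```
        2
        ```
        """
    )
    parser = FencedCellParser(fence_label="python")
    examples = parser.get_examples(markdown, name="example")
    # Each `doctest.Example` carries the cell source and its expected output,
    # and can be executed with a `doctest.DocTestRunner`.
    return [(example.source, example.want) for example in examples]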
def _print_if_not_none(obj):
"""Print like a notebook: Show the repr if the object is not None.
    `_patch_compile` uses this on the final expression in each cell.
This way the outputs feel like notebooks.
Args:
obj: the object to print.
"""
if obj is not None:
print(repr(obj))
def _patch_compile(
source, filename, mode, flags=0, dont_inherit=False, optimize=-1
):
"""Patch `doctest.compile` to make doctest to behave like a notebook.
Default settings for doctest are configured to run like a repl: one statement
    at a time. The doctest source uses `compile(..., mode="single")`.
    So to let doctest act like a notebook:
1. We need `mode="exec"` (easy)
2. We need the last expression to be printed (harder).
To print the last expression, just wrap the last expression in
`_print_if_not_none(expr)`. To detect the last expression use `AST`.
If the last node is an expression modify the ast to call
`_print_if_not_none` on it, convert the ast back to source and compile that.
https://docs.python.org/3/library/functions.html#compile
Args:
source: Can either be a normal string, a byte string, or an AST object.
filename: Argument should give the file from which the code was read; pass
some recognizable value if it wasn’t read from a file ('<string>' is
commonly used).
mode: [Ignored] always use exec.
flags: Compiler options.
dont_inherit: Compiler options.
optimize: Compiler options.
Returns:
The resulting code object.
"""
    # Doctest passes some dummy string as the file name, AFAICT,
    # but tf.function freaks out if this doesn't look like a
    # Python file name.
del filename
# Doctest always passes "single" here, you need exec for multiple lines.
del mode
source_ast = ast.parse(source)
final = source_ast.body[-1]
if isinstance(final, ast.Expr):
# Wrap the final expression as `_print_if_not_none(expr)`
print_it = ast.Expr(
lineno=-1,
col_offset=-1,
value=ast.Call(
func=ast.Name(
id="_print_if_not_none",
ctx=ast.Load(),
lineno=-1,
col_offset=-1,
),
lineno=-1,
col_offset=-1,
args=[final], # wrap the final Expression
keywords=[],
),
)
source_ast.body[-1] = print_it
# It's not clear why this step is necessary. `compile` is supposed to handle
# AST directly.
source = astor.to_source(source_ast)
return compile(
source,
filename="dummy.py",
mode="exec",
flags=flags,
dont_inherit=dont_inherit,
optimize=optimize,
)
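# --- Illustrative usage sketch (added for clarity; not part of the original
# module). Like `_patch_compile` itself, it requires the optional `astor`
# dependency. ---
def _example_patched_compile():
    """Show the notebook-like behavior: the final expression gets printed."""
    code = _patch_compile(
        source="x = 21\nx * 2",
        filename="<unused>",
        mode="single",  # Ignored; the code is always compiled in "exec" mode.
    )
    # The final expression is rewritten to `_print_if_not_none(x * 2)`,
    # so executing the cell prints `42`.
    exec(code, {"_print_if_not_none": _print_if_not_none})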
| keras-nlp/keras_nlp/tests/doc_tests/fenced_docstring_lib.py/0 | {
"file_path": "keras-nlp/keras_nlp/tests/doc_tests/fenced_docstring_lib.py",
"repo_id": "keras-nlp",
"token_count": 3007
} | 121 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.tests.test_case import TestCase
from keras_nlp.tokenizers.byte_tokenizer import ByteTokenizer
class ByteTokenizerTest(TestCase):
def test_tokenize(self):
input_data = ["hello", "fun", "▀▁▂▃"]
tokenizer = ByteTokenizer()
call_output = tokenizer(input_data)
tokenize_output = tokenizer.tokenize(input_data)
exp_outputs = [
[104, 101, 108, 108, 111],
[102, 117, 110],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226, 150, 131],
]
self.assertAllEqual(call_output, exp_outputs)
self.assertAllEqual(tokenize_output, exp_outputs)
def test_tokenize_scalar(self):
input_data = "hello"
tokenizer = ByteTokenizer()
call_output = tokenizer(input_data)
tokenize_output = tokenizer.tokenize(input_data)
self.assertAllEqual(call_output, [104, 101, 108, 108, 111])
self.assertAllEqual(tokenize_output, [104, 101, 108, 108, 111])
def test_dense_output(self):
input_data = ["hello", "fun", "▀▁▂▃"]
tokenizer = ByteTokenizer(sequence_length=10)
call_output = tokenizer(input_data)
self.assertAllEqual(
call_output,
[
[104, 101, 108, 108, 111, 0, 0, 0, 0, 0],
[102, 117, 110, 0, 0, 0, 0, 0, 0, 0],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226],
],
)
def test_detokenize(self):
input_data = [
[104, 101, 108, 108, 111],
[102, 117, 110],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226, 150, 131],
]
tokenizer = ByteTokenizer()
detokenize_output = tokenizer.detokenize(input_data)
self.assertAllEqual(detokenize_output, ["hello", "fun", "▀▁▂▃"])
def test_detokenize_replace_error(self):
# 226 is an invalid UTF-8 byte.
input_data = [[104, 101, 226, 150, 108, 108, 111]]
tokenizer = ByteTokenizer(errors="replace", replacement_char=341)
detokenize_output = tokenizer.detokenize(input_data)
self.assertAllEqual(detokenize_output, [b"he\xc5\x95llo"])
def test_detokenize_ignore_error(self):
input_data = [[104, 101, 226, 150, 108, 108, 111]]
tokenizer = ByteTokenizer(errors="ignore")
detokenize_output = tokenizer.detokenize(input_data)
self.assertAllEqual(detokenize_output, [b"hello"])
def test_detokenize_strict_error(self):
input_data = [[104, 101, 226, 150, 108, 108, 111]]
tokenizer = ByteTokenizer(errors="strict")
with self.assertRaises(tf.errors.InvalidArgumentError):
_ = tokenizer.detokenize(input_data)
def test_vocab_size(self):
tokenizer = ByteTokenizer()
self.assertEqual(tokenizer.vocabulary_size(), 256)
def test_lowercase(self):
input_data = ["HeLlO wOrLd"]
tokenizer = ByteTokenizer()
call_output = tokenizer(input_data)
self.assertAllEqual(
call_output,
[[104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]],
)
def test_skip_lowercase(self):
input_data = ["HeLlO wOrLd"]
tokenizer = ByteTokenizer(lowercase=False)
call_output = tokenizer(input_data)
self.assertAllEqual(
call_output, [[72, 101, 76, 108, 79, 32, 119, 79, 114, 76, 100]]
)
def test_tokenize_first_batch_second(self):
tokenizer = ByteTokenizer()
ds = tf.data.Dataset.from_tensor_slices(
["hello", "fun", "▀▁▂▃", "haha"]
)
ds = ds.map(tokenizer)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(4))
output = ds.take(1).get_single_element()
exp_output = [
[104, 101, 108, 108, 111],
[102, 117, 110],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226, 150, 131],
[104, 97, 104, 97],
]
self.assertAllEqual(output, exp_output)
def test_tokenize_first_batch_second_with_sequence_length(self):
tokenizer = ByteTokenizer(sequence_length=10)
ds = tf.data.Dataset.from_tensor_slices(
["hello", "fun", "▀▁▂▃", "haha"]
)
ds = ds.map(tokenizer)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(4))
output = ds.take(1).get_single_element()
exp_output = [
[104, 101, 108, 108, 111, 0, 0, 0, 0, 0],
[102, 117, 110, 0, 0, 0, 0, 0, 0, 0],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226],
[104, 97, 104, 97, 0, 0, 0, 0, 0, 0],
]
self.assertAllEqual(output, exp_output)
def test_batch_first_tokenize_second(self):
tokenizer = ByteTokenizer()
ds = tf.data.Dataset.from_tensor_slices(
["hello", "fun", "▀▁▂▃", "haha"]
)
ds = ds.batch(4).map(tokenizer)
output = ds.take(1).get_single_element()
exp_output = [
[104, 101, 108, 108, 111],
[102, 117, 110],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226, 150, 131],
[104, 97, 104, 97],
]
self.assertAllEqual(output, exp_output)
def test_batch_first_tokenize_second_with_sequence_length(self):
tokenizer = ByteTokenizer(sequence_length=10)
ds = tf.data.Dataset.from_tensor_slices(
["hello", "fun", "▀▁▂▃", "haha"]
)
ds = ds.batch(4).map(tokenizer)
output = ds.take(1).get_single_element()
exp_output = [
[104, 101, 108, 108, 111, 0, 0, 0, 0, 0],
[102, 117, 110, 0, 0, 0, 0, 0, 0, 0],
[226, 150, 128, 226, 150, 129, 226, 150, 130, 226],
[104, 97, 104, 97, 0, 0, 0, 0, 0, 0],
]
self.assertAllEqual(output, exp_output)
def test_load_model_with_config(self):
input_data = ["hello"]
original_tokenizer = ByteTokenizer(
lowercase=False,
sequence_length=8,
normalization_form="NFC",
errors="ignore",
)
cloned_tokenizer = ByteTokenizer.from_config(
original_tokenizer.get_config()
)
self.assertAllEqual(
original_tokenizer(input_data),
cloned_tokenizer(input_data),
)
decoded_input = [[104, 101, 226, 150, 108, 108, 111]]
self.assertAllEqual(
original_tokenizer.detokenize(decoded_input),
cloned_tokenizer.detokenize(decoded_input),
)
def test_config(self):
tokenizer = ByteTokenizer(
name="byte_tokenizer_config_test",
lowercase=False,
sequence_length=8,
normalization_form="NFC",
errors="ignore",
replacement_char=0,
)
exp_config = {
"dtype": "int32",
"errors": "ignore",
"lowercase": False,
"name": "byte_tokenizer_config_test",
"normalization_form": "NFC",
"replacement_char": 0,
"sequence_length": 8,
"trainable": True,
}
self.assertEqual(tokenizer.get_config(), exp_config)
| keras-nlp/keras_nlp/tokenizers/byte_tokenizer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/byte_tokenizer_test.py",
"repo_id": "keras-nlp",
"token_count": 3780
} | 122 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import math
import tensorflow as tf
import tree
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.tensor_utils import is_tensor_type
def _convert_inputs_to_dataset(
x=None,
y=None,
sample_weight=None,
batch_size=None,
):
"""Convert inputs to a `tf.data.Dataset`.
    This is a stand-in for the `TensorLikeDataAdapter` in core Keras.
"""
if isinstance(x, tf.data.Dataset):
if y is not None:
raise ValueError(
"When `x` is a `tf.data.Dataset`, please do not provide "
f"`y`. Received: `type(y)={type(y)}`."
)
if sample_weight is not None:
raise ValueError(
"When `x` is a `tf.data.Dataset`, please do not provide "
"`sample_weight`. Received: "
f"`type(sample_weight)={type(sample_weight)}`."
)
if batch_size is not None:
raise ValueError(
"When `x` is a `tf.data.Dataset`, please do not provide "
"`batch_size`. Received: "
f"`type(batch_size)={type(batch_size)}`."
)
return x
inputs = pack_x_y_sample_weight(x, y, sample_weight)
try:
def convert(x):
if isinstance(x, (tf.Tensor, tf.RaggedTensor)):
return x
if hasattr(x, "__array__"):
return ops.convert_to_numpy(x)
return x
inputs = tree.map_structure(convert, inputs)
ds = tf.data.Dataset.from_tensor_slices(inputs)
except ValueError as e:
        # If our inputs are unbatched, re-raise with a friendlier error
        # message than the default from tf.data. We expect this to come up
        # with some frequency, so it's important to have a good signpost here.
if "only supported for rank >= 1" in str(e):
raise ValueError(
"`x`, `y`, and `sample_weight` must have a batch dimension "
"when calling `fit()`, `evaluate()`, and `predict()`. Received "
"an input with rank 0. Please add an outer dimension to your "
"input, e.g., wrap it in a list."
) from e
raise e
return ds.batch(batch_size or 32)
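# --- Illustrative usage sketch (added for clarity; not part of the original
# module). ---
def _example_convert_inputs_to_dataset():
    """Convert NumPy arrays into a batched `tf.data.Dataset` of (x, y)."""
    import numpy as np

    x = np.random.uniform(size=(100, 8)).astype("float32")
    y = np.random.randint(0, 2, size=(100,))
    # Yields 7 batches: 6 full batches of 16 and a final partial batch of 4.
    return _convert_inputs_to_dataset(x=x, y=y, batch_size=16)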
def _train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
This is copied directly from core Keras.
"""
def _can_split(t):
return is_tensor_type(t) or t is None
flat_arrays = tree.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable)
)
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1.0 - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not "
"sufficient to split it into a validation and training set as "
"specified by `validation_split={validation_split}`. Either "
"provide more data, or a different value for the "
"`validation_split` argument.".format(
batch_dim=batch_dim, validation_split=validation_split
)
)
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = tree.map_structure(
functools.partial(_split, start=0, end=split_at), arrays
)
val_arrays = tree.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays
)
return train_arrays, val_arrays
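# --- Illustrative usage sketch (added for clarity; not part of the original
# module). ---
def _example_train_validation_split():
    """Split the last 20% of a batch of arrays off as validation data."""
    import numpy as np

    x = np.arange(10).reshape((10, 1))
    y = np.arange(10)
    (x_train, y_train), (x_val, y_val) = _train_validation_split(
        (x, y), validation_split=0.2
    )
    # x_train/y_train hold the first 8 samples, x_val/y_val the last 2.
    return x_train.shape, x_val.shape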
@keras.saving.register_keras_serializable(package="keras_nlp")
class PipelineModel(keras.Model):
"""A model which allows automatically applying preprocessing."""
def __init__(self, *args, **kwargs):
# Workaround for https://github.com/keras-team/keras/issues/17270
        # Reset any attempt to overwrite this class's base class so this class
        # can continue to be used for functional and non-functional models.
PipelineModel.__bases__ = (keras.Model,)
super().__init__(*args, **kwargs)
def preprocess_samples(self, x, y=None, sample_weight=None):
"""An overridable function which preprocesses entire samples."""
return pack_x_y_sample_weight(x, y, sample_weight)
# ========================================================================
# Below are overrides to keras.Model methods to apply the functions above.
# ========================================================================
def fit(
self,
x=None,
y=None,
batch_size=None,
sample_weight=None,
validation_data=None,
validation_split=None,
**kwargs,
):
if validation_split and validation_data is None:
(x, y, sample_weight), validation_data = _train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
x = _convert_inputs_to_dataset(x, y, sample_weight, batch_size)
x = x.map(
self.preprocess_samples, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
if validation_data is not None:
if not isinstance(validation_data, tf.data.Dataset):
(vx, vy, vsw) = keras.utils.unpack_x_y_sample_weight(
validation_data
)
validation_data = _convert_inputs_to_dataset(
vx, vy, vsw, batch_size
)
return super().fit(
x=x,
y=None,
batch_size=None,
sample_weight=None,
validation_data=validation_data,
**kwargs,
)
def evaluate(
self,
x=None,
y=None,
batch_size=None,
sample_weight=None,
**kwargs,
):
# During `fit()`, `keras.Model` attempts to cache the validation
# dataset and ignores the values for `x`, `y`, and `sample_weight`.
# We don't want that behavior here, as the validation dataset still
# needs preprocessing.
kwargs.pop("_use_cached_eval_dataset", None)
x = _convert_inputs_to_dataset(x, y, sample_weight, batch_size)
x = x.map(
self.preprocess_samples, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
return super().evaluate(
x=x,
y=None,
batch_size=None,
**kwargs,
)
def predict(
self,
x=None,
batch_size=None,
**kwargs,
):
x = _convert_inputs_to_dataset(x, None, None, batch_size)
x = x.map(
self.preprocess_samples, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
return super().predict(
x=x,
batch_size=None,
**kwargs,
)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
**kwargs,
):
data = self.preprocess_samples(x, y, sample_weight)
x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
x = ops.convert_to_tensor(x)
if y is not None:
y = ops.convert_to_tensor(y)
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight)
return super().train_on_batch(
x=x,
y=y,
sample_weight=sample_weight,
**kwargs,
)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
**kwargs,
):
data = self.preprocess_samples(x, y, sample_weight)
x, y, sample_weight = keras.utils.unpack_x_y_sample_weight(data)
x = ops.convert_to_tensor(x)
if y is not None:
y = ops.convert_to_tensor(y)
if sample_weight is not None:
sample_weight = ops.convert_to_tensor(sample_weight)
return super().test_on_batch(
x=x,
y=y,
sample_weight=sample_weight,
**kwargs,
)
def predict_on_batch(
self,
x,
**kwargs,
):
data = self.preprocess_samples(x)
x, _, _ = keras.utils.unpack_x_y_sample_weight(data)
x = ops.convert_to_tensor(x)
return super().predict_on_batch(
x=x,
**kwargs,
)
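# --- Illustrative usage sketch (added for clarity; not part of the original
# module): a toy functional subclass whose preprocessing runs inside the
# tf.data pipeline built by `fit()`/`evaluate()`/`predict()`. ---
class _ExamplePipelineModel(PipelineModel):
    def __init__(self, **kwargs):
        inputs = keras.Input(shape=(4,))
        outputs = keras.layers.Dense(1)(inputs)
        super().__init__(inputs=inputs, outputs=outputs, **kwargs)

    def preprocess_samples(self, x, y=None, sample_weight=None):
        # Rescale raw features before they reach the model.
        x = tf.cast(x, "float32") / 255.0
        return pack_x_y_sample_weight(x, y, sample_weight)
# Example calls (illustrative):
# model = _ExamplePipelineModel()
# model.compile(loss="mse", optimizer="sgd")
# model.fit(raw_features, targets, batch_size=32)  # preprocessing applied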
| keras-nlp/keras_nlp/utils/pipeline_model.py/0 | {
"file_path": "keras-nlp/keras_nlp/utils/pipeline_model.py",
"repo_id": "keras-nlp",
"token_count": 4478
} | 123 |
[tool:pytest]
filterwarnings =
error
ignore::DeprecationWarning
ignore::ImportWarning
ignore::RuntimeWarning
ignore::PendingDeprecationWarning
ignore::FutureWarning
ignore::UserWarning
# Ignore a spurious warning on tf-nightly related to save model changes.
ignore:Custom mask layers require a config
addopts=-vv
# Do not run tests in the `build` folders
norecursedirs = build
[coverage:report]
exclude_lines =
pragma: no cover
@abstract
raise NotImplementedError
omit = *_test.py
[flake8]
ignore =
# Conflicts with black
E203
# defaults flake8 ignores
E121,E123,E126,E226,E24,E704,W503,W504
# Function name should be lowercase
N802
# lowercase ... imported as non lowercase
# Useful to ignore for "import keras.backend as K"
N812
# do not use bare 'except'
E722
exclude =
*_pb2.py
*_pb2_grpc.py
#imported but unused in __init__.py, that's ok.
per-file-ignores = **/__init__.py:F401
max-line-length = 200
| keras-nlp/setup.cfg/0 | {
"file_path": "keras-nlp/setup.cfg",
"repo_id": "keras-nlp",
"token_count": 390
} | 124 |
"""Utilities for real-time data augmentation on image data.
"""
import os
import threading
import numpy as np
from keras_preprocessing import get_keras_submodule
try:
IteratorType = get_keras_submodule('utils').Sequence
except ImportError:
IteratorType = object
from .utils import array_to_img, img_to_array, load_img
class Iterator(IteratorType):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
white_list_formats = ('png', 'jpg', 'jpeg', 'bmp', 'ppm', 'tif', 'tiff')
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:
self.batch_size * (idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
if self.n == 0:
# Avoiding modulo by zero error
current_index = 0
else:
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:
current_index + self.batch_size]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def next(self):
"""For python 2.x.
# Returns
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
raise NotImplementedError
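# --- Illustrative usage sketch (added for clarity; not part of the original
# module): a minimal `Iterator` subclass over an in-memory NumPy array. ---
class _ExampleArrayIterator(Iterator):
    def __init__(self, x, batch_size=32, shuffle=False, seed=None):
        self.x = np.asarray(x)
        super(_ExampleArrayIterator, self).__init__(
            len(self.x), batch_size, shuffle, seed)

    def _get_batches_of_transformed_samples(self, index_array):
        # No augmentation here; simply gather the selected samples.
        return self.x[index_array]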
class BatchFromFilesMixin():
"""Adds methods related to getting batches from filenames
It includes the logic to transform image files to batches.
"""
def set_processing_attrs(self,
image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation,
keep_aspect_ratio):
"""Sets attributes to use later for processing files into a batch.
# Arguments
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
"""
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
self.keep_aspect_ratio = keep_aspect_ratio
if color_mode not in {'rgb', 'rgba', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb", "rgba", or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgba':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (4,)
else:
self.image_shape = (4,) + self.target_size
elif self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
self.interpolation = interpolation
if subset is not None:
validation_split = self.image_data_generator._validation_split
if subset == 'validation':
split = (0, validation_split)
elif subset == 'training':
split = (validation_split, 1)
else:
raise ValueError(
'Invalid subset name: %s;'
'expected "training" or "validation"' % (subset,))
else:
split = None
self.split = split
self.subset = subset
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
"""
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype)
# build batch of image data
        # self.filepaths is dynamic; it is better to call it once outside the loop
filepaths = self.filepaths
for i, j in enumerate(index_array):
img = load_img(filepaths[j],
color_mode=self.color_mode,
target_size=self.target_size,
interpolation=self.interpolation,
keep_aspect_ratio=self.keep_aspect_ratio)
x = img_to_array(img, data_format=self.data_format)
# Pillow images should be closed after `load_img`,
# but not PIL images.
if hasattr(img, 'close'):
img.close()
if self.image_data_generator:
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(x, params)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode in {'binary', 'sparse'}:
batch_y = np.empty(len(batch_x), dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i] = self.classes[n_observation]
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), len(self.class_indices)),
dtype=self.dtype)
for i, n_observation in enumerate(index_array):
batch_y[i, self.classes[n_observation]] = 1.
elif self.class_mode == 'multi_output':
batch_y = [output[index_array] for output in self.labels]
elif self.class_mode == 'raw':
batch_y = self.labels[index_array]
else:
return batch_x
if self.sample_weight is None:
return batch_x, batch_y
else:
return batch_x, batch_y, self.sample_weight[index_array]
@property
def filepaths(self):
"""List of absolute paths to image files"""
raise NotImplementedError(
'`filepaths` property method has not been implemented in {}.'
.format(type(self).__name__)
)
@property
def labels(self):
"""Class labels of every observation"""
raise NotImplementedError(
'`labels` property method has not been implemented in {}.'
.format(type(self).__name__)
)
@property
def sample_weight(self):
raise NotImplementedError(
'`sample_weight` property method has not been implemented in {}.'
.format(type(self).__name__)
)
| keras-preprocessing/keras_preprocessing/image/iterator.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/iterator.py",
"repo_id": "keras-preprocessing",
"token_count": 5634
} | 125 |
import pytest
import keras_preprocessing
def test_api_modules():
expected_exposed_modules = [
'image',
'sequence',
'text'
]
for _module in expected_exposed_modules:
assert hasattr(keras_preprocessing, _module)
def test_get_keras_submodule(monkeypatch):
monkeypatch.setattr(keras_preprocessing, '_KERAS_BACKEND', 'backend')
assert 'backend' == keras_preprocessing.get_keras_submodule('backend')
monkeypatch.setattr(keras_preprocessing, '_KERAS_UTILS', 'utils')
assert 'utils' == keras_preprocessing.get_keras_submodule('utils')
def test_get_keras_submodule_errors(monkeypatch):
with pytest.raises(ImportError):
keras_preprocessing.get_keras_submodule('something')
monkeypatch.setattr(keras_preprocessing, '_KERAS_BACKEND', None)
with pytest.raises(ImportError):
keras_preprocessing.get_keras_submodule('backend')
with pytest.raises(ImportError):
keras_preprocessing.get_keras_submodule('utils')
| keras-preprocessing/tests/test_api.py/0 | {
"file_path": "keras-preprocessing/tests/test_api.py",
"repo_id": "keras-preprocessing",
"token_count": 389
} | 126 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_tuner/oracles/'" />
| keras-tuner/docs/site/documentation/oracles/index.html/0 | {
"file_path": "keras-tuner/docs/site/documentation/oracles/index.html",
"repo_id": "keras-tuner",
"token_count": 37
} | 127 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HyperXception Model."""
import numpy as np
import pytest
from keras_tuner.applications import xception
from keras_tuner.backend import config
from keras_tuner.backend import keras
from keras_tuner.engine import hyperparameters as hp_module
@pytest.mark.parametrize("pooling", ["flatten", "avg", "max"])
@pytest.mark.skipif(
config.multi_backend(),
reason="The test is too slow.",
)
def test_model_construction(pooling):
hp = hp_module.HyperParameters()
hp.Choice("pooling", [pooling])
hypermodel = xception.HyperXception(input_shape=(128, 128, 3), classes=10)
model = hypermodel.build(hp)
assert hp.values["pooling"] == pooling
assert model.layers
assert model.name == "Xception"
assert model.output_shape == (None, 10)
model.train_on_batch(np.ones((1, 128, 128, 3)), np.ones((1, 10)))
out = model.predict(np.ones((1, 128, 128, 3)))
assert out.shape == (1, 10)
def test_hyperparameter_existence_and_defaults():
hp = hp_module.HyperParameters()
hypermodel = xception.HyperXception(input_shape=(256, 256, 3), classes=10)
hypermodel.build(hp)
assert hp.values == {
"activation": "relu",
"conv2d_num_filters": 64,
"kernel_size": 3,
"initial_strides": 2,
"sep_num_filters": 256,
"num_residual_blocks": 4,
"pooling": "avg",
"num_dense_layers": 1,
"dropout_rate": 0.5,
"dense_use_bn": True,
"learning_rate": 1e-3,
}
def test_include_top_false():
hp = hp_module.HyperParameters()
hypermodel = xception.HyperXception(
input_shape=(256, 256, 3), classes=10, include_top=False
)
model = hypermodel.build(hp)
assert not hasattr(model, "optimizer") or not model.optimizer
def test_hyperparameter_override():
hp = hp_module.HyperParameters()
hp.Choice("pooling", ["flatten"])
hp.Choice("num_dense_layers", [2])
hypermodel = xception.HyperXception(input_shape=(256, 256, 3), classes=10)
hypermodel.build(hp)
assert hp.get("pooling") == "flatten"
assert hp.get("num_dense_layers") == 2
def test_input_tensor():
hp = hp_module.HyperParameters()
inputs = keras.Input((256, 256, 3))
hypermodel = xception.HyperXception(input_tensor=inputs, include_top=False)
model = hypermodel.build(hp)
assert model.inputs == [inputs]
def test_activation_selu():
hp = hp_module.HyperParameters()
hp.values["activation"] = "selu"
hypermodel = xception.HyperXception(input_shape=(256, 256, 3), classes=10)
hypermodel.build(hp)
def test_no_classes_raise_error():
with pytest.raises(ValueError, match="classes"):
xception.HyperXception(input_shape=(256, 256, 3))
def test_no_input_shape_tensor_raise_error():
with pytest.raises(ValueError, match="input_tensor"):
xception.HyperXception(classes=10)
| keras-tuner/keras_tuner/applications/xception_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/applications/xception_test.py",
"repo_id": "keras-tuner",
"token_count": 1300
} | 128 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_tuner import protos
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import conditions as conditions_mod
from keras_tuner.engine.hyperparameters import hp_utils
from keras_tuner.engine.hyperparameters.hp_types import numerical
def _check_int(val, arg):
int_val = int(val)
if int_val != val:
raise ValueError(
f"{arg} must be an int, Received: {str(val)} of type {type(val)}."
)
return int_val
@keras_tuner_export("keras_tuner.engine.hyperparameters.Int")
class Int(numerical.Numerical):
"""Integer hyperparameter.
Note that unlike Python's `range` function, `max_value` is *included* in
the possible values this parameter can take on.
Example #1:
```py
hp.Int(
"n_layers",
min_value=6,
max_value=12)
```
The possible values are [6, 7, 8, 9, 10, 11, 12].
Example #2:
```py
hp.Int(
"n_layers",
min_value=6,
max_value=13,
step=3)
```
`step` is the minimum distance between samples.
The possible values are [6, 9, 12].
Example #3:
```py
hp.Int(
"batch_size",
min_value=2,
max_value=32,
step=2,
sampling="log")
```
When `sampling="log"` the `step` is multiplied between samples.
The possible values are [2, 4, 8, 16, 32].
Args:
name: A string. the name of parameter. Must be unique for each
`HyperParameter` instance in the search space.
min_value: Integer, the lower limit of range, inclusive.
max_value: Integer, the upper limit of range, inclusive.
        step: Optional integer, the distance between two consecutive samples in
            the range. If left unspecified, it is possible to sample any
            integer in the interval. If `sampling="linear"`, it will be the
            minimum additive step between two samples. If `sampling="log"`, it
            will be the minimum multiplier between two samples.
sampling: String. One of "linear", "log", "reverse_log". Defaults to
"linear". When sampling value, it always start from a value in range
[0.0, 1.0). The `sampling` argument decides how the value is
projected into the range of [min_value, max_value].
"linear": min_value + value * (max_value - min_value)
"log": min_value * (max_value / min_value) ^ value
"reverse_log":
max_value - min_value * ((max_value/min_value)^(1-value) - 1)
default: Integer, default value to return for the parameter. If
unspecified, the default value will be `min_value`.
"""
def __init__(
self,
name,
min_value,
max_value,
step=None,
sampling="linear",
default=None,
**kwargs,
):
if step is not None:
step = _check_int(step, arg="step")
elif sampling == "linear":
step = 1
super().__init__(
name=name,
min_value=_check_int(min_value, arg="min_value"),
max_value=_check_int(max_value, arg="max_value"),
step=step,
sampling=sampling,
default=default,
**kwargs,
)
def __repr__(self):
return (
f"Int(name: '{self.name}', min_value: {self.min_value}, "
f"max_value: {self.max_value}, step: {self.step}, "
f"sampling: {self.sampling}, default: {self.default})"
)
def prob_to_value(self, prob):
if self.step is None:
# prob is in range [0.0, 1.0), use max_value + 1 so that
# max_value may be sampled.
return int(self._sample_numerical_value(prob, self.max_value + 1))
return int(self._sample_with_step(prob))
def value_to_prob(self, value):
if self.step is None:
return self._numerical_to_prob(
# + 0.5 to center the prob
value + 0.5,
# + 1 to include the max_value
self.max_value + 1,
)
return self._to_prob_with_step(value)
@property
def default(self):
return self._default if self._default is not None else self.min_value
def get_config(self):
config = super().get_config()
config["min_value"] = self.min_value
config["max_value"] = self.max_value
config["step"] = self.step
config["sampling"] = self.sampling
config["default"] = self._default
return config
@classmethod
def from_proto(cls, proto):
conditions = [
conditions_mod.Condition.from_proto(c) for c in proto.conditions
]
return cls(
name=proto.name,
min_value=proto.min_value,
max_value=proto.max_value,
step=proto.step or None,
sampling=hp_utils.sampling_from_proto(proto.sampling),
default=proto.default,
conditions=conditions,
)
def to_proto(self):
return protos.get_proto().Int(
name=self.name,
min_value=self.min_value,
max_value=self.max_value,
step=self.step if self.step is not None else 0,
sampling=hp_utils.sampling_to_proto(self.sampling),
default=self.default,
conditions=[c.to_proto() for c in self.conditions],
)
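# --- Illustrative usage sketch (added for clarity; not part of the original
# module). ---
def _example_int_prob_mapping():
    """Map between the unit interval and the integer range of an `Int`."""
    hp = Int("batch_size", min_value=2, max_value=32, step=2, sampling="log")
    # With log sampling and step=2, the possible values are [2, 4, 8, 16, 32].
    values = [hp.prob_to_value(p) for p in (0.1, 0.3, 0.5, 0.7, 0.9)]
    probs = [hp.value_to_prob(v) for v in (2, 4, 8, 16, 32)]
    return values, probs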
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/int_hp.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/int_hp.py",
"repo_id": "keras-tuner",
"token_count": 2686
} | 129 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trial class."""
import hashlib
import random
import time
from keras_tuner import protos
from keras_tuner import utils
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import metrics_tracking
from keras_tuner.engine import stateful
@keras_tuner_export(["keras_tuner.engine.trial.TrialStatus"])
class TrialStatus:
# The Trial may start to run.
RUNNING = "RUNNING"
# The Trial is empty. The Oracle is waiting on something else before
# creating the trial. Should call Oracle.create_trial() again.
IDLE = "IDLE"
# The Trial has crashed or been deemed infeasible for the current run, but
# subject to retries.
INVALID = "INVALID"
# The Trial is empty. Oracle finished searching. No new trial needed. The
# tuner should also end the search.
STOPPED = "STOPPED"
# The Trial finished normally.
COMPLETED = "COMPLETED"
# The Trial is failed. No more retries needed.
FAILED = "FAILED"
@staticmethod
def to_proto(status):
ts = protos.get_proto().TrialStatus
if status is None:
return ts.UNKNOWN
elif status == TrialStatus.RUNNING:
return ts.RUNNING
elif status == TrialStatus.IDLE:
return ts.IDLE
elif status == TrialStatus.INVALID:
return ts.INVALID
elif status == TrialStatus.STOPPED:
return ts.STOPPED
elif status == TrialStatus.COMPLETED:
return ts.COMPLETED
elif status == TrialStatus.FAILED:
return ts.FAILED
else:
raise ValueError(f"Unknown status {status}")
@staticmethod
def from_proto(proto):
ts = protos.get_proto().TrialStatus
if proto == ts.UNKNOWN:
return None
elif proto == ts.RUNNING:
return TrialStatus.RUNNING
elif proto == ts.IDLE:
return TrialStatus.IDLE
elif proto == ts.INVALID:
return TrialStatus.INVALID
elif proto == ts.STOPPED:
return TrialStatus.STOPPED
elif proto == ts.COMPLETED:
return TrialStatus.COMPLETED
elif proto == ts.FAILED:
return TrialStatus.FAILED
else:
raise ValueError(f"Unknown status {proto}")
@keras_tuner_export(["keras_tuner.engine.trial.Trial"])
class Trial(stateful.Stateful):
"""The runs with the same set of hyperparameter values.
`Trial` objects are managed by the `Oracle`. A `Trial` object contains all
the information related to the executions with the same set of
hyperparameter values. A `Trial` may be executed multiple times for more
accurate results or for retrying when failed. The related information
includes hyperparameter values, the Trial ID, and the trial results.
Args:
hyperparameters: HyperParameters. It contains the hyperparameter values
for the trial.
trial_id: String. The unique identifier for a trial.
status: one of the TrialStatus attributes. It marks the current status
of the Trial.
message: String. The error message if the trial status is "INVALID".
"""
def __init__(
self,
hyperparameters,
trial_id=None,
status=TrialStatus.RUNNING,
message=None,
):
self.hyperparameters = hyperparameters
self.trial_id = generate_trial_id() if trial_id is None else trial_id
self.metrics = metrics_tracking.MetricsTracker()
self.score = None
self.best_step = 0
self.status = status
self.message = message
def summary(self):
"""Displays a summary of this Trial."""
print(f"Trial {self.trial_id} summary")
print("Hyperparameters:")
self.display_hyperparameters()
if self.score is not None:
print(f"Score: {self.score}")
if self.message is not None:
print(self.message)
def display_hyperparameters(self):
if self.hyperparameters.values:
for hp, value in self.hyperparameters.values.items():
print(f"{hp}:", value)
else:
print("default configuration")
def get_state(self):
return {
"trial_id": self.trial_id,
"hyperparameters": self.hyperparameters.get_config(),
"metrics": self.metrics.get_config(),
"score": self.score,
"best_step": self.best_step,
"status": self.status,
"message": self.message,
}
def set_state(self, state):
self.trial_id = state["trial_id"]
hp = hp_module.HyperParameters.from_config(state["hyperparameters"])
self.hyperparameters = hp
self.metrics = metrics_tracking.MetricsTracker.from_config(
state["metrics"]
)
self.score = state["score"]
self.best_step = state["best_step"]
self.status = state["status"]
self.message = state["message"]
@classmethod
def from_state(cls, state):
trial = cls(hyperparameters=None)
trial.set_state(state)
return trial
@classmethod
def load(cls, fname):
return cls.from_state(utils.load_json(fname))
def to_proto(self):
if self.score is not None:
score = protos.get_proto().Trial.Score(
value=self.score, step=self.best_step
)
else:
score = None
proto = protos.get_proto().Trial(
trial_id=self.trial_id,
hyperparameters=self.hyperparameters.to_proto(),
score=score,
status=TrialStatus.to_proto(self.status),
metrics=self.metrics.to_proto(),
)
return proto
@classmethod
def from_proto(cls, proto):
instance = cls(
hp_module.HyperParameters.from_proto(proto.hyperparameters),
trial_id=proto.trial_id,
status=TrialStatus.from_proto(proto.status),
)
if proto.HasField("score"):
instance.score = proto.score.value
instance.best_step = proto.score.step
instance.metrics = metrics_tracking.MetricsTracker.from_proto(
proto.metrics
)
return instance
def generate_trial_id():
s = str(time.time()) + str(random.randint(1, int(1e7)))
return hashlib.sha256(s.encode("utf-8")).hexdigest()[:32]
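# --- Illustrative usage sketch (added for clarity; not part of the original
# module). ---
def _example_trial_lifecycle():
    """Create a trial for a set of hyperparameter values and score it."""
    hps = hp_module.HyperParameters()
    hps.Int("units", min_value=32, max_value=512, step=32)
    trial = Trial(hyperparameters=hps)
    # ... train and evaluate a model with `hps.values` here ...
    trial.score = 0.42
    trial.status = TrialStatus.COMPLETED
    trial.summary()
    return trial.trial_id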
| keras-tuner/keras_tuner/engine/trial.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/trial.py",
"repo_id": "keras-tuner",
"token_count": 2979
} | 130 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from keras_tuner.backend import keras
from keras_tuner.engine import hypermodel
from keras_tuner.engine import trial as trial_module
from keras_tuner.tuners import gridsearch
def test_that_exhaustive_space_is_explored(tmp_path):
# Tests that it explores the whole search space given by the combination
    # of all hyperparameters of Choice type.
# Given
want_units_1 = [2, 4]
want_units_2 = [1, 3]
want_optimizers = ["adam", "sgd", "rmsprop"]
want_loss = "binary_crossentropy"
want_dropouts = [True, False]
class MyGridSearch(gridsearch.GridSearch):
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
hp.Choice("units_1", values=want_units_1)
hp.Boolean("dropout", default=want_dropouts[0])
hp.Choice("units_2", values=want_units_2)
hp.Choice("optmizer", values=want_optimizers),
hp.Fixed("loss", value=want_loss)
return random.random()
# When
tuner = MyGridSearch(directory=tmp_path)
tuner.search()
# Then
assert {hp.name for hp in tuner.oracle.get_space().space} == {
"units_1",
"optmizer",
"units_2",
"loss",
"dropout",
}
# 2 units_1, 3 optimizers, 2 units_2, 2 dropout and 1 loss
expected_hyperparameter_space = 24
assert len(tuner.oracle.trials) == expected_hyperparameter_space
trials = tuner.oracle.get_best_trials(
num_trials=expected_hyperparameter_space
)
explored_space = [trial.hyperparameters.values for trial in trials]
for want_unit_1 in want_units_1:
for want_unit_2 in want_units_2:
for want_optimizer in want_optimizers:
for want_dropout in want_dropouts:
assert {
"units_1": want_unit_1,
"units_2": want_unit_2,
"optmizer": want_optimizer,
"loss": want_loss,
"dropout": want_dropout,
} in explored_space
def test_int_and_float(tmp_path):
class MyGridSearch(gridsearch.GridSearch):
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
hp.Int("int", 1, 5)
hp.Float("float", 1, 2)
return random.random()
tuner = MyGridSearch(directory=tmp_path)
tuner.search()
    # int has 5 values; float is sampled at 10 values plus the 1 default value
# 5 * (10 + 1)
assert len(tuner.oracle.trials) == 55
def test_new_hp(tmp_path):
class MyGridSearch(gridsearch.GridSearch):
def run_trial(self, trial, *args, **kwargs):
hp = trial.hyperparameters
if hp.Boolean("bool"):
hp.Choice("choice1", [0, 1, 2])
else:
hp.Choice("choice2", [3, 4, 5])
return random.random()
tuner = MyGridSearch(directory=tmp_path)
tuner.search(verbose=0)
assert len(tuner.oracle.trials) == 3 + 3 * 3
def test_hp_in_fit(tmp_path):
class MyHyperModel(hypermodel.HyperModel):
def build(self, hp):
hp.Fixed("fixed", 3)
return keras.Sequential()
def fit(self, hp, model, *args, **kwargs):
hp.Choice("choice", [0, 1, 2])
return random.random()
tuner = gridsearch.GridSearch(hypermodel=MyHyperModel(), directory=tmp_path)
tuner.search(verbose=0)
assert len(tuner.oracle.trials) == 3
def test_conditional_scope(tmp_path):
class MyHyperModel(hypermodel.HyperModel):
def build(self, hp):
a = hp.Boolean("bool")
with hp.conditional_scope("bool", [True]):
if a:
hp.Choice("choice1", [1, 2])
with hp.conditional_scope("bool", [False]):
if not a:
hp.Choice("choice2", [3, 4])
return keras.Sequential()
def fit(self, hp, model, *args, **kwargs):
a = hp.Boolean("bool2")
with hp.conditional_scope("bool2", [True]):
if a:
hp.Choice("choice3", [1, 2])
with hp.conditional_scope("bool2", [False]):
if not a:
hp.Choice("choice4", [3, 4])
return random.random()
tuner = gridsearch.GridSearch(hypermodel=MyHyperModel(), directory=tmp_path)
tuner.search(verbose=0)
assert len(tuner.oracle.trials) == 4 * 4
def test_exhaust_trials_in_between_before_the_latter_finishes(tmp_path):
class MyHyperModel(hypermodel.HyperModel):
def build(self, hp):
hp.Boolean("bool")
return keras.Sequential()
def fit(self, hp, model, *args, **kwargs):
hp.Boolean("bool2")
return random.random()
tuner = gridsearch.GridSearch(hypermodel=MyHyperModel(), directory=tmp_path)
oracle = tuner.oracle
def run(trial):
hp = trial.hyperparameters
hm = MyHyperModel()
hm.fit(hp, hm.build(hp))
oracle.update_space(hp)
def end_trial(trial):
run(trial)
oracle.update_trial(
trial_id=trial.trial_id,
metrics={oracle.objective.name: random.random()},
)
trial.status = trial_module.TrialStatus.COMPLETED
oracle.end_trial(trial)
trial_1 = oracle.create_trial(tuner_id="1")
assert trial_1.status == trial_module.TrialStatus.RUNNING
trial_2 = oracle.create_trial(tuner_id="2")
assert trial_2.status == trial_module.TrialStatus.RUNNING
# Iterated bool1, bool2 not discovered yet. So idle.
trial_3 = oracle.create_trial(tuner_id="3")
assert trial_3.status == trial_module.TrialStatus.IDLE
end_trial(trial_1)
# Discovered bool2 in trial_1, so new value of bool2 for trial_3 after
# trial_1.
trial_3 = oracle.create_trial(tuner_id="3")
assert trial_3.status == trial_module.TrialStatus.RUNNING
# Exhausted all possible combinations whose order is between trial_1 and
# trial_2. So idle.
trial_4 = oracle.create_trial(tuner_id="4")
assert trial_4.status == trial_module.TrialStatus.IDLE
end_trial(trial_2)
# New value of bool2 for trial_4 after trial_2.
trial_4 = oracle.create_trial(tuner_id="4")
assert trial_4.status == trial_module.TrialStatus.RUNNING
trial_5 = oracle.create_trial(tuner_id="5")
assert trial_5.status == trial_module.TrialStatus.IDLE
end_trial(trial_3)
end_trial(trial_4)
trial_5 = oracle.create_trial(tuner_id="5")
assert trial_5.status == trial_module.TrialStatus.STOPPED
def test_linked_list():
linked_list = gridsearch.LinkedList()
linked_list.insert("0")
assert linked_list.next("0") is None
linked_list.insert("1")
assert linked_list.next("0") == "1"
assert linked_list.next("1") is None
linked_list.insert("2", "0")
assert linked_list.next("0") == "2"
assert linked_list.next("2") == "1"
assert linked_list.next("1") is None
linked_list.insert("3", "1")
linked_list.insert("4")
assert linked_list.next("0") == "2"
assert linked_list.next("2") == "1"
assert linked_list.next("1") == "3"
assert linked_list.next("3") == "4"
assert linked_list.next("4") is None
| keras-tuner/keras_tuner/tuners/gridsearch_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/tuners/gridsearch_test.py",
"repo_id": "keras-tuner",
"token_count": 3473
} | 131 |
set -e
set -x
cd "${KOKORO_ROOT}/"
sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
PYTHON_BINARY="/usr/bin/python3.9"
"${PYTHON_BINARY}" -m venv venv
source venv/bin/activate
# Check the python version
python --version
python3 --version
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:"
# Check cuda
nvidia-smi
nvcc --version
cd "src/github/keras"
pip install -U pip setuptools
# psutil is used by background log reader
pip install -U psutil
if [ "$KERAS_BACKEND" == "tensorflow" ]
then
echo "TensorFlow backend detected."
pip install -r requirements-tensorflow-cuda.txt --progress-bar off
pip uninstall -y keras keras-nightly
echo "Check that TensorFlow uses GPU"
python3 -c 'import tensorflow as tf;print(tf.__version__);print(tf.config.list_physical_devices("GPU"))'
# Raise error if GPU is not detected.
python3 -c 'import tensorflow as tf;assert len(tf.config.list_physical_devices("GPU")) > 0'
# TODO: keras/layers/merging/merging_test.py::MergingLayersTest::test_sparse_dot_2d Fatal Python error: Aborted
pytest keras --ignore keras/applications \
--ignore keras/layers/merging/merging_test.py \
--cov=keras
fi
if [ "$KERAS_BACKEND" == "jax" ]
then
echo "JAX backend detected."
pip install -r requirements-jax-cuda.txt --progress-bar off
pip uninstall -y keras keras-nightly
python3 -c 'import jax;print(jax.__version__);print(jax.default_backend())'
# Raise error if GPU is not detected.
python3 -c 'import jax;assert jax.default_backend().lower() == "gpu"'
# TODO: keras/layers/merging/merging_test.py::MergingLayersTest::test_sparse_dot_2d Fatal Python error: Aborted
# TODO: keras/trainers/data_adapters/py_dataset_adapter_test.py::PyDatasetAdapterTest::test_basic_flow0 Fatal Python error: Aborted
# keras/backend/jax/distribution_lib_test.py is configured for CPU test for now.
pytest keras --ignore keras/applications \
--ignore keras/layers/merging/merging_test.py \
--ignore keras/trainers/data_adapters/py_dataset_adapter_test.py \
--ignore keras/backend/jax/distribution_lib_test.py \
--ignore keras/distribution/distribution_lib_test.py \
--cov=keras
fi
if [ "$KERAS_BACKEND" == "torch" ]
then
echo "PyTorch backend detected."
pip install -r requirements-torch-cuda.txt --progress-bar off
pip uninstall -y keras keras-nightly
python3 -c 'import torch;print(torch.__version__);print(torch.cuda.is_available())'
# Raise error if GPU is not detected.
python3 -c 'import torch;assert torch.cuda.is_available()'
pytest keras --ignore keras/applications \
--cov=keras
fi
| keras/.kokoro/github/ubuntu/gpu/build.sh/0 | {
"file_path": "keras/.kokoro/github/ubuntu/gpu/build.sh",
"repo_id": "keras",
"token_count": 1102
} | 132 |
import time
import numpy as np
import tensorflow as tf
from absl import flags
import keras
FLAGS = flags.FLAGS
flags.DEFINE_string(
"benchmark_name",
None,
"The name of benchmark to run. If None, all benchmarks in the file will be "
"run.",
)
flags.DEFINE_integer(
"num_samples",
1000,
"Number of input data samples.",
)
flags.DEFINE_integer(
"batch_size",
20,
"Batch size of data.",
)
flags.DEFINE_bool(
"jit_compile",
True,
"If True, the benchmark will run with XLA compilation.",
)
class BenchmarkMetricsCallback:
def __init__(self, start_batch=1, stop_batch=None):
self.start_batch = start_batch
self.stop_batch = stop_batch
self.state = {}
def on_train_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["benchmark_begin"] = time.time()
def on_train_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
self.state["benchmark_end"] = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
self.state["benchmark_end"] - self.state["benchmark_begin"]
)
self.state["throughput"] = throughput
def on_predict_batch_begin(self, batch, logs=None):
if batch == self.start_batch:
self.state["benchmark_begin"] = time.time()
def on_predict_batch_end(self, batch, logs=None):
if batch == self.stop_batch:
self.state["benchmark_end"] = time.time()
throughput = (self.stop_batch - self.start_batch + 1) / (
self.state["benchmark_end"] - self.state["benchmark_begin"]
)
self.state["throughput"] = throughput
class KerasCoreBenchmarkMetricsCallback(keras.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self._callback = BenchmarkMetricsCallback(start_batch, stop_batch)
def on_train_batch_begin(self, batch, logs=None):
self._callback.on_train_batch_begin(batch, logs)
def on_train_batch_end(self, batch, logs=None):
self._callback.on_train_batch_end(batch, logs)
def on_predict_batch_begin(self, batch, logs=None):
self._callback.on_predict_batch_begin(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
self._callback.on_predict_batch_end(batch, logs)
class TFKerasBenchmarkMetricsCallback(tf.keras.callbacks.Callback):
def __init__(self, start_batch=1, stop_batch=None):
self._callback = BenchmarkMetricsCallback(start_batch, stop_batch)
def on_train_batch_begin(self, batch, logs=None):
self._callback.on_train_batch_begin(batch, logs)
def on_train_batch_end(self, batch, logs=None):
self._callback.on_train_batch_end(batch, logs)
def on_predict_batch_begin(self, batch, logs=None):
self._callback.on_predict_batch_begin(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
self._callback.on_predict_batch_end(batch, logs)
class LayerBenchmark:
def __init__(
self,
layer_name,
init_args,
input_shape,
flat_call_inputs=True,
jit_compile=True,
keras_layer=None,
tf_keras_layer=None,
):
self.layer_name = layer_name
_keras_layer_class = getattr(keras.layers, layer_name)
_tf_keras_layer_class = getattr(tf.keras.layers, layer_name)
if keras_layer is None:
            # Sometimes you want to initialize the keras layer and tf_keras
            # layer in different ways. For example, the `Bidirectional` layer
            # takes in a `keras.layers.Layer` and a
            # `tf.keras.layers.Layer` separately.
self._keras_layer = _keras_layer_class(**init_args)
else:
self._keras_layer = keras_layer
if tf_keras_layer is None:
self._tf_keras_layer = _tf_keras_layer_class(**init_args)
else:
self._tf_keras_layer = tf_keras_layer
self.input_shape = input_shape
self._keras_model = self._build_keras_model(
input_shape, flat_call_inputs
)
self._tf_keras_model = self._build_tf_keras_model(
input_shape, flat_call_inputs
)
self._keras_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self._tf_keras_model.compile(
loss="mse", optimizer="sgd", jit_compile=jit_compile
)
self.flat_call_inputs = flat_call_inputs
self.jit_compile = jit_compile
self.input_shape = input_shape
def _build_keras_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(keras.Input(shape=shape))
if flat_call_inputs:
outputs = self._keras_layer(*inputs)
else:
outputs = self._keras_layer(inputs)
return keras.Model(inputs=inputs, outputs=outputs)
def _build_tf_keras_model(self, input_shape, flat_call_inputs=True):
inputs = []
if not isinstance(input_shape[0], (tuple, list)):
input_shape = [input_shape]
for shape in input_shape:
inputs.append(tf.keras.Input(shape=shape))
if flat_call_inputs:
outputs = self._tf_keras_layer(*inputs)
else:
outputs = self._tf_keras_layer(inputs)
return tf.keras.Model(inputs=inputs, outputs=outputs)
def benchmark_predict(self, num_samples, batch_size, data=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = np.random.normal(size=data_shape)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_model.predict(
data,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.predict(
data,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_throughput = callback._callback.state["throughput"] * batch_size
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras 3 throughput of forward pass of {self.layer_name}: "
f"{keras_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward pass of {self.layer_name}: "
f"{tf_keras_throughput:.2f} samples/sec."
)
def benchmark_train(self, num_samples, batch_size, data=None, label=None):
if data is None:
# Generate default data if not provided.
if isinstance(self.input_shape[0], (tuple, list)):
# The layer has multiple inputs.
data = []
for data_shape in self.input_shape:
data_shape = [num_samples] + list(data_shape)
data.append(np.random.normal(size=data_shape))
else:
data_shape = [num_samples] + list(self.input_shape)
data = [np.random.normal(size=data_shape)]
if label is None:
# Generate default label if not provided.
if self.flat_call_inputs:
# Scale by a small factor to avoid zero gradients.
label = (
keras.backend.convert_to_numpy(self._keras_layer(*data))
* 1.001
)
else:
label = (
keras.backend.convert_to_numpy(self._keras_layer(data))
* 1.001
)
num_iterations = num_samples // batch_size - 1
callback = KerasCoreBenchmarkMetricsCallback(stop_batch=num_iterations)
tf_keras_callback = TFKerasBenchmarkMetricsCallback(
stop_batch=num_iterations
)
self._keras_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[callback],
)
self._tf_keras_model.fit(
data,
label,
batch_size=batch_size,
callbacks=[tf_keras_callback],
)
keras_throughput = callback._callback.state["throughput"] * batch_size
tf_keras_throughput = (
tf_keras_callback._callback.state["throughput"] * batch_size
)
print(
f"Keras 3 throughput of forward & backward pass of "
f"{self.layer_name}: {keras_throughput:.2f} samples/sec."
)
print(
f"TF Keras throughput of forward & backward pass of "
f"{self.layer_name}: {tf_keras_throughput:.2f} samples/sec."
)
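# Illustrative usage sketch (not part of the original module): driving
# `LayerBenchmark` for a single layer. The layer name, init args, input
# shape, and sample counts below are assumptions chosen for illustration.
if __name__ == "__main__":
    benchmark = LayerBenchmark(
        layer_name="Dense",
        init_args={"units": 64},
        input_shape=(256,),
    )
    # Compare forward-pass throughput (Keras 3 vs. tf.keras).
    benchmark.benchmark_predict(num_samples=8192, batch_size=256)
    # Compare forward + backward pass throughput.
    benchmark.benchmark_train(num_samples=8192, batch_size=256)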
| keras/benchmarks/layer_benchmark/base_benchmark.py/0 | {
"file_path": "keras/benchmarks/layer_benchmark/base_benchmark.py",
"repo_id": "keras",
"token_count": 4504
} | 133 |
"""Benchmark Keras performance with torch custom training loop.
In this file we use a convolution model. Training loop is written in the
vanilla torch way, and we compare the performance between building model with
Keras and torch.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import keras
from benchmarks.torch_ctl_benchmark.benchmark_utils import train_loop
from keras import layers
num_classes = 2
input_shape = (3, 256, 256)
batch_size = 128
num_batches = 20
num_epochs = 1
x_train = np.random.normal(
size=(num_batches * batch_size, *input_shape)
).astype(np.float32)
y_train = np.random.randint(0, num_classes, size=(num_batches * batch_size,))
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
class TorchModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 32, kernel_size=(3, 3))
self.activation = torch.nn.ReLU()
self.max_pool = torch.nn.MaxPool2d((2, 2))
self.flatten = torch.nn.Flatten()
self.dense = torch.nn.LazyLinear(num_classes)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
x = self.conv(x)
x = self.activation(x)
x = self.max_pool(x)
x = self.flatten(x)
x = self.dense(x)
x = self.softmax(x)
return x
def run_keras_custom_training_loop():
keras_model = keras.Sequential(
[
layers.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dense(num_classes),
layers.Softmax(),
]
)
optimizer = optim.Adam(keras_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
keras_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="keras",
)
def run_torch_custom_training_loop():
torch_model = TorchModel()
optimizer = optim.Adam(torch_model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
train_loop(
torch_model,
train_loader,
num_epochs=num_epochs,
optimizer=optimizer,
loss_fn=loss_fn,
framework="torch",
)
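# For reference, a rough sketch of what a vanilla torch training loop might
# look like. This is an illustrative assumption only; the actual loop used by
# this benchmark (including its timing and throughput reporting) is
# `benchmarks.torch_ctl_benchmark.benchmark_utils.train_loop`, which is not
# shown in this file.
def _train_loop_sketch(model, data_loader, num_epochs, optimizer, loss_fn):
    model.train()
    for _ in range(num_epochs):
        for inputs, targets in data_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_fn(outputs, targets)
            loss.backward()
            optimizer.step()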
if __name__ == "__main__":
run_keras_custom_training_loop()
run_torch_custom_training_loop()
| keras/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py/0 | {
"file_path": "keras/benchmarks/torch_ctl_benchmark/conv_model_benchmark.py",
"repo_id": "keras",
"token_count": 1171
} | 134 |
import functools
from keras import backend
from keras.api_export import keras_export
from keras.backend.common.variables import ALLOWED_DTYPES
from keras.backend.common.variables import standardize_dtype
"""
We adapted the type promotion lattice from JAX. Ref:
https://github.com/google/jax/blob/main/jax/_src/dtypes.py
"""
BOOL_TYPES = ["bool"]
INT_TYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
FLOAT_TYPES = ["bfloat16", "float16", "float32", "float64"]
WEAK_TYPES = ["int", "float"]
def _type_promotion_lattice():
"""
Return the type promotion lattice in the form of a DAG.
    This DAG maps each type to its immediately higher types on the lattice.
"""
(b1,) = BOOL_TYPES
(u1, u2, u4, u8, i1, i2, i4, i8) = INT_TYPES
bf, f2, f4, f8 = FLOAT_TYPES
i_, f_ = WEAK_TYPES
out = {
b1: [i_],
u1: [i2, u2],
u2: [i4, u4],
u4: [i8, u8],
u8: [f_],
i_: [u1, i1],
i1: [i2],
i2: [i4],
i4: [i8],
i8: [f_],
f_: [bf, f2],
bf: [f4],
f2: [f4],
f4: [f8],
f8: [],
}
return out
def _make_lattice_upper_bounds():
lattice = _type_promotion_lattice()
upper_bounds = {node: {node} for node in lattice}
for n in lattice:
while True:
new_upper_bounds = set().union(
*(lattice[b] for b in upper_bounds[n])
)
if n in new_upper_bounds:
raise ValueError(
f"cycle detected in type promotion lattice for node {n}"
)
if new_upper_bounds.issubset(upper_bounds[n]):
break
upper_bounds[n] |= new_upper_bounds
return upper_bounds
LATTICE_UPPER_BOUNDS = _make_lattice_upper_bounds()
@functools.lru_cache(512)
def _least_upper_bound(*nodes):
"""Compute the least upper bound of a set of nodes.
Args:
nodes: sequence of entries from dtypes + weak_types
Returns:
The type representing the least upper bound of the input nodes on the
promotion lattice.
"""
# This function computes the least upper bound of a set of nodes N within a
# partially ordered set defined by the lattice generated above.
# Given a partially ordered set S, let the set of upper bounds of n ∈ S be
# UB(n) ≡ {m ∈ S | n ≤ m}
# Further, for a set of nodes N ⊆ S, let the set of common upper bounds be
# given by
# CUB(N) ≡ {a ∈ S | ∀ b ∈ N: a ∈ UB(b)}
# Then the least upper bound of N is defined as
# LUB(N) ≡ {c ∈ CUB(N) | ∀ d ∈ CUB(N), c ≤ d}
# The definition of an upper bound implies that
# c ≤ d if and only if d ∈ UB(c),
# so the LUB can be expressed:
# LUB(N) = {c ∈ CUB(N) | ∀ d ∈ CUB(N): d ∈ UB(c)}
# or, equivalently:
# LUB(N) = {c ∈ CUB(N) | CUB(N) ⊆ UB(c)}
# By definition, LUB(N) has a cardinality of 1 for a partially ordered set.
# Note a potential algorithmic shortcut: from the definition of CUB(N),
# we have
# ∀ c ∈ N: CUB(N) ⊆ UB(c)
    # So if N ∩ CUB(N) is nonempty, it follows that LUB(N) = N ∩ CUB(N).
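    # Worked example (illustrative): for N = {"uint8", "int8"},
    #   CUB(N) = UB("uint8") ∩ UB("int8")
    #          = {"int16", "int32", "int64", "float", "bfloat16", "float16",
    #             "float32", "float64"},
    # which is exactly UB("int16"), so LUB(N) = "int16". Note that N ∩ CUB(N)
    # is empty here, so the shortcut above does not apply.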
N = set(nodes)
UB = LATTICE_UPPER_BOUNDS
try:
bounds = [UB[n] for n in N]
except KeyError:
dtype = next(n for n in N if n not in UB)
raise ValueError(
f"{dtype=} is not a valid dtype for Keras type promotion."
)
CUB = set.intersection(*bounds)
LUB = (CUB & N) or {c for c in CUB if CUB.issubset(UB[c])}
if len(LUB) == 1:
return LUB.pop()
elif len(LUB) == 0:
msg = (
f"Input dtypes {tuple(str(n) for n in nodes)} have no available "
"implicit dtype promotion path. Try explicitly casting inputs to "
"the desired output type."
)
raise ValueError(msg)
else:
# If we get here, it means the lattice is ill-formed.
raise ValueError(
f"Internal Type Promotion error: {nodes} do not have a unique "
f"least upper bound on the specified lattice; options are {LUB}. "
"This is an unexpected error in Keras's internal logic; "
"please report it to the maintainers."
)
def _dtype_and_weaktype(value):
"""Return a (dtype, weak_type) tuple for the given input."""
is_weak_type = False
if value is int or value is float:
# Note that we can't use `value in [int, float]` because the dtype
# might be equal to python scalar types.
        # e.g., tf.float32 == float is True.
is_weak_type = True
return standardize_dtype(value), is_weak_type
@functools.lru_cache(maxsize=None)
def _respect_weak_type(dtype, weak_type):
"""Return the weak dtype of `dtype` if `weak_type==True`."""
if weak_type:
if dtype == "bool":
return dtype
elif "float" in dtype:
return "float"
elif "int" in dtype:
return "int"
else:
raise ValueError(
"Invalid value for argument `dtype`. Expected one of "
f"{ALLOWED_DTYPES}. Received: dtype={dtype}"
)
return dtype
@functools.lru_cache(maxsize=None)
def _resolve_weak_type(dtype, precision="32"):
"""Resolve weak type by the precision of `backend.floatx()`."""
extended_allowed_dtypes = ALLOWED_DTYPES.union(WEAK_TYPES)
if dtype not in extended_allowed_dtypes:
raise ValueError(
"Invalid value for argument `dtype`. Expected one of "
f"{extended_allowed_dtypes}. Received: dtype={dtype}"
)
if precision not in ["16", "32", "64"]:
raise ValueError(
f"Invalid value for argument `precision`. Expected one of "
f"('16', '32', '64'). Received: precision={precision}"
)
if dtype == "bfloat16": # special case for bfloat16
dtype_indicator = "f"
else:
dtype_indicator = dtype[:1]
if dtype_indicator == "b":
return "bool"
elif dtype_indicator == "i":
return "int" + precision
elif dtype_indicator == "u":
return "uint" + precision
else:
return "float" + precision
BIT64_TO_BIT16_DTYPE = {
"int32": "int16",
"int64": "int16",
"uint32": "uint16",
"uint64": "uint16",
"float32": "float16",
"float64": "float16",
}
BIT64_TO_BIT32_DTYPE = {
"int64": "int32",
"uint64": "uint32",
"float64": "float32",
}
def _lattice_result_type(*args):
dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
if len(dtypes) == 1:
out_dtype = dtypes[0]
out_weak_type = weak_types[0]
elif len(set(dtypes)) == 1 and not all(weak_types):
# Trivial promotion case. This allows extended dtypes through.
out_dtype = dtypes[0]
out_weak_type = False
elif all(weak_types):
# If all inputs are weakly typed, we compute the bound of the
# strongly-typed counterparts and apply the weak type at the end. This
# avoids returning the incorrect result with non-canonical weak types
# (e.g. weak int16).
out_dtype = _least_upper_bound(
*{_respect_weak_type(d, False) for d in dtypes}
)
out_weak_type = True
else:
out_dtype = _least_upper_bound(
*{_respect_weak_type(d, w) for d, w in zip(dtypes, weak_types)}
)
out_weak_type = any(out_dtype is t for t in WEAK_TYPES)
out_weak_type = (out_dtype != "bool") and out_weak_type
precision = backend.floatx()[-2:]
if out_weak_type:
out_dtype = _resolve_weak_type(out_dtype, precision=precision)
return out_dtype
@keras_export("keras.backend.result_type")
def result_type(*dtypes):
"""Returns the type from applying the Keras type promotion rules.
In general, each argument is first parsed by `backend.standardize_dtype`,
and the resulting dtype is determined by the least upper bound of the type
promotion lattice.
Note: This function attempts to match the result of `jnp.result_type`.
Args:
dtypes: Input dtypes.
Returns:
The result dtype.
Examples:
>>> x = keras.ops.ones((1,), dtype="bfloat16")
>>> keras.backend.result_type(x.dtype, int)
"bfloat16"
>>> x = keras.ops.ones((1,), dtype="int32")
>>> y = keras.ops.ones((1,), dtype="float32")
>>> keras.backend.result_type(x.dtype, y.dtype)
"float32"
"""
if len(dtypes) == 0:
        # If no dtypes are provided, default to floatx; this matches
        # the behavior of `ops.convert_to_tensor([])`.
return backend.floatx()
return _lattice_result_type(
*(backend.floatx() if arg is None else arg for arg in dtypes),
)
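# A few illustrative results (assuming `backend.floatx() == "float32"`); these
# are examples added for clarity, not part of the original module:
#   result_type("int8", "uint8") -> "int16"
#   result_type("int8", float)   -> "float32"  (Python `float` is a weak type)
#   result_type()                -> "float32"  (no inputs defaults to floatx)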
| keras/keras/backend/common/dtypes.py/0 | {
"file_path": "keras/keras/backend/common/dtypes.py",
"repo_id": "keras",
"token_count": 4038
} | 135 |
"""!!!DO NOT USE!!!
Distribution-related utilities for the JAX backend.
This is just a prototype and we might want to unify it
with other backends in the future.
"""
import jax
import numpy as np
from keras.utils import jax_utils
def list_devices(device_type=None):
"""Return all the available devices based on the device type.
Note that this should return the global devices in a distributed setting.
Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to
            `"gpu"` or `"tpu"` if available when `device_type` is not
            provided; otherwise the `"cpu"` devices will be returned.
Return:
        List of devices that are available for distributed computation.
"""
device_type = device_type.lower() if device_type else None
jax_devices = jax.devices(backend=device_type)
return [f"{device.platform}:{device.id}" for device in jax_devices]
def distribute_variable(value, layout):
"""Create a distributed variable for JAX.
Since JAX doesn't have a variable class, this will just return a `jax.Array`
with the corresponding layout/sharding specified.
    Note that this function should be used in an eager context, not in a
    jitted function.
Args:
value: the initial value of the variable.
layout: `TensorLayout` for the created variable, or a
`jax.sharding.Sharding` instance.
Returns:
jax.Array which is the distributed variable.
"""
if not isinstance(layout, jax.sharding.Sharding):
layout = _to_jax_layout(layout)
if isinstance(
value, (jax.Array, jax.numpy.ndarray)
) and value.sharding.is_equivalent_to(layout, ndim=len(value.shape)):
# Skip the relayout if the value is already having the proper sharding
return value
if layout.is_fully_addressable:
return jax.device_put(value, layout)
else:
        # Need to only distribute the value to local addressable devices, and
        # repack them back into the global format.
mapping = layout.addressable_devices_indices_map(value.shape)
local_values = jax.device_put(
[value[i] for i in mapping.values()], list(mapping.keys())
)
global_value = jax.make_array_from_single_device_arrays(
value.shape, layout, local_values
)
return global_value
def distribute_tensor(tensor, layout):
"""Distribute the tensor based on the layout.
Note that this function can be used both in eager context, or within a
jitted function.
Args:
tensor: `jax.Array` that need to be distributed.
layout: `TensorLayout` for the distribution information, or a
`jax.sharding.Sharding` instance.
Returns:
Distributed value.
"""
if not isinstance(layout, jax.sharding.Sharding):
layout = _to_jax_layout(layout)
    # TODO(scottzhu): This might not be a cheap check; we should consider
    # having a proper JAX API for doing this check.
if jax_utils.is_in_jax_tracing_scope():
return jax.lax.with_sharding_constraint(tensor, layout)
if layout.is_fully_addressable:
return jax.device_put(tensor, layout)
else:
        # Need to only distribute the value to local addressable devices, and
        # repack them back into the global format.
mapping = layout.addressable_devices_indices_map(tensor.shape)
local_values = jax.device_put(
[tensor[i] for i in mapping.values()], list(mapping.keys())
)
global_value = jax.make_array_from_single_device_arrays(
tensor.shape, layout, local_values
)
return global_value
def distribute_data_input(inputs, layout):
"""Distribute the input data with the corresponding layout.
    Note that the inputs here are a local worker batch. Within the local
    worker, the data needs to be further partitioned to map to each of the
    devices.
Args:
inputs: `jax.Array` that is already sharded to a local process size.
layout: `TensorLayout` for the distribution information, or a
`jax.sharding.Sharding` instance.
Returns:
        Distributed inputs that have been properly placed on local devices.
"""
if not isinstance(layout, jax.sharding.Sharding):
layout = _to_jax_layout(layout)
if layout.is_fully_addressable:
return jax.device_put(inputs, layout)
# We need the jax mesh information to determine how to place the data
    # onto each of the workers.
jax_mesh = layout.mesh
mesh_rank = len(jax_mesh.shape)
per_process_batch_size = inputs.shape[0]
if mesh_rank == 1:
        # This is a data-parallel-only mesh. We will split the full data
        # across the batch dim.
num_split = jax.local_device_count()
per_replica_batch_size = per_process_batch_size // num_split
if per_process_batch_size % per_replica_batch_size != 0:
raise ValueError(
f"The local batch size {per_process_batch_size} is not"
"divisible by the number of local replicas "
f"{num_split}"
)
global_batch_size = per_process_batch_size * jax.process_count()
per_replica_batches = jax.numpy.split(inputs, num_split, axis=0)
elif mesh_rank == 2:
# Data+Model parallel
        # In this case, we need to check whether the mesh batch dim size is
        # larger than the number of local devices, so that we can decide
        # whether the data needs to be split, or repeated/copied for each of
        # the devices.
        # TODO(scottzhu): The mesh batch dim name is not available here, since
        # we only have the jax Mesh. We assume the first dim is for batch and
        # the second dim is for model, for now.
mesh_batch_dim_size = list(jax_mesh.shape.values())[0]
local_device_count = jax.local_device_count()
if mesh_batch_dim_size < local_device_count:
# No split needed, we only need to repeat here.
global_batch_size = per_process_batch_size
per_replica_batches = [inputs for _ in range(local_device_count)]
else:
            # Note that the global batch size is not simply
            # per_process_batch_size * the number of processes. It actually
            # depends on the model dim size.
global_batch_size = per_process_batch_size * (
mesh_batch_dim_size // local_device_count
)
per_replica_batches = jax.numpy.split(
inputs, local_device_count, axis=0
)
else:
raise ValueError(
"Only 1D or 2D mesh is supported at the moment. "
f"Received mesh shape = {jax_mesh.shape}"
)
global_shape = (global_batch_size,) + inputs.shape[1:]
global_batch_array = jax.make_array_from_single_device_arrays(
global_shape,
layout,
arrays=[
jax.device_put(batch, device)
for batch, device in zip(
per_replica_batches, layout.addressable_devices
)
],
)
return global_batch_array
def initialize(job_addresses, num_processes, process_id):
if job_addresses and "," in job_addresses:
        # When the user provides all the job addresses, we split them and use
        # the first one as the coordinator.
job_addresses = job_addresses.split(",")
        # Do a sanity check to make sure the number of addresses also matches
        # num_processes.
if num_processes is not None and num_processes != len(job_addresses):
raise ValueError(
f"The provided job_addresses {job_addresses} has "
f"{len(job_addresses)} jobs, but num_processes is "
f"{num_processes}"
)
coordinator_address = job_addresses[0]
else:
coordinator_address = job_addresses
jax.distributed.initialize(
coordinator_address=coordinator_address,
num_processes=num_processes,
process_id=process_id,
)
def num_processes():
"""Return the number of processes for the current distribution setting."""
return jax.process_count()
def process_id():
"""Return the current process ID for the distribution setting."""
return jax.process_index()
def _to_jax_device(device_id):
if isinstance(device_id, jax.Device):
return device_id
device_type, index = device_id.split(":")
index = int(index)
devices = jax.devices(backend=device_type)
if index >= len(devices):
raise ValueError(f"Unknown device: {device_id}")
return devices[index]
def _to_jax_mesh(device_mesh):
"""Convert the DeviceMesh to JAX backend specific Mesh.
Args:
device_mesh: DeviceMesh instance to convert.
Returns:
A `jax.sharding.Mesh` instance.
"""
shape = device_mesh.devices.shape
devices = [_to_jax_device(d) for d in device_mesh.devices.flatten()]
devices = np.array(devices).reshape(shape)
return jax.sharding.Mesh(devices, device_mesh.axis_names)
def _to_jax_layout(tensor_layout):
"""Convert the TensorLayout to JAX backend specific Sharding.
Args:
tensor_layout: TensorLayout instance to convert.
Returns:
A `jax.sharding.NamedSharding` instance.
"""
if tensor_layout.device_mesh is None:
raise ValueError(
"Cannot create sharding when device mesh is not set "
"for TensorLayout."
)
partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)
jax_mesh = _to_jax_mesh(tensor_layout.device_mesh)
return jax.sharding.NamedSharding(jax_mesh, partition_spec)
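# Illustrative usage sketch (not part of the original module): distribute a
# tensor across the local devices along the batch dimension by passing a plain
# `jax.sharding.NamedSharding`, which `distribute_tensor` accepts directly.
# The shapes and axis names below are assumptions; the batch dimension is
# sized so that it divides evenly across the local devices.
if __name__ == "__main__":
    import jax.numpy as jnp

    devices = np.array(jax.devices())
    mesh = jax.sharding.Mesh(devices, axis_names=("batch",))
    sharding = jax.sharding.NamedSharding(
        mesh, jax.sharding.PartitionSpec("batch")
    )
    x = jnp.ones((len(devices) * 4, 16))
    x_distributed = distribute_tensor(x, sharding)
    print(x_distributed.sharding)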
| keras/keras/backend/jax/distribution_lib.py/0 | {
"file_path": "keras/keras/backend/jax/distribution_lib.py",
"repo_id": "keras",
"token_count": 3877
} | 136 |