# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import core
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomBrightnessTest(TestCase):
def test_preserves_output_shape(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomBrightness(factor=(0.3, 0.8))
output = layer(image)
self.assertEqual(image.shape, output.shape)
self.assertNotAllClose(image, output)
def test_no_adjustment_for_factor_zero(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomBrightness(factor=0)
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
def test_max_brightness(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomBrightness(factor=(1, 1))
output = layer(image)
self.assertAllClose(
output, tf.fill((4, 8, 8, 3), 255), atol=1e-5, rtol=1e-5
)
def test_max_brightness_rescaled_value_range(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape)
layer = preprocessing.RandomBrightness(
value_range=(0, 1), factor=(1, 1)
)
output = layer(image)
self.assertAllClose(
output, tf.fill((4, 8, 8, 3), 1), atol=1e-5, rtol=1e-5
)
def test_zero_brightness(self):
image_shape = (4, 8, 8, 3)
image = tf.random.uniform(shape=image_shape) * 255.0
layer = preprocessing.RandomBrightness(factor=(-1, -1))
output = layer(image)
self.assertAllClose(
output, tf.fill((4, 8, 8, 3), 0), atol=1e-5, rtol=1e-5
)
    def test_with_uint8(self):
image_shape = (4, 8, 8, 3)
image = tf.cast(
tf.random.uniform(shape=image_shape) * 255.0, dtype=tf.uint8
)
layer = preprocessing.RandomBrightness(factor=0)
output = layer(image)
self.assertAllClose(image, output, atol=1e-5, rtol=1e-5)
layer = preprocessing.RandomBrightness(factor=(0.3, 0.8))
output = layer(image)
self.assertNotAllClose(image, output)
def test_config(self):
layer = preprocessing.RandomBrightness(
value_range=(0, 1), factor=(0.3, 0.8)
)
config = layer.get_config()
self.assertTrue(isinstance(config["factor"], core.UniformFactorSampler))
self.assertEqual(config["factor"].get_config()["lower"], 0.3)
self.assertEqual(config["factor"].get_config()["upper"], 0.8)
self.assertEqual(config["value_range"], (0, 1))
| keras-cv/keras_cv/layers/preprocessing/random_brightness_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_brightness_test.py",
"repo_id": "keras-cv",
"token_count": 1443
} | 55 |
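Taken together, the tests above pin down the `RandomBrightness` contract: `factor=0` is a no-op, `factor=(1, 1)` saturates every pixel to the top of `value_range`, and `factor=(-1, -1)` drives the image to zero. A minimal usage sketch, not part of the test file and assuming a working `keras_cv` install:

import tensorflow as tf
import keras_cv

images = tf.random.uniform((4, 8, 8, 3)) * 255.0  # pixel values in [0, 255]
# A brightness shift is sampled from the given factor range; value_range tells
# the layer the min/max pixel values so the shift can be scaled and clipped.
layer = keras_cv.layers.RandomBrightness(factor=(-0.2, 0.4), value_range=(0, 255))
augmented = layer(images)  # same shape as `images`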
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class RandomCutoutTest(TestCase):
def _run_test(self, height_factor, width_factor):
img_shape = (40, 40, 3)
xs = tf.stack(
[2 * np.ones(img_shape), np.ones(img_shape)],
axis=0,
)
xs = tf.cast(xs, tf.float32)
fill_value = 0.0
layer = preprocessing.RandomCutout(
height_factor=height_factor,
width_factor=width_factor,
fill_mode="constant",
fill_value=fill_value,
seed=1,
)
xs = layer(xs)
# Some pixels should be replaced with fill value
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_return_shapes(self):
xs = np.ones((2, 512, 512, 3))
ys_segmentation_masks = np.ones((2, 512, 512, 3))
layer = preprocessing.RandomCutout(
height_factor=0.5, width_factor=0.5, seed=1
)
xs = layer(xs)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_return_shapes_single_element(self):
xs = np.ones((512, 512, 3))
ys_segmentation_masks = np.ones((512, 512, 3))
layer = preprocessing.RandomCutout(
height_factor=0.5, width_factor=0.5, seed=1
)
xs = layer(xs)
ys_segmentation_masks = layer(ys_segmentation_masks)
self.assertEqual(xs.shape, (512, 512, 3))
self.assertEqual(ys_segmentation_masks.shape, (512, 512, 3))
def test_random_cutout_single_float(self):
self._run_test(0.5, 0.5)
def test_random_cutout_tuple_float(self):
self._run_test((0.4, 0.9), (0.1, 0.3))
def test_random_cutout_fail_mix_bad_param_values(self):
fn = lambda: self._run_test(0.5, (15.0, 30))
self.assertRaises(ValueError, fn)
def test_random_cutout_fail_reverse_lower_upper_float(self):
fn = lambda: self._run_test(0.5, (0.9, 0.4))
self.assertRaises(ValueError, fn)
def test_random_cutout_call_results_one_channel(self):
xs = tf.cast(
tf.stack(
[2 * np.ones((40, 40, 1)), np.ones((40, 40, 1))],
axis=0,
),
tf.float32,
)
patch_value = 0.0
layer = preprocessing.RandomCutout(
height_factor=0.5,
width_factor=0.5,
fill_mode="constant",
fill_value=patch_value,
seed=1,
)
xs = layer(xs)
# Some pixels should be replaced with fill value
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == patch_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == patch_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
def test_random_cutout_call_tiny_image(self):
img_shape = (4, 4, 3)
xs = tf.stack(
[2 * np.ones(img_shape), np.ones(img_shape)],
axis=0,
)
xs = tf.cast(xs, tf.float32)
fill_value = 0.0
layer = preprocessing.RandomCutout(
height_factor=(0.4, 0.9),
width_factor=(0.1, 0.3),
fill_mode="constant",
fill_value=fill_value,
seed=1,
)
xs = layer(xs)
# Some pixels should be replaced with fill value
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
@pytest.mark.tf_only
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * np.ones((100, 100, 1)), np.ones((100, 100, 1))], axis=0
),
tf.float32,
)
patch_value = 0.0
layer = preprocessing.RandomCutout(
height_factor=0.5,
width_factor=0.5,
fill_mode="constant",
fill_value=patch_value,
seed=1,
)
@tf.function
def augment(x):
return layer(x)
xs = augment(xs)
# Some pixels should be replaced with fill value
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == patch_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == patch_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
| keras-cv/keras_cv/layers/preprocessing/random_cutout_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_cutout_test.py",
"repo_id": "keras-cv",
"token_count": 2765
} | 56 |
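As exercised above, `RandomCutout` replaces a random rectangle in each image with `fill_value` and preserves the input shape. A short usage sketch, illustrative only and assuming `keras_cv` is installed:

import tensorflow as tf
import keras_cv

images = tf.random.uniform((2, 64, 64, 3))
# height_factor/width_factor give the cutout patch size as a fraction of the image.
layer = keras_cv.layers.RandomCutout(
    height_factor=0.3, width_factor=0.3, fill_mode="constant", fill_value=0.0
)
augmented = layer(images)  # shape (2, 64, 64, 3), with a zeroed patch per image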
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
num_classes = 10
class RandomShearTest(TestCase):
def test_aggressive_shear_fills_at_least_some_pixels(self):
img_shape = (50, 50, 3)
xs = tf.stack(
[2 * tf.ones(img_shape), tf.ones(img_shape)],
axis=0,
)
ys_segmentation_masks = tf.stack(
[2 * tf.ones(img_shape), tf.ones(img_shape)],
axis=0,
)
xs = tf.cast(xs, tf.float32)
ys_segmentation_masks = tf.cast(ys_segmentation_masks, tf.float32)
fill_value = 0.0
layer = preprocessing.RandomShear(
x_factor=(3, 3), seed=0, fill_mode="constant", fill_value=fill_value
)
xs = layer(xs)
ys_segmentation_masks = layer(ys_segmentation_masks)
# Some pixels should be replaced with fill value
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[0]) == 2.0))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == fill_value))
self.assertTrue(np.any(ops.convert_to_numpy(xs[1]) == 1.0))
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == fill_value)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[0]) == 2.0)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == fill_value)
)
self.assertTrue(
np.any(ops.convert_to_numpy(ys_segmentation_masks[1]) == 1.0)
)
def test_return_shapes(self):
"""test return dict keys and value pairs"""
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample bounding boxes
ys_bounding_boxes = {
"boxes": tf.ones((2, 3, 4)),
"classes": tf.random.uniform((2, 3), 0, 1),
}
# randomly sample segmentation masks
ys_segmentation_masks = tf.ones((2, 512, 512, 3))
layer = preprocessing.RandomShear(
x_factor=(0.1, 0.3),
y_factor=(0.1, 0.3),
seed=0,
fill_mode="constant",
bounding_box_format="xywh",
)
outputs = layer(
{
"images": xs,
"targets": ys_labels,
"bounding_boxes": ys_bounding_boxes,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_bounding_boxes, ys_segmentation_masks = (
outputs["images"],
outputs["targets"],
outputs["bounding_boxes"],
outputs["segmentation_masks"],
)
ys_bounding_boxes = bounding_box.to_dense(ys_bounding_boxes)
self.assertEqual(xs.shape, (2, 512, 512, 3))
self.assertEqual(ys_labels.shape, (2, 10))
self.assertEqual(ys_bounding_boxes["boxes"].shape, (2, 3, 4))
self.assertEqual(ys_bounding_boxes["classes"].shape, (2, 3))
self.assertEqual(ys_segmentation_masks.shape, (2, 512, 512, 3))
def test_single_image_input(self):
"""test for single image input"""
xs = tf.ones((512, 512, 3))
inputs = {"images": xs}
layer = preprocessing.RandomShear(
x_factor=(3, 3),
seed=0,
fill_mode="constant",
)
outputs = layer(inputs)
self.assertEqual(outputs["images"].shape, (512, 512, 3))
@pytest.mark.skip(reason="Flaky")
def test_area(self):
xs = tf.ones((1, 512, 512, 3))
ys = {
"boxes": tf.constant(
[[[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]]]
),
"classes": tf.constant([2, 3]),
}
inputs = {"images": xs, "bounding_boxes": ys}
layer = preprocessing.RandomShear(
x_factor=(0.3, 0.7),
y_factor=(0.4, 0.7),
seed=0,
fill_mode="constant",
bounding_box_format="rel_xyxy",
)
outputs = layer(inputs)
xs, ys_bounding_boxes = (
outputs["images"],
outputs["bounding_boxes"]["boxes"],
)
new_area = tf.math.multiply(
tf.abs(
tf.subtract(
ys_bounding_boxes[..., 2], ys_bounding_boxes[..., 0]
)
),
tf.abs(
tf.subtract(
ys_bounding_boxes[..., 3], ys_bounding_boxes[..., 1]
)
),
)
old_area = tf.math.multiply(
tf.abs(tf.subtract(ys["boxes"][..., 2], ys["boxes"][..., 0])),
tf.abs(tf.subtract(ys["boxes"][..., 3], ys["boxes"][..., 1])),
)
self.assertTrue(tf.math.reduce_all(new_area > old_area))
@pytest.mark.tf_only
    def test_in_tf_function(self):
        """test that the layer works inside a tf.function"""
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
layer = preprocessing.RandomShear(
x_factor=0.2, y_factor=0.2, bounding_box_format="xywh"
)
ys = {
"boxes": tf.random.uniform((2, 3, 4), 0, 1),
"classes": tf.random.uniform((2, 3), 0, 1),
}
@tf.function
def augment(x, y):
return layer({"images": x, "bounding_boxes": y})
outputs = augment(xs, ys)
xs = outputs["images"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
    def test_no_augmentation(self):
        """test that no image or bbox augmentation occurs when x_factor and
        y_factor are 0"""
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = {
"boxes": tf.constant(
[
[[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]],
[[0.3, 0.4, 0.5, 0.6], [0.9, 0.8, 1.0, 1.0]],
],
dtype=tf.float32,
),
"classes": tf.constant([[0, 0], [0, 0]], dtype=tf.float32),
}
layer = preprocessing.RandomShear(
x_factor=0, y_factor=0, bounding_box_format="rel_xyxy"
)
outputs = layer({"images": xs, "bounding_boxes": ys})
output_xs, output_ys = outputs["images"], outputs["bounding_boxes"]
ys = bounding_box.to_dense(ys)
output_ys = bounding_box.to_dense(output_ys)
self.assertAllEqual(xs, output_xs)
self.assertAllEqual(ys["boxes"], output_ys["boxes"])
# TODO re-enable when bounding box augmentation is fixed.
def DISABLED_test_output_values(self):
"""test to verify augmented bounding box output coordinate"""
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 3)), tf.zeros((100, 100, 3))],
axis=0,
),
tf.float32,
)
ys = tf.cast(
tf.stack(
[
tf.constant(
[[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]]
),
tf.constant(
[[10.0, 20.0, 40.0, 50.0], [12.0, 22.0, 42.0, 54.0]]
),
],
axis=0,
),
tf.float32,
)
ys = bounding_box.add_class_id(ys)
true_ys = tf.cast(
tf.stack(
[
tf.constant(
[
[7.60, 20.43, 39.04, 51.79, 0.0],
[9.41, 22.52, 40.94, 55.88, 0.0],
]
),
tf.constant(
[
[13.68, 22.51, 49.20, 59.05, 0],
[16.04, 24.95, 51.940, 63.56, 0],
]
),
],
axis=0,
),
tf.float32,
)
layer = preprocessing.RandomShear(
x_factor=0.2, y_factor=0.2, bounding_box_format="xyxy", seed=1
)
outputs = layer({"images": xs, "bounding_boxes": ys})
_, output_ys = outputs["images"], outputs["bounding_boxes"].to_tensor()
self.assertAllClose(true_ys, output_ys, rtol=1e-02, atol=1e-03)
def test_random_shear_on_batched_images_independently(self):
image = tf.random.uniform(shape=(100, 100, 3))
input_images = tf.stack([image, image], axis=0)
layer = preprocessing.RandomShear(x_factor=0.5, y_factor=0.5)
results = layer(input_images)
self.assertNotAllClose(results[0], results[1])
def test_ragged_bounding_box(self):
images = tf.random.uniform((2, 16, 16, 3))
random_box = tf.constant(
[[[0.1, 0.2, 1, 1], [0.4, 0.6, 1, 1]]], dtype=tf.float32
)
random_box = tf.squeeze(random_box, axis=0)
random_box = tf.RaggedTensor.from_row_lengths(random_box, [1, 1])
classes = tf.ragged.constant([[0], [0]])
bounding_boxes = {"boxes": random_box, "classes": classes}
inputs = {"images": images, "bounding_boxes": bounding_boxes}
layer = preprocessing.RandomShear(
x_factor=(0.5, 0.5),
y_factor=(0.5, 0.5),
bounding_box_format="rel_xywh",
)
layer(inputs)
| keras-cv/keras_cv/layers/preprocessing/random_shear_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_shear_test.py",
"repo_id": "keras-cv",
"token_count": 5788
} | 57 |
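These tests call `RandomShear` both on bare image batches and on dictionaries carrying labels, bounding boxes, and segmentation masks. A minimal sketch of the dictionary form, with values chosen only for illustration:

import tensorflow as tf
import keras_cv

images = tf.ones((2, 128, 128, 3))
bounding_boxes = {
    "boxes": tf.constant([[[10.0, 10.0, 40.0, 40.0]], [[20.0, 20.0, 60.0, 60.0]]]),
    "classes": tf.constant([[0.0], [1.0]]),
}
layer = keras_cv.layers.RandomShear(
    x_factor=0.2, y_factor=0.2, bounding_box_format="xywh"
)
outputs = layer({"images": images, "bounding_boxes": bounding_boxes})
# outputs["images"] and outputs["bounding_boxes"] are sheared consistently.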
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.backend import keras
from keras_cv.tests.test_case import TestCase
TEST_CONFIGURATIONS = [
("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", layers.ChannelShuffle, {}),
("Equalization", layers.Equalization, {"value_range": (0, 255)}),
(
"RandomCropAndResize",
layers.RandomCropAndResize,
{
"target_size": (224, 224),
"crop_area_factor": (0.8, 1.0),
"aspect_ratio_factor": (3 / 4, 4 / 3),
"bounding_box_format": "xywh",
"dtype": "float32",
},
),
("Grayscale", layers.Grayscale, {}),
("GridMask", layers.GridMask, {}),
(
"Posterization",
layers.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
("RandomBrightness", layers.RandomBrightness, {"factor": 0.5}),
(
"RandomColorDegeneration",
layers.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomCutout",
layers.RandomCutout,
{"height_factor": 0.2, "width_factor": 0.2},
),
(
"RandomFlip",
layers.RandomFlip,
{"mode": "horizontal", "bounding_box_format": "xyxy"},
),
(
"RandomHue",
layers.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomTranslation",
layers.RandomTranslation,
{
"width_factor": 0.5,
"height_factor": 0.5,
"bounding_box_format": "xyxy",
"dtype": "float32",
},
),
(
"RandomChannelShift",
layers.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
layers.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
layers.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomGaussianBlur",
layers.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0), "dtype": "float32"},
),
(
"RandomJpegQuality",
layers.RandomJpegQuality,
{"factor": (75, 100), "dtype": "float32"},
),
(
"RandomRotation",
layers.RandomRotation,
{
"factor": 0.5,
"bounding_box_format": "xyxy",
"dtype": "float32",
},
),
("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
layers.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomAspectRatio",
layers.RandomAspectRatio,
{
"factor": (0.9, 1.1),
"bounding_box_format": "xyxy",
"dtype": "float32",
},
),
(
"RandomShear",
layers.RandomShear,
        {
            "x_factor": 0.3,
            "y_factor": 0.3,
"bounding_box_format": "xyxy",
"dtype": "float32",
},
),
("Solarization", layers.Solarization, {"value_range": (0, 255)}),
(
"Mosaic",
layers.Mosaic,
{"bounding_box_format": "xyxy"},
),
("CutMix", layers.CutMix, {"dtype": "float32"}),
("MixUp", layers.MixUp, {}),
(
"Resizing",
layers.Resizing,
{
"height": 224,
"width": 224,
"bounding_box_format": "xyxy",
"pad_to_aspect_ratio": True,
"dtype": "float32",
},
),
(
"JitteredResize",
layers.JitteredResize,
{
"target_size": (224, 224),
"scale_factor": (0.8, 1.25),
"bounding_box_format": "xywh",
"dtype": "float32",
},
),
(
"RandomZoom",
layers.RandomZoom,
{"height_factor": 0.2, "width_factor": 0.5},
),
(
"RandomCrop",
layers.RandomCrop,
{"height": 224, "width": 224, "bounding_box_format": "xyxy"},
),
(
"Rescaling",
layers.Rescaling,
{
"scale": 1,
"offset": 0.5,
},
),
]
NO_CPU_FP16_KERNEL_LAYERS = [
layers.RandomSaturation,
layers.RandomColorJitter,
layers.RandomHue,
]
NO_BOUNDING_BOXES_TESTS = [
layers.RandomCutout,
layers.RandomZoom,
layers.CutMix,
]
class WithMixedPrecisionTest(TestCase):
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_in_mixed_precision(self, layer_cls, init_args):
if not tf.config.list_physical_devices("GPU"):
if layer_cls in NO_CPU_FP16_KERNEL_LAYERS:
self.skipTest(
"There is currently no float16 CPU kernel registered for "
"operations `tf.image.adjust_saturation`, and "
"`tf.image.adjust_hue`. Skipping."
)
keras.mixed_precision.set_global_policy("mixed_float16")
img = tf.random.uniform(
shape=(3, 512, 512, 3), minval=0, maxval=255, dtype=tf.float32
)
bounding_boxes = {
"boxes": tf.convert_to_tensor(
[
[
[200, 200, 400, 400],
[250, 250, 450, 450],
[300, 300, 500, 500],
], # Bounding boxes for image 1
[
[100, 100, 300, 300],
[150, 150, 350, 350],
[200, 200, 400, 400],
], # Bounding boxes for image 2
[
[300, 300, 500, 500],
[350, 350, 550, 550],
[400, 400, 600, 600],
],
], # Bounding boxes for image 3
dtype=tf.float32,
),
"classes": tf.ones((3, 3), dtype=tf.float32),
}
inputs = {"images": img}
if layer_cls in NO_BOUNDING_BOXES_TESTS:
inputs["labels"] = bounding_boxes["classes"]
else:
inputs["bounding_boxes"] = bounding_boxes
layer = layer_cls(**init_args)
layer(inputs)
@classmethod
def tearDownClass(cls) -> None:
# Do not affect other tests
keras.mixed_precision.set_global_policy("float32")
if __name__ == "__main__":
tf.test.main()
| keras-cv/keras_cv/layers/preprocessing/with_mixed_precision_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/with_mixed_precision_test.py",
"repo_id": "keras-cv",
"token_count": 3825
} | 58 |
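The test above sweeps every preprocessing layer under the `mixed_float16` policy; the same pattern applies outside the test suite. A hedged sketch of running a single layer under mixed precision, with the layer choice and shapes being arbitrary:

import tensorflow as tf
import keras_cv
from keras_cv.backend import keras

keras.mixed_precision.set_global_policy("mixed_float16")
images = tf.random.uniform((2, 64, 64, 3), maxval=255.0)
layer = keras_cv.layers.RandomBrightness(factor=0.3)
augmented = layer(images)  # float16 compute, float32 variables
keras.mixed_precision.set_global_policy("float32")  # restore the default policy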
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import random
from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.point_cloud import coordinate_transform
from keras_cv.point_cloud import wrap_angle_radians
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
@keras_cv_export("keras_cv.layers.GlobalRandomRotation")
class GlobalRandomRotation(base_augmentation_layer_3d.BaseAugmentationLayer3D):
"""A preprocessing layer which randomly rotates point clouds and bounding
boxes along X, Y and Z axes during training.
This layer will randomly rotate the whole scene along the X, Y and Z axes
based on a randomly sampled rotation angle between [-max_rotation_angle,
max_rotation_angle] (in radians) following a uniform distribution. During
inference time, the output will be identical to input. Call the layer with
`training=True` to rotate the input.
Input shape:
point_clouds: 3D (multi frames) float32 Tensor with shape
[num of frames, num of points, num of point features].
The first 5 features are [x, y, z, class, range].
bounding_boxes: 3D (multi frames) float32 Tensor with shape
[num of frames, num of boxes, num of box features]. Boxes are expected
to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py
for more details on supported bounding box formats.
Output shape:
A dictionary of Tensors with the same shape as input Tensors.
Arguments:
max_rotation_angle_x: A float scalar sets the maximum rotation angle (in
radians) along X axis.
max_rotation_angle_y: A float scalar sets the maximum rotation angle (in
radians) along Y axis.
max_rotation_angle_z: A float scalar sets the maximum rotation angle (in
radians) along Z axis.
"""
def __init__(
self,
max_rotation_angle_x=None,
max_rotation_angle_y=None,
max_rotation_angle_z=None,
**kwargs
):
super().__init__(**kwargs)
max_rotation_angle_x = (
max_rotation_angle_x if max_rotation_angle_x else 0.0
)
max_rotation_angle_y = (
max_rotation_angle_y if max_rotation_angle_y else 0.0
)
max_rotation_angle_z = (
max_rotation_angle_z if max_rotation_angle_z else 0.0
)
if max_rotation_angle_x < 0:
raise ValueError("max_rotation_angle_x must be >=0.")
if max_rotation_angle_y < 0:
raise ValueError("max_rotation_angle_y must be >=0.")
if max_rotation_angle_z < 0:
raise ValueError("max_rotation_angle_z must be >=0.")
self._max_rotation_angle_x = max_rotation_angle_x
self._max_rotation_angle_y = max_rotation_angle_y
self._max_rotation_angle_z = max_rotation_angle_z
def get_config(self):
return {
"max_rotation_angle_x": self._max_rotation_angle_x,
"max_rotation_angle_y": self._max_rotation_angle_y,
"max_rotation_angle_z": self._max_rotation_angle_z,
}
def get_random_transformation(self, **kwargs):
random_rotation_x = random.uniform(
(),
minval=-self._max_rotation_angle_x,
maxval=self._max_rotation_angle_x,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_rotation_y = random.uniform(
(),
minval=-self._max_rotation_angle_y,
maxval=self._max_rotation_angle_y,
dtype=self.compute_dtype,
seed=self._random_generator,
)
random_rotation_z = random.uniform(
(),
minval=-self._max_rotation_angle_z,
maxval=self._max_rotation_angle_z,
dtype=self.compute_dtype,
seed=self._random_generator,
)
return {
"pose": tf.stack(
[
0,
0,
0,
random_rotation_z,
random_rotation_x,
random_rotation_y,
],
axis=0,
)
}
def augment_point_clouds_bounding_boxes(
self, point_clouds, bounding_boxes, transformation, **kwargs
):
pose = transformation["pose"]
point_clouds_xyz = coordinate_transform(point_clouds[..., :3], pose)
point_clouds = tf.concat(
[point_clouds_xyz, point_clouds[..., 3:]], axis=-1
)
bounding_boxes_xyz = coordinate_transform(
bounding_boxes[..., : CENTER_XYZ_DXDYDZ_PHI.Z + 1], pose
)
bounding_boxes_heading = wrap_angle_radians(
tf.expand_dims(
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.PHI], axis=-1
)
- pose[3]
)
bounding_boxes = tf.concat(
[
bounding_boxes_xyz,
bounding_boxes[
..., CENTER_XYZ_DXDYDZ_PHI.DX : CENTER_XYZ_DXDYDZ_PHI.DZ + 1
],
bounding_boxes_heading,
bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.CLASS :],
],
axis=-1,
)
return (point_clouds, bounding_boxes)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_rotation.py",
"repo_id": "keras-cv",
"token_count": 2740
} | 59 |
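A minimal call sketch for the layer above. Shapes and the rotation angle are illustrative; the layer expects the `point_clouds`/`bounding_boxes` dictionary keys used throughout the 3D preprocessing layers and only rotates when called with `training=True`:

import tensorflow as tf
from keras_cv import layers

point_clouds = tf.random.normal((2, 50, 8))    # [frames, points, point features]
bounding_boxes = tf.random.normal((2, 10, 9))  # [frames, boxes, CENTER_XYZ_DXDYDZ_PHI + class, ...]
inputs = {"point_clouds": point_clouds, "bounding_boxes": bounding_boxes}
layer = layers.GlobalRandomRotation(max_rotation_angle_z=3.14)
outputs = layer(inputs, training=True)  # identical to the inputs at inference time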
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv.layers.vit_layers import PatchingAndEmbedding
from keras_cv.tests.test_case import TestCase
class ViTLayersTest(TestCase):
def test_patching_wrong_patch_size(self):
with self.assertRaisesRegexp(
ValueError,
"The patch_size cannot be a negative number. Received -16",
):
PatchingAndEmbedding(project_dim=16, patch_size=-16)
def test_patching_wrong_padding(self):
with self.assertRaisesRegexp(
ValueError,
"Padding must be either 'SAME' or 'VALID', but REFLECT was passed.",
):
PatchingAndEmbedding(
project_dim=16, patch_size=16, padding="REFLECT"
)
def test_patch_embedding_return_type_and_shape(self):
layer = PatchingAndEmbedding(project_dim=128, patch_size=16)
inputs = tf.random.normal([1, 224, 224, 3])
output = layer(inputs)
self.assertTrue(isinstance(output, tf.Tensor))
self.assertLen(output, 1)
self.assertEquals(output.shape, [1, 197, 128])
def test_patch_embedding_interpolation(self):
inputs = np.ones([1, 224, 224, 3])
patch_embedding = PatchingAndEmbedding(project_dim=128, patch_size=16)
patch_embedding.build(inputs.shape)
positional_embeddings = np.ones([197, 128])
(
output,
cls,
) = patch_embedding._PatchingAndEmbedding__interpolate_positional_embeddings( # noqa: E501
positional_embeddings, height=450, width=450, patch_size=12
)
self.assertTrue(isinstance(output, tf.Tensor))
self.assertLen(output, 1)
self.assertEquals(output.shape, [1, 1369, 128])
def test_patch_embedding_interpolation_numerical(self):
inputs = np.ones([1, 4, 4, 3])
patch_embedding = PatchingAndEmbedding(project_dim=4, patch_size=1)
patch_embedding.build(inputs.shape)
positional_embeddings = np.ones([17, 4])
(
output,
cls_token,
) = patch_embedding._PatchingAndEmbedding__interpolate_positional_embeddings( # noqa: E501
positional_embeddings, height=8, width=8, patch_size=2
)
self.assertTrue(
tf.reduce_all(tf.equal(output, np.ones([1, 16, 4]))).numpy()
)
| keras-cv/keras_cv/layers/vit_layers_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_layers_test.py",
"repo_id": "keras-cv",
"token_count": 1241
} | 60 |
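The shape assertions above follow directly from the patching arithmetic: a 224x224 image split into 16x16 patches yields 14 * 14 = 196 tokens, plus one prepended class token. A small sketch of the layer on its own, for illustration:

import tensorflow as tf
from keras_cv.layers.vit_layers import PatchingAndEmbedding

images = tf.random.normal((1, 224, 224, 3))
layer = PatchingAndEmbedding(project_dim=128, patch_size=16)
tokens = layer(images)  # shape (1, 197, 128): 196 patch tokens + 1 class token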
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
LARGE_NUM = 1e9
def l2_normalize(x, axis):
epsilon = keras.backend.epsilon()
power_sum = ops.sum(ops.square(x), axis=axis, keepdims=True)
norm = ops.reciprocal(ops.sqrt(ops.maximum(power_sum, epsilon)))
return ops.multiply(x, norm)
@keras_cv_export("keras_cv.losses.SimCLRLoss")
class SimCLRLoss(keras.losses.Loss):
"""Implements SimCLR Cosine Similarity loss.
SimCLR loss is used for contrastive self-supervised learning.
Args:
temperature: a float value between 0 and 1, used as a scaling factor for
cosine similarity.
References:
- [SimCLR paper](https://arxiv.org/pdf/2002.05709)
"""
def __init__(self, temperature, **kwargs):
super().__init__(**kwargs)
self.temperature = temperature
def call(self, projections_1, projections_2):
"""Computes SimCLR loss for a pair of projections in a contrastive
learning trainer.
Note that unlike most loss functions, this should not be called with
y_true and y_pred, but with two unlabeled projections. It can otherwise
be treated as a normal loss function.
Args:
projections_1: a tensor with the output of the first projection
model in a contrastive learning trainer
projections_2: a tensor with the output of the second projection
model in a contrastive learning trainer
Returns:
A tensor with the SimCLR loss computed from the input projections
"""
# Normalize the projections
projections_1 = l2_normalize(projections_1, axis=1)
projections_2 = l2_normalize(projections_2, axis=1)
# Produce artificial labels, 1 for each image in the batch.
batch_size = ops.shape(projections_1)[0]
labels = ops.one_hot(ops.arange(batch_size), batch_size * 2)
masks = ops.one_hot(ops.arange(batch_size), batch_size)
# Compute logits
logits_11 = (
ops.matmul(projections_1, ops.transpose(projections_1))
/ self.temperature
)
logits_11 = logits_11 - ops.cast(masks * LARGE_NUM, logits_11.dtype)
logits_22 = (
ops.matmul(projections_2, ops.transpose(projections_2))
/ self.temperature
)
logits_22 = logits_22 - ops.cast(masks * LARGE_NUM, logits_22.dtype)
logits_12 = (
ops.matmul(projections_1, ops.transpose(projections_2))
/ self.temperature
)
logits_21 = (
ops.matmul(projections_2, ops.transpose(projections_1))
/ self.temperature
)
loss_a = keras.losses.categorical_crossentropy(
labels, ops.concatenate([logits_12, logits_11], 1), from_logits=True
)
loss_b = keras.losses.categorical_crossentropy(
labels, ops.concatenate([logits_21, logits_22], 1), from_logits=True
)
return loss_a + loss_b
def get_config(self):
config = super().get_config()
config.update({"temperature": self.temperature})
return config
| keras-cv/keras_cv/losses/simclr_loss.py/0 | {
"file_path": "keras-cv/keras_cv/losses/simclr_loss.py",
"repo_id": "keras-cv",
"token_count": 1563
} | 61 |
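In words, the loss builds temperature-scaled similarity logits between each projection and all projections from both views, masks out self-similarities with `LARGE_NUM`, and applies categorical cross-entropy with the paired projection as the positive class, summing the two directions. A usage sketch with arbitrary batch and projection sizes:

import tensorflow as tf
from keras_cv.losses import SimCLRLoss

# Two batches of projections of the same images under different augmentations.
projections_1 = tf.random.normal((8, 64))
projections_2 = tf.random.normal((8, 64))
loss_fn = SimCLRLoss(temperature=0.5)
loss = loss_fn(projections_1, projections_2)  # reduced SimCLR loss value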
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone import (
CSPDarkNetBackbone,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_backbone_presets import (
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """CSPDarkNetBackbone model with {stackwise_channels} channels
and {stackwise_depth} depths.
Reference:
- [YoloV4 Paper](https://arxiv.org/abs/1804.02767)
- [CSPNet Paper](https://arxiv.org/pdf/1911.11929)
- [YoloX Paper](https://arxiv.org/abs/2107.08430)
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether or not to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, defaults to (None, None, 3).
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = CSPDarkNet{name}Backbone()
output = model(input_data)
```
""" # noqa: E501
@keras_cv_export("keras_cv.models.CSPDarkNetTinyBackbone")
class CSPDarkNetTinyBackbone(CSPDarkNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return CSPDarkNetBackbone.from_preset("csp_darknet_tiny", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"csp_darknet_tiny_imagenet": copy.deepcopy(
backbone_presets["csp_darknet_tiny_imagenet"]
)
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.CSPDarkNetSBackbone")
class CSPDarkNetSBackbone(CSPDarkNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return CSPDarkNetBackbone.from_preset("csp_darknet_s", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.CSPDarkNetMBackbone")
class CSPDarkNetMBackbone(CSPDarkNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return CSPDarkNetBackbone.from_preset("csp_darknet_m", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
@keras_cv_export("keras_cv.models.CSPDarkNetLBackbone")
class CSPDarkNetLBackbone(CSPDarkNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return CSPDarkNetBackbone.from_preset("csp_darknet_l", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"csp_darknet_l_imagenet": copy.deepcopy(
backbone_presets["csp_darknet_l_imagenet"]
)
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
@keras_cv_export("keras_cv.models.CSPDarkNetXLBackbone")
class CSPDarkNetXLBackbone(CSPDarkNetBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(None, None, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return CSPDarkNetBackbone.from_preset("csp_darknet_xl", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return {}
setattr(
CSPDarkNetTinyBackbone,
"__doc__",
ALIAS_DOCSTRING.format(
name="Tiny",
stackwise_channels="[48, 96, 192, 384]",
stackwise_depth="[1, 3, 3, 1]",
),
)
setattr(
CSPDarkNetSBackbone,
"__doc__",
ALIAS_DOCSTRING.format(
name="S",
stackwise_channels="[64, 128, 256, 512]",
stackwise_depth="[1, 3, 3, 1]",
),
)
setattr(
CSPDarkNetMBackbone,
"__doc__",
ALIAS_DOCSTRING.format(
name="M",
stackwise_channels="[96, 192, 384, 768]",
stackwise_depth="[2, 6, 6, 2]",
),
)
setattr(
CSPDarkNetLBackbone,
"__doc__",
ALIAS_DOCSTRING.format(
name="L",
stackwise_channels="[128, 256, 512, 1024]",
stackwise_depth="[3, 9, 9, 3]",
),
)
setattr(
CSPDarkNetXLBackbone,
"__doc__",
ALIAS_DOCSTRING.format(
name="XL",
stackwise_channels="[170, 340, 680, 1360]",
stackwise_depth="[4, 12, 12, 4]",
),
)
| keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_aliases.py",
"repo_id": "keras-cv",
"token_count": 3456
} | 62 |
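Each alias simply packs its arguments and forwards them to `CSPDarkNetBackbone.from_preset` with the matching architecture preset. A usage sketch; the pretrained call assumes network access to fetch the preset weights:

import tensorflow as tf
from keras_cv import models

images = tf.ones((1, 224, 224, 3))
backbone = models.CSPDarkNetTinyBackbone()  # random weights, "csp_darknet_tiny" architecture
features = backbone(images)
# Weighted preset, if available locally or downloadable:
pretrained = models.CSPDarkNetTinyBackbone.from_preset("csp_darknet_tiny_imagenet")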
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MiT backbone model.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) (CVPR 2021)
- [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/blob/main/deepvision/models/classification/mix_transformer/mit_tf.py)
- [Based on the NVlabs' official PyTorch implementation](https://github.com/NVlabs/SegFormer/blob/master/mmseg/models/backbones/mix_transformer.py)
- [Inspired by @sithu31296's reimplementation](https://github.com/sithu31296/semantic-segmentation/blob/main/semseg/models/backbones/mit.py)
""" # noqa: E501
import copy
import numpy as np
from keras_cv import layers as cv_layers
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models import utils
from keras_cv.models.backbones.backbone import Backbone
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets_with_weights,
)
from keras_cv.utils.python_utils import classproperty
@keras_cv_export("keras_cv.models.MiTBackbone")
class MiTBackbone(Backbone):
def __init__(
self,
include_rescaling,
depths,
input_shape=(224, 224, 3),
input_tensor=None,
embedding_dims=None,
**kwargs,
):
"""A Keras model implementing the MixTransformer architecture to be
used as a backbone for the SegFormer architecture.
References:
- [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) # noqa: E501
- [Based on the TensorFlow implementation from DeepVision](https://github.com/DavidLandup0/deepvision/tree/main/deepvision/models/classification/mix_transformer) # noqa: E501
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
depths: the number of transformer encoders to be used per stage in the
network
embedding_dims: the embedding dims per hierarchical stage, used as
the levels of the feature pyramid
            input_shape: optional shape tuple, defaults to (224, 224, 3).
input_tensor: optional Keras tensor (i.e. output of `keras.layers.Input()`)
to use as image input for the model.
Examples:
Using the class with a `backbone`:
```python
        import numpy as np
        import keras_cv
        from keras_cv.backend import keras
        images = np.ones(shape=(1, 96, 96, 3))
        labels = np.zeros(shape=(1, 96, 96, 1))
        model = keras_cv.models.MiTBackbone.from_preset("mit_b0_imagenet")
# Evaluate model
model(images)
# Train model
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(from_logits=False),
metrics=["accuracy"],
)
model.fit(images, labels, epochs=3)
```
"""
drop_path_rate = 0.1
dpr = [x for x in np.linspace(0.0, drop_path_rate, sum(depths))]
blockwise_num_heads = [1, 2, 5, 8]
blockwise_sr_ratios = [8, 4, 2, 1]
num_stages = 4
cur = 0
patch_embedding_layers = []
transformer_blocks = []
layer_norms = []
for i in range(num_stages):
patch_embed_layer = cv_layers.OverlappingPatchingAndEmbedding(
project_dim=embedding_dims[0] if i == 0 else embedding_dims[i],
patch_size=7 if i == 0 else 3,
stride=4 if i == 0 else 2,
name=f"patch_and_embed_{i}",
)
patch_embedding_layers.append(patch_embed_layer)
transformer_block = [
cv_layers.HierarchicalTransformerEncoder(
project_dim=embedding_dims[i],
num_heads=blockwise_num_heads[i],
sr_ratio=blockwise_sr_ratios[i],
drop_prob=dpr[cur + k],
name=f"hierarchical_encoder_{i}_{k}",
)
for k in range(depths[i])
]
transformer_blocks.append(transformer_block)
cur += depths[i]
layer_norms.append(keras.layers.LayerNormalization())
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = keras.layers.Rescaling(scale=1 / 255)(x)
pyramid_level_inputs = []
for i in range(num_stages):
# Compute new height/width after the `proj`
# call in `OverlappingPatchingAndEmbedding`
stride = 4 if i == 0 else 2
new_height, new_width = (
int(ops.shape(x)[1] / stride),
int(ops.shape(x)[2] / stride),
)
x = patch_embedding_layers[i](x)
for blk in transformer_blocks[i]:
x = blk(x)
x = layer_norms[i](x)
x = keras.layers.Reshape(
(new_height, new_width, -1), name=f"output_level_{i}"
)(x)
pyramid_level_inputs.append(utils.get_tensor_input_name(x))
super().__init__(inputs=inputs, outputs=x, **kwargs)
self.depths = depths
self.embedding_dims = embedding_dims
self.include_rescaling = include_rescaling
self.input_tensor = input_tensor
self.pyramid_level_inputs = {
f"P{i + 1}": name for i, name in enumerate(pyramid_level_inputs)
}
def get_config(self):
config = super().get_config()
config.update(
{
"depths": self.depths,
"embedding_dims": self.embedding_dims,
"include_rescaling": self.include_rescaling,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy(backbone_presets)
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(backbone_presets_with_weights)
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone.py",
"repo_id": "keras-cv",
"token_count": 3240
} | 63 |
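A short construction sketch; the depths and embedding dims below correspond to the smallest (B0-style) configuration and are given only for illustration:

import numpy as np
from keras_cv.models import MiTBackbone

images = np.ones((1, 224, 224, 3), dtype="float32")
backbone = MiTBackbone(
    include_rescaling=True,
    depths=[2, 2, 2, 2],
    embedding_dims=[32, 64, 160, 256],
    input_shape=(224, 224, 3),
)
features = backbone(images)  # final stage output, downsampled 32x: (1, 7, 7, 256)
# backbone.pyramid_level_inputs maps "P1".."P4" to the per-stage output layers,
# which is what SegFormer-style heads consume.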
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageClassifier Task presets."""
classifier_presets = {
"resnet50_v2_imagenet_classifier": {
"metadata": {
"description": (
"ResNet classifier with 50 layers where the batch "
"normalization and ReLU activation precede the convolution "
"layers (v2 style). Trained on Imagenet 2012 classification "
"task."
),
"params": 25_613_800,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/resnetv2/keras/resnet50_v2_imagenet_classifier/2", # noqa: E501
    },
    "efficientnetv2_s_imagenet_classifier": {
        "metadata": {
            "description": (
                "ImageClassifier using the EfficientNet small "
                "architecture. In this "
                "variant of the EfficientNet architecture, there are "
                "6 convolutional blocks. Weights are "
                "initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 83.9% top 1 accuracy "
"and 96.7% top 5 accuracy on imagenet."
),
"params": 21_612_360,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_s_imagenet_classifier/2", # noqa: E501
},
"efficientnetv2_b0_imagenet_classifier": {
"metadata": {
"description": (
"ImageClassifier using the EfficientNet B0 "
"architecture. In this variant of the EfficientNet "
"architecture, there are 6 convolutional blocks. As with all "
"of the B style EfficientNet variants, the number of filters "
"in each convolutional block is scaled by "
"`width_coefficient=1.0` and "
"`depth_coefficient=1.0`. Weights are "
"initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 77.1% top 1 accuracy "
"and 93.3% top 5 accuracy on imagenet."
),
"params": 7_200_312,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b0_imagenet_classifier/2", # noqa: E501
    },
    "efficientnetv2_b1_imagenet_classifier": {
        "metadata": {
            "description": (
                "ImageClassifier using the EfficientNet B1 "
                "architecture. In this variant of the EfficientNet "
                "architecture, there are 6 convolutional blocks. As with all "
                "of the B style EfficientNet variants, the number of filters "
                "in each convolutional block is scaled by "
                "`width_coefficient=1.0` and "
                "`depth_coefficient=1.1`. Weights are "
                "initialized to pretrained imagenet classification weights. "
"Published weights are capable of scoring 79.1% top 1 accuracy "
"and 94.4% top 5 accuracy on imagenet."
),
"params": 8_212_124,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b1_imagenet_classifier/2", # noqa: E501
    },
    "efficientnetv2_b2_imagenet_classifier": {
        "metadata": {
            "description": (
                "ImageClassifier using the EfficientNet B2 "
                "architecture. In this variant of the EfficientNet "
                "architecture, there are 6 convolutional blocks. As with all "
                "of the B style EfficientNet variants, the number of filters "
                "in each convolutional block is scaled by "
                "`width_coefficient=1.1` and "
                "`depth_coefficient=1.2`. Weights are initialized to pretrained "
                "imagenet classification weights. "
"Published weights are capable of scoring 80.1% top 1 "
"accuracy and 94.9% top 5 accuracy on imagenet."
),
"params": 10_178_374,
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/efficientnetv2/keras/efficientnetv2_b2_imagenet_classifier/2", # noqa: E501
    },
    "mobilenet_v3_large_imagenet_classifier": {
        "metadata": {
            "description": (
                "ImageClassifier using the MobileNetV3Large architecture. "
                "This preset uses a Dense layer as a classification head "
                "instead of the typical fully-convolutional MobileNet head. As "
                "a result, it has fewer parameters than the original "
                "MobileNetV3Large model, which has 5.4 million parameters. "
"Published weights are capable of scoring 69.4% top-1 "
"accuracy and 89.4% top 5 accuracy on imagenet."
),
"params": 3_957_352, # TODO this is wrong
"official_name": "ImageClassifier",
"path": "image_classifier",
},
"kaggle_handle": "kaggle://keras/mobilenetv3/keras/mobilenet_v3_large_imagenet_classifier/2", # noqa: E501
},
}
| keras-cv/keras_cv/models/classification/image_classifier_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier_presets.py",
"repo_id": "keras-cv",
"token_count": 2771
} | 64 |
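These presets are consumed through `ImageClassifier.from_preset`. A sketch; the call downloads the referenced Kaggle weights on first use:

import numpy as np
from keras_cv.models import ImageClassifier

images = np.ones((1, 224, 224, 3))
model = ImageClassifier.from_preset("efficientnetv2_b0_imagenet_classifier")
probs = model.predict(images)  # (1, 1000) class scores for the ImageNet-1k labels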
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ConvNeXt models for Keras.
References:
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
(CVPR 2022)
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend
from tensorflow.keras import layers
from keras_cv.layers.regularization import StochasticDepth
from keras_cv.models.legacy import utils
MODEL_CONFIGS = {
"tiny": {
"depths": [3, 3, 9, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"small": {
"depths": [3, 3, 27, 3],
"projection_dims": [96, 192, 384, 768],
"default_size": 224,
},
"base": {
"depths": [3, 3, 27, 3],
"projection_dims": [128, 256, 512, 1024],
"default_size": 224,
},
"large": {
"depths": [3, 3, 27, 3],
"projection_dims": [192, 384, 768, 1536],
"default_size": 224,
},
"xlarge": {
"depths": [3, 3, 27, 3],
"projection_dims": [256, 512, 1024, 2048],
"default_size": 224,
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
- [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) (CVPR 2022)
This function returns a Keras {name} model.
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
depths: an iterable containing depths for each individual stages.
projection_dims: An iterable containing output number of channels of
each individual stages.
drop_path_rate: stochastic depth probability, if 0.0, then stochastic
depth won't be used.
layer_scale_init_value: layer scale coefficient, if 0.0, layer scaling
won't be used.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: optional int, number of classes to classify images into
(only to be specified if `include_top` is `True`).
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
name: (Optional) name to pass to the model, defaults to "{name}".
Returns:
A `keras.Model` instance.
"""
@keras.utils.register_keras_serializable(package="keras_cv")
class LayerScale(layers.Layer):
"""Layer scale module.
References:
- https://arxiv.org/abs/2103.17239
Args:
init_values (float): Initial value for layer scale. Should be within
[0, 1].
projection_dim (int): Projection dimensionality.
Returns:
Tensor multiplied to the scale.
"""
def __init__(self, init_values, projection_dim, **kwargs):
super().__init__(**kwargs)
self.init_values = init_values
self.projection_dim = projection_dim
def build(self, input_shape):
self.gamma = tf.Variable(
self.init_values * tf.ones((self.projection_dim,))
)
def call(self, x):
return x * self.gamma
def get_config(self):
config = super().get_config()
config.update(
{
"init_values": self.init_values,
"projection_dim": self.projection_dim,
}
)
return config
def apply_block(
x,
projection_dim,
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
name=None,
):
"""ConvNeXt block.
References:
- https://arxiv.org/abs/2201.03545
- https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
Notes:
In the original ConvNeXt implementation (linked above), the authors use
`Dense` layers for pointwise convolutions for increased efficiency.
Following that, this implementation also uses the same.
    Args:
        x: Input tensor.
        projection_dim (int): Number of filters for convolution layers. In the
ConvNeXt paper, this is referred to as projection dimension.
drop_path_rate (float): Probability of dropping paths. Should be within
[0, 1].
layer_scale_init_value (float): Layer scale value. Should be a small float
number.
        name: name prefix for the block's layers.
Returns:
A function representing a ConvNeXtBlock block.
""" # noqa: E501
if name is None:
name = "prestem" + str(backend.get_uid("prestem"))
inputs = x
x = layers.Conv2D(
filters=projection_dim,
kernel_size=7,
padding="same",
groups=projection_dim,
name=name + "_depthwise_conv",
)(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_layernorm")(x)
x = layers.Dense(4 * projection_dim, name=name + "_pointwise_conv_1")(x)
x = layers.Activation("gelu", name=name + "_gelu")(x)
x = layers.Dense(projection_dim, name=name + "_pointwise_conv_2")(x)
if layer_scale_init_value is not None:
x = LayerScale(
layer_scale_init_value,
projection_dim,
name=name + "_layer_scale",
)(x)
if drop_path_rate:
layer = StochasticDepth(drop_path_rate, name=name + "_stochastic_depth")
return layer([inputs, x])
else:
layer = layers.Activation("linear", name=name + "_identity")
return inputs + layer(x)
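# Illustrative shape sketch (not part of the original module): a ConvNeXt block
# is shape-preserving, so stages can stack any number of blocks.
#
#   x = tf.ones((1, 56, 56, 96))
#   y = apply_block(x, projection_dim=96, drop_path_rate=0.0)
#   # y.shape == (1, 56, 56, 96): 7x7 grouped ("depthwise") conv with
#   # padding="same", LayerNorm, Dense(4 * 96) + GELU, Dense(96), LayerScale,
#   # then a residual add with the input.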
def apply_head(x, num_classes, activation="softmax", name=None):
"""Implementation of classification head of ConvNeXt.
Args:
num_classes: number of classes for Dense layer
activation: activation function for Dense layer
name: name prefix
Returns:
Classification head function.
"""
if name is None:
name = str(backend.get_uid("head"))
x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
x = layers.LayerNormalization(epsilon=1e-6, name=name + "_head_layernorm")(
x
)
x = layers.Dense(
num_classes, activation=activation, name=name + "_head_dense"
)(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class ConvNeXt(keras.Model):
"""Instantiates ConvNeXt architecture given specific configuration.
Args:
include_rescaling: bool, whether to rescale the inputs. If set
to `True`, inputs will be passed through a `Rescaling(1/255.0)`
layer.
include_top: bool, whether to include the fully-connected layer at the
top of the network. If provided, `num_classes` must be provided.
depths: An iterable containing depths for each individual stages.
projection_dims: An iterable containing output number of channels of
each individual stages.
drop_path_rate: Stochastic depth probability. If 0.0, then stochastic
depth won't be used.
layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling
won't be used.
weights: one of `None` (random initialization), a pretrained weight file
path, or a reference to pre-trained weights (e.g.
'imagenet/classification')(see available pre-trained weights in
weights.py)
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg` means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
num_classes: optional int, number of classes to classify images into
(only to be specified if `include_top` is `True`).
classifier_activation: A `str` or callable. The activation function to
use on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top"
layer.
name: (Optional) name to pass to the model, defaults to "convnext".
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`, or invalid input
shape.
ValueError: if `classifier_activation` is not `softmax`, or `None` when
using a pretrained top layer.
ValueError: if `include_top` is True but `num_classes` is not specified.
"""
def __init__(
self,
include_rescaling,
include_top,
depths,
projection_dims,
drop_path_rate=0.0,
layer_scale_init_value=1e-6,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext",
**kwargs,
):
if weights and not tf.io.gfile.exists(weights):
raise ValueError(
"The `weights` argument should be either "
"`None` or the path to the weights file to be loaded. "
f"Weights file not found at location: {weights}"
)
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, "
"you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
        if include_top and pooling:
            raise ValueError(
                "`pooling` must be `None` when `include_top=True`. "
                f"Received pooling={pooling} and include_top={include_top}."
            )
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
# Stem block.
stem = keras.Sequential(
[
layers.Conv2D(
projection_dims[0],
kernel_size=4,
strides=4,
name=name + "_stem_conv",
),
layers.LayerNormalization(
epsilon=1e-6, name=name + "_stem_layernorm"
),
],
name=name + "_stem",
)
# Downsampling blocks.
downsample_layers = []
downsample_layers.append(stem)
num_downsample_layers = 3
for i in range(num_downsample_layers):
downsample_layer = keras.Sequential(
[
layers.LayerNormalization(
epsilon=1e-6,
name=name + "_downsampling_layernorm_" + str(i),
),
layers.Conv2D(
projection_dims[i + 1],
kernel_size=2,
strides=2,
name=name + "_downsampling_conv_" + str(i),
),
],
name=name + "_downsampling_block_" + str(i),
)
downsample_layers.append(downsample_layer)
# Stochastic depth schedule.
# This is referred from the original ConvNeXt codebase:
# https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
depth_drop_rates = [
float(x) for x in tf.linspace(0.0, drop_path_rate, sum(depths))
]
# First apply downsampling blocks and then apply ConvNeXt stages.
cur = 0
num_convnext_blocks = 4
for i in range(num_convnext_blocks):
x = downsample_layers[i](x)
for j in range(depths[i]):
x = apply_block(
x,
projection_dim=projection_dims[i],
drop_path_rate=depth_drop_rates[cur + j],
layer_scale_init_value=layer_scale_init_value,
name=name + f"_stage_{i}_block_{j}",
)
cur += depths[i]
if include_top:
x = apply_head(
x,
num_classes=num_classes,
activation=classifier_activation,
name=name,
)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
x = layers.LayerNormalization(epsilon=1e-6)(x)
# Create model.
super().__init__(inputs=inputs, outputs=x, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.depths = depths
self.projection_dims = projection_dims
self.drop_path_rate = drop_path_rate
self.layer_scale_init_value = layer_scale_init_value
self.input_tensor = input_tensor
self.pooling = pooling
self.num_classes = num_classes
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"depths": self.depths,
"projection_dims": self.projection_dims,
"drop_path_rate": self.drop_path_rate,
"layer_scale_init_value": self.layer_scale_init_value,
# Remove batch dimension from `input_shape`
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
def ConvNeXtTiny(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_tiny",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["tiny"]["depths"],
projection_dims=MODEL_CONFIGS["tiny"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
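# Example usage (illustrative sketch; the argument values below are arbitrary):
#
#   model = ConvNeXtTiny(
#       include_rescaling=True,
#       include_top=True,
#       drop_path_rate=0.0,
#       layer_scale_init_value=1e-6,
#       num_classes=10,
#       input_shape=(224, 224, 3),
#   )
#   model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")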
def ConvNeXtSmall(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_small",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["small"]["depths"],
projection_dims=MODEL_CONFIGS["small"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtBase(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_base",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["base"]["depths"],
projection_dims=MODEL_CONFIGS["base"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtLarge(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_large",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["large"]["depths"],
projection_dims=MODEL_CONFIGS["large"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
def ConvNeXtXLarge(
*,
include_rescaling,
include_top,
drop_path_rate,
layer_scale_init_value,
weights=None,
input_shape=(None, None, 3),
input_tensor=None,
pooling=None,
num_classes=None,
classifier_activation="softmax",
name="convnext_xlarge",
):
return ConvNeXt(
include_rescaling=include_rescaling,
include_top=include_top,
depths=MODEL_CONFIGS["xlarge"]["depths"],
projection_dims=MODEL_CONFIGS["xlarge"]["projection_dims"],
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
num_classes=num_classes,
classifier_activation=classifier_activation,
name=name,
)
ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny")
ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall")
ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase")
ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge")
ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge")
| keras-cv/keras_cv/models/legacy/convnext.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/convnext.py",
"repo_id": "keras-cv",
"token_count": 9184
} | 65 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG16 model for KerasCV.
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
(ICLR 2015)
""" # noqa: E501
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
def apply_vgg_block(
x,
num_layers,
filters,
kernel_size,
activation,
padding,
max_pool,
name,
):
"""
Applies VGG block
Args:
x: Tensor, input tensor to pass through network
num_layers: int, number of CNN layers in the block
filters: int, filter size of each CNN layer in block
kernel_size: int (or) tuple, kernel size for CNN layer in block
activation: str (or) callable, activation function for each CNN layer in
block
padding: str (or) callable, padding function for each CNN layer in block
max_pool: bool, whether to add MaxPooling2D layer at end of block
name: str, name of the block
Returns:
tf.Tensor
"""
for num in range(1, num_layers + 1):
x = layers.Conv2D(
filters,
kernel_size,
activation=activation,
padding=padding,
name=f"{name}_conv{str(num)}",
)(x)
if max_pool:
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name=f"{name}_pool")(x)
return x
@keras.utils.register_keras_serializable(package="keras_cv.models")
class VGG16(keras.Model):
"""
Reference:
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
(ICLR 2015)
This class represents a Keras VGG16 model.
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(1/255.0)` layer.
include_top: bool, whether to include the 3 fully-connected
layers at the top of the network. If provided, num_classes must be
provided.
num_classes: int, optional number of classes to classify images into,
only to be specified if `include_top` is True.
weights: os.PathLike or None, one of `None` (random initialization), or a
pretrained weight file path.
input_shape: tuple, optional shape tuple, defaults to (224, 224, 3).
input_tensor: Tensor, optional Keras tensor (i.e. output of
`layers.Input()`) to use as image input for the model.
pooling: bool, Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classifier_activation:`str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
name: (Optional) name to pass to the model, defaults to "VGG16".
Returns:
A `keras.Model` instance.
""" # noqa: E501
def __init__(
self,
include_rescaling,
include_top,
input_tensor=None,
num_classes=None,
weights=None,
input_shape=(224, 224, 3),
pooling=None,
classifier_activation="softmax",
name="VGG16",
**kwargs,
):
        if weights and not tf.io.gfile.exists(weights):
            raise ValueError(
                "The `weights` argument should be either `None` or the path "
                "to the weights file to be loaded. Weights file not found at "
                f"location: {weights}"
            )
if include_top and not num_classes:
raise ValueError(
"If `include_top` is True, you should specify `num_classes`. "
f"Received: num_classes={num_classes}"
)
        if include_top and pooling:
            raise ValueError(
                "`pooling` must be `None` when `include_top=True`. "
                f"Received pooling={pooling} and include_top={include_top}."
            )
inputs = utils.parse_model_inputs(input_shape, input_tensor)
x = inputs
if include_rescaling:
x = layers.Rescaling(1 / 255.0)(x)
x = apply_vgg_block(
x=x,
num_layers=2,
filters=64,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block1",
)
x = apply_vgg_block(
x=x,
num_layers=2,
filters=128,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block2",
)
x = apply_vgg_block(
x=x,
num_layers=3,
filters=256,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block3",
)
x = apply_vgg_block(
x=x,
num_layers=3,
filters=512,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block4",
)
x = apply_vgg_block(
x=x,
num_layers=3,
filters=512,
kernel_size=(3, 3),
activation="relu",
padding="same",
max_pool=True,
name="block5",
)
if include_top:
x = layers.Flatten(name="flatten")(x)
x = layers.Dense(4096, activation="relu", name="fc1")(x)
x = layers.Dense(4096, activation="relu", name="fc2")(x)
x = layers.Dense(
num_classes,
activation=classifier_activation,
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D()(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D()(x)
super().__init__(inputs=inputs, outputs=x, name=name, **kwargs)
if weights is not None:
self.load_weights(weights)
self.include_rescaling = include_rescaling
self.include_top = include_top
self.num_classes = num_classes
self.input_tensor = input_tensor
self.pooling = pooling
self.classifier_activation = classifier_activation
def get_config(self):
return {
"include_rescaling": self.include_rescaling,
"include_top": self.include_top,
"name": self.name,
"input_shape": self.input_shape[1:],
"input_tensor": self.input_tensor,
"pooling": self.pooling,
"num_classes": self.num_classes,
"classifier_activation": self.classifier_activation,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
return cls(**config)
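# Example usage (illustrative sketch; the argument values below are arbitrary):
#
#   model = VGG16(
#       include_rescaling=True,
#       include_top=True,
#       num_classes=10,
#       input_shape=(224, 224, 3),
#   )
#   model.compile(optimizer="sgd", loss="categorical_crossentropy")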
| keras-cv/keras_cv/models/legacy/vgg16.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/vgg16.py",
"repo_id": "keras-cv",
"token_count": 3747
} | 66 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from keras_cv import backend
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder
from keras_cv.tests.test_case import TestCase
class RetinaNetLabelEncoderTest(TestCase):
def test_label_encoder_output_shapes(self):
images_shape = (8, 512, 512, 3)
boxes_shape = (8, 10, 4)
classes_shape = (8, 10)
images = np.random.uniform(size=images_shape)
boxes = np.random.uniform(size=boxes_shape, low=0.0, high=1.0)
classes = np.random.uniform(size=classes_shape, low=0, high=5)
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xyxy",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
self.assertEqual(box_targets.shape, (8, 49104, 4))
self.assertEqual(class_targets.shape, (8, 49104))
def test_all_negative_1(self):
images_shape = (8, 512, 512, 3)
boxes_shape = (8, 10, 4)
classes_shape = (8, 10)
images = np.random.uniform(size=images_shape)
boxes = -np.ones(shape=boxes_shape, dtype="float32")
classes = -np.ones(shape=classes_shape, dtype="float32")
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xyxy",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
self.assertFalse(ops.any(ops.isnan(box_targets)))
self.assertFalse(ops.any(ops.isnan(class_targets)))
@pytest.mark.skipif(
backend.supports_ragged() is False,
reason="Only TensorFlow supports raggeds",
)
def test_ragged_encoding(self):
images_shape = (2, 512, 512, 3)
images = tf.random.uniform(shape=images_shape)
boxes = tf.ragged.stack(
[
tf.constant([[0, 0, 10, 10], [5, 5, 10, 10]], "float32"),
tf.constant([[0, 0, 10, 10]], "float32"),
]
)
classes = tf.ragged.stack(
[
tf.constant([[1], [1]], "float32"),
tf.constant([[1]], "float32"),
]
)
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xywh",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
encoder = RetinaNetLabelEncoder(
anchor_generator=anchor_generator,
bounding_box_format="xywh",
)
bounding_boxes = {"boxes": boxes, "classes": classes}
box_targets, class_targets = encoder(images, bounding_boxes)
# 49104 is the anchor generator shape
self.assertEqual(box_targets.shape, (2, 49104, 4))
self.assertEqual(class_targets.shape, (2, 49104))
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder_test.py",
"repo_id": "keras-cv",
"token_count": 2230
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from keras_cv.models.object_detection.yolox.layers import YoloXHead
from keras_cv.tests.test_case import TestCase
class YoloXHeadTest(TestCase):
def test_num_parameters(self):
input1 = keras.Input((80, 80, 256))
input2 = keras.Input((40, 40, 512))
input3 = keras.Input((20, 20, 1024))
output = YoloXHead(20)([input1, input2, input3])
model = keras.models.Model(
inputs=[input1, input2, input3], outputs=output
)
keras_params = sum(
[keras.backend.count_params(p) for p in model.trainable_weights]
)
# taken from original implementation
original_params = 7563595
self.assertEqual(keras_params, original_params)
def test_output_type_and_shape(self):
inputs = [
tf.random.uniform((3, 80, 80, 256)),
tf.random.uniform((3, 40, 40, 512)),
tf.random.uniform((3, 20, 20, 1024)),
]
output = YoloXHead(20)(inputs)
self.assertEqual(type(output), list)
self.assertEqual(len(output), 3)
self.assertEqual(output[0].shape, [3, 80, 80, 25])
self.assertEqual(output[1].shape, [3, 40, 40, 25])
self.assertEqual(output[2].shape, [3, 20, 20, 25])
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head_test.py",
"repo_id": "keras-cv",
"token_count": 756
} | 68 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.segmentation.segment_anything.sam_layers import (
RandomFrequencyPositionalEmbeddings,
)
@keras_cv_export("keras_cv.models.SAMPromptEncoder", package="keras_cv.models")
class SAMPromptEncoder(keras.layers.Layer):
"""Prompt Encoder for the Segment Anything Model (SAM).
The prompt encoder generates encodings for three types of prompts:
- Point prompts: Points on the image along with a label indicating whether
the point is in the foreground (part of the mask) or in the background
(not a part of the mask).
- Box prompts: A batch of bounding boxes with format [(x1, y1), (x2, y2)]
used to determine the location of the masks in the image.
- Masks: An input mask can be passed to refine the positional embeddings
for the output mask.
First, the point prompts and box prompts are concatenated and positional
encodings are generated using random spatial frequencies. A point is
represented as the sum of a positional encoding of the point's location
and one of two learned embeddings that indicate if the point is either in
the foreground or background. A box is represented by an embedding pair:
(1) the positional encoding of its top-left corner summed with a learned
embedding representing "top-left corner" and
(2) the same structure but using a learned embedding indicating
"bottom-right corner".
The box and point encodings are referred to as "sparse encodings"
If a mask prompt is passed, a convolutional neural net is used to
downscale it to generate "dense encodings". If no mask prompt is passed,
an embedding layer is used instead to generate a "no mask" embedding.
Args:
embed_dim (int, optional): The number of features in the output
embeddings. Defaults to `256`.
image_embedding_size (int, optional): The number of features in the
image embeddings generated by an image encoder. Defaults to
`(64, 64)`.
input_image_size (tuple[int], optional): A tuple of the height and
width of the image being prompted. Defaults to `(1024, 1024)`.
mask_in_chans (int, optional): The number of channels of the mask
prompt. Defaults to `16`.
activation (str, optional): The activation to use in the mask
downscaler neural net. Defaults to `"gelu"`.
References:
- [Segment Anything paper](https://arxiv.org/abs/2304.02643)
- [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything)
""" # noqa: E501
def __init__(
self,
*,
embed_dim=256,
image_embedding_size=(64, 64),
input_image_size=(1024, 1024),
mask_in_chans=16,
activation="gelu",
**kwargs
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.image_embedding_size = image_embedding_size
self.input_image_size = input_image_size
self.mask_in_chans = mask_in_chans
self.activation = activation
self.positional_embedding_layer = RandomFrequencyPositionalEmbeddings(
num_positional_features=self.embed_dim // 2, scale=1
)
self.foreground_point_embed = keras.layers.Embedding(
1, embed_dim, name="foreground_point_embed"
)
self.background_point_embed = keras.layers.Embedding(
1, embed_dim, name="background_point_embed"
)
self.top_left_corner_embed = keras.layers.Embedding(
1, embed_dim, name="top_left_corner_embed"
)
self.bottom_right_corner_embed = keras.layers.Embedding(
1, embed_dim, name="bottom_right_corner_embed"
)
self.not_a_point_embed = keras.layers.Embedding(
1, embed_dim, name="not_a_point_embed"
)
self.mask_downscaler = keras.models.Sequential(
[
keras.layers.Conv2D(
mask_in_chans // 4, kernel_size=2, strides=2
),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Activation(activation),
keras.layers.Conv2D(mask_in_chans, kernel_size=2, strides=2),
keras.layers.LayerNormalization(epsilon=1e-6),
keras.layers.Activation(activation),
keras.layers.Conv2D(embed_dim, kernel_size=1),
],
name="mask_downscaler",
)
self.no_mask_embed = keras.layers.Embedding(
1, embed_dim, name="no_mask_embed"
)
def build(self, input_shape=None):
self.positional_embedding_layer.build()
for layer in [
self.foreground_point_embed,
self.background_point_embed,
self.top_left_corner_embed,
self.bottom_right_corner_embed,
self.not_a_point_embed,
self.no_mask_embed,
]:
layer.build([None])
self.mask_downscaler.build(
[
None,
4 * self.image_embedding_size[0],
4 * self.image_embedding_size[1],
1,
]
)
self.built = True
def compute_output_shape(self, input_shape):
return {
"sparse_embeddings": [None, None, self.embed_dim],
"dense_embeddings": [
None,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
],
"dense_positional_embeddings": [
None,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
],
}
def __embed_points(self, points, labels):
points = points + 0.5
indices = ops.arange(1, dtype="int32")
point_embeddings = self.positional_embedding_layer.encode_coordinates(
points, self.input_image_size
)
labels = ops.broadcast_to(
labels[..., None], ops.shape(point_embeddings)
)
point_embeddings = ops.where(
labels == 0,
point_embeddings + self.background_point_embed(indices),
point_embeddings + self.foreground_point_embed(indices),
)
point_embeddings = ops.where(
labels == -1,
self.not_a_point_embed(indices),
point_embeddings,
)
return point_embeddings
def __embed_box(self, box):
shape = ops.shape(box)
B, N = shape[0], shape[1]
box = box + 0.5
indices = ops.arange(1, dtype="int32")
corner_embedding = self.positional_embedding_layer.encode_coordinates(
box, self.input_image_size
)
top_left_embedding = corner_embedding[
:, :, 0, :
] + self.top_left_corner_embed(indices)
bottom_right_embedding = corner_embedding[
:, :, 1, :
] + self.bottom_right_corner_embed(indices)
corner_embedding = ops.stack(
[top_left_embedding, bottom_right_embedding], axis=2
)
return ops.reshape(corner_embedding, (B, N * 2, self.embed_dim))
def __embed_mask(self, mask):
mask_embedding = self.mask_downscaler(mask)
return mask_embedding
def call(self, inputs):
# Get the batch shape based on any arbitrary input, because batch
# shapes must all match.
B = ops.shape(next(iter(inputs.values())))[0]
points = inputs.get("points", ops.zeros((B, 0, 2)))
labels = inputs.get("labels", ops.zeros((B, 0)))
box = inputs.get("boxes", ops.zeros((B, 0, 2, 2)))
mask = inputs.get("masks", ops.zeros((B, 0, 256, 256, 1)))
# Compute point embeddings
point_embeddings = self.__embed_points(points, labels)
# Compute box embeddings
box_embeddings = self.__embed_box(box)
# Concatenate both into a sparse embeddings tensor
sparse_embeddings = ops.concatenate(
[point_embeddings, box_embeddings], axis=1
)
# Compute the mask embeddings
_no_mask_embed = lambda: (
ops.broadcast_to(
ops.reshape(
self.no_mask_embed(ops.arange(1, dtype="int32")),
(1, 1, 1, self.embed_dim),
),
shape=(
B,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
),
)
)
def _maybe_input_mask_embed():
# Keras Core passes the masks as concrete tensors for both the
# true and false functions to build the output shape. So, we
# need to handle the case when 0 size mask is passed and
# dispatch the call to `_no_mask_embed`. Note that we can't call
# the lambda directly since the inputs are bound to different
# values when called with concrete values.
if mask.shape[1] == 0:
return ops.broadcast_to(
ops.reshape(
self.no_mask_embed(ops.arange(1, dtype="int32")),
(1, 1, 1, self.embed_dim),
),
shape=(
B,
self.image_embedding_size[0],
self.image_embedding_size[1],
self.embed_dim,
),
)
shape = ops.shape(mask)
BM, N, H, W, C = shape[0], shape[1], shape[2], shape[3], shape[4]
return self.__embed_mask(ops.reshape(mask, (BM * N, H, W, C)))
dense_embeddings = ops.cond(
ops.equal(ops.size(mask), 0),
_no_mask_embed,
_maybe_input_mask_embed,
)
# Compute the dense positional embeddings
dense_positional_embeddings = (
self.positional_embedding_layer.encode_image(
self.image_embedding_size
)[None, ...]
)
return {
"sparse_embeddings": sparse_embeddings,
"dense_embeddings": dense_embeddings,
"dense_positional_embeddings": dense_positional_embeddings,
}
def get_config(self):
config = super().get_config()
config.update(
{
"embed_dim": self.embed_dim,
"image_embedding_size": self.image_embedding_size,
"input_image_size": self.input_image_size,
"mask_in_chans": self.mask_in_chans,
"activation": self.activation,
}
)
return config
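# Example usage (illustrative sketch; shapes follow the class docstring above):
#
#   prompt_encoder = SAMPromptEncoder()
#   outputs = prompt_encoder({
#       "points": np.ones((1, 1, 2)) * 512.0,  # (batch, num_points, 2)
#       "labels": np.ones((1, 1)),             # 1 = foreground, 0 = background
#   })
#   # outputs["sparse_embeddings"]           -> (1, 1, 256)
#   # outputs["dense_embeddings"]            -> (1, 64, 64, 256)
#   # outputs["dense_positional_embeddings"] -> (1, 64, 64, 256)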
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_prompt_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_prompt_encoder.py",
"repo_id": "keras-cv",
"token_count": 5422
} | 69 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
@keras_cv_export("keras_cv.models.stable_diffusion.TextEncoder")
class TextEncoder(keras.Model):
def __init__(
self, max_length, vocab_size=49408, name=None, download_weights=True
):
tokens = keras.layers.Input(
shape=(max_length,), dtype="int32", name="tokens"
)
positions = keras.layers.Input(
shape=(max_length,), dtype="int32", name="positions"
)
x = CLIPEmbedding(vocab_size, 768, max_length)([tokens, positions])
for _ in range(12):
x = CLIPEncoderLayer(768, 12, activation=quick_gelu)(x)
embedded = keras.layers.LayerNormalization(epsilon=1e-5)(x)
super().__init__([tokens, positions], embedded, name=name)
if download_weights:
text_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/fchollet/stable-diffusion/resolve/main/kcv_encoder.h5", # noqa: E501
file_hash="4789e63e07c0e54d6a34a29b45ce81ece27060c499a709d556c7755b42bb0dc4", # noqa: E501
)
self.load_weights(text_encoder_weights_fpath)
@keras_cv_export("keras_cv.models.stable_diffusion.TextEncoderV2")
class TextEncoderV2(keras.Model):
def __init__(
self, max_length, vocab_size=49408, name=None, download_weights=True
):
tokens = keras.layers.Input(
shape=(max_length,), dtype="int32", name="tokens"
)
positions = keras.layers.Input(
shape=(max_length,), dtype="int32", name="positions"
)
x = CLIPEmbedding(vocab_size, 1024, max_length)([tokens, positions])
for _ in range(23):
x = CLIPEncoderLayer(1024, 16, activation=ops.gelu)(x)
embedded = keras.layers.LayerNormalization(epsilon=1e-5)(x)
super().__init__([tokens, positions], embedded, name=name)
if download_weights:
text_encoder_weights_fpath = keras.utils.get_file(
origin="https://huggingface.co/ianstenbit/keras-sd2.1/resolve/main/text_encoder_v2_1.h5", # noqa: E501
file_hash="985002e68704e1c5c3549de332218e99c5b9b745db7171d5f31fcd9a6089f25b", # noqa: E501
)
self.load_weights(text_encoder_weights_fpath)
def quick_gelu(x):
return x * ops.sigmoid(x * 1.702)
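# Note: quick_gelu is the sigmoid-based GELU approximation used by the original
# CLIP text encoder. It closely tracks the exact GELU while being cheaper to
# compute, e.g. (illustrative comparison):
#
#   ops.gelu(x)      # exact/tanh-approximated GELU (used by TextEncoderV2)
#   quick_gelu(x)    # x * sigmoid(1.702 * x)       (used by TextEncoder)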
class CLIPEmbedding(keras.layers.Layer):
def __init__(
self, input_dim=49408, output_dim=768, max_length=77, **kwargs
):
super().__init__(**kwargs)
self.token_embedding = keras.layers.Embedding(input_dim, output_dim)
self.position_embedding = keras.layers.Embedding(max_length, output_dim)
def call(self, inputs):
tokens, positions = inputs
tokens = self.token_embedding(tokens)
positions = self.position_embedding(positions)
return tokens + positions
class CLIPEncoderLayer(keras.layers.Layer):
def __init__(self, embed_dim, num_heads, activation=None, **kwargs):
super().__init__(**kwargs)
self.layer_norm1 = keras.layers.LayerNormalization(epsilon=1e-5)
self.clip_attn = CLIPAttention(embed_dim, num_heads, causal=True)
self.layer_norm2 = keras.layers.LayerNormalization(epsilon=1e-5)
self.fc1 = keras.layers.Dense(embed_dim * 4)
self.fc2 = keras.layers.Dense(embed_dim)
self.activation = activation
def call(self, inputs):
residual = inputs
x = self.layer_norm1(inputs)
x = self.clip_attn(x)
x = residual + x
residual = x
x = self.layer_norm2(x)
x = self.fc1(x)
x = self.activation(x)
x = self.fc2(x)
return x + residual
class CLIPAttention(keras.layers.Layer):
def __init__(self, embed_dim=768, num_heads=12, causal=True, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.causal = causal
self.head_dim = self.embed_dim // self.num_heads
self.scale = self.head_dim**-0.5
self.q_proj = keras.layers.Dense(self.embed_dim)
self.k_proj = keras.layers.Dense(self.embed_dim)
self.v_proj = keras.layers.Dense(self.embed_dim)
self.out_proj = keras.layers.Dense(self.embed_dim)
def reshape_states(self, x, sequence_length, batch_size):
x = ops.reshape(
x, (batch_size, sequence_length, self.num_heads, self.head_dim)
)
return ops.transpose(
x, (0, 2, 1, 3)
) # bs, heads, sequence_length, head_dim
def call(self, inputs, attention_mask=None):
if attention_mask is None and self.causal:
length = ops.shape(inputs)[1]
attention_mask = ops.triu(
ops.ones((1, 1, length, length), dtype=self.compute_dtype)
* -float("inf"),
k=1,
)
_, tgt_len, embed_dim = inputs.shape
query_states = self.q_proj(inputs) * self.scale
key_states = self.reshape_states(self.k_proj(inputs), tgt_len, -1)
value_states = self.reshape_states(self.v_proj(inputs), tgt_len, -1)
proj_shape = (-1, tgt_len, self.head_dim)
query_states = self.reshape_states(query_states, tgt_len, -1)
query_states = ops.reshape(query_states, proj_shape)
key_states = ops.reshape(key_states, proj_shape)
src_len = tgt_len
value_states = ops.reshape(value_states, proj_shape)
attn_weights = query_states @ ops.transpose(key_states, (0, 2, 1))
attn_weights = ops.reshape(
attn_weights, (-1, self.num_heads, tgt_len, src_len)
)
attn_weights = attn_weights + attention_mask
attn_weights = ops.reshape(attn_weights, (-1, tgt_len, src_len))
attn_weights = ops.softmax(attn_weights, axis=-1)
attn_output = attn_weights @ value_states
attn_output = ops.reshape(
attn_output, (-1, self.num_heads, tgt_len, self.head_dim)
)
attn_output = ops.transpose(attn_output, (0, 2, 1, 3))
attn_output = ops.reshape(attn_output, (-1, tgt_len, embed_dim))
return self.out_proj(attn_output)
| keras-cv/keras_cv/models/stable_diffusion/text_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/text_encoder.py",
"repo_id": "keras-cv",
"token_count": 3165
} | 70 |
# Copyright 2022 The KerasCV Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities similar to tf.python.platform.resource_loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import tensorflow as tf
TF_VERSION_FOR_ABI_COMPATIBILITY = "2.13"
abi_warning_already_raised = False
def get_project_root():
"""Returns project root folder."""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to keras_cv/
Args:
path: a string resource path relative to keras_cv/
Returns:
The path to the specified data file
"""
root_dir = get_project_root()
return os.path.join(root_dir, path.replace("/", os.sep))
class LazySO:
def __init__(self, relative_path):
self.relative_path = relative_path
self._ops = None
@property
def ops(self):
if self._ops is None:
self.display_warning_if_incompatible()
self._ops = tf.load_op_library(
get_path_to_datafile(self.relative_path)
)
return self._ops
def display_warning_if_incompatible(self):
global abi_warning_already_raised
if abi_warning_already_raised or abi_is_compatible():
return
user_version = tf.__version__
warnings.warn(
f"You are currently using TensorFlow {user_version} and "
f"trying to load a KerasCV custom op.\n"
f"KerasCV has compiled its custom ops against TensorFlow "
f"{TF_VERSION_FOR_ABI_COMPATIBILITY}, and there are no "
f"compatibility guarantees between the two versions.\n"
"This means that you might get segfaults when loading the custom "
"op, or other kind of low-level errors.\n"
"If you do, do not file an issue on Github. "
"This is a known limitation.",
UserWarning,
)
abi_warning_already_raised = True
def abi_is_compatible():
return tf.__version__.startswith(TF_VERSION_FOR_ABI_COMPATIBILITY)
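# Example usage (illustrative sketch; the .so path below is hypothetical):
#
#   custom_ops = LazySO("custom_ops/_keras_cv_ops.so")
#   custom_ops.ops.my_custom_op(...)  # triggers tf.load_op_library on first use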
| keras-cv/keras_cv/utils/resource_loader.py/0 | {
"file_path": "keras-cv/keras_cv/utils/resource_loader.py",
"repo_id": "keras-cv",
"token_count": 1051
} | 71 |
## Usage of constraints
Functions from the `constraints` module allow setting constraints (e.g. non-negativity) on network parameters during optimization.
The constraints are applied on a per-layer basis. The exact API depends on the layer, but the `Dense`, `Conv1D`, `Conv2D` and `Conv3D` layers share a unified API.
These layers expose two keyword arguments:
- `kernel_constraint` for the main weights matrix.
- `bias_constraint` for the bias.
```python
from keras.constraints import max_norm
model.add(Dense(64, kernel_constraint=max_norm(2.)))
```
## Available constraints
- __max_norm(max_value=2, axis=0)__: maximum-norm constraint
- __non_neg()__: non-negativity constraint
- __unit_norm(axis=0)__: unit-norm constraint
- __min_max_norm(min_value=0.0, max_value=1.0, rate=1.0, axis=0)__: minimum/maximum-norm constraint
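As a quick illustration (the parameter values below are arbitrary), several constraints can be combined on a single layer, for instance bounding the kernel norm while keeping the bias non-negative:
```python
from keras.layers import Dense
from keras.constraints import min_max_norm, non_neg
model.add(Dense(64,
                kernel_constraint=min_max_norm(min_value=0.5, max_value=2.0),
                bias_constraint=non_neg()))
```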
| keras-docs-ja/sources/constraints.md/0 | {
"file_path": "keras-docs-ja/sources/constraints.md",
"repo_id": "keras-docs-ja",
"token_count": 492
} | 72 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/normalization.py#L12)</span>
### BatchNormalization
```python
keras.layers.normalization.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None)
```
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalize the activations of the previous layer (the input to this layer) at each batch,
i.e. apply a transformation that keeps the mean activation close to 0 and the activation standard deviation close to 1.
__Arguments__
- __axis__: Integer, the axis that should be normalized (typically the features axis). For instance, after a `Conv2D` layer with `data_format="channels_first"`, set `axis=1`.
- __momentum__: Momentum for the moving mean and the moving variance.
- __epsilon__: Small float added to the variance to avoid dividing by zero.
- __center__: If True, add the offset `beta` to the normalized tensor. If False, `beta` is ignored.
- __scale__: If True, multiply by `gamma`. If False, `gamma` is not used. When the next layer is linear (also e.g. `nn.relu`), this can be disabled since the scaling will be done by the next layer.
- __beta_initializer__: Initializer for the beta weight.
- __gamma_initializer__: Initializer for the gamma weight.
- __moving_mean_initializer__: Initializer for the moving mean.
- __moving_variance_initializer__: Initializer for the moving variance.
- __beta_regularizer__: Optional regularizer for the beta weight.
- __gamma_regularizer__: Optional regularizer for the gamma weight.
- __beta_constraint__: Optional constraint for the beta weight.
- __gamma_constraint__: Optional constraint for the gamma weight.
__Input shape__
Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model.
__Output shape__
Same shape as input.
__References__
- [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](http://jmlr.org/proceedings/papers/v37/ioffe15.html)
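__Example__
A minimal usage sketch (the architecture is arbitrary): `BatchNormalization` is typically inserted between a convolution and its activation, with `axis` pointing at the channel dimension:
```python
from keras.models import Sequential
from keras.layers import Conv2D, Activation, BatchNormalization
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
model.add(BatchNormalization(axis=-1))  # channels_last: normalize the feature axis
model.add(Activation('relu'))
```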
| keras-docs-ja/sources/layers/normalization.md/0 | {
"file_path": "keras-docs-ja/sources/layers/normalization.md",
"repo_id": "keras-docs-ja",
"token_count": 1103
} | 73 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py#L16)</span>
### CustomObjectScope
```python
keras.utils.CustomObjectScope()
```
Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects by name.
Changes to global custom objects persist within the enclosing `with` statement,
and are reverted to their initial state when the `with` statement exits.
__Example__
Consider a custom object `MyObject`:
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py#L15)</span>
### HDF5Matrix
```python
keras.utils.HDF5Matrix(datapath, dataset, start=0, end=None, normalizer=None)
```
Representation of an HDF5 dataset which can be used instead of a Numpy array.
__Example__
```python
x_data = HDF5Matrix('input/file.hdf5', 'data')
model.predict(x_data)
```
Providing `start` and `end` allows use of a slice of the dataset.
Optionally, a normalizer function (or lambda) can be given. This will be called on every slice of data retrieved.
__Arguments__
- __datapath__: string, path to an HDF5 file.
- __dataset__: string, name of the HDF5 dataset in the file specified in datapath.
- __start__: int, start of the desired slice of the specified dataset.
- __end__: int, end of the desired slice of the specified dataset.
- __normalizer__: function to be called on the data when retrieved.
__Returns__
An array-like HDF5 dataset.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/utils/generic_utils.py#L300)</span>
### Sequence
```python
keras.utils.Sequence()
```
Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and `__len__` methods. If you want to modify your dataset between epochs, you may implement `on_epoch_end`. The `__getitem__` method should return a complete batch.
__Notes__
`Sequence` is a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch, which is not the case with generators.
__Example__
``` python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
---
### to_categorical
```python
keras.utils.to_categorical(y, num_classes=None, dtype='float32')
```
Converts a class vector (integers) to a binary class matrix.
E.g. for use with `categorical_crossentropy`.
__Arguments__
- __y__: class vector to be converted into a matrix (integers from 0 to `num_classes`).
- __num_classes__: total number of classes.
- __dtype__: the data type expected on the output, as a string (`float32`, `float64`, `int32`...).
__Returns__
A binary matrix representation of the input.
__Example__
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
---
### normalize
```python
keras.utils.normalize(x, axis=-1, order=2)
```
Normalizes a Numpy array.
__Arguments__
- __x__: Numpy array to normalize.
- __axis__: axis along which to normalize.
- __order__: normalization order (e.g. 2 for the L2 norm).
__Returns__
A normalized copy of the array.
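__Example__
An illustrative call (values are arbitrary):
```python
import numpy as np
from keras.utils import normalize
x = np.array([[3.0, 4.0]])
normalize(x, axis=-1, order=2)
# array([[0.6, 0.8]])  # each row now has unit L2 norm
```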
---
### get_file
```python
keras.utils.get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None)
```
Downloads a file from a URL if it is not already in the cache.
By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash.
__Arguments__
- __fname__: name of the file. If an absolute path `/path/to/file.txt` is specified, the file will be saved at that location.
- __origin__: original URL of the file.
- __untar__: deprecated in favor of 'extract'. Boolean, whether the file should be decompressed.
- __md5_hash__: deprecated in favor of 'file_hash'. md5 hash of the file for verification.
- __file_hash__: the expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported.
- __cache_subdir__: subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified, the file will be saved at that location.
- __hash_algorithm__: selects the hash algorithm used to verify the file. Options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use.
- __extract__: if True, tries extracting the file as an archive, like tar or zip.
- __archive_format__: archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found.
- __cache_dir__: location to store cached files; when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
__Returns__
Path to the downloaded file.
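__Example__
A typical call looks like the following (the URL and file name below are placeholders):
```python
from keras.utils import get_file
path = get_file(
    'example.tar.gz',
    origin='https://example.com/example.tar.gz',
    extract=True,
    cache_subdir='datasets')
# path -> ~/.keras/datasets/example.tar.gz (extracted alongside it)
```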
---
### print_summary
```python
keras.utils.print_summary(model, line_length=None, positions=None, print_fn=None)
```
Prints a summary of a model.
__Arguments__
- __model__: Keras model instance.
- __line_length__: total length of printed lines (e.g. set this to adapt the display to different terminal window sizes).
- __positions__: relative or absolute positions of log elements in each line. If not provided, defaults to `[.33, .55, .67, 1.]`.
- __print_fn__: print function to use. It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. It defaults to `print` (prints to stdout).
---
### plot_model
```python
keras.utils.plot_model(model, to_file='model.png', show_shapes=False, show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96)
```
Converts a Keras model to dot format and saves it to a file.
__Arguments__
- __model__: a Keras model instance.
- __to_file__: file name of the plot image.
- __show_shapes__: whether to display shape information.
- __show_layer_names__: whether to display layer names.
- __rankdir__: `rankdir` argument passed to PyDot, a string specifying the format of the plot: 'TB' creates a vertical plot; 'LR' creates a horizontal plot.
- __expand_nested__: whether to expand nested models into clusters.
- __dpi__: dot DPI.
---
### multi_gpu_model
```python
keras.utils.multi_gpu_model(model, gpus=None, cpu_merge=True, cpu_relocation=False)
```
Replicates a model on different GPUs.
Specifically, this function implements single-machine multi-GPU data parallelism. It works in the following way:
- Divide the model's input(s) into multiple sub-batches.
- Apply a model copy on each sub-batch. Every model copy is executed on a dedicated GPU.
- Concatenate the results (on CPU) into one big batch.
E.g. if your `batch_size` is 64 and you use `gpus=2`, the input will be divided into 2 sub-batches of 32 samples, each sub-batch processed on one GPU, and the full batch of 64 processed samples returned.
This induces quasi-linear speedup on up to 8 GPUs.
This function is only available with the TensorFlow backend for the time being.
__Arguments__
- __model__: a Keras model instance. To avoid OOM errors, this model could have been built on CPU (see the usage example below).
- __gpus__: integer >= 2 or list of integers, number of GPUs or list of GPU IDs on which to create model replicas.
- __cpu_merge__: a boolean value to identify whether to force merging model weights under the scope of the CPU or not.
- __cpu_relocation__: a boolean value to identify whether to create the model's weights under the scope of the CPU. If the model is not defined under any preceding device scope, you can still rescue it by activating this option.
__Returns__
A Keras `Model` instance which can be used just like the initial `model` argument, but which distributes its workload on multiple GPUs.
__Examples__
Example 1 - Training models with weights merge on CPU
```python
import tensorflow as tf
from keras.applications import Xception
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
# Instantiate the base model (or "template" model).
# We recommend doing this with under a CPU device scope,
# so that the model's weights are hosted on CPU memory.
# Otherwise they may end up hosted on a GPU, which would
# complicate weight sharing.
with tf.device('/cpu:0'):
model = Xception(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on 8 GPUs.
# This assumes that your machine has 8 available GPUs.
parallel_model = multi_gpu_model(model, gpus=8)
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on 8 GPUs.
# Since the batch size is 256, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=256)
# Save model via the template model (which shares the same weights):
model.save('my_model.h5')
```
Example 2 - Training models with weights merge on CPU using cpu_relocation
```python
..
# Not needed to change the device scope for model definition:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_relocation=True)
print("Training using multiple GPUs..")
except ValueError:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
Example 3 - Training models with weights merge on GPU (recommended for NV-link)
```python
..
# Not needed to change the device scope for model definition:
model = Xception(weights=None, ..)
try:
parallel_model = multi_gpu_model(model, cpu_merge=False)
print("Training using multiple GPUs..")
except:
parallel_model = model
print("Training using single GPU or CPU..")
parallel_model.compile(..)
..
```
__On model saving__
To save the multi-gpu model, use `.save(fname)` or `.save_weights(fname)` with the template model (the argument you passed to `multi_gpu_model`), rather than the model returned by `multi_gpu_model`.
| keras-docs-ja/sources/utils.md/0 | {
"file_path": "keras-docs-ja/sources/utils.md",
"repo_id": "keras-docs-ja",
"token_count": 5871
} | 74 |
## Usage of activations
Activations can either be used through an `Activation` layer, or through the `activation` argument supported by all forward layers:
```python
from keras.layers import Activation, Dense
model.add(Dense(64))
model.add(Activation('tanh'))
```
This is equivalent to:
```python
model.add(Dense(64, activation='tanh'))
```
You can also pass an element-wise TensorFlow/Theano/CNTK function as an activation:
```python
from keras import backend as K
model.add(Dense(64, activation=K.tanh))
```
## Available activations
### softmax
```python
keras.activations.softmax(x, axis=-1)
```
Softmax activation function.
__Arguments__
- __x__: input tensor.
- __axis__: integer, axis along which the softmax normalization is applied.
__Returns__
Tensor, output of the softmax transformation.
`f(x) = exp(x) / sum(exp(x))`
__Raises__
- __ValueError__: in case `dim(x) == 1`.
----
### elu
```python
keras.activations.elu(x, alpha=1.0)
```
Exponential Linear Unit (ELU).
__Arguments__
- __x__: input tensor.
- __alpha__: float, slope of the negative section. Defaults to `1.0`.
__Returns__
The ELU activation:
- `f(x) = x` if `x > 0`
- `f(x) = alpha * (exp(x) - 1)` if `x < 0`
__References__
- [Fast and Accurate Deep Network Learning by Exponential
Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)
----
### selu
```python
keras.activations.selu(x)
```
Scaled Exponential Linear Unit (SELU).
SELU is equal to `scale * elu(x, alpha)`, where `alpha` and `scale`
are predefined constants. The values of `alpha` and `scale` are
chosen so that the mean and variance of the inputs are preserved
between two consecutive layers, as long as the weights are initialized
correctly (see `lecun_normal`) and the number of inputs is "large enough" (see the reference).
__Arguments__
- __x__: input tensor.
__Returns__
The SELU activation:
`f(x) = scale * elu(x, alpha)`
__Notes__
- To be used together with the initialization `lecun_normal`.
- To be used together with the dropout variant `AlphaDropout`.
__References__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
----
### softplus
```python
keras.activations.softplus(x)
```
Softplus activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The softplus activation:
`f(x) = log(exp(x) + 1)`
----
### softsign
```python
keras.activations.softsign(x)
```
Softsign activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The softsign activation:
`f(x) = x / (abs(x) + 1)`
----
### relu
```python
keras.activations.relu(x, alpha=0.0, max_value=None, threshold=0.0)
```
Rectified Linear Unit (ReLU).
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
- `f(x) = max_value` for `x >= max_value`,
- `f(x) = x` for `threshold <= x < max_value`,
- `f(x) = alpha * (x - threshold)` otherwise.
__Arguments__
- __x__: input tensor.
- __alpha__: float, slope of the negative section. Defaults to `0.0`.
- __max_value__: float, saturation threshold.
- __threshold__: float, threshold value for thresholded activation.
__Returns__
A tensor, result of the ReLU transformation:
- `f(x) = max_value` for `x >= max_value`,
- `f(x) = x` for `threshold <= x < max_value`,
- `f(x) = alpha * (x - threshold)` otherwise.
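__Example__
An illustrative call showing the effect of `max_value` (the numbers are arbitrary):
```python
from keras import backend as K
from keras.activations import relu
x = K.constant([-10.0, -1.0, 0.0, 5.0, 100.0])
K.eval(relu(x, alpha=0.0, max_value=6.0))
# -> array([0., 0., 0., 5., 6.], dtype=float32)  # values above max_value are clipped
```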
----
### tanh
```python
keras.activations.tanh(x)
```
Hyperbolic tangent activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The hyperbolic tangent activation:
`f(x) = tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))`
----
### sigmoid
```python
keras.activations.sigmoid(x)
```
Sigmoid activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The sigmoid activation:
`f(x) = 1 / (1 + exp(-x))`
----
### hard_sigmoid
```python
keras.activations.hard_sigmoid(x)
```
Hard sigmoid activation function.
Faster to compute than the sigmoid activation.
__Arguments__
- __x__: input tensor.
__Returns__
The hard sigmoid activation:
- `f(x) = 0` if `x < -2.5`
- `f(x) = 1` if `x > 2.5`
- `f(x) = 0.2 * x + 0.5` if `-2.5 <= x <= 2.5`
----
### exponential
```python
keras.activations.exponential(x)
```
Exponential (base e) activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The exponential activation:
`f(x) = exp(x)`
----
### linear
```python
keras.activations.linear(x)
```
Linear (i.e. identity) activation function.
__Arguments__
- __x__: input tensor.
__Returns__
The input tensor, unchanged.
## On "Advanced Activations"
Activations that are more complex than a simple TensorFlow/Theano/CNTK function (e.g. activations with learnable parameters) are available as [Advanced Activation layers](layers/advanced-activations.md) and can be found in the module `keras.layers.advanced_activations`. These include `PReLU` and `LeakyReLU`.
| keras-docs-ko/sources/activations.md/0 | {
"file_path": "keras-docs-ko/sources/activations.md",
"repo_id": "keras-docs-ko",
"token_count": 3458
} | 75 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/embeddings.py#L16)</span>
### Embedding
```python
keras.layers.Embedding(input_dim, output_dim, embeddings_initializer='uniform', embeddings_regularizer=None, activity_regularizer=None, embeddings_constraint=None, mask_zero=False, input_length=None)
```
Turns positive integers (indexes) into dense vectors of fixed size.
e.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
This layer can only be used as the first layer in a model.
__Example__
```python
model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# Embedding takes as input an integer matrix of size (batch, input_length).
# The largest integer (i.e. word index) in the input
# should be no larger than 999 (the vocabulary size).
# Now model.output_shape == (None, 10, 64), where None is the batch dimension.
input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64)
```
__Arguments__
- __input_dim__: `int > 0`. Size of the vocabulary, i.e. maximum integer index + 1.
- __output_dim__: `int >= 0`. Dimension of the dense embedding vectors.
- __embeddings_initializer__: Initializer for the embeddings matrix (the `(input_dim, output_dim)` matrix of embedding vectors; see [initializers](../initializers.md)).
- __embeddings_regularizer__: Regularizer function applied to the embeddings matrix (see [regularizers](../regularizers.md)).
- __activity_regularizer__: Regularizer function applied to the output of the layer. See the [paper](https://arxiv.org/abs/1708.01009) below for details.
- __embeddings_constraint__: Constraint function applied to the embedding vectors (see [constraints](../constraints.md)).
- __mask_zero__: Whether the input value 0 is a special "padding" value that should be masked out.
This is useful when using recurrent layers that take variable-length input.
If this is `True`, then all subsequent layers in the model need to support masking,
and index 0 cannot be used as a word index since it is reserved for padding.
- __input_length__: Length of input sequences, when it is constant for all batches. This argument is required if you are going to connect `Flatten` then `Dense` layers downstream (without it, the shape of the dense outputs cannot be computed).
__Input shape__
2D tensor with shape `(batch_size, sequence_length)`.
__Output shape__
3D tensor with shape `(batch_size, sequence_length, output_dim)`.
__References__
- [A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
- [Revisiting Activation Regularization for Language RNNs](https://arxiv.org/abs/1708.01009)
| keras-docs-ko/sources/layers/embeddings.md/0 | {
"file_path": "keras-docs-ko/sources/layers/embeddings.md",
"repo_id": "keras-docs-ko",
"token_count": 1992
} | 76 |
# Sequence Preprocessing
Utilities for preprocessing sequence data.
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/sequence.py#L16)</span>
### TimeseriesGenerator
```python
keras.preprocessing.sequence.TimeseriesGenerator(data, targets, length, sampling_rate=1, stride=1, start_index=0, end_index=None, shuffle=False, reverse=False, batch_size=128)
```
Utility class for generating batches of temporal data.
This class takes in a sequence of data points gathered at equal intervals, along with time-series parameters such as length and stride, to produce batches for training/validation.
__Arguments__
- __data__: Indexable 2D data (such as a list or Numpy array), where axis 0 is the time dimension made up of samples collected at consecutive timesteps.
- __targets__: Targets corresponding to the timesteps in `data`. It must have the same length as `data` along axis 0.
- __length__: Length (in timesteps) of the span covered by each output sequence. This is a maximum span; the actual number of timesteps per sample is `length` divided by `sampling_rate`.
- __sampling_rate__: Period between successive timesteps within a sample. For a sample starting at index `i` with rate `r`, the sample consists of `data[i]`, `data[i+r]`, `data[i+2r]`, ..., and its final length is `length // sampling_rate`. Defaults to `1`, in which case the sample length equals `length`.
- __stride__: Period between successive output sequences. For a first sample starting at index `i` with stride `s`, the next sample starts at `data[i+s]`, the one after at `data[i+2s]`, and so on. Use a `stride` greater than or equal to `length` if you do not want samples to overlap. Defaults to `1`.
- __start_index__: First timestep of the input data to use for generating batches. Data before `start_index` is not used, which is useful for reserving a separate test/validation set. Defaults to `0`.
- __end_index__: Last timestep of the input data to use for generating batches. Data after `end_index` is not used, which is useful for reserving a separate test/validation set. Defaults to `None`, which uses the last index of the data.
- __shuffle__: `bool`. If `True`, shuffles the order of the generated samples. Defaults to `False`.
- __reverse__: `bool`. If `True`, timesteps in each generated sample appear in reverse order of the input data. Defaults to `False`.
- __batch_size__: Number of timeseries samples in each batch. Defaults to `128`.
__Returns__
A [Sequence](/utils/#sequence) instance.
__Example__
```python
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = TimeseriesGenerator(data, targets,
length=10, sampling_rate=2,
batch_size=2)
assert len(data_gen) == 20
batch_0 = data_gen[0]
x, y = batch_0
assert np.array_equal(x,
np.array([[[0], [2], [4], [6], [8]],
[[1], [3], [5], [7], [9]]]))
assert np.array_equal(y,
np.array([[10], [11]]))
```
----
### pad_sequences
```python
keras.preprocessing.sequence.pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.0)
```
입력값의 길이를 패딩하여 동일하게 만듭니다.
이 함수는 서로 다른 길이를 가진 `num_samples`개의 리스트에 패딩을 더하여 전체 길이가 `num_timesteps`로 동일한 `(num_samples, num_timesteps)`형태의 2D NumPy 배열로 변형합니다. 패딩을 포함한 `num_timesteps`의 길이는 `maxlen`인자에 의해 결정되며, `maxlen`인자를 지정하지 않을 경우에는 전체 입력 리스트 가운데 가장 긴 리스트를 기준으로 맞춰집니다. 길이가 `num_timesteps`보다 긴 경우는 잘라냅니다. 패딩에 사용되는 값은 `value` 인자로 정할 수 있으며 기본값은 `0.0`입니다.
`padding`과 `truncating`인자는 각각 개별 리스트의 앞/뒤 가운데 어느 부분을 패딩하고 잘라낼지를 결정합니다. 기본값은 `'pre'`(앞)입니다.
__인자__
- __sequences__: 리스트들로 이루어진 리스트로, 각각의 하위 리스트가 순서형 데이터입니다.
- __maxlen__: `int`. 전체 리스트의 최대 길이를 결정합니다.
- __dtype__: 출력값의 자료형을 결정합니다. 패딩에 문자열을 사용할 경우 `object`타입을 지정합니다. 기본값은 `int32`입니다.
- __padding__: `str`. `'pre'` 또는 `'post'`를 입력받아 각 리스트의 앞 또는 뒤를 패딩할 위치로 지정합니다.
- __truncating__: `str`. `'pre'` 또는 `'post'`를 입력받아 `maxlen`보다 길이가 긴 리스트를 해당 위치에서 잘라냅니다.
- __value__: 패딩에 사용할 값으로 `float`또는 `str` 형식의 입력을 받습니다. 기본값은 `0.0`입니다.
__반환값__
`(len(sequences), maxlen)`형태의 NumPy 배열.
__오류__
- __ValueError__: `truncating` 혹은 `padding`에 잘못된 값을 전달한 경우, 또는 `sequences`입력 형식이 잘못된 경우 발생합니다.
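__예시__
A small sketch (added for illustration) showing the default pre-padding and a post-padding/post-truncating variant; the sequences are made-up examples.
```python
from keras.preprocessing.sequence import pad_sequences

sequences = [[1, 2, 3], [4, 5], [6]]

pad_sequences(sequences)
# array([[1, 2, 3],
#        [0, 4, 5],
#        [0, 0, 6]], dtype=int32)

pad_sequences(sequences, maxlen=2, padding='post', truncating='post', value=-1)
# array([[ 1,  2],
#        [ 4,  5],
#        [ 6, -1]], dtype=int32)
```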
----
### skipgrams
```python
keras.preprocessing.sequence.skipgrams(sequence, vocabulary_size, window_size=4, negative_samples=1.0, shuffle=True, categorical=False, sampling_table=None, seed=None)
```
Skipgram 단어 쌍을 생성합니다.
Skipgram은 어떤 문장을 구성하는 각각의 단어들을 '중심 단어'로 지정하고 특정 중심 단어가 등장했을 때 '주변 단어'가 등장할 확률을 말뭉치<sub>corpus</sub>로부터 학습하는 모델입니다. Skipgram에 대한 보다 자세한 설명은 [Mikolov et al.의 탁월한 논문](http://arxiv.org/pdf/1301.3781v3.pdf)을 참고하십시오.
케라스의 `skipgrams`함수는 단어 인덱스로 이루어진 리스트를 입력받아 중심 단어와 주변 단어의 쌍으로 이루어진 학습용 데이터 튜플을 생성합니다. 튜플은 다음의 두 리스트로 이루어집니다.
- 리스트`0`: 문장 내의 중심 단어와 주변 단어로 이루어진 리스트들의 리스트. 중심 단어의 인덱스를 `i`, `windows_size`인자에서 지정한 '창의 크기'를 `n`이라고 하면 `[i-n]`, `[i-n+1]`, ..., `[i-1]`, `[i+1]`, ..., `[i+n-1]`, `[i+n]`인덱스의 단어들이 각각 중심 단어 `[i]`와 단어 쌍을 만들게 됩니다. 따라서 각 중심 단어마다 `2n`개의 단어쌍을 만들며, 해당 중심 단어가 문장의 시작 또는 끝에 가까워서 어느 한쪽의 주변 단어 개수가 `n`보다 작을 경우 그 방향에 존재하는 주변 단어의 개수만큼 단어쌍이 생성됩니다. 또한 `negative_samples` 인자에서 지정한 비율에 따라 중심 단어와 '가짜 주변 단어'로 이루어진 '거짓 표본' 리스트들이 함께 생성됩니다.
- 리스트`1`: 리스트`0`에 포함된 각 단어쌍이 실제 중심 단어와 주변 단어로 이루어진 '참 표본'인지, 무작위로 선택한 가짜 주변 단어로 이루어진 '거짓 표본'인지를 나타내는 레이블의 리스트입니다. 참인 경우 `1`, 거짓인 경우 `0`값을 갖습니다.
__인자__
- __sequence__: 단어의 인덱스로 이루어진 정수값의 리스트입니다. `skipgrams`함수는 한 번에 한 문장, 즉 하나의 리스트를 입력받습니다. `sampling_table` 인자를 사용하는 경우 각 단어가 말뭉치 내에서 등장하는 빈도의 순위를 해당 단어의 인덱스로 지정해야 합니다. 예를 들어 인덱스 `10`은 열 번째로 많이 등장하는 단어를 나타냅니다. 인덱스 `0`은 단어 배정되지 않는 예비 인덱스이기 때문에 사용해서는 안된다는 점에 유의하십시오.
- __vocabulary_size__: `int`. 학습에 사용하고자 하는 단어 목록의 크기로, 존재하는 단어 인덱스 최댓값 + 1을 배정합니다.
- __window_size__: `int`. 하나의 중심 단어로부터 학습하고자 하는 주변 단어의 범위를 지정합니다. `window_size = n`일 때, 중심단어 `[i]`로부터 최대 `[i-n]`에서 `[i+n]`까지 최대 `2n`개의 주변 단어가 학습 범위에 포함됩니다. 해당 중심 단어가 문장의 시작 또는 끝에 가까워서 어느 한쪽의 주변 단어 개수가 `n`보다 작을 경우 그 방향에 존재하는 주변 단어의 개수만큼 단어쌍이 생성됩니다.
- __negative_samples__: `0`보다 같거나 큰 `float`값으로 실제 주변 단어로부터 생성될 단어쌍 표본의 개수 대비 생성할 거짓 표본의 비율을 나타냅니다. 0일 경우 거짓 표본을 생성하지 않으며 1일 경우 참인 표본과 동일한 개수의 무작위 거짓 표본을 생성합니다.
- __shuffle__: `bool`. 생성한 단어쌍의 순서를 무작위로 섞을 것인지를 결정합니다. `skipgrams`함수는 입력된 리스트로부터 차례대로 단어쌍을 생성합니다. `False`일 경우 뒤섞지 않고 생성한 순서대로 결과를 반환합니다. 기본값은 `True`입니다.
- __categorical__: `bool`. `False`인 경우, 출력된 튜플의 레이블(인덱스`1`)은 정수값을(예: `[0, 1, 1 ...]`), `True`인 경우 범주형 값을(예: `[[1,0], [0,1], [0,1] ...]`) 갖습니다.
- __sampling_table__: `vocabulary_size`만큼의 길이를 갖는 1D 배열로, 배열의 `i`번째 값은 인덱스 값이 `i`인 단어가 단어쌍으로 추출될 확률을 나타냅니다. 자세한 부분은 `make_sampling_table`함수를 참조하십시오.
- __seed__: 난수 생성에 사용할 시드입니다.
__반환값__
단어 인덱스의 쌍(리스트)으로 이루어진 리스트와 각 단어쌍의 참/거짓을 나타내는 레이블 리스트로 이루어진 튜플.
__유의사항__
상례에 따라, 인덱스 `0`은 특정 단어에 배정되지 않는 값으로 단어쌍 생성에 사용되지 않습니다.
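__예시__
A minimal sketch (an illustrative addition; the word indices are made up). The negative pairs are sampled randomly, so the exact output varies between runs.
```python
from keras.preprocessing.sequence import skipgrams

# One "sentence" of word indices; index 0 is reserved and not used.
sentence = [1, 2, 3]
couples, labels = skipgrams(sentence, vocabulary_size=4,
                            window_size=1, negative_samples=1.0,
                            shuffle=False)

# `couples` holds true (center, context) pairs such as [1, 2], [2, 1], [2, 3], [3, 2],
# plus randomly sampled negative pairs; `labels` is 1 for true pairs and 0 for negatives.
print(couples)
print(labels)
```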
----
### make_sampling_table
```python
keras.preprocessing.sequence.make_sampling_table(size, sampling_factor=1e-05)
```
각 단어가 등장할 확률을 높은 순서대로 정렬한 배열을 생성합니다.
이 함수는 `skipgrams`함수의 `sampling_table`인자에 입력할 확률 배열을 생성합니다. `sampling_table[i]`는 데이터에서 `i`번째로 빈번하게 나타나는 단어를 학습 표본으로 추출할 확률입니다. 학습에 사용할 데이터는 각 항목의 가짓수가 비슷한 편이 좋기 때문에 균형을 위해 자주 등장하는 단어일수록 추출될 확률을 낮게 설정합니다. 해당 확률은 word2vec에서 사용하는 표본추출 분포 공식을 따릅니다.
```
p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
(word_frequency / sampling_factor)))
```
이때 `word_frequency`값은 각 단어가 실제 말뭉치 안에 등장하는 빈도가 아니라 [지프의 법칙](https://ko.wikipedia.org/wiki/%EC%A7%80%ED%94%84%EC%9D%98_%EB%B2%95%EC%B9%99)(s=1)의 가정을 따라 단어의 등장 빈도 순위별로 자동으로 생성되는 근사값을 사용합니다.
```
frequency(rank) ≈ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))
```
여기서 `gamma`는 오일러-마스케로니 상수입니다.
__인자__
- __size__: `int`. 표본 추출에 사용할 전체 단어의 수.
- __sampling_factor__: word2vec 공식에 사용할 표본 추출 인수. 기본값은 `1e-5`입니다.
__반환값__
`size`의 길이를 가진 1D 형태의 NumPy 배열로, `i`번째 값은 `i`번째로 빈번하게 나타나는 단어를 학습 표본으로 추출할 확률입니다.
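__예시__
A brief sketch (added for illustration, with made-up word indices) of passing the table to `skipgrams`.
```python
from keras.preprocessing.sequence import make_sampling_table, skipgrams

table = make_sampling_table(size=10000)
print(table.shape)  # (10000,)

# More frequent words (lower rank index) receive smaller sampling probabilities.
sentence = [5, 12, 7, 30]
couples, labels = skipgrams(sentence, vocabulary_size=10000,
                            window_size=2, sampling_table=table)
```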
| keras-docs-ko/sources/preprocessing/sequence.md/0 | {
"file_path": "keras-docs-ko/sources/preprocessing/sequence.md",
"repo_id": "keras-docs-ko",
"token_count": 9459
} | 77 |
# 在 CIFAR10 数据集上训练 ResNet。
ResNet v1:
[Deep Residual Learning for Image Recognition
](https://arxiv.org/pdf/1512.03385.pdf)
ResNet v2:
[Identity Mappings in Deep Residual Networks
](https://arxiv.org/pdf/1603.05027.pdf)
Model|n|200-epoch accuracy|Original paper accuracy |sec/epoch GTX1080Ti
:------------|--:|-------:|-----------------------:|---:
ResNet20 v1| 3| 92.16 %| 91.25 %|35
ResNet32 v1| 5| 92.46 %| 92.49 %|50
ResNet44 v1| 7| 92.50 %| 92.83 %|70
ResNet56 v1| 9| 92.71 %| 93.03 %|90
ResNet110 v1| 18| 92.65 %| 93.39+-.16 %|165
ResNet164 v1| 27| - %| 94.07 %| -
ResNet1001 v1|N/A| - %| 92.39 %| -
Model|n|200-epoch accuracy|Original paper accuracy |sec/epoch GTX1080Ti
:------------|--:|-------:|-----------------------:|---:
ResNet20 v2| 2| - %| - %|---
ResNet32 v2|N/A| NA %| NA %| NA
ResNet44 v2|N/A| NA %| NA %| NA
ResNet56 v2| 6| 93.01 %| NA %|100
ResNet110 v2| 12| 93.15 %| 93.63 %|180
ResNet164 v2| 18| - %| 94.54 %| -
ResNet1001 v2|111| - %| 95.08+-.14 %| -
```python
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
# 训练参数
batch_size = 32 # 原论文按照 batch_size=128 训练所有的网络
epochs = 200
data_augmentation = True
num_classes = 10
# 减去像素均值可提高准确度
subtract_pixel_mean = True
# 模型参数
# ----------------------------------------------------------------------------
# | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
# |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# ----------------------------------------------------------------------------
# ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)
# ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)
# ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)
# ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)
# ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)
# ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)
# ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)
# ---------------------------------------------------------------------------
n = 3
# 模型版本
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1
# 从提供的模型参数 n 计算的深度
if version == 1:
depth = n * 6 + 2
elif version == 2:
depth = n * 9 + 2
# 模型名称、深度和版本
model_type = 'ResNet%dv%d' % (depth, version)
# 载入 CIFAR10 数据。
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# 输入图像维度。
input_shape = x_train.shape[1:]
# 数据标准化。
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# 如果使用减去像素均值
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# 将类向量转换为二进制类矩阵。
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def lr_schedule(epoch):
"""学习率调度
学习率将在 80, 120, 160, 180 轮后依次下降。
他作为训练期间回调的一部分,在每个时期自动调用。
# 参数
epoch (int): 轮次
# 返回
lr (float32): 学习率
"""
lr = 1e-3
if epoch > 180:
lr *= 0.5e-3
elif epoch > 160:
lr *= 1e-3
elif epoch > 120:
lr *= 1e-2
elif epoch > 80:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D 卷积批量标准化 - 激活栈构建器
# 参数
inputs (tensor): 从输入图像或前一层来的输入张量
num_filters (int): Conv2D 过滤器数量
kernel_size (int): Conv2D 方形核维度
strides (int): Conv2D 方形步幅维度
activation (string): 激活函数名
batch_normalization (bool): 是否包含批标准化
conv_first (bool): conv-bn-activation (True) 或
bn-activation-conv (False)
# 返回
x (tensor): 作为下一层输入的张量
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet 版本 1 模型构建器 [a]
2 x (3 x 3) Conv2D-BN-ReLU 的堆栈
最后一个 ReLU 在快捷连接之后。
在每个阶段的开始,特征图大小由具有 strides=2 的卷积层减半(下采样),
而滤波器的数量加倍。在每个阶段中,这些层具有相同数量的过滤器和相同的特征图尺寸。
特征图尺寸:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
参数数量与 [a] 中表 6 接近:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# 参数
input_shape (tensor): 输入图像张量的尺寸
depth (int): 核心卷积层的数量
num_classes (int): 类别数 (CIFAR10 为 10)
# 返回
model (Model): Keras 模型实例
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# 开始模型定义
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# 实例化残差单元的堆栈
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # 第一层但不是第一个栈
strides = 2 # downsample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides)
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# 线性投影残差快捷键连接,以匹配更改的 dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# 在顶层加分类器。
# v1 不在最后一个快捷连接 ReLU 后使用 BN
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# 实例化模型。
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet 版本 2 模型构建器 [b]
(1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D 的堆栈,也被称为瓶颈层。
每一层的第一个快捷连接是一个 1 x 1 Conv2D。
第二个及以后的快捷连接是 identity。
在每个阶段的开始,特征图大小由具有 strides=2 的卷积层减半(下采样),
而滤波器的数量加倍。在每个阶段中,这些层具有相同数量的过滤器和相同的特征图尺寸。
特征图尺寸:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# 参数
input_shape (tensor): 输入图像张量的尺寸
depth (int): 核心卷积层的数量
num_classes (int): 类别数 (CIFAR10 为 10)
# 返回
model (Model): Keras 模型实例
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# 开始模型定义。
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 在将输入分离为两个路径前执行带 BN-ReLU 的 Conv2D 操作。
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# 实例化残差单元的栈
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# 瓶颈残差单元
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# 线性投影残差快捷键连接,以匹配更改的 dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# 在顶层添加分类器
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# 实例化模型。
model = Model(inputs=inputs, outputs=outputs)
return model
if version == 2:
model = resnet_v2(input_shape=input_shape, depth=depth)
else:
model = resnet_v1(input_shape=input_shape, depth=depth)
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=lr_schedule(0)),
metrics=['accuracy'])
model.summary()
print(model_type)
# 准备模型保存路径。
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
# 准备保存模型和学习速率调整的回调。
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True)
lr_scheduler = LearningRateScheduler(lr_schedule)
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=5,
min_lr=0.5e-6)
callbacks = [checkpoint, lr_reducer, lr_scheduler]
# 运行训练,是否数据增强可选。
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# 这将做预处理和实时数据增强。
datagen = ImageDataGenerator(
# 在整个数据集上将输入均值置为 0
featurewise_center=False,
# 将每个样本均值置为 0
samplewise_center=False,
# 将输入除以整个数据集的 std
featurewise_std_normalization=False,
# 将每个输入除以其自身 std
samplewise_std_normalization=False,
# 应用 ZCA 白化
zca_whitening=False,
# ZCA 白化的 epsilon 值
zca_epsilon=1e-06,
# 随机图像旋转角度范围 (deg 0 to 180)
rotation_range=0,
# 随机水平平移图像
width_shift_range=0.1,
# 随机垂直平移图像
height_shift_range=0.1,
# 设置随机裁剪范围
shear_range=0.,
# 设置随机缩放范围
zoom_range=0.,
# 设置随机通道切换范围
channel_shift_range=0.,
# 设置输入边界之外的点的数据填充模式
fill_mode='nearest',
# 在 fill_mode = "constant" 时使用的值
cval=0.,
# 随机翻转图像
horizontal_flip=True,
# 随机翻转图像
vertical_flip=False,
# 设置重缩放因子 (应用在其他任何变换之前)
rescale=None,
# 设置应用在每一个输入的预处理函数
preprocessing_function=None,
# 图像数据格式 "channels_first" 或 "channels_last" 之一
data_format=None,
# 保留用于验证的图像的比例 (严格控制在 0 和 1 之间)
validation_split=0.0)
# 计算大量的特征标准化操作
# (如果应用 ZCA 白化,则计算 std, mean, 和 principal components)。
datagen.fit(x_train)
# 在由 datagen.flow() 生成的批次上拟合模型。
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
validation_data=(x_test, y_test),
epochs=epochs, verbose=1, workers=4,
callbacks=callbacks)
# 评估训练模型
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
```
| keras-docs-zh/sources/examples/cifar10_resnet.md/0 | {
"file_path": "keras-docs-zh/sources/examples/cifar10_resnet.md",
"repo_id": "keras-docs-zh",
"token_count": 9169
} | 78 |
# 在 MNIST 数据集上训练辅助分类器 GAN(ACGAN)。
[有关辅助分类器 GAN 的更多详细信息。](https://arxiv.org/abs/1610.09585)
你应该在大约 5 个轮次后开始看到合理的图像,而在大约 15 个轮次后开始看到良好的图像。
你应该使用 GPU,因为大量卷积运算在 CPU 上非常慢。
如果你打算进行迭代,请首选 TensorFlow 后端,因为使用 Theano 的话编译时间可能会成为阻碍。
耗时:
硬件 | 后端 | Time / Epoch
:------------------|:--------|------------:
CPU | TF | 3 hrs
Titan X (maxwell) | TF | 4 min
Titan X (maxwell) | TH | 7 min
有关更多信息和示例输出,请参阅 [Keras 中的辅助分类器生成对抗网络](https://github.com/lukedeo/keras-acgan)。
```python
from __future__ import print_function
from collections import defaultdict
try:
import cPickle as pickle
except ImportError:
import pickle
from PIL import Image
from six.moves import range
from keras.datasets import mnist
from keras import layers
from keras.layers import Input, Dense, Reshape, Flatten, Embedding, Dropout
from keras.layers import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2DTranspose, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils.generic_utils import Progbar
import numpy as np
np.random.seed(1337)
num_classes = 10
def build_generator(latent_size):
# 我们将一对 (z, L) 映射到图像空间 (..., 28, 28, 1),其中 z 是隐向量,L 是从 P_c 绘制的标签。
cnn = Sequential()
cnn.add(Dense(3 * 3 * 384, input_dim=latent_size, activation='relu'))
cnn.add(Reshape((3, 3, 384)))
# 上采样至 (7, 7, ...)
cnn.add(Conv2DTranspose(192, 5, strides=1, padding='valid',
activation='relu',
kernel_initializer='glorot_normal'))
cnn.add(BatchNormalization())
# 上采样至 (14, 14, ...)
cnn.add(Conv2DTranspose(96, 5, strides=2, padding='same',
activation='relu',
kernel_initializer='glorot_normal'))
cnn.add(BatchNormalization())
# 上采样至 (28, 28, ...)
cnn.add(Conv2DTranspose(1, 5, strides=2, padding='same',
activation='tanh',
kernel_initializer='glorot_normal'))
# 这是 GAN 论文中通常提到的 z 空间
latent = Input(shape=(latent_size, ))
# 这将是我们的标签
image_class = Input(shape=(1,), dtype='int32')
cls = Embedding(num_classes, latent_size,
embeddings_initializer='glorot_normal')(image_class)
# z 空间和一类条件嵌入之间的 hadamard 积
h = layers.multiply([latent, cls])
fake_image = cnn(h)
return Model([latent, image_class], fake_image)
def build_discriminator():
# 根据参考文献中的建议,使用 LeakyReLU 构建相对标准的转换网络
cnn = Sequential()
cnn.add(Conv2D(32, 3, padding='same', strides=2,
input_shape=(28, 28, 1)))
cnn.add(LeakyReLU(0.2))
cnn.add(Dropout(0.3))
cnn.add(Conv2D(64, 3, padding='same', strides=1))
cnn.add(LeakyReLU(0.2))
cnn.add(Dropout(0.3))
cnn.add(Conv2D(128, 3, padding='same', strides=2))
cnn.add(LeakyReLU(0.2))
cnn.add(Dropout(0.3))
cnn.add(Conv2D(256, 3, padding='same', strides=1))
cnn.add(LeakyReLU(0.2))
cnn.add(Dropout(0.3))
cnn.add(Flatten())
image = Input(shape=(28, 28, 1))
features = cnn(image)
# 第一个输出 (name=generation) 是鉴别是否认为所显示的图像是伪造的,
# 而第二个输出 (name=auxiliary) 是鉴别认为图像所属的类。
fake = Dense(1, activation='sigmoid', name='generation')(features)
aux = Dense(num_classes, activation='softmax', name='auxiliary')(features)
return Model(image, [fake, aux])
if __name__ == '__main__':
# 论文的批次和潜在大小
epochs = 100
batch_size = 100
latent_size = 100
# https://arxiv.org/abs/1511.06434 建议的 Adam 参数
adam_lr = 0.0002
adam_beta_1 = 0.5
# 建立鉴别器
print('Discriminator model:')
discriminator = build_discriminator()
discriminator.compile(
optimizer=Adam(learning_rate=adam_lr, beta_1=adam_beta_1),
loss=['binary_crossentropy', 'sparse_categorical_crossentropy']
)
discriminator.summary()
# 建立生成器
generator = build_generator(latent_size)
latent = Input(shape=(latent_size, ))
image_class = Input(shape=(1,), dtype='int32')
# 取得假图片
fake = generator([latent, image_class])
# 我们只希望能够训练组合模型的生成
discriminator.trainable = False
fake, aux = discriminator(fake)
combined = Model([latent, image_class], [fake, aux])
print('Combined model:')
combined.compile(
optimizer=Adam(learning_rate=adam_lr, beta_1=adam_beta_1),
loss=['binary_crossentropy', 'sparse_categorical_crossentropy']
)
combined.summary()
# 获取我们的 mnist 数据,并强制其形状为 (..., 28, 28, 1),范围为 [-1, 1]
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=-1)
x_test = (x_test.astype(np.float32) - 127.5) / 127.5
x_test = np.expand_dims(x_test, axis=-1)
num_train, num_test = x_train.shape[0], x_test.shape[0]
train_history = defaultdict(list)
test_history = defaultdict(list)
for epoch in range(1, epochs + 1):
print('Epoch {}/{}'.format(epoch, epochs))
num_batches = int(np.ceil(x_train.shape[0] / float(batch_size)))
progress_bar = Progbar(target=num_batches)
epoch_gen_loss = []
epoch_disc_loss = []
for index in range(num_batches):
# 得到一批真实的图像
image_batch = x_train[index * batch_size:(index + 1) * batch_size]
label_batch = y_train[index * batch_size:(index + 1) * batch_size]
# 产生一批新的噪音
noise = np.random.uniform(-1, 1, (len(image_batch), latent_size))
# 从 p_c 采样一些标签
sampled_labels = np.random.randint(0, num_classes, len(image_batch))
# 使用生成的标签作为调节器,生成一批假图像。
# 我们将采样的标签重塑为 (len(image_batch),1),
# 以便我们可以将它们作为一个序列的长度送入嵌入层
generated_images = generator.predict(
[noise, sampled_labels.reshape((-1, 1))], verbose=0)
x = np.concatenate((image_batch, generated_images))
# 使用单面 soft real/fake 标签
# Salimans et al., 2016
# https://arxiv.org/pdf/1606.03498.pdf (Section 3.4)
soft_zero, soft_one = 0, 0.95
y = np.array(
[soft_one] * len(image_batch) + [soft_zero] * len(image_batch))
aux_y = np.concatenate((label_batch, sampled_labels), axis=0)
# 我们不希望鉴别器也能最大化生成图像上辅助分类器的分类精度,
# 因此我们不训练鉴别器为生成图像生成类标签(请参阅 https://openreview.net/forum?id=rJXTf9Bxg)。
# 为了保留辅助分类器的样本权重总和,我们将样本权重 2 分配给实际图像。
disc_sample_weight = [np.ones(2 * len(image_batch)),
np.concatenate((np.ones(len(image_batch)) * 2,
np.zeros(len(image_batch))))]
# 看看鉴别器是否能弄清楚自己...
epoch_disc_loss.append(discriminator.train_on_batch(
x, [y, aux_y], sample_weight=disc_sample_weight))
            # 制造新的噪声。我们在这里生成 2 倍批量大小,
# 这样我们就可以使生成器对与鉴别器相同数量的图像进行优化
noise = np.random.uniform(-1, 1, (2 * len(image_batch), latent_size))
sampled_labels = np.random.randint(0, num_classes, 2 * len(image_batch))
# 我们想训练生成器来欺骗鉴别器
# 对于生成器,我们希望所有 {fake,not-fake} 标签都说不假
trick = np.ones(2 * len(image_batch)) * soft_one
epoch_gen_loss.append(combined.train_on_batch(
[noise, sampled_labels.reshape((-1, 1))],
[trick, sampled_labels]))
progress_bar.update(index + 1)
print('Testing for epoch {}:'.format(epoch))
        # 在这里评估测试损失
# 产生一批新的噪音
noise = np.random.uniform(-1, 1, (num_test, latent_size))
# 从 p_c 采样一些标签并从中生成图像
sampled_labels = np.random.randint(0, num_classes, num_test)
generated_images = generator.predict(
[noise, sampled_labels.reshape((-1, 1))], verbose=False)
x = np.concatenate((x_test, generated_images))
y = np.array([1] * num_test + [0] * num_test)
aux_y = np.concatenate((y_test, sampled_labels), axis=0)
# 看看鉴别器是否能弄清楚自己...
discriminator_test_loss = discriminator.evaluate(
x, [y, aux_y], verbose=False)
discriminator_train_loss = np.mean(np.array(epoch_disc_loss), axis=0)
# 制造新的噪声
noise = np.random.uniform(-1, 1, (2 * num_test, latent_size))
sampled_labels = np.random.randint(0, num_classes, 2 * num_test)
trick = np.ones(2 * num_test)
generator_test_loss = combined.evaluate(
[noise, sampled_labels.reshape((-1, 1))],
[trick, sampled_labels], verbose=False)
generator_train_loss = np.mean(np.array(epoch_gen_loss), axis=0)
# 生成有关性能的轮次报告
train_history['generator'].append(generator_train_loss)
train_history['discriminator'].append(discriminator_train_loss)
test_history['generator'].append(generator_test_loss)
test_history['discriminator'].append(discriminator_test_loss)
print('{0:<22s} | {1:4s} | {2:15s} | {3:5s}'.format(
'component', *discriminator.metrics_names))
print('-' * 65)
ROW_FMT = '{0:<22s} | {1:<4.2f} | {2:<15.4f} | {3:<5.4f}'
print(ROW_FMT.format('generator (train)',
*train_history['generator'][-1]))
print(ROW_FMT.format('generator (test)',
*test_history['generator'][-1]))
print(ROW_FMT.format('discriminator (train)',
*train_history['discriminator'][-1]))
print(ROW_FMT.format('discriminator (test)',
*test_history['discriminator'][-1]))
# 在每个轮次保存权重
generator.save_weights(
'params_generator_epoch_{0:03d}.hdf5'.format(epoch), True)
discriminator.save_weights(
'params_discriminator_epoch_{0:03d}.hdf5'.format(epoch), True)
# 生成一些数字来显示
num_rows = 40
noise = np.tile(np.random.uniform(-1, 1, (num_rows, latent_size)),
(num_classes, 1))
sampled_labels = np.array([
[i] * num_rows for i in range(num_classes)
]).reshape(-1, 1)
# 批量显示
generated_images = generator.predict(
[noise, sampled_labels], verbose=0)
# 准备按类别标签排序的真实图像
real_labels = y_train[(epoch - 1) * num_rows * num_classes:
epoch * num_rows * num_classes]
indices = np.argsort(real_labels, axis=0)
real_images = x_train[(epoch - 1) * num_rows * num_classes:
epoch * num_rows * num_classes][indices]
# 显示生成的图像,白色分隔符,真实图像
img = np.concatenate(
(generated_images,
np.repeat(np.ones_like(x_train[:1]), num_rows, axis=0),
real_images))
# 将它们排列成网格
img = (np.concatenate([r.reshape(-1, 28)
for r in np.split(img, 2 * num_classes + 1)
], axis=-1) * 127.5 + 127.5).astype(np.uint8)
Image.fromarray(img).save(
'plot_epoch_{0:03d}_generated.png'.format(epoch))
with open('acgan-history.pkl', 'wb') as f:
pickle.dump({'train': train_history, 'test': test_history}, f)
``` | keras-docs-zh/sources/examples/mnist_acgan.md/0 | {
"file_path": "keras-docs-zh/sources/examples/mnist_acgan.md",
"repo_id": "keras-docs-zh",
"token_count": 6841
} | 79 |
# 使用 MLP 的 MNIST 数据集上的 VAE 示例
VAE 具有模块化设计。编码器、解码器和 VAE 是 3 种共享权重的模型。训练 VAE 模型后,编码器可用于生成潜矢量。
通过从 mean=0 和 std=1 的高斯分布中采样潜矢量,可以将解码器用于生成 MNIST 数字。
# 参考文献
[1] Kingma, Diederik P., and Max Welling.
["Auto-Encoding Variational Bayes."](https://arxiv.org/abs/1312.6114)
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras.losses import mse, binary_crossentropy
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import argparse
import os
# 重新参数化技巧
# 代替从 Q(z|X) 采样, 采样 epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
"""通过从各向同性单位高斯采样来进行重新参数化技巧。
# 参数
args (tensor): Q(z|X) 的均值和方差对数
# 返回
z (tensor): 采样的潜在向量
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# 默认情况下,random_normal 的 mean = 0,std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
def plot_results(models,
data,
batch_size=128,
model_name="vae_mnist"):
"""绘制标签和 MNIST 数字作为 2D 潜矢量的函数
# 参数
models (tuple): 编码器和解码器型号
data (tuple): 测试数据和标签
batch_size (int): 预测批次大小
model_name (string): 哪个模型正在使用此功能
"""
encoder, decoder = models
x_test, y_test = data
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "vae_mean.png")
# 在潜在空间中显示数字类的二维图
z_mean, _, _ = encoder.predict(x_test,
batch_size=batch_size)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig(filename)
plt.show()
filename = os.path.join(model_name, "digits_over_latent.png")
# 显示 30x30 的 2D 数字流形
n = 30
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# 线性空间坐标,对应于潜在空间中数字类的二维图
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
start_range = digit_size // 2
end_range = n * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap='Greys_r')
plt.savefig(filename)
plt.show()
# MNIST 数据集
(x_train, y_train), (x_test, y_test) = mnist.load_data()
image_size = x_train.shape[1]
original_dim = image_size * image_size
x_train = np.reshape(x_train, [-1, original_dim])
x_test = np.reshape(x_test, [-1, original_dim])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# 网络参数
input_shape = (original_dim, )
intermediate_dim = 512
batch_size = 128
latent_dim = 2
epochs = 50
# VAE model = encoder + decoder
# 建立编码器模型
inputs = Input(shape=input_shape, name='encoder_input')
x = Dense(intermediate_dim, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# 使用重新参数化技巧将采样作为输入推送
# 注意 TensorFlow 后端不需要 "output_shape"
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# 实例化编码器模型
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()
plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)
# 建立解码器模型
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = Dense(original_dim, activation='sigmoid')(x)
# 实例化解码器模型
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
# 实例化 VAE 模型
outputs = decoder(encoder(inputs)[2])
vae = Model(inputs, outputs, name='vae_mlp')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
help_ = "Load h5 model trained weights"
parser.add_argument("-w", "--weights", help=help_)
help_ = "Use mse loss instead of binary cross entropy (default)"
parser.add_argument("-m",
"--mse",
help=help_, action='store_true')
args = parser.parse_args()
models = (encoder, decoder)
data = (x_test, y_test)
# VAE loss = mse_loss or xent_loss + kl_loss
if args.mse:
reconstruction_loss = mse(inputs, outputs)
else:
reconstruction_loss = binary_crossentropy(inputs,
outputs)
reconstruction_loss *= original_dim
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
vae.summary()
plot_model(vae,
to_file='vae_mlp.png',
show_shapes=True)
if args.weights:
vae.load_weights(args.weights)
else:
# 训练自动编码器
vae.fit(x_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, None))
vae.save_weights('vae_mlp_mnist.h5')
plot_results(models,
data,
batch_size=batch_size,
model_name="vae_mlp")
``` | keras-docs-zh/sources/examples/variational_autoencoder.md/0 | {
"file_path": "keras-docs-zh/sources/examples/variational_autoencoder.md",
"repo_id": "keras-docs-zh",
"token_count": 3435
} | 80 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L19)</span>
### LeakyReLU
```python
keras.layers.LeakyReLU(alpha=0.3)
```
带泄漏的 ReLU。
当神经元未激活时,它仍允许赋予一个很小的梯度:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
__输入尺寸__
可以是任意的。如果将该层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __alpha__: float >= 0。负斜率系数。
__参考文献__
- [Rectifier Nonlinearities Improve Neural Network Acoustic Models](https://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
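__示例__
A short usage sketch (an illustrative addition; the layer sizes are assumptions).
```python
from keras.models import Sequential
from keras.layers import Dense, LeakyReLU

model = Sequential()
model.add(Dense(64, input_shape=(16,)))
# Used as a standalone layer rather than through the `activation` argument.
model.add(LeakyReLU(alpha=0.1))
```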
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L59)</span>
### PReLU
```python
keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=None)
```
参数化的 ReLU。
形式:
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`,
其中 `alpha` 是一个可学习的数组,尺寸与 x 相同。
__输入尺寸__
可以是任意的。如果将这一层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __alpha_initializer__: 权重的初始化函数。
- __alpha_regularizer__: 权重的正则化方法。
- __alpha_constraint__: 权重的约束。
- __shared_axes__: 激活函数共享可学习参数的轴。
例如,如果输入特征图来自输出形状为 `(batch, height, width, channels)`
的 2D 卷积层,而且你希望跨空间共享参数,以便每个滤波器只有一组参数,
可设置 `shared_axes=[1, 2]`。
__参考文献__
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
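__示例__
A minimal sketch (added for illustration) of `shared_axes` with a 2D convolution, so that there is a single learnable `alpha` per filter; the shapes are assumptions.
```python
from keras.models import Sequential
from keras.layers import Conv2D, PReLU

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 3)))
# Share alpha across the spatial axes 1 and 2: one parameter per channel.
model.add(PReLU(shared_axes=[1, 2]))
```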
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L153)</span>
### ELU
```python
keras.layers.ELU(alpha=1.0)
```
指数线性单元。
形式:
`f(x) = alpha * (exp(x) - 1.) for x < 0`,
`f(x) = x for x >= 0`.
__输入尺寸__
可以是任意的。如果将这一层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __alpha__: 负因子的尺度。
__参考文献__
- [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L193)</span>
### ThresholdedReLU
```python
keras.layers.ThresholdedReLU(theta=1.0)
```
带阈值的修正线性单元。
形式:
`f(x) = x for x > theta`,
`f(x) = 0 otherwise`.
__输入尺寸__
可以是任意的。如果将这一层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __theta__: float >= 0。激活的阈值位置。
__参考文献__
- [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L233)</span>
### Softmax
```python
keras.layers.Softmax(axis=-1)
```
Softmax 激活函数。
__输入尺寸__
可以是任意的。如果将这一层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __axis__: 整数,应用 softmax 标准化的轴。
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/advanced_activations.py#L265)</span>
### ReLU
```python
keras.layers.ReLU(max_value=None, negative_slope=0.0, threshold=0.0)
```
ReLU 激活函数。
使用默认值时,它返回逐个元素的 `max(x,0)`。
否则:
- 如果 `x >= max_value`,返回 `f(x) = max_value`,
- 如果 `threshold <= x < max_value`,返回 `f(x) = x`,
- 否则,返回 `f(x) = negative_slope * (x - threshold)`。
__输入尺寸__
可以是任意的。如果将这一层作为模型的第一层,
则需要指定 `input_shape` 参数
(整数元组,不包含样本数量的维度)。
__输出尺寸__
与输入相同。
__参数__
- __max_value__: 浮点数,最大的输出值。
- __negative_slope__: float >= 0. 负斜率系数。
- __threshold__: float。"thresholded activation" 的阈值。
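__示例__
A small sketch (an illustrative addition) combining `max_value`, `negative_slope` and `threshold`; the input values are made up.
```python
import numpy as np
from keras.models import Sequential
from keras.layers import ReLU

model = Sequential()
# ReLU6-style clipping plus a small slope for inputs below the threshold.
model.add(ReLU(max_value=6.0, negative_slope=0.01, threshold=0.0, input_shape=(4,)))

x = np.array([[-2.0, 0.5, 3.0, 10.0]])
print(model.predict(x))  # approximately [[-0.02  0.5   3.    6.  ]]
```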
| keras-docs-zh/sources/layers/advanced-activations.md/0 | {
"file_path": "keras-docs-zh/sources/layers/advanced-activations.md",
"repo_id": "keras-docs-zh",
"token_count": 2636
} | 81 |
# Sequential 模型 API
在阅读这片文档前,请先阅读 [Keras Sequential 模型指引](/getting-started/sequential-model-guide)。
----
## Sequential 模型方法
### compile
```python
compile(optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None)
```
用于配置训练模型。
__参数__
- __optimizer__: 字符串(优化器名)或者优化器对象。详见 [optimizers](/optimizers)。
- __loss__: 字符串(目标函数名)或目标函数或 `Loss` 实例。详见 [losses](/losses)。
如果模型具有多个输出,则可以通过传递损失函数的字典或列表,在每个输出上使用不同的损失。模型将最小化的损失值将是所有单个损失的总和。
- __metrics__: 在训练和测试期间的模型评估标准。
通常你会使用 `metrics = ['accuracy']`。要为多输出模型的不同输出指定不同的评估标准,
还可以传递一个字典,如 `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`。
你也可以传递一个评估指标序列的序列 (len = len(outputs)) 例如 `metrics=[['accuracy'], ['accuracy', 'mse']]`
或 `metrics=['accuracy', ['accuracy', 'mse']]`。
- __loss_weights__: 指定标量系数(Python浮点数)的可选列表或字典,用于加权不同模型输出的损失贡献。
模型将要最小化的损失值将是所有单个损失的加权和,由 `loss_weights` 系数加权。
如果是列表,则期望与模型的输出具有 1:1 映射。
如果是字典,则期望将输出名称(字符串)映射到标量系数。
- __sample_weight_mode__: 如果你需要执行按时间步采样权重(2D 权重),请将其设置为 `temporal`。
默认为 `None`,为采样权重(1D)。如果模型有多个输出,则可以通过传递 mode 的字典或列表,以在每个输出上使用不同的 `sample_weight_mode`。
- __weighted_metrics__: 在训练和测试期间,由 sample_weight 或 class_weight 评估和加权的度量标准列表。
- __target_tensors__: 默认情况下,Keras 将为模型的目标创建一个占位符,在训练过程中将使用目标数据。
相反,如果你想使用自己的目标张量(反过来说,Keras 在训练期间不会载入这些目标张量的外部 Numpy 数据),
您可以通过 `target_tensors` 参数指定它们。它应该是单个张量(对于单输出 Sequential 模型)。
- __**kwargs__: 当使用 Theano/CNTK 后端时,这些参数被传入 `K.function`。当使用 TensorFlow 后端时,这些参数被传递到 `tf.Session.run`。
__异常__
- __ValueError__: 如果 `optimizer`, `loss`, `metrics` 或 `sample_weight_mode` 这些参数不合法。
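__示例__
A minimal sketch (added for illustration; the layer sizes are assumptions) of compiling a small classifier.
```python
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
```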
----
### fit
```python
fit(x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False)
```
以固定数量的轮次(数据集上的迭代)训练模型。
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __y__: 目标数据。与输入数据 `x` 类似,它可以是 Numpy 数组(序列)、
本地框架张量(序列)、Numpy数组序列(如果模型有多个输出)
或 None(默认)如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
如果模型输出层已命名,你也可以传递一个名称匹配 Numpy 数组的字典。
如果 `x` 是一个生成器,或 `keras.utils.Sequence` 实例,则不应该
指定 `y`(因为目标可以从 `x` 获得)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `Sequence` 实例形式,不要指定 `batch_size`,
因为它们会生成批次。
- __epochs__: 整数。训练模型迭代轮次。一个轮次是在整个 `x` 或 `y` 上的一轮迭代。
请注意,与 `initial_epoch` 一起,`epochs` 被理解为「最终轮次」。
模型并不是训练了 `epochs` 轮,而是到第 `epochs` 轮停止训练。
- __verbose__: 整数,0, 1 或 2。日志显示模式。
0 = 安静模式, 1 = 进度条, 2 = 每轮一行。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在训练和验证(如果有)时使用的回调函数。
详见 [callbacks](/callbacks)。
- __validation_split__: 0 和 1 之间的浮点数。用作验证集的训练数据的比例。
模型将分出一部分不会被训练的验证数据,并将在每一轮结束时评估这些验证数据的误差和任何其他模型指标。
验证数据选取自所提供的 `x` 和 `y` 数据中混洗之前的最后一部分样本。
这个参数在 `x` 是生成器或 `Sequence` 实例时不支持。
- __validation_data__: 用于在每个轮次结束后评估损失和任意指标的数据。
模型不会在这个数据上训练。`validation_data` 会覆盖 `validation_split`。
`validation_data` 可以是:
- 元组 `(x_val, y_val)` 或 Numpy 数组或张量
- 元组 `(x_val, y_val, val_sample_weights)` 或 Numpy 数组。
- 数据集或数据集迭代器。
对于前两种情况,必须提供 `batch_size`。
对于最后一种情况,必须提供 `validation_steps`。
- __shuffle__: 布尔值(是否在每轮迭代之前混洗数据)或者字符串 (`batch`)。
`batch` 是处理 HDF5 数据限制的特殊选项,它对一个 batch 内部的数据进行混洗。
当 `steps_per_epoch` 非 `None` 时,这个参数无效。
- __class_weight__: 可选的字典,用来映射类索引(整数)到权重(浮点)值,用于加权损失函数(仅在训练期间)。
这可能有助于告诉模型「更多关注」来自代表性不足的类的样本。
- __sample_weight__: 训练样本的可选 Numpy 权重数组,用于对损失函数进行加权(仅在训练期间)。
你可以传递与输入样本长度相同的平坦(1D)Numpy 数组(权重和样本之间的 1:1 映射),
或者在时序数据的情况下,可以传递尺寸为 `(samples, sequence_length)` 的 2D 数组,以对每个样本的每个时间步施加不同的权重。
在这种情况下,你应该确保在 `compile()` 中指定 `sample_weight_mode="temporal"`。
这个参数在 `x` 是生成器或 `Sequence` 实例时不支持,应该提供 sample_weights 作为 `x` 的第 3 元素。
- __initial_epoch__: 整数。开始训练的轮次(有助于恢复之前的训练)。
- __steps_per_epoch__: 整数或 `None`。
在声明一个轮次完成并开始下一个轮次之前的总步数(样品批次)。
使用 TensorFlow 数据张量等输入张量进行训练时,默认值 `None` 等于数据集中样本的数量除以 batch 的大小,如果无法确定,则为 1。
- __validation_steps__: 只有在提供了 `validation_data` 并且时一个生成器时才有用。
表示在每个轮次结束时执行验证时,在停止之前要执行的步骤总数(样本批次)。
- __validation_freq__: 只有在提供了验证数据时才有用。整数或列表/元组/集合。
如果是整数,指定在新的验证执行之前要执行多少次训练,例如,`validation_freq=2` 在每 2 轮训练后执行验证。
如果是列表、元组或集合,指定执行验证的轮次,例如,`validation_freq=[1, 2, 10]` 表示在第 1、2、10 轮训练后执行验证。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该将不可传递的参数传递给生成器,因为它们不能轻松地传递给子进程。
- __**kwargs__: 用于向后兼容。
__返回__
一个 `History` 对象。其 `History.history` 属性是连续 epoch 训练损失和评估值,以及验证集损失和评估值的记录(如果适用)。
__异常__
- __RuntimeError__: 如果模型从未编译。
- __ValueError__: 在提供的输入数据与模型期望的不匹配的情况下。
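__示例__
A brief sketch (an illustrative addition) that reuses the compiled model from the `compile` example above; the data is random stand-in data, only to show the call.
```python
import numpy as np
from keras.utils import to_categorical

x = np.random.random((1000, 100))
y = to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)

history = model.fit(x, y, batch_size=32, epochs=5, validation_split=0.2)
print(history.history.keys())  # e.g. ['loss', 'acc', 'val_loss', 'val_acc']
```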
----
### evaluate
```python
evaluate(x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
```
在测试模式,返回误差值和评估标准值。
计算逐批次进行。
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __y__: 目标数据。与输入数据 `x` 类似,它可以是 Numpy 数组(序列)、
本地框架张量(序列)、Numpy数组序列(如果模型有多个输出)
或 None(默认)如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
如果模型输出层已命名,你也可以传递一个名称匹配 Numpy 数组的字典。
如果 `x` 是一个生成器,或 `keras.utils.Sequence` 实例,则不应该
指定 `y`(因为目标可以从 `x` 获得)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `Sequence` 实例形式,不要指定 `batch_size`,
因为它们会生成批次。
- __verbose__: 0, 1。日志显示模式。0 = 安静模式, 1 = 进度条。
- __sample_weight__: 测试样本的可选 Numpy 权重数组,用于对损失函数进行加权。
你可以传递与输入样本长度相同的平坦(1D)Numpy 数组(权重和样本之间的 1:1 映射),
或者在时序数据的情况下,可以传递尺寸为 `(samples, sequence_length)` 的 2D 数组,以对每个样本的每个时间步施加不同的权重。
在这种情况下,你应该确保在 `compile()` 中指定 `sample_weight_mode="temporal"`。
- __steps__: 整数或 `None`。
声明评估结束之前的总步数(批次样本)。默认值 `None` 时被忽略。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在评估时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该将不可传递的参数传递给生成器,因为它们不能轻松地传递给子进程。
__异常__
- __ValueError__: 若参数非法。
__返回__
标量测试误差(如果模型只有单个输出且没有评估指标)或标量列表(如果模型具有多个输出和/或指标)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
----
### predict
```python
predict(x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
```
为输入样本生成输出预测。
计算逐批次进行。
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `Sequence` 实例形式,不要指定 `batch_size`,
因为它们会生成批次。
- __verbose__: 日志显示模式,0 或 1。
- __steps__: 声明预测结束之前的总步数(批次样本)。默认值 `None` 时被忽略。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在预测时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该将不可传递的参数传递给生成器,因为它们不能轻松地传递给子进程。
__返回__
预测的 Numpy 数组。
__异常__
- __ValueError__: 如果提供的输入数据与模型的期望数据不匹配,或者有状态模型收到的数量不是批量大小的倍数。
----
### train_on_batch
```python
train_on_batch(x, y, sample_weight=None, class_weight=None, reset_metrics=True)
```
一批样品的单次梯度更新。
__Arguments__
- __x__: 训练数据的 Numpy 数组(如果模型只有一个输入),
或者是 Numpy 数组的列表(如果模型有多个输入)。
如果模型中的输入层被命名,你也可以传递一个字典,将输入层名称映射到 Numpy 数组。
- __y__: 目标(标签)数据的 Numpy 数组,或 Numpy 数组的列表(如果模型具有多个输出)。
如果模型中的输出层被命名,你也可以传递一个字典,将输出层名称映射到 Numpy 数组。
- __sample_weight__: 可选数组,与 x 长度相同,包含应用到模型损失函数的每个样本的权重。
如果是时域数据,你可以传递一个尺寸为 (samples, sequence_length) 的 2D 数组,
为每一个样本的每一个时间步应用不同的权重。
在这种情况下,你应该在 `compile()` 中指定 `sample_weight_mode="temporal"`。
- __class_weight__: 可选的字典,用来映射类索引(整数)到权重(浮点)值,以在训练时对模型的损失函数加权。
这可能有助于告诉模型 「更多关注」来自代表性不足的类的样本。
- __reset_metrics__: 如果为 `True`,返回的指标仅适用于该批次。
如果为 `False`,则指标将在批次之间有状态地累积。
__返回__
标量训练误差(如果模型只有单个输出且没有评估指标)或标量列表(如果模型具有多个输出和/或指标)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
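__示例__
A minimal sketch (added for illustration) of one manual gradient update, reusing the compiled model from the `compile` example above; the batch is random stand-in data.
```python
import numpy as np
from keras.utils import to_categorical

x_batch = np.random.random((32, 100))
y_batch = to_categorical(np.random.randint(10, size=(32, 1)), num_classes=10)

loss_and_metrics = model.train_on_batch(x_batch, y_batch)
print(model.metrics_names)  # e.g. ['loss', 'acc']
print(loss_and_metrics)
```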
----
### test_on_batch
```python
test_on_batch(x, y, sample_weight=None, reset_metrics=True)
```
在一批样本上评估模型。
__参数__
- __x__: 测试数据的 Numpy 数组(如果模型只有一个输入),
或者是 Numpy 数组的列表(如果模型有多个输入)。
如果模型中的输入层被命名,你也可以传递一个字典,将输入层名称映射到 Numpy 数组。
- __y__: 目标(标签)数据的 Numpy 数组,或 Numpy 数组的列表(如果模型具有多个输出)。
如果模型中的输出层被命名,你也可以传递一个字典,将输出层名称映射到 Numpy 数组。
- __sample_weight__: 可选数组,与 x 长度相同,包含应用到模型损失函数的每个样本的权重。
如果是时域数据,你可以传递一个尺寸为 (samples, sequence_length) 的 2D 数组,
为每一个样本的每一个时间步应用不同的权重。
- __reset_metrics__: 如果为 `True`,返回的指标仅适用于该批次。
如果为 `False`,则指标将在批次之间有状态地累积。
__返回__
标量测试误差(如果模型只有单个输出且没有评估指标)或标量列表(如果模型具有多个输出和/或指标)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
----
### predict_on_batch
```python
predict_on_batch(x)
```
返回一批样本的模型预测值。
__参数__
- __x__: 输入数据,Numpy 数组或列表(如果模型有多输入)。
__返回__
预测值的 Numpy 数组。
----
### fit_generator
```python
fit_generator(generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
```
使用 Python 生成器或 `Sequence` 实例逐批生成的数据,按批次训练模型。
生成器与模型并行运行,以提高效率。
例如,这可以让你在 CPU 上对图像进行实时数据增强,以在 GPU 上训练模型。
`keras.utils.Sequence` 的使用可以保证数据的顺序,以及当 `use_multiprocessing=True` 时,保证每个输入在每个 epoch 只使用一次。
__参数__
- __generator__: 一个生成器,或者一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,
以在使用多进程时避免数据的重复。
生成器的输出应该为以下之一:
- `(inputs, targets)` 元组
- `(inputs, targets, sample_weights)` 元组。
这个元组(生成器的单个输出)组成了单个的 batch。
因此,这个元组中的所有数组长度必须相同(与这一个 batch 的大小相等)。
不同的 batch 可能大小不同。
例如,一个 epoch 的最后一个 batch 往往比其他 batch 要小,
如果数据集的尺寸不能被 batch size 整除。
生成器将无限地在数据集上循环。当运行到第 `steps_per_epoch` 时,记一个 epoch 结束。
- __steps_per_epoch__: 整数。在声明一个 epoch 完成并开始下一个 epoch
之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于 `ceil(num_samples / batch_size)`。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __epochs__: 整数。训练模型的迭代总轮数。一个 epoch 是对所提供的整个数据的一轮迭代,
如 `steps_per_epoch` 所定义。注意,与 `initial_epoch` 一起使用,
epoch 应被理解为「最后一轮」。模型没有经历由 `epochs` 给出的多次迭代的训练,
而仅仅是直到达到索引 `epoch` 的轮次。
- __verbose__: 整数,0, 1 或 2。日志显示模式。
0 = 安静模式, 1 = 进度条, 2 = 每轮一行。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在训练时使用的回调函数。
详见 [callbacks](/callbacks)。
- __validation_data__: 它可以是以下之一:
- 验证数据的生成器或 `Sequence` 实例
- `(inputs, targets)` 元组
- `(inputs, targets, sample_weights)` 元组。
在每个 epoch 结束时评估损失和任何模型指标。该模型不会对此数据进行训练。
- __validation_steps__: 仅当 `validation_data` 是一个生成器时才可用。
表示在每一轮迭代末尾停止前从 `validation_data` 生成器生成地总步数(样本批次)。
它应该等于由 batch size 分割的验证数据集的样本数。
对于 `Sequence` 它是可选的:若未指定,将会使用 `len(validation_data)` 作为步数。
- __validation_freq__: 只有在提供了验证数据时才有用。整数或 `collections.Container` 实例(例如列表、元组等)。
如果是整数,指定在新的验证执行之前要执行多少次训练,例如,`validation_freq=2` 在每 2 轮训练后执行验证。
如果是 Container,指定执行验证的轮次,例如,`validation_freq=[1, 2, 10]` 表示在第 1、2、10 轮训练后执行验证。
- __class_weight__: 可选的将类索引(整数)映射到权重(浮点)值的字典,用于加权损失函数(仅在训练期间)。
这可以用来告诉模型「更多地关注」来自代表性不足的类的样本。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该将不可传递的参数传递给生成器,因为它们不能轻松地传递给子进程。
- __shuffle__: 布尔值。是否在每轮迭代之前打乱 batch 的顺序。
只能与 `Sequence` (`keras.utils.Sequence`) 实例同用。
当 `steps_per_epoch` 为 `None` 是无效。
- __initial_epoch__: 整数。开始训练的轮次(有助于恢复之前的训练)。
__返回__
一个 `History` 对象。其 `History.history` 属性是连续 epoch 训练损失和评估值,以及验证集损失和评估值的记录(如果适用)。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
__示例__
```python
def generate_arrays_from_file(path):
while True:
with open(path) as f:
for line in f:
# 从文件中的每一行生成输入数据和标签的 numpy 数组
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
----
### evaluate_generator
```python
evaluate_generator(generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
```
在数据生成器上评估模型。
这个生成器应该返回与 `test_on_batch` 所接收的同样的数据。
__参数__
- __generator__: 一个生成 `(inputs, targets)` 或 `(inputs, targets, sample_weights)` 的生成器,
或一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,以避免在使用多进程时数据的重复。
- __steps__: 在声明一个 epoch 完成并开始下一个 epoch 之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于你的数据集的样本数量除以批量大小。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在评估时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 生成器队列的最大尺寸。
- __workers__: 整数。使用的最大进程数量,如果使用基于进程的多线程。
如未指定,`workers` 将默认为 1。如果为 0,将在主线程上执行生成器。
- __use_multiprocessing__: 布尔值。如果 True,则使用基于进程的多线程。
请注意,由于此实现依赖于多进程,所以不应将不可传递的参数传递给生成器,因为它们不能被轻易地传递给子进程。
- __verbose__: 日志显示模式,0 或 1。
__返回__
标量测试误差(如果模型只有单个输出且没有评估指标)或标量列表(如果模型具有多个输出和/或指标)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
----
### predict_generator
```python
predict_generator(generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
```
为来自数据生成器的输入样本生成预测。
这个生成器应该返回与 `predict_on_batch` 所接收的同样的数据。
__参数__
- __generator__: 生成器,返回批量输入样本,
或一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,以避免在使用多进程时数据的重复。
- __steps__: 在声明一个 epoch 完成并开始下一个 epoch 之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于你的数据集的样本数量除以批量大小。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在预测时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 生成器队列的最大尺寸。
- __workers__: 整数。使用的最大进程数量,如果使用基于进程的多线程。
如未指定,`workers` 将默认为 1。如果为 0,将在主线程上执行生成器。
- __use_multiprocessing__: 如果 True,则使用基于进程的多线程。
请注意,由于此实现依赖于多进程,所以不应将不可传递的参数传递给生成器,因为它们不能被轻易地传递给子进程。
- __verbose__: 日志显示模式,0 或 1。
__返回__
预测值的 Numpy 数组。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
----
### get_layer
```python
get_layer(name=None, index=None)
```
根据名称(唯一)或索引值查找网络层。
如果同时提供了 `name` 和 `index`,则 `index` 将优先。
根据网络层的名称(唯一)或其索引返回该层。索引是基于水平图遍历的顺序(自下而上)。
__参数__
- __name__: 字符串,层的名字。
- __index__: 整数,层的索引。
__返回__
一个层实例。
__异常__
- __ValueError__: 如果层的名称或索引不正确。
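__示例__
A short sketch (an illustrative addition; layer names and sizes are assumptions) of looking a layer up by name or by index.
```python
from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(32, input_dim=100, name='hidden'))
model.add(Dense(10, name='output'))

layer = model.get_layer(name='hidden')
print(layer is model.get_layer(index=0))  # True
print(layer.output_shape)                 # (None, 32)
```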
| keras-docs-zh/sources/models/sequential.md/0 | {
"file_path": "keras-docs-zh/sources/models/sequential.md",
"repo_id": "keras-docs-zh",
"token_count": 17413
} | 82 |
# Owners for /guides directory
/guides/ @fchollet @MarkDaoust @pcoet
| keras-io/CODEOWNERS/0 | {
"file_path": "keras-io/CODEOWNERS",
"repo_id": "keras-io",
"token_count": 26
} | 83 |
<jupyter_start><jupyter_text>Conditional GAN**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/07/13**Last modified:** 2024/01/02**Description:** Training a GAN conditioned on class labels to generate handwritten digits. Generative Adversarial Networks (GANs) let us generate novel image data, video data,or audio data from a random input. Typically, the random input is sampledfrom a normal distribution, before going through a series of transformations that turnit into something plausible (image, video, audio, etc.).However, a simple [DCGAN](https://arxiv.org/abs/1511.06434) doesn't let us controlthe appearance (e.g. class) of the samples we're generating. For instance,with a GAN that generates MNIST handwritten digits, a simple DCGAN wouldn't let uschoose the class of digits we're generating.To be able to control what we generate, we need to _condition_ the GAN outputon a semantic input, such as the class of an image.In this example, we'll build a **Conditional GAN** that can generate MNIST handwrittendigits conditioned on a given class. Such a model can have various useful applications:* let's say you are dealing with an[imbalanced image dataset](https://developers.google.com/machine-learning/data-prep/construct/sampling-splitting/imbalanced-data),and you'd like to gather more examples for the skewed class to balance the dataset.Data collection can be a costly process on its own. You could instead train a Conditional GAN and useit to generate novel images for the class that needs balancing.* Since the generator learns to associate the generated samples with the class labels,its representations can also be used for [other downstream tasks](https://arxiv.org/abs/1809.11096).Following are the references used for developing this example:* [Conditional Generative Adversarial Nets](https://arxiv.org/abs/1411.1784)* [Lecture on Conditional Generation from Coursera](https://www.coursera.org/lecture/build-basic-generative-adversarial-networks-gans/conditional-generation-inputs-2OPrG)If you need a refresher on GANs, you can refer to the "Generative adversarial networks"section of[this resource](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/r-3/232).This example requires TensorFlow 2.5 or higher, as well as TensorFlow Docs, which can beinstalled using the following command:<jupyter_code>!pip install -q git+https://github.com/tensorflow/docs<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>import keras
from keras import layers
from keras import ops
from tensorflow_docs.vis import embed
import tensorflow as tf
import numpy as np
import imageio<jupyter_output><empty_output><jupyter_text>Constants and hyperparameters<jupyter_code>batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128<jupyter_output><empty_output><jupyter_text>Loading the MNIST dataset and preprocessing it<jupyter_code># We'll use all the available examples from both the training and test
# sets.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_labels = np.concatenate([y_train, y_test])
# Scale the pixel values to [0, 1] range, add a channel dimension to
# the images, and one-hot encode the labels.
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
all_labels = keras.utils.to_categorical(all_labels, 10)
# Create tf.data.Dataset.
dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
print(f"Shape of training images: {all_digits.shape}")
print(f"Shape of training labels: {all_labels.shape}")<jupyter_output><empty_output><jupyter_text>Calculating the number of input channel for the generator and discriminatorIn a regular (unconditional) GAN, we start by sampling noise (of some fixeddimension) from a normal distribution. In our case, we also need to accountfor the class labels. We will have to add the number of classes tothe input channels of the generator (noise input) as well as the discriminator(generated image input).<jupyter_code>generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
print(generator_in_channels, discriminator_in_channels)<jupyter_output><empty_output><jupyter_text>Creating the discriminator and generatorThe model definitions (`discriminator`, `generator`, and `ConditionalGAN`) have beenadapted from [this example](https://keras.io/guides/customizing_what_happens_in_fit/).<jupyter_code># Create the discriminator.
discriminator = keras.Sequential(
[
keras.layers.InputLayer((28, 28, discriminator_in_channels)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator.
generator = keras.Sequential(
[
keras.layers.InputLayer((generator_in_channels,)),
# We want to generate 128 + num_classes coefficients to reshape into a
# 7x7x(128 + num_classes) map.
layers.Dense(7 * 7 * generator_in_channels),
layers.LeakyReLU(negative_slope=0.2),
layers.Reshape((7, 7, generator_in_channels)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)<jupyter_output><empty_output><jupyter_text>Creating a `ConditionalGAN` model<jupyter_code>class ConditionalGAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
self.gen_loss_tracker = keras.metrics.Mean(name="generator_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="discriminator_loss")
@property
def metrics(self):
return [self.gen_loss_tracker, self.disc_loss_tracker]
def compile(self, d_optimizer, g_optimizer, loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, data):
# Unpack the data.
real_images, one_hot_labels = data
# Add dummy dimensions to the labels so that they can be concatenated with
# the images. This is for the discriminator.
image_one_hot_labels = one_hot_labels[:, :, None, None]
image_one_hot_labels = ops.repeat(
image_one_hot_labels, repeats=[image_size * image_size]
)
image_one_hot_labels = ops.reshape(
image_one_hot_labels, (-1, image_size, image_size, num_classes)
)
# Sample random points in the latent space and concatenate the labels.
# This is for the generator.
batch_size = ops.shape(real_images)[0]
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
random_vector_labels = ops.concatenate(
[random_latent_vectors, one_hot_labels], axis=1
)
# Decode the noise (guided by labels) to fake images.
generated_images = self.generator(random_vector_labels)
# Combine them with real images. Note that we are concatenating the labels
# with these images here.
fake_image_and_labels = ops.concatenate(
[generated_images, image_one_hot_labels], -1
)
real_image_and_labels = ops.concatenate([real_images, image_one_hot_labels], -1)
combined_images = ops.concatenate(
[fake_image_and_labels, real_image_and_labels], axis=0
)
# Assemble labels discriminating real from fake images.
labels = ops.concatenate(
[ops.ones((batch_size, 1)), ops.zeros((batch_size, 1))], axis=0
)
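        # (With the ordering above, fake images are labeled 1 and real images 0,
        # which is why the generator's "misleading" labels below are zeros.)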
# Train the discriminator.
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space.
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
random_vector_labels = ops.concatenate(
[random_latent_vectors, one_hot_labels], axis=1
)
# Assemble labels that say "all real images".
misleading_labels = ops.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
fake_images = self.generator(random_vector_labels)
fake_image_and_labels = ops.concatenate(
[fake_images, image_one_hot_labels], -1
)
predictions = self.discriminator(fake_image_and_labels)
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Monitor loss.
self.gen_loss_tracker.update_state(g_loss)
self.disc_loss_tracker.update_state(d_loss)
return {
"g_loss": self.gen_loss_tracker.result(),
"d_loss": self.disc_loss_tracker.result(),
}<jupyter_output><empty_output><jupyter_text>Training the Conditional GAN<jupyter_code>cond_gan = ConditionalGAN(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
cond_gan.fit(dataset, epochs=20)<jupyter_output><empty_output><jupyter_text>Interpolating between classes with the trained generator<jupyter_code># We first extract the trained generator from our Conditional GAN.
trained_gen = cond_gan.generator
# Number of interpolation steps, including the start and end class images.
num_interpolation = 9 # @param {type:"integer"}
# Sample noise for the interpolation.
interpolation_noise = keras.random.normal(shape=(1, latent_dim))
interpolation_noise = ops.repeat(interpolation_noise, repeats=num_interpolation)
interpolation_noise = ops.reshape(interpolation_noise, (num_interpolation, latent_dim))
def interpolate_class(first_number, second_number):
# Convert the start and end labels to one-hot encoded vectors.
first_label = keras.utils.to_categorical([first_number], num_classes)
second_label = keras.utils.to_categorical([second_number], num_classes)
first_label = ops.cast(first_label, "float32")
second_label = ops.cast(second_label, "float32")
# Calculate the interpolation vector between the two labels.
percent_second_label = ops.linspace(0, 1, num_interpolation)[:, None]
percent_second_label = ops.cast(percent_second_label, "float32")
interpolation_labels = (
first_label * (1 - percent_second_label) + second_label * percent_second_label
)
# Combine the noise and the labels and run inference with the generator.
noise_and_labels = ops.concatenate([interpolation_noise, interpolation_labels], 1)
fake = trained_gen.predict(noise_and_labels)
return fake
start_class = 2 # @param {type:"slider", min:0, max:9, step:1}
end_class = 6 # @param {type:"slider", min:0, max:9, step:1}
fake_images = interpolate_class(start_class, end_class)<jupyter_output><empty_output><jupyter_text>Here, we first sample noise from a normal distribution, repeat it `num_interpolation` times, and reshape the result accordingly. We then interpolate the one-hot class labels linearly, so that each of the `num_interpolation` samples blends the two class identities in a different proportion.<jupyter_code>fake_images *= 255.0
converted_images = fake_images.astype(np.uint8)
converted_images = ops.image.resize(converted_images, (96, 96)).numpy().astype(np.uint8)
imageio.mimsave("animation.gif", converted_images[:, :, :, 0], fps=1)
embed.embed_file("animation.gif")<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/conditional_gan.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/conditional_gan.ipynb",
"repo_id": "keras-io",
"token_count": 4764
} | 84 |
<jupyter_start><jupyter_text>A walk through latent space with Stable Diffusion**Authors:** Ian Stenbit, [fchollet](https://twitter.com/fchollet), [lukewood](https://twitter.com/luke_wood_ml)**Date created:** 2022/09/28**Last modified:** 2022/09/28**Description:** Explore the latent manifold of Stable Diffusion. OverviewGenerative image models learn a "latent manifold" of the visual world:a low-dimensional vector space where each point maps to an image.Going from such a point on the manifold back to a displayable imageis called "decoding" -- in the Stable Diffusion model, this is handled bythe "decoder" model.This latent manifold of images is continuous and interpolative, meaning that:1. Moving a little on the manifold only changes the corresponding image a little (continuity).2. For any two points A and B on the manifold (i.e. any two images), it is possibleto move from A to B via a path where each intermediate point is also on the manifold (i.e.is also a valid image). Intermediate points would be called "interpolations" betweenthe two starting images.Stable Diffusion isn't just an image model, though, it's also a natural language model.It has two latent spaces: the image representation space learned by theencoder used during training, and the prompt latent spacewhich is learned using a combination of pretraining and training-timefine-tuning._Latent space walking_, or _latent space exploration_, is the process ofsampling a point in latent space and incrementally changing the latentrepresentation. Its most common application is generating animationswhere each sampled point is fed to the decoder and is stored as aframe in the final animation.For high-quality latent representations, this produces coherent-lookinganimations. These animations can provide insight into the feature map of thelatent space, and can ultimately lead to improvements in the trainingprocess. One such GIF is displayed below:In this guide, we will show how to take advantage of the Stable Diffusion APIin KerasCV to perform prompt interpolation and circular walks throughStable Diffusion's visual latent manifold, as well as throughthe text encoder's latent manifold.This guide assumes the reader has ahigh-level understanding of Stable Diffusion.If you haven't already, you should startby reading the [Stable Diffusion Tutorial](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/).To start, we import KerasCV and load up a Stable Diffusion model using theoptimizations discussed in the tutorial[Generate images with Stable Diffusion](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/).Note that if you are running with a M1 Mac GPU you should not enable mixed precision.<jupyter_code>!pip install keras-cv --upgrade --quiet
import keras_cv
import keras
import matplotlib.pyplot as plt
from keras import ops
import numpy as np
import math
from PIL import Image
# Enable mixed precision
# (only do this if you have a recent NVIDIA GPU)
keras.mixed_precision.set_global_policy("mixed_float16")
# Instantiate the Stable Diffusion model
model = keras_cv.models.StableDiffusion(jit_compile=True)<jupyter_output><empty_output><jupyter_text>Interpolating between text promptsIn Stable Diffusion, a text prompt is first encoded into a vector,and that encoding is used to guide the diffusion process.The latent encoding vector has shape77x768 (that's huge!), and when we give Stable Diffusion a text prompt, we'regenerating images from just one such point on the latent manifold.To explore more of this manifold, we can interpolate between two text encodingsand generate images at those interpolated points:<jupyter_code>prompt_1 = "A watercolor painting of a Golden Retriever at the beach"
prompt_2 = "A still life DSLR photo of a bowl of fruit"
interpolation_steps = 5
encoding_1 = ops.squeeze(model.encode_text(prompt_1))
encoding_2 = ops.squeeze(model.encode_text(prompt_2))
interpolated_encodings = ops.linspace(encoding_1, encoding_2, interpolation_steps)
# Show the size of the latent manifold
print(f"Encoding shape: {encoding_1.shape}")<jupyter_output><empty_output><jupyter_text>Once we've interpolated the encodings, we can generate images from each point.Note that in order to maintain some stability between the resulting images wekeep the diffusion noise constant between images.<jupyter_code>seed = 12345
noise = keras.random.normal((512 // 8, 512 // 8, 4), seed=seed)
images = model.generate_image(
interpolated_encodings,
batch_size=interpolation_steps,
diffusion_noise=noise,
)<jupyter_output><empty_output><jupyter_text>Now that we've generated some interpolated images, let's take a look at them!Throughout this tutorial, we're going to export sequences of images as gifs sothat they can be easily viewed with some temporal context. For sequences ofimages where the first and last images don't match conceptually, we rubber-bandthe gif.If you're running in Colab, you can view your own GIFs by running:```from IPython.display import Image as IImageIImage("doggo-and-fruit-5.gif")```<jupyter_code>def export_as_gif(filename, images, frames_per_second=10, rubber_band=False):
if rubber_band:
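        # Append the middle frames in reverse so the gif plays forward and then
        # backward, producing a seamless loop between mismatched endpoints.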
images += images[2:-1][::-1]
images[0].save(
filename,
save_all=True,
append_images=images[1:],
duration=1000 // frames_per_second,
loop=0,
)
export_as_gif(
"doggo-and-fruit-5.gif",
[Image.fromarray(img) for img in images],
frames_per_second=2,
rubber_band=True,
)<jupyter_output><empty_output><jupyter_text>The results may seem surprising. Generally, interpolating between promptsproduces coherent looking images, and often demonstrates a progressive conceptshift between the contents of the two prompts. This is indicative of a highquality representation space, that closely mirrors the natural structureof the visual world.To best visualize this, we should do a much more fine-grained interpolation,using hundreds of steps. In order to keep batch size small (so that we don'tOOM our GPU), this requires manually batching our interpolatedencodings.<jupyter_code>interpolation_steps = 150
batch_size = 3
batches = interpolation_steps // batch_size
interpolated_encodings = ops.linspace(encoding_1, encoding_2, interpolation_steps)
batched_encodings = ops.split(interpolated_encodings, batches)
images = []
for batch in range(batches):
images += [
Image.fromarray(img)
for img in model.generate_image(
batched_encodings[batch],
batch_size=batch_size,
num_steps=25,
diffusion_noise=noise,
)
]
export_as_gif("doggo-and-fruit-150.gif", images, rubber_band=True)<jupyter_output><empty_output><jupyter_text>The resulting gif shows a much clearer and more coherent shift between the twoprompts. Try out some prompts of your own and experiment!We can even extend this concept for more than one image. For example, we caninterpolate between four prompts:<jupyter_code>prompt_1 = "A watercolor painting of a Golden Retriever at the beach"
prompt_2 = "A still life DSLR photo of a bowl of fruit"
prompt_3 = "The eiffel tower in the style of starry night"
prompt_4 = "An architectural sketch of a skyscraper"
interpolation_steps = 6
batch_size = 3
batches = (interpolation_steps**2) // batch_size
encoding_1 = ops.squeeze(model.encode_text(prompt_1))
encoding_2 = ops.squeeze(model.encode_text(prompt_2))
encoding_3 = ops.squeeze(model.encode_text(prompt_3))
encoding_4 = ops.squeeze(model.encode_text(prompt_4))
interpolated_encodings = ops.linspace(
ops.linspace(encoding_1, encoding_2, interpolation_steps),
ops.linspace(encoding_3, encoding_4, interpolation_steps),
interpolation_steps,
)
interpolated_encodings = ops.reshape(
interpolated_encodings, (interpolation_steps**2, 77, 768)
)
batched_encodings = ops.split(interpolated_encodings, batches)
images = []
for batch in range(batches):
images.append(
model.generate_image(
batched_encodings[batch],
batch_size=batch_size,
diffusion_noise=noise,
)
)
def plot_grid(
images,
path,
grid_size,
scale=2,
):
fig, axs = plt.subplots(
grid_size, grid_size, figsize=(grid_size * scale, grid_size * scale)
)
fig.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0)
plt.margins(x=0, y=0)
plt.axis("off")
for ax in axs.flat:
ax.axis("off")
images = images.astype(int)
for i in range(min(grid_size * grid_size, len(images))):
ax = axs.flat[i]
ax.imshow(images[i].astype("uint8"))
ax.axis("off")
for i in range(len(images), grid_size * grid_size):
axs.flat[i].axis("off")
axs.flat[i].remove()
plt.savefig(
fname=path,
pad_inches=0,
bbox_inches="tight",
transparent=False,
dpi=60,
)
images = np.concatenate(images)
plot_grid(images, "4-way-interpolation.jpg", interpolation_steps)<jupyter_output><empty_output><jupyter_text>We can also interpolate while allowing diffusion noise to vary by droppingthe `diffusion_noise` parameter:<jupyter_code>images = []
for batch in range(batches):
images.append(model.generate_image(batched_encodings[batch], batch_size=batch_size))
images = np.concatenate(images)
plot_grid(images, "4-way-interpolation-varying-noise.jpg", interpolation_steps)<jupyter_output><empty_output><jupyter_text>Next up -- let's go for some walks! A walk around a text promptOur next experiment will be to go for a walk around the latent manifoldstarting from a point produced by a particular prompt.<jupyter_code>walk_steps = 150
batch_size = 3
batches = walk_steps // batch_size
step_size = 0.005
encoding = ops.squeeze(
model.encode_text("The Eiffel Tower in the style of starry night")
)
# Note that (77, 768) is the shape of the text encoding.
delta = ops.ones_like(encoding) * step_size
walked_encodings = []
for step_index in range(walk_steps):
walked_encodings.append(encoding)
encoding += delta
walked_encodings = ops.stack(walked_encodings)
batched_encodings = ops.split(walked_encodings, batches)
images = []
for batch in range(batches):
images += [
Image.fromarray(img)
for img in model.generate_image(
batched_encodings[batch],
batch_size=batch_size,
num_steps=25,
diffusion_noise=noise,
)
]
export_as_gif("eiffel-tower-starry-night.gif", images, rubber_band=True)<jupyter_output><empty_output><jupyter_text>Perhaps unsurprisingly, walking too far from the encoder's latent manifoldproduces images that look incoherent. Try it for yourself by settingyour own prompt, and adjusting `step_size` to increase or decrease the magnitudeof the walk. Note that when the magnitude of the walk gets large, the walk oftenleads into areas which produce extremely noisy images. A circular walk through the diffusion noise space for a single promptOur final experiment is to stick to one prompt and explore the variety of imagesthat the diffusion model can produce from that prompt. We do this by controllingthe noise that is used to seed the diffusion process.We create two noise components, `x` and `y`, and do a walk from 0 to 2π, summingthe cosine of our `x` component and the sin of our `y` component to produce noise.Using this approach, the end of our walk arrives at the same noise inputs wherewe began our walk, so we get a "loopable" result!<jupyter_code>prompt = "An oil paintings of cows in a field next to a windmill in Holland"
encoding = ops.squeeze(model.encode_text(prompt))
walk_steps = 150
batch_size = 3
batches = walk_steps // batch_size
walk_noise_x = keras.random.normal(noise.shape, dtype="float64")
walk_noise_y = keras.random.normal(noise.shape, dtype="float64")
walk_scale_x = ops.cos(ops.linspace(0, 2, walk_steps) * math.pi)
walk_scale_y = ops.sin(ops.linspace(0, 2, walk_steps) * math.pi)
noise_x = ops.tensordot(walk_scale_x, walk_noise_x, axes=0)
noise_y = ops.tensordot(walk_scale_y, walk_noise_y, axes=0)
noise = ops.add(noise_x, noise_y)
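# Because cos(0) == cos(2π) and sin(0) == sin(2π), the first and last noise
# tensors coincide, which is what makes the resulting gif loop seamlessly.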
batched_noise = ops.split(noise, batches)
images = []
for batch in range(batches):
images += [
Image.fromarray(img)
for img in model.generate_image(
encoding,
batch_size=batch_size,
num_steps=25,
diffusion_noise=batched_noise[batch],
)
]
export_as_gif("cows.gif", images)<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/random_walks_with_stable_diffusion.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/random_walks_with_stable_diffusion.ipynb",
"repo_id": "keras-io",
"token_count": 4146
} | 85 |
# GPT text generation from scratch with KerasNLP
**Author:** [Jesse Chan](https://github.com/jessechancy)<br>
**Date created:** 2022/07/25<br>
**Last modified:** 2022/07/25<br>
**Description:** Using KerasNLP to train a mini-GPT model for text generation.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/text_generation_gpt.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/text_generation_gpt.py)
---
## Introduction
In this example, we will use KerasNLP to build a scaled-down Generative
Pre-trained Transformer (GPT) model. GPT is a Transformer-based model that allows you to generate
sophisticated text from a prompt.
We will train the model on the [simplebooks-92](https://arxiv.org/abs/1911.12391) corpus,
which is a dataset made from several novels. It is a good dataset for this example since
it has a small vocabulary and high word frequency, which is beneficial when training a
model with few parameters.
This example combines concepts from
[Text generation with a miniature GPT](https://keras.io/examples/generative/text_generation_with_miniature_gpt/)
with KerasNLP abstractions. We will demonstrate how KerasNLP tokenization, layers and
metrics simplify the training
process, and then show how to generate output text using the KerasNLP sampling utilities.
Note: If you are running this example on a Colab,
make sure to enable GPU runtime for faster training.
This example requires KerasNLP. You can install it via the following command:
`pip install keras-nlp`
---
## Setup
```python
!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
```python
import os
import keras_nlp
import keras
import tensorflow.data as tf_data
import tensorflow.strings as tf_strings
```
---
## Settings & hyperparameters
```python
# Data
BATCH_SIZE = 64
MIN_STRING_LEN = 512 # Strings shorter than this will be discarded
SEQ_LEN = 128 # Length of training sequences, in tokens
# Model
EMBED_DIM = 256
FEED_FORWARD_DIM = 128
NUM_HEADS = 3
NUM_LAYERS = 2
VOCAB_SIZE = 5000 # Limits parameters in model.
# Training
EPOCHS = 5
# Inference
NUM_TOKENS_TO_GENERATE = 80
```
---
## Load the data
Now, let's download the dataset! The SimpleBooks dataset consists of 1,573 Gutenberg books, and has
one of the smallest ratios of vocabulary size to word-level tokens. It has a vocabulary size of ~98k,
a third of WikiText-103's, with around the same number of tokens (~100M). This makes it easy to fit a small model.
```python
keras.utils.get_file(
origin="https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip",
extract=True,
)
dir = os.path.expanduser("~/.keras/datasets/simplebooks/")
# Load simplebooks-92 train set and filter out short lines.
raw_train_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/train.txt")
.filter(lambda x: tf_strings.length(x) > MIN_STRING_LEN)
.batch(BATCH_SIZE)
.shuffle(buffer_size=256)
)
# Load simplebooks-92 validation set and filter out short lines.
raw_val_ds = (
tf_data.TextLineDataset(dir + "simplebooks-92-raw/valid.txt")
.filter(lambda x: tf_strings.length(x) > MIN_STRING_LEN)
.batch(BATCH_SIZE)
)
```
<div class="k-default-codeblock">
```
Downloading data from https://dldata-public.s3.us-east-2.amazonaws.com/simplebooks.zip
282386239/282386239 ━━━━━━━━━━━━━━━━━━━━ 7s 0us/step
```
</div>
---
## Train the tokenizer
We train the tokenizer from the training dataset for a vocabulary size of `VOCAB_SIZE`,
which is a tuned hyperparameter. We want to limit the vocabulary as much as possible, as
we will see later on
that it has a large effect on the number of model parameters. We also don't want to include
*too few* vocabulary terms, or there would be too many out-of-vocabulary (OOV) sub-words. In
addition, three tokens are reserved in the vocabulary:
- `"[PAD]"` for padding sequences to `SEQ_LEN`. This token has index 0 in both
`reserved_tokens` and `vocab`, since `WordPieceTokenizer` (and other layers) consider
`0`/`vocab[0]` as the default padding.
- `"[UNK]"` for OOV sub-words, which should match the default `oov_token="[UNK]"` in
`WordPieceTokenizer`.
- `"[BOS]"` stands for beginning of sentence, but here technically it is a token
representing the beginning of each line of training data.
```python
# Train tokenizer vocabulary
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
raw_train_ds,
vocabulary_size=VOCAB_SIZE,
lowercase=True,
reserved_tokens=["[PAD]", "[UNK]", "[BOS]"],
)
```
---
## Load tokenizer
We use the vocabulary data to initialize
`keras_nlp.tokenizers.WordPieceTokenizer`. WordPieceTokenizer is an efficient
implementation of the WordPiece algorithm used by BERT and other models. It will strip,
lower-case and do other irreversible preprocessing operations.
```python
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab,
sequence_length=SEQ_LEN,
lowercase=True,
)
```
---
## Tokenize data
We preprocess the dataset by tokenizing and splitting it into `features` and `labels`.
```python
# packer adds a start token
start_packer = keras_nlp.layers.StartEndPacker(
sequence_length=SEQ_LEN,
start_value=tokenizer.token_to_id("[BOS]"),
)
def preprocess(inputs):
outputs = tokenizer(inputs)
features = start_packer(outputs)
labels = outputs
return features, labels
# Tokenize and split into train and label sequences.
train_ds = raw_train_ds.map(preprocess, num_parallel_calls=tf_data.AUTOTUNE).prefetch(
tf_data.AUTOTUNE
)
val_ds = raw_val_ds.map(preprocess, num_parallel_calls=tf_data.AUTOTUNE).prefetch(
tf_data.AUTOTUNE
)
```
---
## Build the model
We create our scaled down GPT model with the following layers:
- One `keras_nlp.layers.TokenAndPositionEmbedding` layer, which combines the embedding
for the token and its position.
- Multiple `keras_nlp.layers.TransformerDecoder` layers, with the default causal masking.
The layer has no cross-attention when run with decoder sequence only.
- One final dense linear layer
```python
inputs = keras.layers.Input(shape=(None,), dtype="int32")
# Embedding.
embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=VOCAB_SIZE,
sequence_length=SEQ_LEN,
embedding_dim=EMBED_DIM,
mask_zero=True,
)
x = embedding_layer(inputs)
# Transformer decoders.
for _ in range(NUM_LAYERS):
decoder_layer = keras_nlp.layers.TransformerDecoder(
num_heads=NUM_HEADS,
intermediate_dim=FEED_FORWARD_DIM,
)
x = decoder_layer(x) # Giving one argument only skips cross-attention.
# Output.
outputs = keras.layers.Dense(VOCAB_SIZE)(x)
model = keras.Model(inputs=inputs, outputs=outputs)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
perplexity = keras_nlp.metrics.Perplexity(from_logits=True, mask_token_id=0)
model.compile(optimizer="adam", loss=loss_fn, metrics=[perplexity])
```
Let's take a look at our model summary - a large majority of the
parameters are in the `token_and_position_embedding` and the output `dense` layer!
This means that the vocabulary size (`VOCAB_SIZE`) has a large effect on the size of the model,
while the number of Transformer decoder layers (`NUM_LAYERS`) doesn't affect it as much.
```python
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ token_and_position_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,312,768</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TokenAndPositionEmbedding</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ transformer_decoder │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">329,085</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TransformerDecoder</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ transformer_decoder_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">329,085</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TransformerDecoder</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5000</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,285,000</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">3,255,938</span> (12.42 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">3,255,938</span> (12.42 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
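
As a quick back-of-the-envelope check (an illustrative sketch using the constants defined
earlier, not part of the original training code), the two largest layers account for almost
all of these parameters:

```python
# Token + position embedding tables, and the final Dense projection.
embedding_params = VOCAB_SIZE * EMBED_DIM + SEQ_LEN * EMBED_DIM
output_params = EMBED_DIM * VOCAB_SIZE + VOCAB_SIZE  # kernel + bias
print(embedding_params)  # 1312768
print(output_params)  # 1285000
```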
---
## Training
Now that we have our model, let's train it with the `fit()` method.
```python
model.fit(train_ds, validation_data=val_ds, epochs=EPOCHS)
```
<div class="k-default-codeblock">
```
Epoch 1/5
2445/2445 ━━━━━━━━━━━━━━━━━━━━ 216s 66ms/step - loss: 5.0008 - perplexity: 180.0715 - val_loss: 4.2176 - val_perplexity: 68.0438
Epoch 2/5
2445/2445 ━━━━━━━━━━━━━━━━━━━━ 127s 48ms/step - loss: 4.1699 - perplexity: 64.7740 - val_loss: 4.0553 - val_perplexity: 57.7996
Epoch 3/5
2445/2445 ━━━━━━━━━━━━━━━━━━━━ 126s 47ms/step - loss: 4.0286 - perplexity: 56.2138 - val_loss: 4.0134 - val_perplexity: 55.4446
Epoch 4/5
2445/2445 ━━━━━━━━━━━━━━━━━━━━ 134s 50ms/step - loss: 3.9576 - perplexity: 52.3643 - val_loss: 3.9900 - val_perplexity: 54.1153
Epoch 5/5
2445/2445 ━━━━━━━━━━━━━━━━━━━━ 135s 51ms/step - loss: 3.9080 - perplexity: 49.8242 - val_loss: 3.9500 - val_perplexity: 52.0006
<keras.src.callbacks.history.History at 0x7f7de0365ba0>
```
</div>
---
## Inference
With our trained model, we can test it out to gauge its performance. To do this
we can seed our model with an input sequence starting with the `"[BOS]"` token,
and progressively sample the model by making predictions for each subsequent
token in a loop.
To start, let's build a prompt with the same shape as our model inputs, containing
only the `"[BOS]"` token.
```python
# The "packer" layers adds the [BOS] token for us.
prompt_tokens = start_packer(tokenizer([""]))
prompt_tokens
```
<div class="k-default-codeblock">
```
<tf.Tensor: shape=(1, 128), dtype=int32, numpy=
array([[2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=int32)>
```
</div>
We will use the `keras_nlp.samplers` module for inference, which requires a
callback function wrapping the model we just trained. This wrapper calls
the model and returns the logit predictions for the current token we are
generating.
Note: There are two pieces of more advanced functionality available when
defining your callback. The first is the ability to take in a `cache` of states
computed in previous generation steps, which can be used to speed up generation.
The second is the ability to output the final dense "hidden state" of each
generated token. This is used by `keras_nlp.samplers.ContrastiveSampler`, which
avoids repetition by penalizing repeated hidden states. Both are optional, and
we will ignore them for now.
```python
def next(prompt, cache, index):
logits = model(prompt)[:, index - 1, :]
# Ignore hidden states for now; only needed for contrastive search.
hidden_states = None
return logits, hidden_states, cache
```
Creating the wrapper function is the most complex part of using these functions. Now that
it's done, let's test out the different utilities, starting with greedy search.
### Greedy search
We greedily pick the most probable token at each timestep. In other words, we get the
argmax of the model output.
```python
sampler = keras_nlp.samplers.GreedySampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1, # Start sampling immediately after the [BOS] token.
)
txt = tokenizer.detokenize(output_tokens)
print(f"Greedy search generated text: \n{txt}\n")
```
<div class="k-default-codeblock">
```
Greedy search generated text:
[b'[BOS] " i \' m going to tell you , " said the boy , " i \' ll tell you , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good friend , and you \' ll be a good']
```
</div>
As you can see, greedy search starts out making some sense, but quickly starts repeating
itself. This is a common problem with text generation that can be fixed by some of the
probabilistic text generation utilities shown later on!
### Beam search
At a high-level, beam search keeps track of the `num_beams` most probable sequences at
each timestep, and predicts the best next token from all sequences. It is an improvement
over greedy search since it stores more possibilities. However, it is less efficient than
greedy search since it has to compute and store multiple potential sequences.
**Note:** beam search with `num_beams=1` is identical to greedy search.
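
For intuition, here is a tiny pure-Python sketch of the idea (illustrative only, with a
made-up, context-independent token distribution; this is not the KerasNLP implementation):

```python
import itertools
import math

# Hypothetical next-token log-probabilities, the same at every step.
log_probs = {"a": math.log(0.5), "b": math.log(0.3), "c": math.log(0.2)}


def toy_beam_search(steps, num_beams):
    beams = [("", 0.0)]  # (sequence, cumulative log-probability)
    for _ in range(steps):
        # Expand every beam with every candidate token, then keep the best ones.
        candidates = [
            (seq + token, score + lp)
            for (seq, score), (token, lp) in itertools.product(beams, log_probs.items())
        ]
        beams = sorted(candidates, key=lambda item: item[1], reverse=True)[:num_beams]
    return beams


print(toy_beam_search(steps=3, num_beams=2))  # [('aaa', ...), ('aab', ...)]
```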
```python
sampler = keras_nlp.samplers.BeamSampler(num_beams=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Beam search generated text: \n{txt}\n")
```
<div class="k-default-codeblock">
```
Beam search generated text:
[b'[BOS] " i don \' t know anything about it , " she said . " i don \' t know anything about it . i don \' t know anything about it , but i don \' t know anything about it . i don \' t know anything about it , but i don \' t know anything about it . i don \' t know anything about it , but i don \' t know it . i don \' t know it , but i don \' t know it . i don \' t know it , but i don \' t know it . i don \' t know it , but i don \' t know it . i don \'']
```
</div>
Similar to greedy search, beam search quickly starts repeating itself, since it is still
a deterministic method.
### Random search
Random search is our first probabilistic method. At each time step, it samples the next
token using the softmax probabilities provided by the model.
```python
sampler = keras_nlp.samplers.RandomSampler()
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Random search generated text: \n{txt}\n")
```
<div class="k-default-codeblock">
```
Random search generated text:
[b'[BOS] eleanor . like ice , not children would have suspicious forehead . they will see him , no goods in her plums . i have made a stump one , on the occasion , - - it is sacred , and one is unholy - plaything - - the partial consequences , and one refuge in a style of a boy , who was his grandmother . it was a young gentleman who bore off upon the middle of the day , rush and as he maltreated the female society , were growing at once . in and out of the craid little plays , stopping']
```
</div>
Voilà, no repetitions! However, with random search, we may see some nonsensical words
appearing since any word in the vocabulary has a chance of appearing with this sampling
method. This is fixed by our next search utility, top-k search.
### Top-K search
Similar to random search, we sample the next token from the probability distribution
provided by the model. The only difference is that here, we select out the top `k` most
probable tokens, and distribute the probability mass over them before sampling. This way,
we won't be sampling from low probability tokens, and hence we would have less
nonsensical words!
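
To make this concrete, here is a minimal NumPy sketch of top-k filtering over a made-up
distribution (illustrative only; the KerasNLP sampler handles this internally):

```python
import numpy as np


def top_k_filter(probs, k):
    # Keep only the k most probable tokens and renormalize over them.
    top_indices = np.argsort(probs)[-k:]
    filtered = np.zeros_like(probs)
    filtered[top_indices] = probs[top_indices]
    return filtered / filtered.sum()


toy_probs = np.array([0.5, 0.2, 0.15, 0.1, 0.05])
print(top_k_filter(toy_probs, k=2))  # [0.714 0.286 0.    0.    0.   ]
```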
```python
sampler = keras_nlp.samplers.TopKSampler(k=10)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")
```
<div class="k-default-codeblock">
```
Top-K search generated text:
[b'[BOS] " the young man was not the one , and the boy went away to the green forest . they were a little girl \' s wife , and the child loved him as much as he did , and he had often heard of a little girl who lived near the house . they were too tired to go , and when they went down to the barns and get into the barn , and they got the first of the barns that they had been taught to do so , and the little people went to their homes . she did , she told them that she had been a very clever , and they had made the first . she knew they']
```
</div>
### Top-P search
Even with the top-k search, there is something to improve upon. With top-k search, the
number `k` is fixed, which means it selects the same number of tokens for any probability
distribution. Consider two scenarios, one where the probability mass is concentrated over
2 words and another where the probability mass is evenly concentrated across 10. Should
we choose `k=2` or `k=10`? There is no one size that fits all `k` here.
This is where top-p search comes in! Instead of choosing a `k`, we choose a probability
`p` that we want the probabilities of the top tokens to sum up to. This way, we can
dynamically adjust the `k` based on the probability distribution. By setting `p=0.9`, if
90% of the probability mass is concentrated on the top 2 tokens, sampling is restricted
to just those 2 tokens. If instead the 90% is spread over the top 10 tokens, sampling is
similarly restricted to those 10 tokens.
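
The following NumPy sketch (illustrative only, with made-up distributions) shows how the
number of retained tokens adapts to the shape of the distribution:

```python
import numpy as np


def top_p_filter(probs, p):
    # Keep the smallest set of most-probable tokens whose cumulative
    # probability reaches p, then renormalize over that set.
    order = np.argsort(probs)[::-1]
    cumulative = np.cumsum(probs[order])
    cutoff = np.searchsorted(cumulative, p) + 1  # number of tokens to keep
    keep = order[:cutoff]
    filtered = np.zeros_like(probs)
    filtered[keep] = probs[keep]
    return filtered / filtered.sum()


peaked = np.array([0.75, 0.2, 0.03, 0.02])
flat = np.full(8, 0.125)
print(top_p_filter(peaked, p=0.9))  # keeps only the top 2 tokens
print(top_p_filter(flat, p=0.9))  # keeps all 8 tokens
```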
```python
sampler = keras_nlp.samplers.TopPSampler(p=0.5)
output_tokens = sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-P search generated text: \n{txt}\n")
```
<div class="k-default-codeblock">
```
Top-P search generated text:
[b'[BOS] the children were both born in the spring , and the youngest sister were very much like the other children , but they did not see them . they were very happy , and their mother was a beautiful one . the youngest was one of the youngest sister of the youngest , and the youngest baby was very fond of the children . when they came home , they would see a little girl in the house , and had the beautiful family , and the children of the children had to sit and look on their backs , and the eldest children were very long , and they were so bright and happy , as they were , they had never noticed their hair ,']
```
</div>
### Using callbacks for text generation
We can also wrap the utilities in a callback, which allows you to print out a prediction
sequence for every epoch of the model! Here is an example of a callback for top-k search:
```python
class TopKTextGenerator(keras.callbacks.Callback):
"""A callback to generate text from a trained model using top-k."""
def __init__(self, k):
self.sampler = keras_nlp.samplers.TopKSampler(k)
def on_epoch_end(self, epoch, logs=None):
output_tokens = self.sampler(
next=next,
prompt=prompt_tokens,
index=1,
)
txt = tokenizer.detokenize(output_tokens)
print(f"Top-K search generated text: \n{txt}\n")
text_generation_callback = TopKTextGenerator(k=10)
# Dummy training loop to demonstrate callback.
model.fit(train_ds.take(1), verbose=2, epochs=2, callbacks=[text_generation_callback])
```
<div class="k-default-codeblock">
```
Epoch 1/2
Top-K search generated text:
[b"[BOS] the young man was in the middle of a month , and he was able to take the crotch , but a long time , for he felt very well for himself in the sepoys ' s hands were chalks . he was the only boy , and he had a few years before been married , and the man said he was a tall one . he was a very handsome , and he was a very handsome young fellow , and a handsome , noble young man , but a boy , and man . he was a very handsome man , and was tall and handsome , and he looked like a gentleman . he was an"]
```
</div>
<div class="k-default-codeblock">
```
1/1 - 16s - 16s/step - loss: 3.9454 - perplexity: 51.6987
Epoch 2/2
Top-K search generated text:
[b'[BOS] " well , it is true . it is true that i should go to the house of a collector , in the matter of prussia that there is no other way there . there is no chance of being in the habit of being in the way of an invasion . i know not what i have done , but i have seen the man in the middle of a day . the next morning i shall take him to my father , for i am not the very day of the town , which would have been a little more than the one \' s daughter , i think it over and the whole affair will be']
```
</div>
<div class="k-default-codeblock">
```
1/1 - 17s - 17s/step - loss: 3.7860 - perplexity: 44.0932
<keras.src.callbacks.history.History at 0x7f7de0325600>
```
</div>
---
## Conclusion
To recap, in this example, we use KerasNLP layers to train a sub-word vocabulary,
tokenize training data, create a miniature GPT model, and perform inference with the
text generation library.
If you would like to understand how Transformers work, or learn more about training the
full GPT model, here are some further readings:
- Attention Is All You Need [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
- GPT-3 Paper [Brown et al., 2020](https://arxiv.org/abs/2005.14165)
| keras-io/examples/generative/md/text_generation_gpt.md/0 | {
"file_path": "keras-io/examples/generative/md/text_generation_gpt.md",
"repo_id": "keras-io",
"token_count": 8656
} | 86 |
"""
Title: Vector-Quantized Variational Autoencoders
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/07/21
Last modified: 2022/06/27
Description: Training a VQ-VAE for image reconstruction and codebook sampling for generation.
Accelerator: GPU
"""
"""
In this example, we develop a Vector Quantized Variational Autoencoder (VQ-VAE).
VQ-VAE was proposed in
[Neural Discrete Representation Learning](https://arxiv.org/abs/1711.00937)
by van der Oord et al. In standard VAEs, the latent space is continuous and is sampled
from a Gaussian distribution. It is generally harder to learn such a continuous
distribution via gradient descent. VQ-VAEs, on the other hand,
operate on a discrete latent space, making the optimization problem simpler. It does so
by maintaining a discrete *codebook*. The codebook is developed by
discretizing the distance between continuous embeddings and the encoded
outputs. These discrete code words are then fed to the decoder, which is trained
to generate reconstructed samples.
For an overview of VQ-VAEs, please refer to the original paper and
[this video explanation](https://www.youtube.com/watch?v=VZFVUrYcig0).
If you need a refresher on VAEs, you can refer to
[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/).
VQ-VAEs are one of the main recipes behind [DALL-E](https://openai.com/blog/dall-e/)
and the idea of a codebook is used in [VQ-GANs](https://arxiv.org/abs/2012.09841).
This example uses implementation details from the
[official VQ-VAE tutorial](https://github.com/deepmind/sonnet/blob/master/sonnet/examples/vqvae_example.ipynb)
from DeepMind.
## Requirements
To run this example, you will need TensorFlow 2.5 or higher, as well as
TensorFlow Probability, which can be installed using the command below.
"""
"""shell
pip install -q tensorflow-probability
"""
"""
## Imports
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_probability as tfp
import tensorflow as tf
"""
## `VectorQuantizer` layer
First, we implement a custom layer for the vector quantizer, which is the layer in between
the encoder and decoder. Consider an output from the encoder, with shape `(batch_size, height, width,
num_filters)`. The vector quantizer will first flatten this output, only keeping the
`num_filters` dimension intact. So, the shape would become `(batch_size * height * width,
num_filters)`. The rationale behind this is to treat the total number of filters as the size for
the latent embeddings.
An embedding table is then initialized to learn a codebook. We measure the squared L2
distance between the flattened encoder outputs and the code words of this codebook. We take the
code that yields the minimum distance, and we apply one-hot encoding to achieve quantization.
This way, the code yielding the minimum distance to the corresponding encoder output is
mapped as one and the remaining codes are mapped as zeros.
Since the quantization process is not differentiable, we apply a
[straight-through estimator](https://www.hassanaskary.com/python/pytorch/deep%20learning/2020/09/19/intuitive-explanation-of-straight-through-estimators.html)
in between the decoder and the encoder, so that the decoder gradients are directly propagated
to the encoder. As the encoder and decoder share the same channel space, the decoder gradients are
still meaningful to the encoder.
"""
class VectorQuantizer(layers.Layer):
def __init__(self, num_embeddings, embedding_dim, beta=0.25, **kwargs):
super().__init__(**kwargs)
self.embedding_dim = embedding_dim
self.num_embeddings = num_embeddings
# The `beta` parameter is best kept between [0.25, 2] as per the paper.
self.beta = beta
# Initialize the embeddings which we will quantize.
w_init = tf.random_uniform_initializer()
self.embeddings = tf.Variable(
initial_value=w_init(
shape=(self.embedding_dim, self.num_embeddings), dtype="float32"
),
trainable=True,
name="embeddings_vqvae",
)
def call(self, x):
# Calculate the input shape of the inputs and
# then flatten the inputs keeping `embedding_dim` intact.
input_shape = tf.shape(x)
flattened = tf.reshape(x, [-1, self.embedding_dim])
# Quantization.
encoding_indices = self.get_code_indices(flattened)
encodings = tf.one_hot(encoding_indices, self.num_embeddings)
quantized = tf.matmul(encodings, self.embeddings, transpose_b=True)
# Reshape the quantized values back to the original input shape
quantized = tf.reshape(quantized, input_shape)
# Calculate vector quantization loss and add that to the layer. You can learn more
# about adding losses to different layers here:
# https://keras.io/guides/making_new_layers_and_models_via_subclassing/. Check
# the original paper to get a handle on the formulation of the loss function.
commitment_loss = tf.reduce_mean((tf.stop_gradient(quantized) - x) ** 2)
codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2)
self.add_loss(self.beta * commitment_loss + codebook_loss)
# Straight-through estimator.
quantized = x + tf.stop_gradient(quantized - x)
return quantized
def get_code_indices(self, flattened_inputs):
        # Calculate the squared L2 distance between the inputs and the codes.
similarity = tf.matmul(flattened_inputs, self.embeddings)
distances = (
tf.reduce_sum(flattened_inputs**2, axis=1, keepdims=True)
+ tf.reduce_sum(self.embeddings**2, axis=0)
- 2 * similarity
)
# Derive the indices for minimum distances.
encoding_indices = tf.argmin(distances, axis=1)
return encoding_indices
"""
**A note on straight-through estimation**:
This line of code does the straight-through estimation part: `quantized = x +
tf.stop_gradient(quantized - x)`. During backpropagation, `(quantized - x)` won't be
included in the computation graph, and the gradients obtained for `quantized`
are copied over to `x` (the encoder output). Thanks to [this video](https://youtu.be/VZFVUrYcig0?t=1393)
for helping me understand this technique.
"""
"""
## Encoder and decoder
Now for the encoder and the decoder for the VQ-VAE. We will keep them small so
that their capacity is a good fit for the MNIST dataset. The implementation of the encoder and
decoder come from
[this example](https://keras.io/examples/generative/vae).
Note that activations _other than ReLU_ may not work for the encoder and decoder layers in the
quantization architecture: Leaky ReLU activated layers, for example, have proven difficult to
train, resulting in intermittent loss spikes that the model has trouble recovering from.
"""
def get_encoder(latent_dim=16):
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(
encoder_inputs
)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
encoder_outputs = layers.Conv2D(latent_dim, 1, padding="same")(x)
return keras.Model(encoder_inputs, encoder_outputs, name="encoder")
def get_decoder(latent_dim=16):
latent_inputs = keras.Input(shape=get_encoder(latent_dim).output.shape[1:])
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(
latent_inputs
)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, padding="same")(x)
return keras.Model(latent_inputs, decoder_outputs, name="decoder")
"""
## Standalone VQ-VAE model
"""
def get_vqvae(latent_dim=16, num_embeddings=64):
vq_layer = VectorQuantizer(num_embeddings, latent_dim, name="vector_quantizer")
encoder = get_encoder(latent_dim)
decoder = get_decoder(latent_dim)
inputs = keras.Input(shape=(28, 28, 1))
encoder_outputs = encoder(inputs)
quantized_latents = vq_layer(encoder_outputs)
reconstructions = decoder(quantized_latents)
return keras.Model(inputs, reconstructions, name="vq_vae")
get_vqvae().summary()
"""
Note that the output channels of the encoder should match the `latent_dim` for the vector
quantizer.
"""
"""
## Wrapping up the training loop inside `VQVAETrainer`
"""
class VQVAETrainer(keras.models.Model):
def __init__(self, train_variance, latent_dim=32, num_embeddings=128, **kwargs):
super().__init__(**kwargs)
self.train_variance = train_variance
self.latent_dim = latent_dim
self.num_embeddings = num_embeddings
self.vqvae = get_vqvae(self.latent_dim, self.num_embeddings)
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.vq_loss_tracker = keras.metrics.Mean(name="vq_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.vq_loss_tracker,
]
def train_step(self, x):
with tf.GradientTape() as tape:
# Outputs from the VQ-VAE.
reconstructions = self.vqvae(x)
# Calculate the losses.
reconstruction_loss = (
tf.reduce_mean((x - reconstructions) ** 2) / self.train_variance
)
total_loss = reconstruction_loss + sum(self.vqvae.losses)
# Backpropagation.
grads = tape.gradient(total_loss, self.vqvae.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.vqvae.trainable_variables))
# Loss tracking.
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.vq_loss_tracker.update_state(sum(self.vqvae.losses))
# Log results.
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"vqvae_loss": self.vq_loss_tracker.result(),
}
"""
## Load and preprocess the MNIST dataset
"""
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
x_train_scaled = (x_train / 255.0) - 0.5
x_test_scaled = (x_test / 255.0) - 0.5
data_variance = np.var(x_train / 255.0)
"""
## Train the VQ-VAE model
"""
vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=keras.optimizers.Adam())
vqvae_trainer.fit(x_train_scaled, epochs=30, batch_size=128)
"""
## Reconstruction results on the test set
"""
def show_subplot(original, reconstructed):
plt.subplot(1, 2, 1)
plt.imshow(original.squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(reconstructed.squeeze() + 0.5)
plt.title("Reconstructed")
plt.axis("off")
plt.show()
trained_vqvae_model = vqvae_trainer.vqvae
idx = np.random.choice(len(x_test_scaled), 10)
test_images = x_test_scaled[idx]
reconstructions_test = trained_vqvae_model.predict(test_images)
for test_image, reconstructed_image in zip(test_images, reconstructions_test):
show_subplot(test_image, reconstructed_image)
"""
These results look decent. You are encouraged to play with different hyperparameters
(especially the number of embeddings and the dimensions of the embeddings) and observe how
they affect the results.
"""
"""
## Visualizing the discrete codes
"""
encoder = vqvae_trainer.vqvae.get_layer("encoder")
quantizer = vqvae_trainer.vqvae.get_layer("vector_quantizer")
encoded_outputs = encoder.predict(test_images)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
for i in range(len(test_images)):
plt.subplot(1, 2, 1)
plt.imshow(test_images[i].squeeze() + 0.5)
plt.title("Original")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(codebook_indices[i])
plt.title("Code")
plt.axis("off")
plt.show()
"""
The figure above shows that the discrete codes have been able to capture some
regularities from the dataset. Now, how do we sample from this codebook to create
novel images? Since these codes are discrete and we imposed a categorical distribution
on them, we cannot use them yet to generate anything meaningful until we can generate likely
sequences of codes that we can give to the decoder.
The authors use a PixelCNN to train these codes so that they can be used as powerful priors to
generate novel examples. PixelCNN was proposed in
[Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)
by van der Oord et al. We borrow the implementation from
[this PixelCNN example](https://keras.io/examples/generative/pixelcnn/). It's an autoregressive
generative model where the outputs are conditional on the prior ones. In other words, a PixelCNN
generates an image on a pixel-by-pixel basis. For the purpose in this example, however, its task
is to generate code book indices instead of pixels directly. The trained VQ-VAE decoder is used
to map the indices generated by the PixelCNN back into the pixel space.
"""
"""
## PixelCNN hyperparameters
"""
num_residual_blocks = 2
num_pixelcnn_layers = 2
pixelcnn_input_shape = encoded_outputs.shape[1:-1]
print(f"Input shape of the PixelCNN: {pixelcnn_input_shape}")
"""
This input shape represents the reduction in the resolution performed by the encoder. With "same" padding,
this exactly halves the "resolution" of the output shape for each stride-2 convolution layer. So, with these
two layers, we end up with an encoder output tensor that is 7x7 along the two spatial axes, with the first axis as the batch
size and the last axis being the code book embedding size. Since the quantization layer in the autoencoder
maps these 7x7 tensors to indices of the code book, these output layer axis sizes must be matched by the
PixelCNN as the input shape. The task of the PixelCNN for this architecture is to generate _likely_ 7x7
arrangements of codebook indices.
Note that this shape is something to optimize for in larger-sized image domains, along with the code
book sizes. Since the PixelCNN is autoregressive, it needs to pass over each codebook index sequentially
in order to generate novel images from the codebook. Each stride-2 (more precisely,
stride `(2, 2)`) convolution layer will divide the image generation time by four. Note, however, that there
is probably a lower bound on this part: when the number of codes for the image to reconstruct is too small,
it has insufficient information for the decoder to represent the level of detail in the image, so the
output quality will suffer. This can be amended at least to some extent by using a larger code book.
Since the autoregressive part of the image generation procedure uses codebook indices, there is far less of
a performance penalty for using a larger code book: looking up a code in a larger code book is much
cheaper than iterating over a longer sequence of code book indices. The size of the code book does,
however, affect the batch size that can pass through the image generation procedure.
Finding the sweet spot for this trade-off can require some architecture tweaking and could very well differ
per dataset.
"""
"""
## PixelCNN model
The majority of this comes from
[this example](https://keras.io/examples/generative/pixelcnn/).
## Notes
Thanks to [Rein van 't Veer](https://github.com/reinvantveer) for improving this example with
copy-edits and minor code clean-ups.
"""
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
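# Mask type "A" (used only for the first PixelCNN layer) also zeroes out the center of
# the kernel so that a pixel never sees its own value, while mask type "B" keeps the
# center, since deeper layers only receive information from previously generated pixels.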
class PixelConvLayer(layers.Layer):
def __init__(self, mask_type, **kwargs):
super().__init__()
self.mask_type = mask_type
self.conv = layers.Conv2D(**kwargs)
def build(self, input_shape):
# Build the conv2d layer to initialize kernel variables
self.conv.build(input_shape)
# Use the initialized kernel to create the mask
kernel_shape = self.conv.kernel.get_shape()
self.mask = np.zeros(shape=kernel_shape)
self.mask[: kernel_shape[0] // 2, ...] = 1.0
self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
if self.mask_type == "B":
self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0
def call(self, inputs):
self.conv.kernel.assign(self.conv.kernel * self.mask)
return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
def __init__(self, filters, **kwargs):
super().__init__(**kwargs)
self.conv1 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
self.pixel_conv = PixelConvLayer(
mask_type="B",
filters=filters // 2,
kernel_size=3,
activation="relu",
padding="same",
)
self.conv2 = keras.layers.Conv2D(
filters=filters, kernel_size=1, activation="relu"
)
def call(self, inputs):
x = self.conv1(inputs)
x = self.pixel_conv(x)
x = self.conv2(x)
return keras.layers.add([inputs, x])
pixelcnn_inputs = keras.Input(shape=pixelcnn_input_shape, dtype=tf.int32)
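# The PixelCNN consumes discrete codebook indices, so we one-hot encode them into
# vectors of length `num_embeddings` before the first masked convolution.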
ohe = tf.one_hot(pixelcnn_inputs, vqvae_trainer.num_embeddings)
x = PixelConvLayer(
mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(ohe)
for _ in range(num_residual_blocks):
x = ResidualBlock(filters=128)(x)
for _ in range(num_pixelcnn_layers):
x = PixelConvLayer(
mask_type="B",
filters=128,
kernel_size=1,
strides=1,
activation="relu",
padding="valid",
)(x)
out = keras.layers.Conv2D(
filters=vqvae_trainer.num_embeddings, kernel_size=1, strides=1, padding="valid"
)(x)
pixel_cnn = keras.Model(pixelcnn_inputs, out, name="pixel_cnn")
pixel_cnn.summary()
"""
## Prepare data to train the PixelCNN
We will train the PixelCNN to learn a categorical distribution of the discrete codes.
First, we will generate code indices using the encoder and vector quantizer we just
trained. Our training objective will be to minimize the crossentropy loss between these
indices and the PixelCNN outputs. Here, the number of categories is equal to the number
of embeddings present in our codebook (128 in our case). The PixelCNN model is
trained to learn a distribution (as opposed to minimizing the L1/L2 loss), which is where
it gets its generative capabilities from.
"""
# Generate the codebook indices.
encoded_outputs = encoder.predict(x_train_scaled)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
print(f"Shape of the training data for PixelCNN: {codebook_indices.shape}")
"""
## PixelCNN training
"""
pixel_cnn.compile(
optimizer=keras.optimizers.Adam(3e-4),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
pixel_cnn.fit(
x=codebook_indices,
y=codebook_indices,
batch_size=128,
epochs=30,
validation_split=0.1,
)
"""
We can improve these scores with more training and hyperparameter tuning.
"""
"""
## Codebook sampling
Now that our PixelCNN is trained, we can sample distinct codes from its outputs and pass
them to our decoder to generate novel images.
"""
# Create a mini sampler model.
inputs = layers.Input(shape=pixel_cnn.input_shape[1:])
outputs = pixel_cnn(inputs, training=False)
categorical_layer = tfp.layers.DistributionLambda(tfp.distributions.Categorical)
outputs = categorical_layer(outputs)
sampler = keras.Model(inputs, outputs)
"""
We now construct a prior to generate images. Here, we will generate 10 images.
"""
# Create an empty array of priors.
batch = 10
priors = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols = priors.shape
# Iterate over the priors because generation has to be done sequentially pixel by pixel.
for row in range(rows):
for col in range(cols):
        # Feed the whole array and retrieve the pixel value probabilities for the next
        # pixel.
probs = sampler.predict(priors)
# Use the probabilities to pick pixel values and append the values to the priors.
priors[:, row, col] = probs[:, row, col]
print(f"Prior shape: {priors.shape}")
"""
We can now use our decoder to generate the images.
"""
# Perform an embedding lookup.
pretrained_embeddings = quantizer.embeddings
priors_ohe = tf.one_hot(priors.astype("int32"), vqvae_trainer.num_embeddings).numpy()
quantized = tf.matmul(
priors_ohe.astype("float32"), pretrained_embeddings, transpose_b=True
)
quantized = tf.reshape(quantized, (-1, *(encoded_outputs.shape[1:])))
# Generate novel images.
decoder = vqvae_trainer.vqvae.get_layer("decoder")
generated_samples = decoder.predict(quantized)
for i in range(batch):
plt.subplot(1, 2, 1)
plt.imshow(priors[i])
plt.title("Code")
plt.axis("off")
plt.subplot(1, 2, 2)
plt.imshow(generated_samples[i].squeeze() + 0.5)
plt.title("Generated Sample")
plt.axis("off")
plt.show()
"""
We can enhance the quality of these generated samples by tweaking the PixelCNN.
"""
"""
## Additional notes
* After the VQ-VAE paper was initially released, the authors developed an exponential
moving averaging scheme to update the embeddings inside the quantizer. If you're
interested you can check out
[this snippet](https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/nets/vqvae.py#L124).
* To further enhance the quality of the generated samples,
[VQ-VAE-2](https://arxiv.org/abs/1906.00446) was proposed that follows a cascaded
approach to learn the codebook and to generate the images.
"""
| keras-io/examples/generative/vq_vae.py/0 | {
"file_path": "keras-io/examples/generative/vq_vae.py",
"repo_id": "keras-io",
"token_count": 8041
} | 87 |
<jupyter_start><jupyter_text>Node Classification with Graph Neural Networks**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/05/30**Last modified:** 2021/05/30**Description:** Implementing a graph neural network model for predicting the topic of a paper given its citations. Introduction Many datasets in various machine learning (ML) applications have structural relationships between their entities, which can be represented as graphs. Such applications include social and communication network analysis, traffic prediction, and fraud detection. [Graph representation learning](https://www.cs.mcgill.ca/~wlh/grl_book/) aims to build and train models for graph datasets to be used for a variety of ML tasks. This example demonstrates a simple implementation of a [Graph Neural Network](https://arxiv.org/pdf/1901.00596.pdf) (GNN) model. The model is used for a node prediction task on the [Cora dataset](https://relational.fit.cvut.cz/dataset/CORA) to predict the subject of a paper given its words and citation network. Note that **we implement a Graph Convolution Layer from scratch** to provide a better understanding of how it works. However, there are a number of specialized TensorFlow-based libraries that provide rich GNN APIs, such as [Spectral](https://graphneural.network/), [StellarGraph](https://stellargraph.readthedocs.io/en/stable/README.html), and [GraphNets](https://github.com/deepmind/graph_nets). Setup<jupyter_code>import os
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers<jupyter_output><empty_output><jupyter_text>Prepare the Dataset The Cora dataset consists of 2,708 scientific papers classified into one of seven classes. The citation network consists of 5,429 links. Each paper has a binary word vector of size 1,433, indicating the presence of a corresponding word. Download the dataset The dataset has two tab-separated files: `cora.cites` and `cora.content`.1. The `cora.cites` includes the citation records with two columns: `cited_paper_id` (target) and `citing_paper_id` (source).2. The `cora.content` includes the paper content records with 1,435 columns: `paper_id`, `subject`, and 1,433 binary features. Let's download the dataset.<jupyter_code>zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")<jupyter_output><empty_output><jupyter_text>Process and visualize the datasetThen we load the citations data into a Pandas DataFrame.<jupyter_code>citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
print("Citations shape:", citations.shape)<jupyter_output><empty_output><jupyter_text>Now we display a sample of the `citations` DataFrame.The `target` column includes the paper ids cited by the paper ids in the `source` column.<jupyter_code>citations.sample(frac=1).head()<jupyter_output><empty_output><jupyter_text>Now let's load the papers data into a Pandas DataFrame.<jupyter_code>column_names = ["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"]
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"), sep="\t", header=None, names=column_names,
)
print("Papers shape:", papers.shape)<jupyter_output><empty_output><jupyter_text>Now we display a sample of the `papers` DataFrame. The DataFrame includes the `paper_id`and the `subject` columns, as well as 1,433 binary column representing whether a term existsin the paper or not.<jupyter_code>print(papers.sample(5).T)<jupyter_output><empty_output><jupyter_text>Let's display the count of the papers in each subject.<jupyter_code>print(papers.subject.value_counts())<jupyter_output><empty_output><jupyter_text>We convert the paper ids and the subjects into zero-based indices.<jupyter_code>class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])<jupyter_output><empty_output><jupyter_text>Now let's visualize the citation graph. Each node in the graph represents a paper,and the color of the node corresponds to its subject. Note that we only show a sample ofthe papers in the dataset.<jupyter_code>plt.figure(figsize=(10, 10))
colors = papers["subject"].tolist()
cora_graph = nx.from_pandas_edgelist(citations.sample(n=1500))
subjects = list(papers[papers["paper_id"].isin(list(cora_graph.nodes))]["subject"])
nx.draw_spring(cora_graph, node_size=15, node_color=subjects)<jupyter_output><empty_output><jupyter_text>Split the dataset into stratified train and test sets<jupyter_code>train_data, test_data = [], []
for _, group_data in papers.groupby("subject"):
# Select around 50% of the dataset for training.
random_selection = np.random.rand(len(group_data.index)) <= 0.5
train_data.append(group_data[random_selection])
test_data.append(group_data[~random_selection])
train_data = pd.concat(train_data).sample(frac=1)
test_data = pd.concat(test_data).sample(frac=1)
print("Train data shape:", train_data.shape)
print("Test data shape:", test_data.shape)<jupyter_output><empty_output><jupyter_text>Implement Train and Evaluate Experiment<jupyter_code>hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
num_epochs = 300
batch_size = 256<jupyter_output><empty_output><jupyter_text>This function compiles and trains an input model using the given training data.<jupyter_code>def run_experiment(model, x_train, y_train):
# Compile the model.
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", patience=50, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
epochs=num_epochs,
batch_size=batch_size,
validation_split=0.15,
callbacks=[early_stopping],
)
return history<jupyter_output><empty_output><jupyter_text>This function displays the loss and accuracy curves of the model during training.<jupyter_code>def display_learning_curves(history):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
ax1.plot(history.history["loss"])
ax1.plot(history.history["val_loss"])
ax1.legend(["train", "test"], loc="upper right")
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Loss")
ax2.plot(history.history["acc"])
ax2.plot(history.history["val_acc"])
ax2.legend(["train", "test"], loc="upper right")
ax2.set_xlabel("Epochs")
ax2.set_ylabel("Accuracy")
plt.show()<jupyter_output><empty_output><jupyter_text>Implement Feedforward Network (FFN) ModuleWe will use this module in the baseline and the GNN models.<jupyter_code>def create_ffn(hidden_units, dropout_rate, name=None):
fnn_layers = []
for units in hidden_units:
fnn_layers.append(layers.BatchNormalization())
fnn_layers.append(layers.Dropout(dropout_rate))
fnn_layers.append(layers.Dense(units, activation=tf.nn.gelu))
return keras.Sequential(fnn_layers, name=name)<jupyter_output><empty_output><jupyter_text>Build a Baseline Neural Network Model Prepare the data for the baseline model<jupyter_code>feature_names = list(set(papers.columns) - {"paper_id", "subject"})
num_features = len(feature_names)
num_classes = len(class_idx)
# Create train and test features as a numpy array.
x_train = train_data[feature_names].to_numpy()
x_test = test_data[feature_names].to_numpy()
# Create train and test targets as a numpy array.
y_train = train_data["subject"]
y_test = test_data["subject"]<jupyter_output><empty_output><jupyter_text>Implement a baseline classifierWe add five FFN blocks with skip connections, so that we generate a baseline model withroughly the same number of parameters as the GNN models to be built later.<jupyter_code>def create_baseline_model(hidden_units, num_classes, dropout_rate=0.2):
inputs = layers.Input(shape=(num_features,), name="input_features")
x = create_ffn(hidden_units, dropout_rate, name=f"ffn_block1")(inputs)
for block_idx in range(4):
# Create an FFN block.
x1 = create_ffn(hidden_units, dropout_rate, name=f"ffn_block{block_idx + 2}")(x)
# Add skip connection.
x = layers.Add(name=f"skip_connection{block_idx + 2}")([x, x1])
# Compute logits.
logits = layers.Dense(num_classes, name="logits")(x)
# Create the model.
return keras.Model(inputs=inputs, outputs=logits, name="baseline")
baseline_model = create_baseline_model(hidden_units, num_classes, dropout_rate)
baseline_model.summary()<jupyter_output><empty_output><jupyter_text>Train the baseline classifier<jupyter_code>history = run_experiment(baseline_model, x_train, y_train)<jupyter_output><empty_output><jupyter_text>Let's plot the learning curves.<jupyter_code>display_learning_curves(history)<jupyter_output><empty_output><jupyter_text>Now we evaluate the baseline model on the test data split.<jupyter_code>_, test_accuracy = baseline_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Examine the baseline model predictionsLet's create new data instances by randomly generating binary word vectors with respect tothe word presence probabilities.<jupyter_code>def generate_random_instances(num_instances):
token_probability = x_train.mean(axis=0)
instances = []
for _ in range(num_instances):
probabilities = np.random.uniform(size=len(token_probability))
instance = (probabilities <= token_probability).astype(int)
instances.append(instance)
return np.array(instances)
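# (Aside) The helper above samples each word independently with probability equal to
# that word's empirical frequency in the training set, which yields synthetic
# bag-of-words vectors with a realistic sparsity pattern.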
def display_class_probabilities(probabilities):
for instance_idx, probs in enumerate(probabilities):
print(f"Instance {instance_idx + 1}:")
for class_idx, prob in enumerate(probs):
print(f"- {class_values[class_idx]}: {round(prob * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Now we show the baseline model predictions given these randomly generated instances.<jupyter_code>new_instances = generate_random_instances(num_classes)
logits = baseline_model.predict(new_instances)
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)<jupyter_output><empty_output><jupyter_text>Build a Graph Neural Network Model Prepare the data for the graph model Preparing and loading the graph data into the model for training is the most challenging part in GNN models, which is addressed in different ways by the specialised libraries. In this example, we show a simple approach for preparing and using graph data that is suitable if your dataset consists of a single graph that fits entirely in memory. The graph data is represented by the `graph_info` tuple, which consists of the following three elements:1. `node_features`: This is a `[num_nodes, num_features]` NumPy array that includes the node features. In this dataset, the nodes are the papers, and the `node_features` are the word-presence binary vectors of each paper.2. `edges`: This is a `[2, num_edges]` NumPy array representing a sparse [adjacency matrix](https://en.wikipedia.org/wiki/Adjacency_matrix:~:text=In%20graph%20theory%20and%20computer,with%20zeros%20on%20its%20diagonal.) of the links between the nodes. In this example, the links are the citations between the papers.3. `edge_weights` (optional): This is a `[num_edges]` NumPy array that includes the edge weights, which *quantify* the relationships between nodes in the graph. In this example, there are no weights for the paper citations.<jupyter_code># Create an edges array (sparse adjacency matrix) of shape [2, num_edges].
edges = citations[["source", "target"]].to_numpy().T
# Create an edge weights array of ones.
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features].
node_features = tf.cast(
papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.dtypes.float32
)
# Create graph info tuple with node_features, edges, and edge_weights.
graph_info = (node_features, edges, edge_weights)
print("Edges shape:", edges.shape)
print("Nodes shape:", node_features.shape)<jupyter_output><empty_output><jupyter_text>Implement a graph convolution layerWe implement a graph convolution module as a [Keras Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer?version=nightly).Our `GraphConvLayer` performs the following steps:1. **Prepare**: The input node representations are processed using a FFN to produce a *message*. You can simplifythe processing by only applying linear transformation to the representations.2. **Aggregate**: The messages of the neighbours of each node are aggregated withrespect to the `edge_weights` using a *permutation invariant* pooling operation, such as *sum*, *mean*, and *max*,to prepare a single aggregated message for each node. See, for example, [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum)APIs used to aggregate neighbour messages.3. **Update**: The `node_repesentations` and `aggregated_messages`—both of shape `[num_nodes, representation_dim]`—are combined and processed to produce the new state of the node representations (node embeddings).If `combination_type` is `gru`, the `node_repesentations` and `aggregated_messages` are stacked to create a sequence,then processed by a GRU layer. Otherwise, the `node_repesentations` and `aggregated_messages` are addedor concatenated, then processed using a FFN.The technique implemented use ideas from [Graph Convolutional Networks](https://arxiv.org/abs/1609.02907),[GraphSage](https://arxiv.org/abs/1706.02216), [Graph Isomorphism Network](https://arxiv.org/abs/1810.00826),[Simple Graph Networks](https://arxiv.org/abs/1902.07153), and[Gated Graph Sequence Neural Networks](https://arxiv.org/abs/1511.05493).Two other key techniques that are not covered are [Graph Attention Networks](https://arxiv.org/abs/1710.10903)and [Message Passing Neural Networks](https://arxiv.org/abs/1704.01212).<jupyter_code>def create_gru(hidden_units, dropout_rate):
inputs = keras.layers.Input(shape=(2, hidden_units[0]))
x = inputs
for units in hidden_units:
x = layers.GRU(
units=units,
activation="tanh",
recurrent_activation="sigmoid",
return_sequences=True,
dropout=dropout_rate,
return_state=False,
recurrent_dropout=dropout_rate,
)(x)
return keras.Model(inputs=inputs, outputs=x)
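# (Illustrative aside, not used by the model below.) The "aggregate" step relies on
# segment ops such as tf.math.unsorted_segment_sum: messages whose edges share the
# same target node index are summed into that node's slot. Hypothetical values:
print(
    tf.math.unsorted_segment_sum(
        tf.constant([[1.0], [2.0], [3.0], [4.0]]),  # one message per edge
        tf.constant([0, 0, 2, 1]),  # target node index of each edge
        num_segments=3,
    )
)  # -> node 0: 3.0, node 1: 4.0, node 2: 3.0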
class GraphConvLayer(layers.Layer):
def __init__(
self,
hidden_units,
dropout_rate=0.2,
aggregation_type="mean",
combination_type="concat",
normalize=False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.aggregation_type = aggregation_type
self.combination_type = combination_type
self.normalize = normalize
self.ffn_prepare = create_ffn(hidden_units, dropout_rate)
if self.combination_type == "gru":
self.update_fn = create_gru(hidden_units, dropout_rate)
else:
self.update_fn = create_ffn(hidden_units, dropout_rate)
def prepare(self, node_repesentations, weights=None):
# node_repesentations shape is [num_edges, embedding_dim].
messages = self.ffn_prepare(node_repesentations)
if weights is not None:
messages = messages * tf.expand_dims(weights, -1)
return messages
def aggregate(self, node_indices, neighbour_messages, node_repesentations):
# node_indices shape is [num_edges].
# neighbour_messages shape: [num_edges, representation_dim].
# node_repesentations shape is [num_nodes, representation_dim].
num_nodes = node_repesentations.shape[0]
if self.aggregation_type == "sum":
aggregated_message = tf.math.unsorted_segment_sum(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "mean":
aggregated_message = tf.math.unsorted_segment_mean(
neighbour_messages, node_indices, num_segments=num_nodes
)
elif self.aggregation_type == "max":
aggregated_message = tf.math.unsorted_segment_max(
neighbour_messages, node_indices, num_segments=num_nodes
)
else:
raise ValueError(f"Invalid aggregation type: {self.aggregation_type}.")
return aggregated_message
def update(self, node_repesentations, aggregated_messages):
# node_repesentations shape is [num_nodes, representation_dim].
# aggregated_messages shape is [num_nodes, representation_dim].
if self.combination_type == "gru":
# Create a sequence of two elements for the GRU layer.
h = tf.stack([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "concat":
# Concatenate the node_repesentations and aggregated_messages.
h = tf.concat([node_repesentations, aggregated_messages], axis=1)
elif self.combination_type == "add":
# Add node_repesentations and aggregated_messages.
h = node_repesentations + aggregated_messages
else:
raise ValueError(f"Invalid combination type: {self.combination_type}.")
# Apply the processing function.
node_embeddings = self.update_fn(h)
if self.combination_type == "gru":
node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]
if self.normalize:
node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
return node_embeddings
def call(self, inputs):
"""Process the inputs to produce the node_embeddings.
inputs: a tuple of three elements: node_repesentations, edges, edge_weights.
Returns: node_embeddings of shape [num_nodes, representation_dim].
"""
node_repesentations, edges, edge_weights = inputs
# Get node_indices (source) and neighbour_indices (target) from edges.
node_indices, neighbour_indices = edges[0], edges[1]
# neighbour_repesentations shape is [num_edges, representation_dim].
neighbour_repesentations = tf.gather(node_repesentations, neighbour_indices)
# Prepare the messages of the neighbours.
neighbour_messages = self.prepare(neighbour_repesentations, edge_weights)
# Aggregate the neighbour messages.
aggregated_messages = self.aggregate(
node_indices, neighbour_messages, node_repesentations
)
# Update the node embedding with the neighbour messages.
        return self.update(node_repesentations, aggregated_messages)<jupyter_output><empty_output><jupyter_text>Implement a graph neural network node classifier The GNN classification model follows the [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843) approach, as follows:1. Apply preprocessing using FFN to the node features to generate initial node representations.2. Apply one or more graph convolutional layers, with skip connections, to the node representations to produce node embeddings.3. Apply post-processing using FFN to the node embeddings to generate the final node embeddings.4. Feed the node embeddings into a Softmax layer to predict the node class.Each added graph convolutional layer captures information from a further level of neighbours. However, adding many graph convolutional layers can cause oversmoothing, where the model produces similar embeddings for all the nodes.Note that the `graph_info` is passed to the constructor of the Keras model, and used as a *property* of the Keras model object, rather than as input data for training or prediction.The model will accept a **batch** of `node_indices`, which are used to look up the node features and neighbours from the `graph_info`.<jupyter_code>class GNNNodeClassifier(tf.keras.Model):
def __init__(
self,
graph_info,
num_classes,
hidden_units,
aggregation_type="sum",
combination_type="concat",
dropout_rate=0.2,
normalize=True,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
# Unpack graph_info to three elements: node_features, edges, and edge_weight.
node_features, edges, edge_weights = graph_info
self.node_features = node_features
self.edges = edges
self.edge_weights = edge_weights
# Set edge_weights to ones if not provided.
if self.edge_weights is None:
self.edge_weights = tf.ones(shape=edges.shape[1])
# Scale edge_weights to sum to 1.
self.edge_weights = self.edge_weights / tf.math.reduce_sum(self.edge_weights)
# Create a process layer.
self.preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess")
# Create the first GraphConv layer.
self.conv1 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv1",
)
# Create the second GraphConv layer.
self.conv2 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv2",
)
# Create a postprocess layer.
self.postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess")
# Create a compute logits layer.
self.compute_logits = layers.Dense(units=num_classes, name="logits")
def call(self, input_node_indices):
# Preprocess the node_features to produce node representations.
x = self.preprocess(self.node_features)
# Apply the first graph conv layer.
x1 = self.conv1((x, self.edges, self.edge_weights))
# Skip connection.
x = x1 + x
# Apply the second graph conv layer.
x2 = self.conv2((x, self.edges, self.edge_weights))
# Skip connection.
x = x2 + x
# Postprocess node embedding.
x = self.postprocess(x)
# Fetch node embeddings for the input node_indices.
node_embeddings = tf.gather(x, input_node_indices)
# Compute logits
return self.compute_logits(node_embeddings)<jupyter_output><empty_output><jupyter_text>Let's test instantiating and calling the GNN model.Notice that if you provide `N` node indices, the output will be a tensor of shape `[N, num_classes]`,regardless of the size of the graph.<jupyter_code>gnn_model = GNNNodeClassifier(
graph_info=graph_info,
num_classes=num_classes,
hidden_units=hidden_units,
dropout_rate=dropout_rate,
name="gnn_model",
)
print("GNN output shape:", gnn_model([1, 10, 100]))
gnn_model.summary()<jupyter_output><empty_output><jupyter_text>Train the GNN model Note that we use the standard *supervised* cross-entropy loss to train the model. However, we can add another *self-supervised* loss term for the generated node embeddings that makes sure that neighbouring nodes in the graph have similar representations, while faraway nodes have dissimilar representations.<jupyter_code>x_train = train_data.paper_id.to_numpy()
history = run_experiment(gnn_model, x_train, y_train)<jupyter_output><empty_output><jupyter_text>Let's plot the learning curves<jupyter_code>display_learning_curves(history)<jupyter_output><empty_output><jupyter_text>Now we evaluate the GNN model on the test data split.The results may vary depending on the training sample, however the GNN model always outperformsthe baseline model in terms of the test accuracy.<jupyter_code>x_test = test_data.paper_id.to_numpy()
_, test_accuracy = gnn_model.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Examine the GNN model predictionsLet's add the new instances as nodes to the `node_features`, and generate links(citations) to existing nodes.<jupyter_code># First we add the N new_instances as nodes to the graph
# by appending the new_instance to node_features.
num_nodes = node_features.shape[0]
new_node_features = np.concatenate([node_features, new_instances])
# Second we add the M edges (citations) from each new node to a set
# of existing nodes in a particular subject
new_node_indices = [i + num_nodes for i in range(num_classes)]
new_citations = []
for subject_idx, group in papers.groupby("subject"):
subject_papers = list(group.paper_id)
    # Select x random papers from the specific subject.
selected_paper_indices1 = np.random.choice(subject_papers, 5)
# Select random y papers from any subject (where y < x).
selected_paper_indices2 = np.random.choice(list(papers.paper_id), 2)
# Merge the selected paper indices.
selected_paper_indices = np.concatenate(
[selected_paper_indices1, selected_paper_indices2], axis=0
)
# Create edges between a citing paper idx and the selected cited papers.
citing_paper_indx = new_node_indices[subject_idx]
for cited_paper_idx in selected_paper_indices:
new_citations.append([citing_paper_indx, cited_paper_idx])
new_citations = np.array(new_citations).T
new_edges = np.concatenate([edges, new_citations], axis=1)<jupyter_output><empty_output><jupyter_text>Now let's update the `node_features` and the `edges` in the GNN model.<jupyter_code>print("Original node_features shape:", gnn_model.node_features.shape)
print("Original edges shape:", gnn_model.edges.shape)
gnn_model.node_features = new_node_features
gnn_model.edges = new_edges
gnn_model.edge_weights = tf.ones(shape=new_edges.shape[1])
print("New node_features shape:", gnn_model.node_features.shape)
print("New edges shape:", gnn_model.edges.shape)
logits = gnn_model.predict(tf.convert_to_tensor(new_node_indices))
probabilities = keras.activations.softmax(tf.convert_to_tensor(logits)).numpy()
display_class_probabilities(probabilities)<jupyter_output><empty_output> | keras-io/examples/graph/ipynb/gnn_citations.ipynb/0 | {
"file_path": "keras-io/examples/graph/ipynb/gnn_citations.ipynb",
"repo_id": "keras-io",
"token_count": 9632
} | 88 |
<jupyter_start><jupyter_text>Writing Keras Models With TensorFlow NumPy**Author:** [lukewood](https://lukewood.xyz)**Date created:** 2021/08/28**Last modified:** 2021/08/28**Description:** Overview of how to use the TensorFlow NumPy API to write Keras models. Introduction[NumPy](https://numpy.org/) is a hugely successful Python linear algebra library.TensorFlow recently launched [tf_numpy](https://www.tensorflow.org/guide/tf_numpy), aTensorFlow implementation of a large subset of the NumPy API.Thanks to `tf_numpy`, you can write Keras layers or models in the NumPy style!The TensorFlow NumPy API has full integration with the TensorFlow ecosystem.Features such as automatic differentiation, TensorBoard, Keras model callbacks,TPU distribution and model exporting are all supported.Let's run through a few examples. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import tensorflow.experimental.numpy as tnp
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>To test our models we will use the Boston housing prices regression dataset.<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
input_dim = x_train.shape[1]
def evaluate_model(model: keras.Model):
loss, percent_error = model.evaluate(x_test, y_test, verbose=0)
print("Mean absolute percent error before training: ", percent_error)
model.fit(x_train, y_train, epochs=200, verbose=0)
loss, percent_error = model.evaluate(x_test, y_test, verbose=0)
print("Mean absolute percent error after training:", percent_error)<jupyter_output><empty_output><jupyter_text>Subclassing keras.Model with TNPThe most flexible way to make use of the Keras API is to subclass the[`keras.Model`](https://keras.io/api/models/model/) class. Subclassing the Model classgives you the ability to fully customize what occurs in the training loop. This makessubclassing Model a popular option for researchers.In this example, we will implement a `Model` subclass that performs regression over theboston housing dataset using the TNP API. Note that differentiation and gradientdescent is handled automatically when using the TNP API alongside keras.First let's define a simple `TNPForwardFeedRegressionNetwork` class.<jupyter_code>class TNPForwardFeedRegressionNetwork(keras.Model):
def __init__(self, blocks=None, **kwargs):
super().__init__(**kwargs)
if not isinstance(blocks, list):
raise ValueError(f"blocks must be a list, got blocks={blocks}")
self.blocks = blocks
self.block_weights = None
self.biases = None
def build(self, input_shape):
current_shape = input_shape[1]
self.block_weights = []
self.biases = []
for i, block in enumerate(self.blocks):
self.block_weights.append(
self.add_weight(
shape=(current_shape, block),
trainable=True,
name=f"block-{i}",
initializer="glorot_normal",
)
)
self.biases.append(
self.add_weight(
shape=(block,),
trainable=True,
name=f"bias-{i}",
initializer="zeros",
)
)
current_shape = block
self.linear_layer = self.add_weight(
shape=(current_shape, 1),
name="linear_projector",
trainable=True,
initializer="glorot_normal",
)
def call(self, inputs):
activations = inputs
for w, b in zip(self.block_weights, self.biases):
activations = tnp.matmul(activations, w) + b
# ReLu activation function
activations = tnp.maximum(activations, 0.0)
return tnp.matmul(activations, self.linear_layer)<jupyter_output><empty_output><jupyter_text>Just like with any other Keras model we can utilize any supported optimizer, loss,metrics or callbacks that we want.Let's see how the model performs!<jupyter_code>model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
evaluate_model(model)<jupyter_output><empty_output><jupyter_text>Great! Our model seems to be effectively learning to solve the problem at hand.We can also write our own custom loss function using TNP.<jupyter_code>def tnp_mse(y_true, y_pred):
return tnp.mean(tnp.square(y_true - y_pred), axis=0)
keras.backend.clear_session()
model = TNPForwardFeedRegressionNetwork(blocks=[3, 3])
model.compile(
optimizer="adam",
loss=tnp_mse,
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
evaluate_model(model)<jupyter_output><empty_output><jupyter_text>Implementing a Keras Layer Based Model with TNPIf desired, TNP can also be used in layer oriented Keras code structure. Let'simplement the same model, but using a layered approach!<jupyter_code>def tnp_relu(x):
return tnp.maximum(x, 0)
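# (Quick illustrative check, not required for the model.) tnp functions accept
# plain Python lists and NumPy arrays as well as tf.Tensors:
print(tnp_relu(tnp.asarray([-1.0, 2.0])))  # -> [0., 2.]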
class TNPDense(keras.layers.Layer):
def __init__(self, units, activation=None):
super().__init__()
self.units = units
self.activation = activation
def build(self, input_shape):
self.w = self.add_weight(
name="weights",
shape=(input_shape[1], self.units),
initializer="random_normal",
trainable=True,
)
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer="zeros",
trainable=True,
)
def call(self, inputs):
outputs = tnp.matmul(inputs, self.w) + self.bias
if self.activation:
return self.activation(outputs)
return outputs
def create_layered_tnp_model():
return keras.Sequential(
[
TNPDense(3, activation=tnp_relu),
TNPDense(3, activation=tnp_relu),
TNPDense(1),
]
)
model = create_layered_tnp_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)<jupyter_output><empty_output><jupyter_text>You can also seamlessly switch between TNP layers and native Keras layers!<jupyter_code>def create_mixed_model():
return keras.Sequential(
[
TNPDense(3, activation=tnp_relu),
# The model will have no issue using a normal Dense layer
layers.Dense(3, activation="relu"),
# ... or switching back to tnp layers!
TNPDense(1),
]
)
model = create_mixed_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)<jupyter_output><empty_output><jupyter_text>The Keras API offers a wide variety of layers. The ability to use them alongside NumPycode can be a huge time saver in projects. Distribution StrategyTensorFlow NumPy and Keras integrate with[TensorFlow Distribution Strategies](https://www.tensorflow.org/guide/distributed_training).This makes it simple to perform distributed training across multiple GPUs,or even an entire TPU Pod.<jupyter_code>gpus = tf.config.list_logical_devices("GPU")
if gpus:
strategy = tf.distribute.MirroredStrategy(gpus)
else:
# We can fallback to a no-op CPU strategy.
strategy = tf.distribute.get_strategy()
print("Running with strategy:", str(strategy.__class__.__name__))
with strategy.scope():
model = create_layered_tnp_model()
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.build((None, input_dim))
model.summary()
evaluate_model(model)<jupyter_output><empty_output><jupyter_text>TensorBoard IntegrationOne of the many benefits of using the Keras API is the ability to monitor trainingthrough TensorBoard. Using the TensorFlow NumPy API alongside Keras allows you to easilyleverage TensorBoard.<jupyter_code>keras.backend.clear_session()<jupyter_output><empty_output><jupyter_text>To load the TensorBoard from a Jupyter notebook, you can run the following magic:```%load_ext tensorboard```<jupyter_code>models = [
(
TNPForwardFeedRegressionNetwork(blocks=[3, 3]),
"TNPForwardFeedRegressionNetwork",
),
(create_layered_tnp_model(), "layered_tnp_model"),
(create_mixed_model(), "mixed_model"),
]
for model, model_name in models:
model.compile(
optimizer="adam",
loss="mean_squared_error",
metrics=[keras.metrics.MeanAbsolutePercentageError()],
)
model.fit(
x_train,
y_train,
epochs=200,
verbose=0,
callbacks=[keras.callbacks.TensorBoard(log_dir=f"logs/{model_name}")],
)<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/tensorflow_numpy_models.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/tensorflow_numpy_models.ipynb",
"repo_id": "keras-io",
"token_count": 3606
} | 89 |
# Customizing the convolution operation of a Conv2D layer
**Author:** [lukewood](https://lukewood.xyz)<br>
**Date created:** 11/03/2021<br>
**Last modified:** 11/03/2021<br>
**Description:** This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/subclassing_conv_layers.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/subclassing_conv_layers.py)
---
## Introduction
You may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`.
Keras enables you to do this without implementing the entire layer from scratch: you can reuse
most of the base convolution layer and just customize the convolution op itself via the
`convolution_op()` method.
This method was introduced in Keras 2.7. So before using the
`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater.
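If you are unsure which version is installed, a quick check (illustrative snippet) is:

```python
import keras

print(keras.__version__)
```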
---
## A Simple `StandardizedConv2D` implementation
There are two ways to use the `Conv.convolution_op()` API. The first way
is to override the `convolution_op()` method on a convolution layer subclass.
Using this approach, we can quickly implement a
[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below.
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
import numpy as np
class StandardizedConv2DWithOverride(layers.Conv2D):
def convolution_op(self, inputs, kernel):
mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
return tf.nn.conv2d(
inputs,
(kernel - mean) / tf.sqrt(var + 1e-10),
padding="VALID",
strides=list(self.strides),
name=self.__class__.__name__,
)
```
The other way to use the `Conv.convolution_op()` API is to directly call the
`convolution_op()` method from the `call()` method of a convolution layer subclass.
A comparable class implemented using this approach is shown below.
```python
class StandardizedConv2DWithCall(layers.Conv2D):
def call(self, inputs):
mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
result = self.convolution_op(
inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10)
)
if self.use_bias:
result = result + self.bias
return result
```
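As a quick sanity check (an illustrative snippet, assuming the classes above have been defined),
either subclass can be called like a regular `Conv2D` layer on random data:

```python
x = tf.random.normal((1, 28, 28, 1))
layer = StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu")
print(layer(x).shape)  # (1, 26, 26, 32)
```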
---
## Example Usage
Both of these layers work as drop-in replacements for `Conv2D`. The following
demonstration performs classification on the MNIST dataset.
```python
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential(
[
keras.layers.Input(shape=input_shape),
StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
```
<div class="k-default-codeblock">
```
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ standardized_conv2d_with_call │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">StandardizedConv2DWithCall</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ standardized_conv2d_with_overr… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,496</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">StandardizedConv2DWithOverrid…</span> │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,010</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
```python
batch_size = 128
epochs = 5
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=5, validation_split=0.1)
```
<div class="k-default-codeblock">
```
Epoch 1/5
64/422 ━━━[37m━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.4439 - loss: 13.1274
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699557098.952525 26800 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
422/422 ━━━━━━━━━━━━━━━━━━━━ 10s 14ms/step - accuracy: 0.7277 - loss: 4.5649 - val_accuracy: 0.9690 - val_loss: 0.1140
Epoch 2/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - accuracy: 0.9311 - loss: 0.2493 - val_accuracy: 0.9798 - val_loss: 0.0795
Epoch 3/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9531 - loss: 0.1655 - val_accuracy: 0.9838 - val_loss: 0.0610
Epoch 4/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9652 - loss: 0.1201 - val_accuracy: 0.9847 - val_loss: 0.0577
Epoch 5/5
422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9687 - loss: 0.1059 - val_accuracy: 0.9870 - val_loss: 0.0525
<keras.src.callbacks.history.History at 0x7fed258da200>
```
</div>
---
## Conclusion
The `Conv.convolution_op()` API provides an easy and readable way to implement custom
convolution layers. A `StandardizedConvolution` implementation using the API is quite
terse, consisting of only four lines of code.
| keras-io/examples/keras_recipes/md/subclassing_conv_layers.md/0 | {
"file_path": "keras-io/examples/keras_recipes/md/subclassing_conv_layers.md",
"repo_id": "keras-io",
"token_count": 4456
} | 90 |
"""
Title: Review Classification using Active Learning
Author: [Darshan Deshpande](https://twitter.com/getdarshan)
Date created: 2021/10/29
Last modified: 2021/10/29
Description: Demonstrating the advantages of active learning through review classification.
Accelerator: GPU
"""
"""
## Introduction
With the growth of data-centric Machine Learning, Active Learning has grown in popularity
amongst businesses and researchers. Active Learning seeks to progressively
train ML models so that the resultant model requires less training data to
achieve competitive scores.
The structure of an Active Learning pipeline involves a classifier and an oracle. The
oracle is an annotator that cleans, selects, labels the data, and feeds it to the model
when required. The oracle is a trained individual or a group of individuals that
ensure consistency in labeling of new data.
The process starts with annotating a small subset of the full dataset and training an
initial model. The best model checkpoint is saved and then tested on a balanced test
set. The test set must be carefully sampled because the full training process will be
dependent on it. Once we have the initial evaluation scores, the oracle is tasked with
labeling more samples; the number of data points to be sampled is usually determined by
the business requirements. After that, the newly sampled data is added to the training
set, and the training procedure repeats. This cycle continues until either an
acceptable score is reached or some other business metric is met.
This tutorial provides a basic demonstration of how Active Learning works by showing a
ratio-based (least confidence) sampling strategy that results in lower
overall false positive and negative rates when compared to a model trained on the entire
dataset. This sampling falls under the domain of *uncertainty sampling*, in which new
datasets are sampled based on the uncertainty that the model outputs for the
corresponding label. In our example, we compare our model's false positive and false
negative rates and annotate the new data based on their ratio.
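For intuition, here is a minimal, hypothetical sketch of least-confidence sampling for a
binary classifier (the actual example below uses the false positive/false negative ratio
rather than per-sample confidence):

```python
import numpy as np

# Hypothetical sigmoid outputs of the current model on six unlabeled samples.
probs = np.array([0.97, 0.51, 0.80, 0.47, 0.02, 0.60])
uncertainty = 1.0 - np.maximum(probs, 1.0 - probs)
# Send the two most uncertain samples to the oracle for labeling.
to_label = np.argsort(-uncertainty)[:2]
print(to_label)  # -> indices of the samples closest to the decision boundary
```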
Some other sampling techniques include:
1. [Committee sampling](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):
Using multiple models to vote for the best data points to be sampled
2. [Entropy reduction](https://www.researchgate.net/publication/51909346_Committee-Based_Sample_Selection_for_Probabilistic_Classifiers):
Sampling according to an entropy threshold, selecting more of the samples that produce the highest entropy score.
3. [Minimum margin based sampling](https://arxiv.org/abs/1906.00025v1):
Selects data points closest to the decision boundary
"""
"""
## Importing required libraries
"""
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import re
import string
tfds.disable_progress_bar()
"""
## Loading and preprocessing the data
We will be using the IMDB reviews dataset for our experiments. This dataset has 50,000
reviews in total, including training and testing splits. We will merge these splits and
sample our own, balanced training, validation and testing sets.
"""
dataset = tfds.load(
"imdb_reviews",
split="train + test",
as_supervised=True,
batch_size=-1,
shuffle_files=False,
)
reviews, labels = tfds.as_numpy(dataset)
print("Total examples:", reviews.shape[0])
"""
Active learning starts with labeling a subset of data.
For the ratio sampling technique that we will be using, we will need well-balanced training,
validation and testing splits.
"""
val_split = 2500
test_split = 2500
train_split = 7500
# Separating the negative and positive samples for manual stratification
x_positives, y_positives = reviews[labels == 1], labels[labels == 1]
x_negatives, y_negatives = reviews[labels == 0], labels[labels == 0]
# Creating training, validation and testing splits
x_val, y_val = (
tf.concat((x_positives[:val_split], x_negatives[:val_split]), 0),
tf.concat((y_positives[:val_split], y_negatives[:val_split]), 0),
)
x_test, y_test = (
tf.concat(
(
x_positives[val_split : val_split + test_split],
x_negatives[val_split : val_split + test_split],
),
0,
),
tf.concat(
(
y_positives[val_split : val_split + test_split],
y_negatives[val_split : val_split + test_split],
),
0,
),
)
x_train, y_train = (
tf.concat(
(
x_positives[val_split + test_split : val_split + test_split + train_split],
x_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
tf.concat(
(
y_positives[val_split + test_split : val_split + test_split + train_split],
y_negatives[val_split + test_split : val_split + test_split + train_split],
),
0,
),
)
# Remaining pool of samples are stored separately. These are only labeled as and when required
x_pool_positives, y_pool_positives = (
x_positives[val_split + test_split + train_split :],
y_positives[val_split + test_split + train_split :],
)
x_pool_negatives, y_pool_negatives = (
x_negatives[val_split + test_split + train_split :],
y_negatives[val_split + test_split + train_split :],
)
# Creating TF Datasets for faster prefetching and parallelization
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
pool_negatives = tf.data.Dataset.from_tensor_slices(
(x_pool_negatives, y_pool_negatives)
)
pool_positives = tf.data.Dataset.from_tensor_slices(
(x_pool_positives, y_pool_positives)
)
print(f"Initial training set size: {len(train_dataset)}")
print(f"Validation set size: {len(val_dataset)}")
print(f"Testing set size: {len(test_dataset)}")
print(f"Unlabeled negative pool: {len(pool_negatives)}")
print(f"Unlabeled positive pool: {len(pool_positives)}")
"""
### Fitting the `TextVectorization` layer
Since we are working with text data, we will need to encode the text strings as vectors which
would then be passed through an `Embedding` layer. To make this tokenization process
faster, we use the `map()` function with its parallelization functionality.
"""
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, f"[{re.escape(string.punctuation)}]", ""
)
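# For instance (hypothetical input), custom_standardization(tf.constant("Great movie!<br />Loved it."))
# returns "great movie loved it": lowercased, with HTML line breaks and punctuation stripped.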
vectorizer = layers.TextVectorization(
3000, standardize=custom_standardization, output_sequence_length=150
)
# Adapting the dataset
vectorizer.adapt(
train_dataset.map(lambda x, y: x, num_parallel_calls=tf.data.AUTOTUNE).batch(256)
)
def vectorize_text(text, label):
text = vectorizer(text)
return text, label
train_dataset = train_dataset.map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
).prefetch(tf.data.AUTOTUNE)
pool_negatives = pool_negatives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
pool_positives = pool_positives.map(vectorize_text, num_parallel_calls=tf.data.AUTOTUNE)
val_dataset = val_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)
test_dataset = test_dataset.batch(256).map(
vectorize_text, num_parallel_calls=tf.data.AUTOTUNE
)
"""
## Creating Helper Functions
"""
# Helper function for merging new history objects with older ones
def append_history(losses, val_losses, accuracy, val_accuracy, history):
losses = losses + history.history["loss"]
val_losses = val_losses + history.history["val_loss"]
accuracy = accuracy + history.history["binary_accuracy"]
val_accuracy = val_accuracy + history.history["val_binary_accuracy"]
return losses, val_losses, accuracy, val_accuracy
# Plotter function
def plot_history(losses, val_losses, accuracies, val_accuracies):
plt.plot(losses)
plt.plot(val_losses)
plt.legend(["train_loss", "val_loss"])
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
plt.plot(accuracies)
plt.plot(val_accuracies)
plt.legend(["train_accuracy", "val_accuracy"])
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.show()
"""
## Creating the Model
We create a small bidirectional LSTM model. When using Active Learning, you should make sure
that the model architecture is capable of overfitting to the initial data.
Overfitting the initial subset is a strong hint that the model has enough capacity to
learn from the additional data sampled in later iterations.
"""
def create_model():
model = keras.models.Sequential(
[
layers.Input(shape=(150,)),
layers.Embedding(input_dim=3000, output_dim=128),
layers.Bidirectional(layers.LSTM(32, return_sequences=True)),
layers.GlobalMaxPool1D(),
layers.Dense(20, activation="relu"),
layers.Dropout(0.5),
layers.Dense(1, activation="sigmoid"),
]
)
model.summary()
return model
"""
## Training on the entire dataset
To show the effectiveness of Active Learning, we will first train the model on the entire
dataset containing 40,000 labeled samples. This model will be used for comparison later.
"""
def train_full_model(full_train_dataset, val_dataset, test_dataset):
model = create_model()
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# We will save the best model at every epoch and load the best one for evaluation on the test set
history = model.fit(
full_train_dataset.batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[
keras.callbacks.EarlyStopping(patience=4, verbose=1),
keras.callbacks.ModelCheckpoint(
"FullModelCheckpoint.h5", verbose=1, save_best_only=True
),
],
)
# Plot history
plot_history(
history.history["loss"],
history.history["val_loss"],
history.history["binary_accuracy"],
history.history["val_binary_accuracy"],
)
# Loading the best checkpoint
model = keras.models.load_model("FullModelCheckpoint.h5")
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
# Sampling the full train dataset to train on
full_train_dataset = (
train_dataset.concatenate(pool_positives)
.concatenate(pool_negatives)
.cache()
.shuffle(20000)
)
# Training the full model
full_dataset_model = train_full_model(full_train_dataset, val_dataset, test_dataset)
"""
## Training via Active Learning
The general process we follow when performing Active Learning is demonstrated below:

The pipeline can be summarized in five parts:
1. Sample and annotate a small, balanced training dataset
2. Train the model on this small subset
3. Evaluate the model on a balanced testing set
4. If the model satisfies the business criteria, deploy it in a real time setting
5. If it doesn't pass the criteria, sample a few more samples according to the ratio of
false positives and negatives, add them to the training set and repeat from step 2 till
the model passes the tests or till all available data is exhausted.
For the code below, we will perform sampling using the following formula:<br/>

Active Learning techniques use callbacks extensively for progress tracking. We will be
using model checkpointing and early stopping for this example. The `patience` parameter
for Early Stopping can help minimize overfitting and the time required. We have set it to
`patience=4` for now, but since the model is robust, we can increase the patience level if
desired.
Note: We are not loading the checkpoint after the first training iteration. In my
experience working on Active Learning techniques, this helps the model probe the
newly formed loss landscape. Even if the model fails to improve in the second iteration,
we will still gain insight about the possible future false positive and negative rates.
This will help us sample a better set in the next iteration where the model will have a
greater chance to improve.
"""
def train_active_learning_models(
train_dataset,
pool_negatives,
pool_positives,
val_dataset,
test_dataset,
num_iterations=3,
sampling_size=5000,
):
# Creating lists for storing metrics
losses, val_losses, accuracies, val_accuracies = [], [], [], []
model = create_model()
# We will monitor the false positives and false negatives predicted by our model
# These will decide the subsequent sampling ratio for every Active Learning loop
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
# Defining checkpoints.
# The checkpoint callback is reused throughout the training since it only saves the best overall model.
checkpoint = keras.callbacks.ModelCheckpoint(
"AL_Model.h5", save_best_only=True, verbose=1
)
# Here, patience is set to 4. This can be set higher if desired.
early_stopping = keras.callbacks.EarlyStopping(patience=4, verbose=1)
print(f"Starting to train with {len(train_dataset)} samples")
# Initial fit with a small subset of the training set
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
epochs=20,
validation_data=val_dataset,
callbacks=[checkpoint, early_stopping],
)
# Appending history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
for iteration in range(num_iterations):
# Getting predictions from previously trained model
predictions = model.predict(test_dataset)
# Generating labels from the output probabilities
rounded = tf.where(tf.greater(predictions, 0.5), 1, 0)
        # Evaluating the number of zeros and ones incorrectly classified
_, _, false_negatives, false_positives = model.evaluate(test_dataset, verbose=0)
print("-" * 100)
print(
f"Number of zeros incorrectly classified: {false_negatives}, Number of ones incorrectly classified: {false_positives}"
)
# This technique of Active Learning demonstrates ratio based sampling where
# Number of ones/zeros to sample = Number of ones/zeros incorrectly classified / Total incorrectly classified
if false_negatives != 0 and false_positives != 0:
total = false_negatives + false_positives
sample_ratio_ones, sample_ratio_zeros = (
false_positives / total,
false_negatives / total,
)
        # If either class has no misclassified samples, we sample both classes equally
else:
sample_ratio_ones, sample_ratio_zeros = 0.5, 0.5
        print(
            f"Sample ratio for positives: {sample_ratio_ones}, Sample ratio for negatives: {sample_ratio_zeros}"
        )
# Sample the required number of ones and zeros
sampled_dataset = pool_negatives.take(
int(sample_ratio_zeros * sampling_size)
).concatenate(pool_positives.take(int(sample_ratio_ones * sampling_size)))
        # Skip the sampled data points to avoid sampling them again
pool_negatives = pool_negatives.skip(int(sample_ratio_zeros * sampling_size))
pool_positives = pool_positives.skip(int(sample_ratio_ones * sampling_size))
# Concatenating the train_dataset with the sampled_dataset
train_dataset = train_dataset.concatenate(sampled_dataset).prefetch(
tf.data.AUTOTUNE
)
print(f"Starting training with {len(train_dataset)} samples")
print("-" * 100)
# We recompile the model to reset the optimizer states and retrain the model
model.compile(
loss="binary_crossentropy",
optimizer="rmsprop",
metrics=[
keras.metrics.BinaryAccuracy(),
keras.metrics.FalseNegatives(),
keras.metrics.FalsePositives(),
],
)
history = model.fit(
train_dataset.cache().shuffle(20000).batch(256),
validation_data=val_dataset,
epochs=20,
callbacks=[
checkpoint,
keras.callbacks.EarlyStopping(patience=4, verbose=1),
],
)
# Appending the history
losses, val_losses, accuracies, val_accuracies = append_history(
losses, val_losses, accuracies, val_accuracies, history
)
# Loading the best model from this training loop
model = keras.models.load_model("AL_Model.h5")
# Plotting the overall history and evaluating the final model
plot_history(losses, val_losses, accuracies, val_accuracies)
print("-" * 100)
print(
"Test set evaluation: ",
model.evaluate(test_dataset, verbose=0, return_dict=True),
)
print("-" * 100)
return model
active_learning_model = train_active_learning_models(
train_dataset, pool_negatives, pool_positives, val_dataset, test_dataset
)
"""
## Conclusion
Active Learning is a growing area of research. This example demonstrates the cost-efficiency
benefits of using Active Learning, as it reduces the amount of data that needs to be
annotated, saving both time and resources.
The following are some noteworthy observations from this example:
1. We only require 30,000 samples to reach the same (if not better) scores as the model
trained on the full dataset. This means that in a real-life setting, we save the effort
required for annotating 10,000 reviews!
2. The numbers of false negatives and false positives are well balanced at the end of the
training, as compared to the skewed ratio obtained from the full training. This makes the
model slightly more useful in real-life scenarios where both labels hold equal
importance.
For further reading about the types of sampling ratios, training techniques or available
open source libraries/implementations, you can refer to the resources below:
1. [Active Learning Literature Survey](http://burrsettles.com/pub/settles.activelearning.pdf) (Burr Settles, 2010).
2. [modAL](https://github.com/modAL-python/modAL): A Modular Active Learning framework.
3. Google's unofficial [Active Learning playground](https://github.com/google/active-learning).
"""
| keras-io/examples/nlp/active_learning_review_classification.py/0 | {
"file_path": "keras-io/examples/nlp/active_learning_review_classification.py",
"repo_id": "keras-io",
"token_count": 6692
} | 91 |
<jupyter_start><jupyter_text>Named Entity Recognition using Transformers**Author:** [Varun Singh](https://www.linkedin.com/in/varunsingh2/)**Date created:** Jun 23, 2021**Last modified:** Jun 24, 2021**Description:** NER using the Transformers and data from the CoNLL 2003 shared task. Introduction Named Entity Recognition (NER) is the process of identifying named entities in text. Examples of named entities are: "Person", "Location", "Organization", "Dates", etc. NER is essentially a token classification task where every token is classified into one or more predetermined categories. In this exercise, we will train a simple Transformer-based model to perform NER. We will be using the data from the CoNLL 2003 shared task. For more information about the dataset, please visit [the dataset website](https://www.clips.uantwerpen.be/conll2003/ner/). However, since obtaining this data requires an additional step of getting a free license, we will be using HuggingFace's datasets library, which contains a processed version of this dataset. Install the open source datasets library from HuggingFace. We also download the script used to evaluate NER models.<jupyter_code>!pip3 install datasets
!wget https://raw.githubusercontent.com/sighsmile/conlleval/master/conlleval.py
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import os
import keras
import numpy as np
import tensorflow as tf
from keras import layers
from datasets import load_dataset
from collections import Counter
from conlleval import evaluate<jupyter_output><empty_output><jupyter_text>We will be using the transformer implementation from this fantastic[example](https://keras.io/examples/nlp/text_classification_with_transformer/).Let's start by defining a `TransformerBlock` layer:<jupyter_code>class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = keras.layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.ffn = keras.Sequential(
[
keras.layers.Dense(ff_dim, activation="relu"),
keras.layers.Dense(embed_dim),
]
)
self.layernorm1 = keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = keras.layers.Dropout(rate)
self.dropout2 = keras.layers.Dropout(rate)
def call(self, inputs, training=False):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)<jupyter_output><empty_output><jupyter_text>Next, let's define a `TokenAndPositionEmbedding` layer:<jupyter_code>class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = keras.layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)
self.pos_emb = keras.layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, inputs):
maxlen = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
position_embeddings = self.pos_emb(positions)
token_embeddings = self.token_emb(inputs)
return token_embeddings + position_embeddings<jupyter_output><empty_output><jupyter_text>Build the NER model class as a `keras.Model` subclass<jupyter_code>class NERModel(keras.Model):
def __init__(
self, num_tags, vocab_size, maxlen=128, embed_dim=32, num_heads=2, ff_dim=32
):
super().__init__()
self.embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
self.transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
self.dropout1 = layers.Dropout(0.1)
self.ff = layers.Dense(ff_dim, activation="relu")
self.dropout2 = layers.Dropout(0.1)
self.ff_final = layers.Dense(num_tags, activation="softmax")
def call(self, inputs, training=False):
x = self.embedding_layer(inputs)
x = self.transformer_block(x)
x = self.dropout1(x, training=training)
x = self.ff(x)
x = self.dropout2(x, training=training)
x = self.ff_final(x)
return x<jupyter_output><empty_output><jupyter_text>Load the CoNLL 2003 dataset from the datasets library and process it<jupyter_code>conll_data = load_dataset("conll2003")<jupyter_output><empty_output><jupyter_text>We will export this data to a tab-separated file format which will be easy to read as a`tf.data.Dataset` object.<jupyter_code>def export_to_file(export_file_path, data):
with open(export_file_path, "w") as f:
for record in data:
ner_tags = record["ner_tags"]
tokens = record["tokens"]
if len(tokens) > 0:
f.write(
str(len(tokens))
+ "\t"
+ "\t".join(tokens)
+ "\t"
+ "\t".join(map(str, ner_tags))
+ "\n"
)
os.mkdir("data")
export_to_file("./data/conll_train.txt", conll_data["train"])
export_to_file("./data/conll_val.txt", conll_data["validation"])<jupyter_output><empty_output><jupyter_text>Make the NER label lookup table. NER labels are usually provided in IOB, IOB2 or IOBES formats. Check out this link for more information: [Wikipedia](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)). Note that we start our label numbering from 1, since 0 will be reserved for padding. We have a total of 10 labels: 9 from the NER dataset and one for padding.<jupyter_code>def make_tag_lookup_table():
iob_labels = ["B", "I"]
ner_labels = ["PER", "ORG", "LOC", "MISC"]
all_labels = [(label1, label2) for label2 in ner_labels for label1 in iob_labels]
all_labels = ["-".join([a, b]) for a, b in all_labels]
all_labels = ["[PAD]", "O"] + all_labels
return dict(zip(range(0, len(all_labels) + 1), all_labels))
mapping = make_tag_lookup_table()
print(mapping)<jupyter_output><empty_output><jupyter_text>Get a list of all tokens in the training dataset. This will be used to create the vocabulary.<jupyter_code>all_tokens = sum(conll_data["train"]["tokens"], [])
all_tokens_array = np.array(list(map(str.lower, all_tokens)))
counter = Counter(all_tokens_array)
print(len(counter))
num_tags = len(mapping)
vocab_size = 20000
# We only take (vocab_size - 2) most commons words from the training data since
# the `StringLookup` class uses 2 additional tokens - one denoting an unknown
# token and another one denoting a masking token
vocabulary = [token for token, count in counter.most_common(vocab_size - 2)]
# The StringLookup class will convert tokens to token IDs
lookup_layer = keras.layers.StringLookup(vocabulary=vocabulary)<jupyter_output><empty_output><jupyter_text>Create 2 new `Dataset` objects from the training and validation data<jupyter_code>train_data = tf.data.TextLineDataset("./data/conll_train.txt")
val_data = tf.data.TextLineDataset("./data/conll_val.txt")<jupyter_output><empty_output><jupyter_text>Print out one line to make sure it looks good. The first record in the line is the number of tokens. After that we will have all the tokens followed by all the NER tags.<jupyter_code>print(list(train_data.take(1).as_numpy_iterator()))<jupyter_output><empty_output><jupyter_text>We will be using the following map function to transform the data in the dataset:<jupyter_code>def map_record_to_training_data(record):
record = tf.strings.split(record, sep="\t")
length = tf.strings.to_number(record[0], out_type=tf.int32)
tokens = record[1 : length + 1]
tags = record[length + 1 :]
tags = tf.strings.to_number(tags, out_type=tf.int64)
tags += 1
return tokens, tags
def lowercase_and_convert_to_ids(tokens):
tokens = tf.strings.lower(tokens)
return lookup_layer(tokens)
# We use `padded_batch` here because each record in the dataset has a
# different length.
batch_size = 32
train_dataset = (
train_data.map(map_record_to_training_data)
.map(lambda x, y: (lowercase_and_convert_to_ids(x), y))
.padded_batch(batch_size)
)
val_dataset = (
val_data.map(map_record_to_training_data)
.map(lambda x, y: (lowercase_and_convert_to_ids(x), y))
.padded_batch(batch_size)
)
ner_model = NERModel(num_tags, vocab_size, embed_dim=32, num_heads=4, ff_dim=64)<jupyter_output><empty_output><jupyter_text>We will be using a custom loss function that will ignore the loss from padded tokens.<jupyter_code>class CustomNonPaddingTokenLoss(keras.losses.Loss):
def __init__(self, name="custom_ner_loss"):
super().__init__(name=name)
def call(self, y_true, y_pred):
loss_fn = keras.losses.SparseCategoricalCrossentropy(
from_logits=False, reduction=None
)
loss = loss_fn(y_true, y_pred)
mask = tf.cast((y_true > 0), dtype=tf.float32)
loss = loss * mask
return tf.reduce_sum(loss) / tf.reduce_sum(mask)
loss = CustomNonPaddingTokenLoss()<jupyter_output><empty_output><jupyter_text>Compile and fit the model<jupyter_code>ner_model.compile(optimizer="adam", loss=loss)
ner_model.fit(train_dataset, epochs=10)
def tokenize_and_convert_to_ids(text):
tokens = text.split()
return lowercase_and_convert_to_ids(tokens)
# Sample inference using the trained model
sample_input = tokenize_and_convert_to_ids(
"eu rejects german call to boycott british lamb"
)
sample_input = tf.reshape(sample_input, shape=[1, -1])
print(sample_input)
output = ner_model.predict(sample_input)
prediction = np.argmax(output, axis=-1)[0]
prediction = [mapping[i] for i in prediction]
# eu -> B-ORG, german -> B-MISC, british -> B-MISC
print(prediction)<jupyter_output><empty_output><jupyter_text>Metrics calculation. Here is a function to calculate the metrics. The function calculates the F1 score for the overall NER dataset as well as individual scores for each NER tag.<jupyter_code>def calculate_metrics(dataset):
all_true_tag_ids, all_predicted_tag_ids = [], []
for x, y in dataset:
output = ner_model.predict(x, verbose=0)
predictions = np.argmax(output, axis=-1)
predictions = np.reshape(predictions, [-1])
true_tag_ids = np.reshape(y, [-1])
mask = (true_tag_ids > 0) & (predictions > 0)
true_tag_ids = true_tag_ids[mask]
predicted_tag_ids = predictions[mask]
all_true_tag_ids.append(true_tag_ids)
all_predicted_tag_ids.append(predicted_tag_ids)
all_true_tag_ids = np.concatenate(all_true_tag_ids)
all_predicted_tag_ids = np.concatenate(all_predicted_tag_ids)
predicted_tags = [mapping[tag] for tag in all_predicted_tag_ids]
real_tags = [mapping[tag] for tag in all_true_tag_ids]
evaluate(real_tags, predicted_tags)
calculate_metrics(val_dataset)<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/ner_transformers.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/ner_transformers.ipynb",
"repo_id": "keras-io",
"token_count": 4385
} | 92 |
# English-to-Spanish translation with a sequence-to-sequence Transformer
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2021/05/26<br>
**Last modified:** 2023/02/25<br>
**Description:** Implementing a sequence-to-sequence Transformer and training it on a machine translation task.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/neural_machine_translation_with_transformer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/neural_machine_translation_with_transformer.py)
---
## Introduction
In this example, we'll build a sequence-to-sequence Transformer model, which
we'll train on an English-to-Spanish machine translation task.
You'll learn how to:
- Vectorize text using the Keras `TextVectorization` layer.
- Implement a `TransformerEncoder` layer, a `TransformerDecoder` layer,
and a `PositionalEmbedding` layer.
- Prepare data for training a sequence-to-sequence model.
- Use the trained model to generate translations of never-seen-before
input sentences (sequence-to-sequence inference).
The code featured here is adapted from the book
[Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition)
(chapter 11: Deep learning for text).
The present example is fairly barebones, so for detailed explanations of
how each building block works, as well as the theory behind Transformers,
I recommend reading the book.
---
## Setup
```python
# We set the backend to TensorFlow. The code works with
# both `tensorflow` and `torch`. It does not work with JAX
# due to the behavior of `jax.numpy.tile` in a jit scope
# (used in `TransformerDecoder.get_causal_attention_mask()`:
# `tile` in JAX does not support a dynamic `reps` argument.
# You can make the code work in JAX by wrapping the
# inside of the `get_causal_attention_mask` method in
# a decorator to prevent jit compilation:
# `with jax.ensure_compile_time_eval():`.
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import pathlib
import random
import string
import re
import numpy as np
import tensorflow.data as tf_data
import tensorflow.strings as tf_strings
import keras
from keras import layers
from keras import ops
from keras.layers import TextVectorization
```
---
## Downloading the data
We'll be working with an English-to-Spanish translation dataset
provided by [Anki](https://www.manythings.org/anki/). Let's download it:
```python
text_file = keras.utils.get_file(
fname="spa-eng.zip",
origin="http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip",
extract=True,
)
text_file = pathlib.Path(text_file).parent / "spa-eng" / "spa.txt"
```
---
## Parsing the data
Each line contains an English sentence and its corresponding Spanish sentence.
The English sentence is the *source sequence* and the Spanish one is the *target sequence*.
We prepend the token `"[start]"` and we append the token `"[end]"` to the Spanish sentence.
```python
with open(text_file) as f:
lines = f.read().split("\n")[:-1]
text_pairs = []
for line in lines:
eng, spa = line.split("\t")
spa = "[start] " + spa + " [end]"
text_pairs.append((eng, spa))
```
Here's what our sentence pairs look like:
```python
for _ in range(5):
print(random.choice(text_pairs))
```
<div class="k-default-codeblock">
```
("On Saturday nights, it's difficult to find parking around here.", '[start] Los sábados por la noche es difícil encontrar aparcamiento por aquí. [end]')
('I was the worst student in the class.', '[start] Fui el peor estudiante en la clase. [end]')
('There is nothing to do today.', '[start] No hay nada que hacer hoy. [end]')
('The twins do resemble each other.', '[start] Los gemelos se parecen mutuamente. [end]')
('They found Tom in the crowd.', '[start] Encontraron a Tom entre la multitud. [end]')
```
</div>
Now, let's split the sentence pairs into a training set, a validation set,
and a test set.
```python
random.shuffle(text_pairs)
num_val_samples = int(0.15 * len(text_pairs))
num_train_samples = len(text_pairs) - 2 * num_val_samples
train_pairs = text_pairs[:num_train_samples]
val_pairs = text_pairs[num_train_samples : num_train_samples + num_val_samples]
test_pairs = text_pairs[num_train_samples + num_val_samples :]
print(f"{len(text_pairs)} total pairs")
print(f"{len(train_pairs)} training pairs")
print(f"{len(val_pairs)} validation pairs")
print(f"{len(test_pairs)} test pairs")
```
<div class="k-default-codeblock">
```
118964 total pairs
83276 training pairs
17844 validation pairs
17844 test pairs
```
</div>
---
## Vectorizing the text data
We'll use two instances of the `TextVectorization` layer to vectorize the text
data (one for English and one for Spanish),
that is to say, to turn the original strings into integer sequences
where each integer represents the index of a word in a vocabulary.
The English layer will use the default string standardization (strip punctuation characters)
and splitting scheme (split on whitespace), while
the Spanish layer will use a custom standardization, where we add the character
`"¿"` to the set of punctuation characters to be stripped.
Note: in a production-grade machine translation model, I would not recommend
stripping the punctuation characters in either language. Instead, I would recommend turning
each punctuation character into its own token,
which you could achieve by providing a custom `split` function to the `TextVectorization` layer.
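For instance, a custom `split` function along the following lines could keep each
punctuation mark as its own token (a rough sketch, not used in this example; it relies on
`re`, `string`, and `tf_strings` from the setup above, pads every punctuation character
with spaces, and then splits on whitespace):

```python
def split_keeping_punctuation(input_string):
    # Surround every punctuation character (including "¿") with spaces so that
    # it becomes its own token, then split on whitespace.
    punctuation = string.punctuation + "¿"
    padded = tf_strings.regex_replace(
        input_string, f"([{re.escape(punctuation)}])", r" \1 "
    )
    return tf_strings.split(padded)
```

You would then pass `split=split_keeping_punctuation` to `TextVectorization`, together
with a `standardize` function that no longer strips those characters.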
```python
strip_chars = string.punctuation + "¿"
strip_chars = strip_chars.replace("[", "")
strip_chars = strip_chars.replace("]", "")
vocab_size = 15000
sequence_length = 20
batch_size = 64
def custom_standardization(input_string):
lowercase = tf_strings.lower(input_string)
return tf_strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars), "")
eng_vectorization = TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=sequence_length,
)
spa_vectorization = TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=sequence_length + 1,
standardize=custom_standardization,
)
train_eng_texts = [pair[0] for pair in train_pairs]
train_spa_texts = [pair[1] for pair in train_pairs]
eng_vectorization.adapt(train_eng_texts)
spa_vectorization.adapt(train_spa_texts)
```
Next, we'll format our datasets.
At each training step, the model will seek to predict target words N+1 (and beyond)
using the source sentence and the target words 0 to N.
As such, the training dataset will yield a tuple `(inputs, targets)`, where:
- `inputs` is a dictionary with the keys `encoder_inputs` and `decoder_inputs`.
`encoder_inputs` is the vectorized source sentence and `decoder_inputs` is the target sentence "so far",
that is to say, the words 0 to N used to predict word N+1 (and beyond) in the target sentence.
- `targets` is the target sentence offset by one step:
it provides the next words in the target sentence -- what the model will try to predict.
```python
def format_dataset(eng, spa):
eng = eng_vectorization(eng)
spa = spa_vectorization(spa)
return (
{
"encoder_inputs": eng,
"decoder_inputs": spa[:, :-1],
},
spa[:, 1:],
)
def make_dataset(pairs):
eng_texts, spa_texts = zip(*pairs)
eng_texts = list(eng_texts)
spa_texts = list(spa_texts)
dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts))
dataset = dataset.batch(batch_size)
dataset = dataset.map(format_dataset)
return dataset.cache().shuffle(2048).prefetch(16)
train_ds = make_dataset(train_pairs)
val_ds = make_dataset(val_pairs)
```
Let's take a quick look at the sequence shapes
(we have batches of 64 pairs, and all sequences are 20 steps long):
```python
for inputs, targets in train_ds.take(1):
print(f'inputs["encoder_inputs"].shape: {inputs["encoder_inputs"].shape}')
print(f'inputs["decoder_inputs"].shape: {inputs["decoder_inputs"].shape}')
print(f"targets.shape: {targets.shape}")
```
<div class="k-default-codeblock">
```
inputs["encoder_inputs"].shape: (64, 20)
inputs["decoder_inputs"].shape: (64, 20)
targets.shape: (64, 20)
```
</div>
---
## Building the model
Our sequence-to-sequence Transformer consists of a `TransformerEncoder`
and a `TransformerDecoder` chained together. To make the model aware of word order,
we also use a `PositionalEmbedding` layer.
The source sequence will be passed to the `TransformerEncoder`,
which will produce a new representation of it.
This new representation will then be passed
to the `TransformerDecoder`, together with the target sequence so far (target words 0 to N).
The `TransformerDecoder` will then seek to predict the next words in the target sequence (N+1 and beyond).
A key detail that makes this possible is causal masking
(see method `get_causal_attention_mask()` on the `TransformerDecoder`).
The `TransformerDecoder` sees the entire sequence at once, and thus we must make
sure that it only uses information from target tokens 0 to N when predicting token N+1
(otherwise, it could use information from the future, which would
result in a model that cannot be used at inference time).
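To make the mask concrete, here is a tiny standalone sketch (NumPy only, purely for
illustration) of the lower-triangular causal mask built for a sequence of length 4 --
row `i` is allowed to attend to positions `j <= i`:

```python
import numpy as np

seq_len = 4
i = np.arange(seq_len)[:, None]
j = np.arange(seq_len)
causal_mask = (i >= j).astype("int32")
print(causal_mask)
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]
```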
```python
import keras.ops as ops
class TransformerEncoder(layers.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.dense_proj = keras.Sequential(
[
layers.Dense(dense_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
self.supports_masking = True
def call(self, inputs, mask=None):
if mask is not None:
padding_mask = ops.cast(mask[:, None, :], dtype="int32")
else:
padding_mask = None
attention_output = self.attention(
query=inputs, value=inputs, key=inputs, attention_mask=padding_mask
)
proj_input = self.layernorm_1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm_2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update(
{
"embed_dim": self.embed_dim,
"dense_dim": self.dense_dim,
"num_heads": self.num_heads,
}
)
return config
class PositionalEmbedding(layers.Layer):
def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)
self.position_embeddings = layers.Embedding(
input_dim=sequence_length, output_dim=embed_dim
)
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.embed_dim = embed_dim
def call(self, inputs):
length = ops.shape(inputs)[-1]
positions = ops.arange(0, length, 1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def compute_mask(self, inputs, mask=None):
if mask is None:
return None
else:
return ops.not_equal(inputs, 0)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"vocab_size": self.vocab_size,
"embed_dim": self.embed_dim,
}
)
return config
class TransformerDecoder(layers.Layer):
def __init__(self, embed_dim, latent_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.latent_dim = latent_dim
self.num_heads = num_heads
self.attention_1 = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.attention_2 = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.dense_proj = keras.Sequential(
[
layers.Dense(latent_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm_1 = layers.LayerNormalization()
self.layernorm_2 = layers.LayerNormalization()
self.layernorm_3 = layers.LayerNormalization()
self.supports_masking = True
def call(self, inputs, encoder_outputs, mask=None):
causal_mask = self.get_causal_attention_mask(inputs)
if mask is not None:
padding_mask = ops.cast(mask[:, None, :], dtype="int32")
padding_mask = ops.minimum(padding_mask, causal_mask)
else:
padding_mask = None
attention_output_1 = self.attention_1(
query=inputs, value=inputs, key=inputs, attention_mask=causal_mask
)
out_1 = self.layernorm_1(inputs + attention_output_1)
attention_output_2 = self.attention_2(
query=out_1,
value=encoder_outputs,
key=encoder_outputs,
attention_mask=padding_mask,
)
out_2 = self.layernorm_2(out_1 + attention_output_2)
proj_output = self.dense_proj(out_2)
return self.layernorm_3(out_2 + proj_output)
def get_causal_attention_mask(self, inputs):
input_shape = ops.shape(inputs)
batch_size, sequence_length = input_shape[0], input_shape[1]
i = ops.arange(sequence_length)[:, None]
j = ops.arange(sequence_length)
mask = ops.cast(i >= j, dtype="int32")
mask = ops.reshape(mask, (1, input_shape[1], input_shape[1]))
mult = ops.concatenate(
[ops.expand_dims(batch_size, -1), ops.convert_to_tensor([1, 1])],
axis=0,
)
return ops.tile(mask, mult)
def get_config(self):
config = super().get_config()
config.update(
{
"embed_dim": self.embed_dim,
"latent_dim": self.latent_dim,
"num_heads": self.num_heads,
}
)
return config
```
Next, we assemble the end-to-end model.
```python
embed_dim = 256
latent_dim = 2048
num_heads = 8
encoder_inputs = keras.Input(shape=(None,), dtype="int64", name="encoder_inputs")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)
encoder_outputs = TransformerEncoder(embed_dim, latent_dim, num_heads)(x)
encoder = keras.Model(encoder_inputs, encoder_outputs)
decoder_inputs = keras.Input(shape=(None,), dtype="int64", name="decoder_inputs")
encoded_seq_inputs = keras.Input(shape=(None, embed_dim), name="decoder_state_inputs")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)
x = TransformerDecoder(embed_dim, latent_dim, num_heads)(x, encoded_seq_inputs)
x = layers.Dropout(0.5)(x)
decoder_outputs = layers.Dense(vocab_size, activation="softmax")(x)
decoder = keras.Model([decoder_inputs, encoded_seq_inputs], decoder_outputs)
decoder_outputs = decoder([decoder_inputs, encoder_outputs])
transformer = keras.Model(
[encoder_inputs, decoder_inputs], decoder_outputs, name="transformer"
)
```
---
## Training our model
We'll use accuracy as a quick way to monitor training progress on the validation data.
Note that machine translation typically uses BLEU scores as well as other metrics, rather than accuracy.
Here we only train for 1 epoch, but to get the model to actually converge
you should train for at least 30 epochs.
```python
epochs = 1 # This should be at least 30 for convergence
transformer.summary()
transformer.compile(
"rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
transformer.fit(train_ds, epochs=epochs, validation_data=val_ds)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "transformer"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ encoder_inputs │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ positional_embeddi… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,845,…</span> │ encoder_inputs[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">PositionalEmbeddi…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ decoder_inputs │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ transformer_encoder │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,155,…</span> │ positional_embeddin… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TransformerEncode…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ functional_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">12,959…</span> │ decoder_inputs[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">15000</span>) │ │ transformer_encoder… │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">19,960,216</span> (76.14 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">19,960,216</span> (76.14 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
5/1302 [37m━━━━━━━━━━━━━━━━━━━━ 42s 33ms/step - accuracy: 0.3558 - loss: 8.3596
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699484373.932513 76082 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 64s 39ms/step - accuracy: 0.7073 - loss: 2.2372 - val_accuracy: 0.7329 - val_loss: 1.6477
<keras.src.callbacks.history.History at 0x7ff611f21540>
```
</div>
---
## Decoding test sentences
Finally, let's demonstrate how to translate brand new English sentences.
We simply feed into the model the vectorized English sentence
as well as the target token `"[start]"`, then we repeatedly generate the next token, until
we hit the token `"[end]"`.
```python
spa_vocab = spa_vectorization.get_vocabulary()
spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
max_decoded_sentence_length = 20
def decode_sequence(input_sentence):
tokenized_input_sentence = eng_vectorization([input_sentence])
decoded_sentence = "[start]"
for i in range(max_decoded_sentence_length):
tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1]
predictions = transformer([tokenized_input_sentence, tokenized_target_sentence])
# ops.argmax(predictions[0, i, :]) is not a concrete value for jax here
sampled_token_index = ops.convert_to_numpy(
ops.argmax(predictions[0, i, :])
).item(0)
sampled_token = spa_index_lookup[sampled_token_index]
decoded_sentence += " " + sampled_token
if sampled_token == "[end]":
break
return decoded_sentence
test_eng_texts = [pair[0] for pair in test_pairs]
for _ in range(30):
input_sentence = random.choice(test_eng_texts)
translated = decode_sequence(input_sentence)
```
After 30 epochs, we get results such as:
> She handed him the money.
> [start] ella le pasó el dinero [end]
> Tom has never heard Mary sing.
> [start] tom nunca ha oído cantar a mary [end]
> Perhaps she will come tomorrow.
> [start] tal vez ella vendrá mañana [end]
> I love to write.
> [start] me encanta escribir [end]
> His French is improving little by little.
> [start] su francés va a [UNK] sólo un poco [end]
> My hotel told me to call you.
> [start] mi hotel me dijo que te [UNK] [end]
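As noted in the training section, BLEU is the standard way to measure translation
quality. A minimal sketch for scoring a subset of the test pairs might look like the
following -- this is not part of the original example, it assumes the `sacrebleu`
package is installed, and it reuses the `decode_sequence` helper defined above:

```python
import sacrebleu

references, hypotheses = [], []
for eng, spa in test_pairs[:100]:  # score a small subset to keep this quick
    reference = spa.replace("[start]", "").replace("[end]", "").strip()
    hypothesis = decode_sequence(eng).replace("[start]", "").replace("[end]", "").strip()
    references.append(reference)
    hypotheses.append(hypothesis)

bleu = sacrebleu.corpus_bleu(hypotheses, [references])
print(f"BLEU: {bleu.score:.2f}")
```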
| keras-io/examples/nlp/md/neural_machine_translation_with_transformer.md/0 | {
"file_path": "keras-io/examples/nlp/md/neural_machine_translation_with_transformer.md",
"repo_id": "keras-io",
"token_count": 9270
} | 93 |
"""
Title: Multimodal entailment
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/08/08
Last modified: 2021/08/15
Description: Training a multimodal model for predicting entailment.
Accelerator: GPU
"""
"""
## Introduction
In this example, we will build and train a model for predicting multimodal entailment. We will be
using the
[multimodal entailment dataset](https://github.com/google-research-datasets/recognizing-multimodal-entailment)
recently introduced by Google Research.
### What is multimodal entailment?
On social media platforms, to audit and moderate content
we may want to find answers to the
following questions in near real-time:
* Does a given piece of information contradict the other?
* Does a given piece of information imply the other?
In NLP, this task is called analyzing _textual entailment_. However, that's only
when the information comes from text content.
In practice, it's often the case that the information available comes not just
from text content, but from a multimodal combination of text, images, audio, video, etc.
_Multimodal entailment_ is simply the extension of textual entailment to a variety
of new input modalities.
### Requirements
This example requires TensorFlow 2.5 or higher. In addition, TensorFlow Hub and
TensorFlow Text are required for the BERT model
([Devlin et al.](https://arxiv.org/abs/1810.04805)). These libraries can be installed
using the following command:
"""
"""shell
pip install -q tensorflow_text
"""
"""
## Imports
"""
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from tensorflow import keras
"""
## Define a label map
"""
label_map = {"Contradictory": 0, "Implies": 1, "NoEntailment": 2}
"""
## Collect the dataset
The original dataset is available
[here](https://github.com/google-research-datasets/recognizing-multimodal-entailment).
It comes with URLs of images which are hosted on Twitter's photo storage system called
the
[Photo Blob Storage (PBS for short)](https://blog.twitter.com/engineering/en_us/a/2012/blobstore-twitter-s-in-house-photo-storage-system).
We will be working with the downloaded images along with additional data that comes with
the original dataset. Thanks to
[Nilabhra Roy Chowdhury](https://de.linkedin.com/in/nilabhraroychowdhury) who worked on
preparing the image data.
"""
image_base_path = keras.utils.get_file(
"tweet_images",
"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/releases/download/v1.0.0/tweet_images.tar.gz",
untar=True,
)
"""
## Read the dataset and apply basic preprocessing
"""
df = pd.read_csv(
"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/raw/main/csvs/tweets.csv"
)
df.sample(10)
"""
The columns we are interested in are the following:
* `text_1`
* `image_1`
* `text_2`
* `image_2`
* `label`
The entailment task is formulated as the following:
***Given the pairs of (`text_1`, `image_1`) and (`text_2`, `image_2`), do they entail (or
not entail or contradict) each other?***
We have the images already downloaded. `image_1` is downloaded with `id1` as its filename
and `image_2` with `id2` as its filename. In the next step, we will add two
more columns to `df` - filepaths of `image_1`s and `image_2`s.
"""
images_one_paths = []
images_two_paths = []
for idx in range(len(df)):
current_row = df.iloc[idx]
id_1 = current_row["id_1"]
id_2 = current_row["id_2"]
    extension_one = current_row["image_1"].split(".")[-1]
    extension_two = current_row["image_2"].split(".")[-1]
    image_one_path = os.path.join(image_base_path, str(id_1) + f".{extension_one}")
    image_two_path = os.path.join(image_base_path, str(id_2) + f".{extension_two}")
images_one_paths.append(image_one_path)
images_two_paths.append(image_two_path)
df["image_1_path"] = images_one_paths
df["image_2_path"] = images_two_paths
# Create another column containing the integer ids of
# the string labels.
df["label_idx"] = df["label"].apply(lambda x: label_map[x])
"""
## Dataset visualization
"""
def visualize(idx):
current_row = df.iloc[idx]
image_1 = plt.imread(current_row["image_1_path"])
image_2 = plt.imread(current_row["image_2_path"])
text_1 = current_row["text_1"]
text_2 = current_row["text_2"]
label = current_row["label"]
plt.subplot(1, 2, 1)
plt.imshow(image_1)
plt.axis("off")
plt.title("Image One")
plt.subplot(1, 2, 2)
    plt.imshow(image_2)
plt.axis("off")
plt.title("Image Two")
plt.show()
print(f"Text one: {text_1}")
print(f"Text two: {text_2}")
print(f"Label: {label}")
random_idx = np.random.choice(len(df))
visualize(random_idx)
random_idx = np.random.choice(len(df))
visualize(random_idx)
"""
## Train/test split
The dataset suffers from
[class imbalance problem](https://developers.google.com/machine-learning/glossary#class-imbalanced-dataset).
We can confirm that in the following cell.
"""
df["label"].value_counts()
"""
To account for that we will go for a stratified split.
"""
# 10% for test
train_df, test_df = train_test_split(
df, test_size=0.1, stratify=df["label"].values, random_state=42
)
# 5% for validation
train_df, val_df = train_test_split(
train_df, test_size=0.05, stratify=train_df["label"].values, random_state=42
)
print(f"Total training examples: {len(train_df)}")
print(f"Total validation examples: {len(val_df)}")
print(f"Total test examples: {len(test_df)}")
"""
## Data input pipeline
TensorFlow Hub provides a
[variety of models from the BERT family](https://www.tensorflow.org/text/tutorials/bert_glue#loading_models_from_tensorflow_hub).
Each of those models comes with a
corresponding preprocessing layer. You can learn more about these models and their
preprocessing layers from
[this resource](https://www.tensorflow.org/text/tutorials/bert_glue#loading_models_from_tensorflow_hub).
To keep the runtime of this example relatively short, we will use a smaller variant of
the original BERT model.
"""
# Define TF Hub paths to the BERT encoder and its preprocessor
bert_model_path = (
"https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1"
)
bert_preprocess_path = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
"""
Our text preprocessing code mostly comes from
[this tutorial](https://www.tensorflow.org/text/tutorials/bert_glue).
You are highly encouraged to check out the tutorial to learn more about the input
preprocessing.
"""
def make_bert_preprocessing_model(sentence_features, seq_length=128):
"""Returns Model mapping string features to BERT inputs.
Args:
sentence_features: A list with the names of string-valued features.
seq_length: An integer that defines the sequence length of BERT inputs.
Returns:
A Keras Model that can be called on a list or dict of string Tensors
(with the order or names, resp., given by sentence_features) and
returns a dict of tensors for input to BERT.
"""
input_segments = [
tf.keras.layers.Input(shape=(), dtype=tf.string, name=ft)
for ft in sentence_features
]
# Tokenize the text to word pieces.
bert_preprocess = hub.load(bert_preprocess_path)
tokenizer = hub.KerasLayer(bert_preprocess.tokenize, name="tokenizer")
segments = [tokenizer(s) for s in input_segments]
# Optional: Trim segments in a smart way to fit seq_length.
# Simple cases (like this example) can skip this step and let
# the next step apply a default truncation to approximately equal lengths.
truncated_segments = segments
# Pack inputs. The details (start/end token ids, dict of output tensors)
# are model-dependent, so this gets loaded from the SavedModel.
packer = hub.KerasLayer(
bert_preprocess.bert_pack_inputs,
arguments=dict(seq_length=seq_length),
name="packer",
)
model_inputs = packer(truncated_segments)
return keras.Model(input_segments, model_inputs)
bert_preprocess_model = make_bert_preprocessing_model(["text_1", "text_2"])
keras.utils.plot_model(bert_preprocess_model, show_shapes=True, show_dtype=True)
"""
### Run the preprocessor on a sample input
"""
idx = np.random.choice(len(train_df))
row = train_df.iloc[idx]
sample_text_1, sample_text_2 = row["text_1"], row["text_2"]
print(f"Text 1: {sample_text_1}")
print(f"Text 2: {sample_text_2}")
test_text = [np.array([sample_text_1]), np.array([sample_text_2])]
text_preprocessed = bert_preprocess_model(test_text)
print("Keys : ", list(text_preprocessed.keys()))
print("Shape Word Ids : ", text_preprocessed["input_word_ids"].shape)
print("Word Ids : ", text_preprocessed["input_word_ids"][0, :16])
print("Shape Mask : ", text_preprocessed["input_mask"].shape)
print("Input Mask : ", text_preprocessed["input_mask"][0, :16])
print("Shape Type Ids : ", text_preprocessed["input_type_ids"].shape)
print("Type Ids : ", text_preprocessed["input_type_ids"][0, :16])
"""
We will now create `tf.data.Dataset` objects from the dataframes.
Note that the text inputs will be preprocessed as a part of the data input pipeline. But
the preprocessing modules can also be a part of their corresponding BERT models. This
helps reduce the training/serving skew and lets our models operate with raw text inputs.
Follow [this tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert)
to learn more about how to incorporate the preprocessing modules directly inside the
models.
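As a rough sketch (not used in this example), folding the preprocessor directly into a
model so that it accepts raw strings could look like this -- note that this default path
handles a single text segment, whereas packing two segments requires the
`bert_pack_inputs` step shown earlier:

```python
text_input = keras.Input(shape=(), dtype=tf.string, name="raw_text")
encoder_inputs = hub.KerasLayer(bert_preprocess_path)(text_input)
bert_outputs = hub.KerasLayer(bert_model_path, trainable=False)(encoder_inputs)
raw_text_encoder = keras.Model(text_input, bert_outputs["pooled_output"])
```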
"""
def dataframe_to_dataset(dataframe):
columns = ["image_1_path", "image_2_path", "text_1", "text_2", "label_idx"]
dataframe = dataframe[columns].copy()
labels = dataframe.pop("label_idx")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.shuffle(buffer_size=len(dataframe))
return ds
"""
### Preprocessing utilities
"""
resize = (128, 128)
bert_input_features = ["input_word_ids", "input_type_ids", "input_mask"]
def preprocess_image(image_path):
extension = tf.strings.split(image_path)[-1]
image = tf.io.read_file(image_path)
if extension == b"jpg":
image = tf.image.decode_jpeg(image, 3)
else:
image = tf.image.decode_png(image, 3)
image = tf.image.resize(image, resize)
return image
def preprocess_text(text_1, text_2):
text_1 = tf.convert_to_tensor([text_1])
text_2 = tf.convert_to_tensor([text_2])
output = bert_preprocess_model([text_1, text_2])
output = {feature: tf.squeeze(output[feature]) for feature in bert_input_features}
return output
def preprocess_text_and_image(sample):
image_1 = preprocess_image(sample["image_1_path"])
image_2 = preprocess_image(sample["image_2_path"])
text = preprocess_text(sample["text_1"], sample["text_2"])
return {"image_1": image_1, "image_2": image_2, "text": text}
"""
### Create the final datasets
"""
batch_size = 32
auto = tf.data.AUTOTUNE
def prepare_dataset(dataframe, training=True):
ds = dataframe_to_dataset(dataframe)
if training:
ds = ds.shuffle(len(train_df))
ds = ds.map(lambda x, y: (preprocess_text_and_image(x), y)).cache()
ds = ds.batch(batch_size).prefetch(auto)
return ds
train_ds = prepare_dataset(train_df)
validation_ds = prepare_dataset(val_df, False)
test_ds = prepare_dataset(test_df, False)
"""
## Model building utilities
Our final model will accept two images along with their text counterparts. While the
images will be directly fed to the model the text inputs will first be preprocessed and
then will make it into the model. Below is a visual illustration of this approach:

The model consists of the following elements:
* A standalone encoder for the images. We will use a
[ResNet50V2](https://arxiv.org/abs/1603.05027) pre-trained on the ImageNet-1k dataset for
this.
* A standalone encoder for the text. A pre-trained BERT model will be used for this.
After extracting the individual embeddings, they will be projected into a space of identical dimensionality.
Finally, their projections will be concatenated and fed to the final classification
layer.
This is a multi-class classification problem involving the following classes:
* NoEntailment
* Implies
* Contradictory
`project_embeddings()`, `create_vision_encoder()`, and `create_text_encoder()` utilities
are referred from [this example](https://keras.io/examples/nlp/nl_image_search/).
"""
"""
Projection utilities
"""
def project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
):
projected_embeddings = keras.layers.Dense(units=projection_dims)(embeddings)
for _ in range(num_projection_layers):
x = tf.nn.gelu(projected_embeddings)
x = keras.layers.Dense(projection_dims)(x)
x = keras.layers.Dropout(dropout_rate)(x)
x = keras.layers.Add()([projected_embeddings, x])
projected_embeddings = keras.layers.LayerNormalization()(x)
return projected_embeddings
"""
Vision encoder utilities
"""
def create_vision_encoder(
num_projection_layers, projection_dims, dropout_rate, trainable=False
):
# Load the pre-trained ResNet50V2 model to be used as the base encoder.
resnet_v2 = keras.applications.ResNet50V2(
include_top=False, weights="imagenet", pooling="avg"
)
# Set the trainability of the base encoder.
for layer in resnet_v2.layers:
layer.trainable = trainable
# Receive the images as inputs.
image_1 = keras.Input(shape=(128, 128, 3), name="image_1")
image_2 = keras.Input(shape=(128, 128, 3), name="image_2")
# Preprocess the input image.
preprocessed_1 = keras.applications.resnet_v2.preprocess_input(image_1)
preprocessed_2 = keras.applications.resnet_v2.preprocess_input(image_2)
    # Generate the embeddings for the images using the resnet_v2 model,
    # then concatenate them.
embeddings_1 = resnet_v2(preprocessed_1)
embeddings_2 = resnet_v2(preprocessed_2)
embeddings = keras.layers.Concatenate()([embeddings_1, embeddings_2])
# Project the embeddings produced by the model.
outputs = project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
)
# Create the vision encoder model.
return keras.Model([image_1, image_2], outputs, name="vision_encoder")
"""
Text encoder utilities
"""
def create_text_encoder(
num_projection_layers, projection_dims, dropout_rate, trainable=False
):
# Load the pre-trained BERT model to be used as the base encoder.
bert = hub.KerasLayer(
bert_model_path,
name="bert",
)
# Set the trainability of the base encoder.
bert.trainable = trainable
# Receive the text as inputs.
bert_input_features = ["input_type_ids", "input_mask", "input_word_ids"]
inputs = {
feature: keras.Input(shape=(128,), dtype=tf.int32, name=feature)
for feature in bert_input_features
}
# Generate embeddings for the preprocessed text using the BERT model.
embeddings = bert(inputs)["pooled_output"]
# Project the embeddings produced by the model.
outputs = project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
)
# Create the text encoder model.
return keras.Model(inputs, outputs, name="text_encoder")
"""
Multimodal model utilities
"""
def create_multimodal_model(
num_projection_layers=1,
projection_dims=256,
dropout_rate=0.1,
vision_trainable=False,
text_trainable=False,
):
# Receive the images as inputs.
image_1 = keras.Input(shape=(128, 128, 3), name="image_1")
image_2 = keras.Input(shape=(128, 128, 3), name="image_2")
# Receive the text as inputs.
bert_input_features = ["input_type_ids", "input_mask", "input_word_ids"]
text_inputs = {
feature: keras.Input(shape=(128,), dtype=tf.int32, name=feature)
for feature in bert_input_features
}
# Create the encoders.
vision_encoder = create_vision_encoder(
num_projection_layers, projection_dims, dropout_rate, vision_trainable
)
text_encoder = create_text_encoder(
num_projection_layers, projection_dims, dropout_rate, text_trainable
)
# Fetch the embedding projections.
vision_projections = vision_encoder([image_1, image_2])
text_projections = text_encoder(text_inputs)
# Concatenate the projections and pass through the classification layer.
concatenated = keras.layers.Concatenate()([vision_projections, text_projections])
outputs = keras.layers.Dense(3, activation="softmax")(concatenated)
return keras.Model([image_1, image_2, text_inputs], outputs)
multimodal_model = create_multimodal_model()
keras.utils.plot_model(multimodal_model, show_shapes=True)
"""
You can inspect the structure of the individual encoders as well by setting the
`expand_nested` argument of `plot_model()` to `True`. You are encouraged
to play with the different hyperparameters involved in building this model and
observe how the final performance is affected.
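For example, a quick sketch (using the `multimodal_model` built above) that renders the
nested encoders:
```python
# Expand the nested vision and text encoders in the architecture diagram.
keras.utils.plot_model(multimodal_model, show_shapes=True, expand_nested=True)
```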
"""
"""
## Compile and train the model
"""
multimodal_model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics="accuracy"
)
history = multimodal_model.fit(train_ds, validation_data=validation_ds, epochs=10)
"""
## Evaluate the model
"""
_, acc = multimodal_model.evaluate(test_ds)
print(f"Accuracy on the test set: {round(acc * 100, 2)}%.")
"""
## Additional notes regarding training
**Incorporating regularization**:
The training logs suggest that the model is starting to overfit and may have benefitted
from regularization. Dropout ([Srivastava et al.](https://jmlr.org/papers/v15/srivastava14a.html))
is a simple yet powerful regularization technique that we can use in our model.
But how should we apply it here?
We could always introduce Dropout (`keras.layers.Dropout`) in between different layers of the model.
But here is another recipe. Our model expects inputs from two different data modalities.
What if either of the modalities is not present during inference? To account for this,
we can introduce Dropout to the individual projections just before they get concatenated:
```python
vision_projections = keras.layers.Dropout(rate)(vision_projections)
text_projections = keras.layers.Dropout(rate)(text_projections)
concatenated = keras.layers.Concatenate()([vision_projections, text_projections])
```
**Attending to what matters**:
Do all parts of the images correspond equally to their textual counterparts? It's likely
not the case. To make our model focus only on the most important bits of the images that relate
well to their corresponding textual parts, we can use "cross-attention":
```python
# Embeddings.
vision_projections = vision_encoder([image_1, image_2])
text_projections = text_encoder(text_inputs)
# Cross-attention (Luong-style).
query_value_attention_seq = keras.layers.Attention(use_scale=True, dropout=0.2)(
[vision_projections, text_projections]
)
# Concatenate.
concatenated = keras.layers.Concatenate()([vision_projections, text_projections])
contextual = keras.layers.Concatenate()([concatenated, query_value_attention_seq])
```
To see this in action, refer to
[this notebook](https://github.com/sayakpaul/Multimodal-Entailment-Baseline/blob/main/multimodal_entailment_attn.ipynb).
**Handling class imbalance**:
The dataset suffers from class imbalance. Investigating the confusion matrix of the
above model reveals that it performs poorly on the minority classes. If we had used a
weighted loss, the training would have been better guided. You can check out
[this notebook](https://github.com/sayakpaul/Multimodal-Entailment-Baseline/blob/main/multimodal_entailment.ipynb)
that takes class-imbalance into account during model training.
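A minimal sketch of one way to do this with Keras class weights (the weight values below are
hypothetical and chosen only for illustration; the linked notebook uses its own scheme):
```python
# Hypothetical weights: give the minority classes more importance than "NoEntailment".
class_weight = {0: 1.0, 1: 5.0, 2: 5.0}
history = multimodal_model.fit(
    train_ds, validation_data=validation_ds, epochs=10, class_weight=class_weight
)
```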
**Using only text inputs**:
Also, what if we had only incorporated text inputs for the entailment task? Because of
the nature of the text inputs encountered on social media platforms, text inputs alone
would have hurt the final performance. Under a similar training setup, by only using
text inputs we get to 67.14% top-1 accuracy on the same test set. Refer to
[this notebook](https://github.com/sayakpaul/Multimodal-Entailment-Baseline/blob/main/text_entailment.ipynb)
for details.
Finally, here is a table comparing different approaches taken for the entailment task:
| Type | Standard<br>Cross-entropy | Loss-weighted<br>Cross-entropy | Focal Loss |
|:---: |:---: |:---: |:---: |
| Multimodal | 77.86% | 67.86% | 86.43% |
| Only text | 67.14% | 11.43% | 37.86% |
You can check out [this repository](https://git.io/JR0HU) to learn more about how the
experiments were conducted to obtain these numbers.
"""
"""
## Final remarks
* The architecture we used in this example is too large for the number of data points
available for training. It would benefit from more data.
* We used a smaller variant of the original BERT model. Chances are high that with a
larger variant, this performance will be improved. TensorFlow Hub
[provides](https://www.tensorflow.org/text/tutorials/bert_glue#loading_models_from_tensorflow_hub)
a number of different BERT models that you can experiment with.
* We kept the pre-trained models frozen. Fine-tuning them on the multimodal entailment
task could result in better performance (see the sketch after this list).
* We built a simple baseline model for the multimodal entailment task. There are various
approaches that have been proposed to tackle the entailment problem.
[This presentation deck](https://docs.google.com/presentation/d/1mAB31BCmqzfedreNZYn4hsKPFmgHA9Kxz219DzyRY3c/edit?usp=sharing)
from the
[Recognizing Multimodal Entailment](https://multimodal-entailment.github.io/)
tutorial provides a comprehensive overview.
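As a sketch of the fine-tuning idea mentioned above (the learning rate here is an assumption,
not a tuned value), the encoders can simply be made trainable when the model is built:
```python
# Rebuild the model with trainable encoders and a lower learning rate for fine-tuning.
finetuned_model = create_multimodal_model(vision_trainable=True, text_trainable=True)
finetuned_model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=1e-5),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
```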
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/multimodal-entailment)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/multimodal_entailment)
"""
| keras-io/examples/nlp/multimodal_entailment.py/0 | {
"file_path": "keras-io/examples/nlp/multimodal_entailment.py",
"repo_id": "keras-io",
"token_count": 7718
} | 94 |
"""
Title: Text Extraction with BERT
Author: [Apoorv Nandan](https://twitter.com/NandanApoorv)
Date created: 2020/05/23
Last modified: 2020/05/23
Description: Fine-tune pretrained BERT from HuggingFace Transformers on SQuAD.
Accelerator: TPU
"""
"""
## Introduction
This demonstration uses SQuAD (Stanford Question-Answering Dataset).
In SQuAD, an input consists of a question, and a paragraph for context.
The goal is to find the span of text in the paragraph that answers the question.
We evaluate our performance on this data with the "Exact Match" metric,
which measures the percentage of predictions that exactly match any one of the
ground-truth answers.
We fine-tune a BERT model to perform this task as follows:
1. Feed the context and the question as inputs to BERT.
2. Take two vectors S and T with dimensions equal to that of
hidden states in BERT.
3. Compute the probability of each token being the start and end of
the answer span. The probability of a token being the start of
the answer is given by a dot product between S and the representation
of the token in the last layer of BERT, followed by a softmax over all tokens.
The probability of a token being the end of the answer is computed
similarly with the vector T.
4. Fine-tune BERT and learn S and T along the way.
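In Keras terms, this is a rough sketch of the idea (the `create_model()` function defined later
implements it; S and T are realized as `Dense(1)` layers without bias):
```python
# hidden_states: [batch, seq_len, hidden_dim], the last-layer BERT representations.
start_logits = layers.Dense(1, use_bias=False)(hidden_states)  # dot product with S
start_probs = layers.Softmax()(layers.Flatten()(start_logits))  # softmax over all tokens
end_logits = layers.Dense(1, use_bias=False)(hidden_states)  # dot product with T
end_probs = layers.Softmax()(layers.Flatten()(end_logits))
```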
**References:**
- [BERT](https://arxiv.org/abs/1810.04805)
- [SQuAD](https://arxiv.org/abs/1606.05250)
"""
"""
## Setup
"""
import os
import re
import json
import string
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tokenizers import BertWordPieceTokenizer
from transformers import BertTokenizer, TFBertModel, BertConfig
max_len = 384
configuration = BertConfig() # default parameters and configuration for BERT
"""
## Set-up BERT tokenizer
"""
# Save the slow pretrained tokenizer
slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
save_path = "bert_base_uncased/"
if not os.path.exists(save_path):
os.makedirs(save_path)
slow_tokenizer.save_pretrained(save_path)
# Load the fast tokenizer from saved file
tokenizer = BertWordPieceTokenizer("bert_base_uncased/vocab.txt", lowercase=True)
"""
## Load the data
"""
train_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json"
train_path = keras.utils.get_file("train.json", train_data_url)
eval_data_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json"
eval_path = keras.utils.get_file("eval.json", eval_data_url)
"""
## Preprocess the data
1. Go through the JSON file and store every record as a `SquadExample` object.
2. Go through each `SquadExample` and create `x_train, y_train, x_eval, y_eval`.
"""
class SquadExample:
def __init__(self, question, context, start_char_idx, answer_text, all_answers):
self.question = question
self.context = context
self.start_char_idx = start_char_idx
self.answer_text = answer_text
self.all_answers = all_answers
self.skip = False
def preprocess(self):
context = self.context
question = self.question
answer_text = self.answer_text
start_char_idx = self.start_char_idx
# Clean context, answer and question
context = " ".join(str(context).split())
question = " ".join(str(question).split())
answer = " ".join(str(answer_text).split())
# Find end character index of answer in context
end_char_idx = start_char_idx + len(answer)
if end_char_idx >= len(context):
self.skip = True
return
# Mark the character indexes in context that are in answer
is_char_in_ans = [0] * len(context)
for idx in range(start_char_idx, end_char_idx):
is_char_in_ans[idx] = 1
# Tokenize context
tokenized_context = tokenizer.encode(context)
# Find tokens that were created from answer characters
ans_token_idx = []
for idx, (start, end) in enumerate(tokenized_context.offsets):
if sum(is_char_in_ans[start:end]) > 0:
ans_token_idx.append(idx)
if len(ans_token_idx) == 0:
self.skip = True
return
# Find start and end token index for tokens from answer
start_token_idx = ans_token_idx[0]
end_token_idx = ans_token_idx[-1]
# Tokenize question
tokenized_question = tokenizer.encode(question)
# Create inputs
input_ids = tokenized_context.ids + tokenized_question.ids[1:]
token_type_ids = [0] * len(tokenized_context.ids) + [1] * len(
tokenized_question.ids[1:]
)
attention_mask = [1] * len(input_ids)
# Pad and create attention masks.
# Skip if truncation is needed
padding_length = max_len - len(input_ids)
if padding_length > 0: # pad
input_ids = input_ids + ([0] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
elif padding_length < 0: # skip
self.skip = True
return
self.input_ids = input_ids
self.token_type_ids = token_type_ids
self.attention_mask = attention_mask
self.start_token_idx = start_token_idx
self.end_token_idx = end_token_idx
self.context_token_to_char = tokenized_context.offsets
with open(train_path) as f:
raw_train_data = json.load(f)
with open(eval_path) as f:
raw_eval_data = json.load(f)
def create_squad_examples(raw_data):
squad_examples = []
for item in raw_data["data"]:
for para in item["paragraphs"]:
context = para["context"]
for qa in para["qas"]:
question = qa["question"]
answer_text = qa["answers"][0]["text"]
all_answers = [_["text"] for _ in qa["answers"]]
start_char_idx = qa["answers"][0]["answer_start"]
squad_eg = SquadExample(
question, context, start_char_idx, answer_text, all_answers
)
squad_eg.preprocess()
squad_examples.append(squad_eg)
return squad_examples
def create_inputs_targets(squad_examples):
dataset_dict = {
"input_ids": [],
"token_type_ids": [],
"attention_mask": [],
"start_token_idx": [],
"end_token_idx": [],
}
for item in squad_examples:
if not item.skip:
for key in dataset_dict:
dataset_dict[key].append(getattr(item, key))
for key in dataset_dict:
dataset_dict[key] = np.array(dataset_dict[key])
x = [
dataset_dict["input_ids"],
dataset_dict["token_type_ids"],
dataset_dict["attention_mask"],
]
y = [dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]]
return x, y
train_squad_examples = create_squad_examples(raw_train_data)
x_train, y_train = create_inputs_targets(train_squad_examples)
print(f"{len(train_squad_examples)} training points created.")
eval_squad_examples = create_squad_examples(raw_eval_data)
x_eval, y_eval = create_inputs_targets(eval_squad_examples)
print(f"{len(eval_squad_examples)} evaluation points created.")
"""
Create the Question-Answering Model using BERT and the Functional API
"""
def create_model():
## BERT encoder
encoder = TFBertModel.from_pretrained("bert-base-uncased")
## QA Model
input_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
token_type_ids = layers.Input(shape=(max_len,), dtype=tf.int32)
attention_mask = layers.Input(shape=(max_len,), dtype=tf.int32)
embedding = encoder(
input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask
)[0]
start_logits = layers.Dense(1, name="start_logit", use_bias=False)(embedding)
start_logits = layers.Flatten()(start_logits)
end_logits = layers.Dense(1, name="end_logit", use_bias=False)(embedding)
end_logits = layers.Flatten()(end_logits)
start_probs = layers.Activation(keras.activations.softmax)(start_logits)
end_probs = layers.Activation(keras.activations.softmax)(end_logits)
model = keras.Model(
inputs=[input_ids, token_type_ids, attention_mask],
outputs=[start_probs, end_probs],
)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
optimizer = keras.optimizers.Adam(learning_rate=5e-5)
model.compile(optimizer=optimizer, loss=[loss, loss])
return model
"""
This code should preferably be run on Google Colab TPU runtime.
With Colab TPUs, each epoch will take 5-6 minutes.
"""
use_tpu = True
if use_tpu:
# Create distribution strategy
tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
strategy = tf.distribute.TPUStrategy(tpu)
# Create model
with strategy.scope():
model = create_model()
else:
model = create_model()
model.summary()
"""
## Create evaluation Callback
This callback will compute the exact match score using the validation data
after every epoch.
"""
def normalize_text(text):
text = text.lower()
# Remove punctuations
exclude = set(string.punctuation)
text = "".join(ch for ch in text if ch not in exclude)
# Remove articles
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
text = re.sub(regex, " ", text)
# Remove extra white space
text = " ".join(text.split())
return text
class ExactMatch(keras.callbacks.Callback):
"""
Each `SquadExample` object contains the character level offsets for each token
in its input paragraph. We use them to get back the span of text corresponding
to the tokens between our predicted start and end tokens.
All the ground-truth answers are also present in each `SquadExample` object.
We calculate the percentage of data points where the span of text obtained
from model predictions matches one of the ground-truth answers.
"""
def __init__(self, x_eval, y_eval):
self.x_eval = x_eval
self.y_eval = y_eval
def on_epoch_end(self, epoch, logs=None):
pred_start, pred_end = self.model.predict(self.x_eval)
count = 0
eval_examples_no_skip = [_ for _ in eval_squad_examples if not _.skip]
for idx, (start, end) in enumerate(zip(pred_start, pred_end)):
squad_eg = eval_examples_no_skip[idx]
offsets = squad_eg.context_token_to_char
start = np.argmax(start)
end = np.argmax(end)
if start >= len(offsets):
continue
pred_char_start = offsets[start][0]
if end < len(offsets):
pred_char_end = offsets[end][1]
pred_ans = squad_eg.context[pred_char_start:pred_char_end]
else:
pred_ans = squad_eg.context[pred_char_start:]
normalized_pred_ans = normalize_text(pred_ans)
normalized_true_ans = [normalize_text(_) for _ in squad_eg.all_answers]
if normalized_pred_ans in normalized_true_ans:
count += 1
acc = count / len(self.y_eval[0])
print(f"\nepoch={epoch+1}, exact match score={acc:.2f}")
"""
## Train and Evaluate
"""
exact_match_callback = ExactMatch(x_eval, y_eval)
model.fit(
x_train,
y_train,
epochs=1, # For demonstration, 3 epochs are recommended
verbose=2,
batch_size=64,
callbacks=[exact_match_callback],
)
| keras-io/examples/nlp/text_extraction_with_bert.py/0 | {
"file_path": "keras-io/examples/nlp/text_extraction_with_bert.py",
"repo_id": "keras-io",
"token_count": 4689
} | 95 |
"""
Title: Classification with TensorFlow Decision Forests
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2022/01/25
Last modified: 2022/01/25
Description: Using TensorFlow Decision Forests for structured data classification.
Accelerator: GPU
"""
"""
## Introduction
[TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests)
is a collection of state-of-the-art algorithms of Decision Forest models
that are compatible with Keras APIs.
The models include [Random Forests](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/RandomForestModel),
[Gradient Boosted Trees](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel),
and [CART](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/CartModel),
and can be used for regression, classification, and ranking tasks.
For a beginner's guide to TensorFlow Decision Forests,
please refer to this [tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab).
This example uses a Gradient Boosted Trees model for binary classification of
structured data, and covers the following scenarios:
1. Build a decision forests model by specifying the input feature usage.
2. Implement a custom *Binary Target encoder* as a [Keras Preprocessing layer](https://keras.io/api/layers/preprocessing_layers/)
to encode the categorical features with respect to their target value co-occurrences,
and then use the encoded features to build a decision forests model.
3. Encode the categorical features as [embeddings](https://keras.io/api/layers/core_layers/embedding),
train these embeddings in a simple NN model, and then use the
trained embeddings as inputs to build a decision forests model.
This example uses TensorFlow 2.7 or higher,
as well as [TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests),
which you can install using the following command:
```python
pip install -U tensorflow_decision_forests
```
"""
"""
## Setup
"""
import math
import urllib
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_decision_forests as tfdf
"""
## Prepare the data
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)
provided by the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification to determine whether a person makes over 50K a year.
The dataset includes ~300K instances with 41 input features: 7 numerical features
and 34 categorical features.
First we load the data from the UCI Machine Learning Repository into a Pandas DataFrame.
"""
BASE_PATH = "https://kdd.ics.uci.edu/databases/census-income/census-income"
CSV_HEADER = [
l.decode("utf-8").split(":")[0].replace(" ", "_")
for l in urllib.request.urlopen(f"{BASE_PATH}.names")
if not l.startswith(b"|")
][2:]
CSV_HEADER.append("income_level")
train_data = pd.read_csv(
f"{BASE_PATH}.data.gz",
header=None,
names=CSV_HEADER,
)
test_data = pd.read_csv(
f"{BASE_PATH}.test.gz",
header=None,
names=CSV_HEADER,
)
"""
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for encoding
the input features with respect to their types.
"""
# Target column name.
TARGET_COLUMN_NAME = "income_level"
# The labels of the target columns.
TARGET_LABELS = [" - 50000.", " 50000+."]
# Weight column name.
WEIGHT_COLUMN_NAME = "instance_weight"
# Numeric feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"wage_per_hour",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"num_persons_worked_for_employer",
"weeks_worked_in_year",
]
# Categorical features and their vocabulary lists.
CATEGORICAL_FEATURE_NAMES = [
"class_of_worker",
"detailed_industry_recode",
"detailed_occupation_recode",
"education",
"enroll_in_edu_inst_last_wk",
"marital_stat",
"major_industry_code",
"major_occupation_code",
"race",
"hispanic_origin",
"sex",
"member_of_a_labor_union",
"reason_for_unemployment",
"full_or_part_time_employment_stat",
"tax_filer_stat",
"region_of_previous_residence",
"state_of_previous_residence",
"detailed_household_and_family_stat",
"detailed_household_summary_in_household",
"migration_code-change_in_msa",
"migration_code-change_in_reg",
"migration_code-move_within_reg",
"live_in_this_house_1_year_ago",
"migration_prev_res_in_sunbelt",
"family_members_under_18",
"country_of_birth_father",
"country_of_birth_mother",
"country_of_birth_self",
"citizenship",
"own_business_or_self_employed",
"fill_inc_questionnaire_for_veteran's_admin",
"veterans_benefits",
"year",
]
"""
Now we perform basic data preparation.
"""
def prepare_dataframe(dataframe):
# Convert the target labels from string to integer.
dataframe[TARGET_COLUMN_NAME] = dataframe[TARGET_COLUMN_NAME].map(
TARGET_LABELS.index
)
# Cast the categorical features to string.
for feature_name in CATEGORICAL_FEATURE_NAMES:
dataframe[feature_name] = dataframe[feature_name].astype(str)
prepare_dataframe(train_data)
prepare_dataframe(test_data)
"""
Now let's show the shapes of the training and test dataframes, and display some instances.
"""
print(f"Train data shape: {train_data.shape}")
print(f"Test data shape: {test_data.shape}")
print(train_data.head().T)
"""
## Configure hyperparameters
You can find all the parameters of the Gradient Boosted Tree model in the
[documentation](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel)
"""
# Maximum number of decision trees. The effective number of trained trees can be smaller if early stopping is enabled.
NUM_TREES = 250
# Minimum number of examples in a node.
MIN_EXAMPLES = 6
# Maximum depth of the tree. max_depth=1 means that all trees will be roots.
MAX_DEPTH = 5
# Ratio of the dataset (sampling without replacement) used to train individual trees for the random sampling method.
SUBSAMPLE = 0.65
# Control the sampling of the datasets used to train individual trees.
SAMPLING_METHOD = "RANDOM"
# Ratio of the training dataset used to monitor the training. Required to be >0 if early stopping is enabled.
VALIDATION_RATIO = 0.1
"""
## Implement a training and evaluation procedure
The `run_experiment()` method is responsible for loading the train and test datasets,
training a given model, and evaluating the trained model.
Note that when training a Decision Forests model, only one epoch is needed to
read the full dataset. Any extra steps will result in unnecessarily slower training.
Therefore, the default `num_epochs=1` is used in the `run_experiment()` method.
"""
def run_experiment(model, train_data, test_data, num_epochs=1, batch_size=None):
train_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
train_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
)
test_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
test_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
)
model.fit(train_dataset, epochs=num_epochs, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset, verbose=0)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
"""
## Experiment 1: Decision Forests with raw features
"""
"""
### Specify model input feature usages
You can attach semantics to each feature to control how it is used by the model.
If not specified, the semantics are inferred from the representation type.
It is recommended to specify the [feature usages](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/FeatureUsage)
explicitly to avoid incorrectly inferred semantics.
For example, a categorical value identifier (integer) will be inferred as numerical,
while it is semantically categorical.
For numerical features, you can set the `discretized` parameter to the number
of buckets by which the numerical feature should be discretized.
This makes the training faster but may lead to worse models.
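For example (a sketch; the bucket count is arbitrary, and "age" is one of the numerical
features defined above):
```python
tfdf.keras.FeatureUsage(
    name="age",
    semantic=tfdf.keras.FeatureSemantic.NUMERICAL,
    discretized=255,
)
```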
"""
def specify_feature_usages():
feature_usages = []
for feature_name in NUMERIC_FEATURE_NAMES:
feature_usage = tfdf.keras.FeatureUsage(
name=feature_name, semantic=tfdf.keras.FeatureSemantic.NUMERICAL
)
feature_usages.append(feature_usage)
for feature_name in CATEGORICAL_FEATURE_NAMES:
feature_usage = tfdf.keras.FeatureUsage(
name=feature_name, semantic=tfdf.keras.FeatureSemantic.CATEGORICAL
)
feature_usages.append(feature_usage)
return feature_usages
"""
### Create a Gradient Boosted Trees model
When compiling a decision forests model, you may only provide extra evaluation metrics.
The loss is specified in the model construction,
and the optimizer is irrelevant to decision forests models.
"""
def create_gbt_model():
# See all the model parameters in https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel
gbt_model = tfdf.keras.GradientBoostedTreesModel(
features=specify_feature_usages(),
exclude_non_specified_features=True,
num_trees=NUM_TREES,
max_depth=MAX_DEPTH,
min_examples=MIN_EXAMPLES,
subsample=SUBSAMPLE,
validation_ratio=VALIDATION_RATIO,
task=tfdf.keras.Task.CLASSIFICATION,
)
gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")])
return gbt_model
"""
### Train and evaluate the model
"""
gbt_model = create_gbt_model()
run_experiment(gbt_model, train_data, test_data)
"""
### Inspect the model
The `model.summary()` method will display several types of information about
your decision trees model, including the model type, task, input features, and feature importance.
"""
print(gbt_model.summary())
"""
## Experiment 2: Decision Forests with target encoding
[Target encoding](https://dl.acm.org/doi/10.1145/507533.507538) is a common preprocessing
technique for categorical features that converts them into numerical features.
Using categorical features with high cardinality as-is may lead to overfitting.
Target encoding aims to replace each categorical feature value with one or more
numerical values that represent its co-occurrence with the target labels.
More precisely, given a categorical feature, the binary target encoder in this example
will produce three new numerical features:
1. `positive_frequency`: How many times each feature value occurred with a positive target label.
2. `negative_frequency`: How many times each feature value occurred with a negative target label.
3. `positive_probability`: The probability that the target label is positive,
given the feature value, which is computed as
`positive_frequency / (positive_frequency + negative_frequency + correction)`.
The `correction` term is added to make the division more stable for rare categorical values.
The default value for `correction` is 1.0.
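As a quick numeric check of the formula (plain Python, using the default `correction=1.0`):
```python
positive_frequency = 3.0  # the feature value was seen 3 times with a positive label
negative_frequency = 1.0  # and once with a negative label
correction = 1.0
positive_probability = positive_frequency / (
    positive_frequency + negative_frequency + correction
)  # 3 / 5 = 0.6
```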
Note that target encoding is effective with models that cannot automatically
learn dense representations of categorical features, such as decision forests
or kernel methods. If neural network models are used, it's recommended to
encode categorical features as embeddings.
"""
"""
### Implement Binary Target Encoder
For simplicity, we assume that the inputs for the `adapt` and `call` methods
are in the expected data types and shapes, so no validation logic is added.
It is recommended to pass the `vocabulary_size` of the categorical feature to the
`BinaryTargetEncoding` constructor. If not specified, it will be computed during
the `adapt()` method execution.
"""
class BinaryTargetEncoding(layers.Layer):
def __init__(self, vocabulary_size=None, correction=1.0, **kwargs):
super().__init__(**kwargs)
self.vocabulary_size = vocabulary_size
self.correction = correction
# Initialize so that call() can detect whether adapt() has been run.
self.target_encoding_statistics = None
def adapt(self, data):
# data is expected to be an integer numpy array or a Tensor of shape [num_examples, 2].
# This contains feature values for a given feature in the dataset, and target values.
# Convert the data to a tensor.
data = tf.convert_to_tensor(data)
# Separate the feature values and target values
feature_values = tf.cast(data[:, 0], tf.dtypes.int32)
target_values = tf.cast(data[:, 1], tf.dtypes.bool)
# Compute the vocabulary_size if not specified.
if self.vocabulary_size is None:
self.vocabulary_size = tf.unique(feature_values).y.shape[0]
# Filter the data where the target label is positive.
positive_indices = tf.where(condition=target_values)
postive_feature_values = tf.gather_nd(
params=feature_values, indices=positive_indices
)
# Compute how many times each feature value occurred with a positive target label.
positive_frequency = tf.math.unsorted_segment_sum(
data=tf.ones(
shape=(postive_feature_values.shape[0], 1), dtype=tf.dtypes.float64
),
segment_ids=postive_feature_values,
num_segments=self.vocabulary_size,
)
# Filter the data where the target label is negative.
negative_indices = tf.where(condition=tf.math.logical_not(target_values))
negative_feature_values = tf.gather_nd(
params=feature_values, indices=negative_indices
)
# Compute how many times each feature value occurred with a negative target label.
negative_frequency = tf.math.unsorted_segment_sum(
data=tf.ones(
shape=(negative_feature_values.shape[0], 1), dtype=tf.dtypes.float64
),
segment_ids=negative_feature_values,
num_segments=self.vocabulary_size,
)
# Compute positive probability for the input feature values.
positive_probability = positive_frequency / (
positive_frequency + negative_frequency + self.correction
)
# Concatenate the computed statistics for target_encoding.
target_encoding_statistics = tf.cast(
tf.concat(
[positive_frequency, negative_frequency, positive_probability], axis=1
),
dtype=tf.dtypes.float32,
)
self.target_encoding_statistics = tf.constant(target_encoding_statistics)
def call(self, inputs):
# inputs is expected to be an integer numpy array or a Tensor of shape [num_examples, 1].
# This includes the feature values for a given feature in the dataset.
# Raise an error if the target encoding statistics are not computed.
if self.target_encoding_statistics is None:
raise ValueError(
"You need to call the adapt method to compute target encoding statistics."
)
# Convert the inputs to a tensor.
inputs = tf.convert_to_tensor(inputs)
# Cast the inputs to an int64 tensor.
inputs = tf.cast(inputs, tf.dtypes.int64)
# Lookup target encoding statistics for the input feature values.
target_encoding_statistics = tf.cast(
tf.gather_nd(self.target_encoding_statistics, inputs),
dtype=tf.dtypes.float32,
)
return target_encoding_statistics
"""
Let's test the binary target encoder
"""
data = tf.constant(
[
[0, 1],
[2, 0],
[0, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0],
[0, 1],
[2, 1],
[1, 0],
[0, 1],
[2, 0],
[0, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0],
[0, 1],
[2, 0],
]
)
binary_target_encoder = BinaryTargetEncoding()
binary_target_encoder.adapt(data)
print(binary_target_encoder([[0], [1], [2]]))
"""
### Create model inputs
"""
def create_model_inputs():
inputs = {}
for feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.float32
)
for feature_name in CATEGORICAL_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.string
)
return inputs
"""
### Implement a feature encoding with target encoding
"""
def create_target_encoder():
inputs = create_model_inputs()
target_values = train_data[[TARGET_COLUMN_NAME]].to_numpy()
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
# Get the vocabulary of the categorical feature.
vocabulary = sorted(
[str(value) for value in list(train_data[feature_name].unique())]
)
# Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_indices = lookup(inputs[feature_name])
# Prepare the data to adapt the target encoding.
print("### Adapting target encoding for:", feature_name)
feature_values = train_data[[feature_name]].to_numpy().astype(str)
feature_value_indices = lookup(feature_values)
data = tf.concat([feature_value_indices, target_values], axis=1)
feature_encoder = BinaryTargetEncoding()
feature_encoder.adapt(data)
# Convert the feature value indices to target encoding representations.
encoded_feature = feature_encoder(tf.expand_dims(value_indices, -1))
else:
# Expand the dimensions of the numerical input feature and use it as-is.
encoded_feature = tf.expand_dims(inputs[feature_name], -1)
# Add the encoded feature to the list.
encoded_features.append(encoded_feature)
# Concatenate all the encoded features.
encoded_features = tf.concat(encoded_features, axis=1)
# Create and return a Keras model with encoded features as outputs.
return keras.Model(inputs=inputs, outputs=encoded_features)
"""
### Create a Gradient Boosted Trees model with a preprocessor
In this scenario, we use the target encoding as a preprocessor for the Gradient Boosted Tree model,
and let the model infer semantics of the input features.
"""
def create_gbt_with_preprocessor(preprocessor):
gbt_model = tfdf.keras.GradientBoostedTreesModel(
preprocessing=preprocessor,
num_trees=NUM_TREES,
max_depth=MAX_DEPTH,
min_examples=MIN_EXAMPLES,
subsample=SUBSAMPLE,
validation_ratio=VALIDATION_RATIO,
task=tfdf.keras.Task.CLASSIFICATION,
)
gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")])
return gbt_model
"""
### Train and evaluate the model
"""
gbt_model = create_gbt_with_preprocessor(create_target_encoder())
run_experiment(gbt_model, train_data, test_data)
"""
## Experiment 3: Decision Forests with trained embeddings
In this scenario, we build an encoder model that encodes the categorical
features to embeddings, where the size of the embedding for a given categorical
feature is the square root of the size of its vocabulary.
We train these embeddings in a simple NN model through backpropagation.
After the embedding encoder is trained, we use it as a preprocessor for the
input features of a Gradient Boosted Tree model.
Note that the embeddings and a decision forest model cannot be trained
jointly in one phase, since decision forest models do not train with backpropagation.
Rather, the embeddings have to be trained in an initial phase,
and then used as static inputs to the decision forest model.
"""
"""
### Implement feature encoding with embeddings
"""
def create_embedding_encoder(size=None):
inputs = create_model_inputs()
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
# Get the vocabulary of the categorical feature.
vocabulary = sorted(
[str(value) for value in list(train_data[feature_name].unique())]
)
# Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_index = lookup(inputs[feature_name])
# Create an embedding layer with the specified dimensions
vocabulary_size = len(vocabulary)
embedding_size = int(math.sqrt(vocabulary_size))
feature_encoder = layers.Embedding(
input_dim=len(vocabulary), output_dim=embedding_size
)
# Convert the index values to embedding representations.
encoded_feature = feature_encoder(value_index)
else:
# Expand the dimensions of the numerical input feature and use it as-is.
encoded_feature = tf.expand_dims(inputs[feature_name], -1)
# Add the encoded feature to the list.
encoded_features.append(encoded_feature)
# Concatenate all the encoded features.
encoded_features = layers.concatenate(encoded_features, axis=1)
# Apply dropout.
encoded_features = layers.Dropout(rate=0.25)(encoded_features)
# Perform a non-linear projection.
encoded_features = layers.Dense(
units=size if size else encoded_features.shape[-1], activation="gelu"
)(encoded_features)
# Create and return a Keras model with encoded features as outputs.
return keras.Model(inputs=inputs, outputs=encoded_features)
"""
### Build an NN model to train the embeddings
"""
def create_nn_model(encoder):
inputs = create_model_inputs()
embeddings = encoder(inputs)
output = layers.Dense(units=1, activation="sigmoid")(embeddings)
nn_model = keras.Model(inputs=inputs, outputs=output)
nn_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy("accuracy")],
)
return nn_model
embedding_encoder = create_embedding_encoder(size=64)
run_experiment(
create_nn_model(embedding_encoder),
train_data,
test_data,
num_epochs=5,
batch_size=256,
)
"""
### Train and evaluate a Gradient Boosted Tree model with embeddings
"""
gbt_model = create_gbt_with_preprocessor(embedding_encoder)
run_experiment(gbt_model, train_data, test_data)
"""
## Concluding remarks
TensorFlow Decision Forests provides powerful models, especially with structured data.
In our experiments, the Gradient Boosted Tree model achieved 95.79% test accuracy.
When using target encoding for the categorical features, the same model achieved 95.81% test accuracy.
When pretraining embeddings to be used as inputs to the Gradient Boosted Tree model,
we achieved 95.82% test accuracy.
Decision Forests can be used with Neural Networks, either by
1) using Neural Networks to learn useful representation of the input data,
and then using Decision Forests for the supervised learning task, or by
2) creating an ensemble of both Decision Forests and Neural Network models.
Note that TensorFlow Decision Forests does not (yet) support hardware accelerators.
All training and inference is done on the CPU.
In addition, Decision Forests require a finite dataset that fits in memory
for their training procedures. However, there are diminishing returns
for increasing the size of the dataset, and Decision Forests algorithms
arguably need fewer examples for convergence than large Neural Network models.
"""
| keras-io/examples/structured_data/classification_with_tfdf.py/0 | {
"file_path": "keras-io/examples/structured_data/classification_with_tfdf.py",
"repo_id": "keras-io",
"token_count": 8628
} | 96 |
# Timeseries classification from scratch
**Author:** [hfawaz](https://github.com/hfawaz/)<br>
**Date created:** 2020/07/21<br>
**Last modified:** 2023/11/10<br>
**Description:** Training a timeseries classifier from scratch on the FordA dataset from the UCR/UEA archive.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/timeseries_classification_from_scratch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/timeseries_classification_from_scratch.py)
---
## Introduction
This example shows how to do timeseries classification from scratch, starting from raw
CSV timeseries files on disk. We demonstrate the workflow on the FordA dataset from the
[UCR/UEA archive](https://www.cs.ucr.edu/%7Eeamonn/time_series_data_2018/).
---
## Setup
```python
import keras
import numpy as np
import matplotlib.pyplot as plt
```
---
## Load the data: the FordA dataset
### Dataset description
The dataset we are using here is called FordA.
The data comes from the UCR archive.
The dataset contains 3601 training instances and another 1320 testing instances.
Each timeseries corresponds to a measurement of engine noise captured by a motor sensor.
For this task, the goal is to automatically detect the presence of a specific issue with
the engine. The problem is a balanced binary classification task. The full description of
this dataset can be found [here](http://www.j-wichard.de/publications/FordPaper.pdf).
### Read the TSV data
We will use the `FordA_TRAIN` file for training and the
`FordA_TEST` file for testing. The simplicity of this dataset
allows us to demonstrate effectively how to use ConvNets for timeseries classification.
In this file, the first column corresponds to the label.
```python
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
```
---
## Visualize the data
Here we visualize one timeseries example for each class in the dataset.
```python
classes = np.unique(np.concatenate((y_train, y_test), axis=0))
plt.figure()
for c in classes:
c_x_train = x_train[y_train == c]
plt.plot(c_x_train[0], label="class " + str(c))
plt.legend(loc="best")
plt.show()
plt.close()
```

---
## Standardize the data
Our timeseries already all have the same length (500). However, their values are
usually in various ranges. This is not ideal for a neural network;
in general we should seek to make the input values normalized.
For this specific dataset, the data is already z-normalized: each timeseries sample
has a mean equal to zero and a standard deviation equal to one. This type of
normalization is very common for timeseries classification problems, see
[Bagnall et al. (2016)](https://link.springer.com/article/10.1007/s10618-016-0483-9).
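If your own data were not already standardized, a per-sample z-normalization could be sketched
as follows (`x` is assumed to be an array of shape `(num_samples, timesteps)`; this step is not
needed for FordA):
```python
# Zero mean and unit standard deviation along the time axis, per sample.
x = (x - x.mean(axis=1, keepdims=True)) / x.std(axis=1, keepdims=True)
```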
Note that the timeseries data used here are univariate, meaning we only have one channel
per timeseries example.
We will therefore transform the timeseries into a multivariate one with one channel
using a simple reshaping via numpy.
This will allow us to construct a model that is easily applicable to multivariate time
series.
```python
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
```
Finally, in order to use `sparse_categorical_crossentropy`, we will have to count
the number of classes beforehand.
```python
num_classes = len(np.unique(y_train))
```
Now we shuffle the training set because we will be using the `validation_split` option
later when training.
```python
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
```
Standardize the labels to positive integers.
The expected labels will then be 0 and 1.
```python
y_train[y_train == -1] = 0
y_test[y_test == -1] = 0
```
---
## Build a model
We build a Fully Convolutional Neural Network originally proposed in
[this paper](https://arxiv.org/abs/1611.06455).
The implementation is based on the TF 2 version provided
[here](https://github.com/hfawaz/dl-4-tsc/).
The following hyperparameters (kernel_size, filters, the usage of BatchNorm) were found
via random search using [KerasTuner](https://github.com/keras-team/keras-tuner).
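As a rough illustration only (not the search actually used for this example), such a random
search could be set up along these lines with KerasTuner:
```python
import keras_tuner
def build_model(hp):
    # Same FCN skeleton as below, with the tuned hyperparameters exposed to the tuner.
    inputs = keras.layers.Input(shape=x_train.shape[1:])
    x = inputs
    for _ in range(3):
        x = keras.layers.Conv1D(
            filters=hp.Choice("filters", [32, 64, 128]),
            kernel_size=hp.Choice("kernel_size", [3, 5, 7]),
            padding="same",
        )(x)
        if hp.Boolean("use_batch_norm"):
            x = keras.layers.BatchNormalization()(x)
        x = keras.layers.ReLU()(x)
    x = keras.layers.GlobalAveragePooling1D()(x)
    outputs = keras.layers.Dense(num_classes, activation="softmax")(x)
    model = keras.models.Model(inputs, outputs)
    model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["sparse_categorical_accuracy"],
    )
    return model
tuner = keras_tuner.RandomSearch(
    build_model, objective="val_loss", max_trials=10, overwrite=True
)
tuner.search(x_train, y_train, validation_split=0.2, epochs=10)
```
The model below simply hard-codes the values found by such a search.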
```python
def make_model(input_shape):
input_layer = keras.layers.Input(input_shape)
conv1 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(input_layer)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.ReLU()(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv1)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.ReLU()(conv2)
conv3 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv2)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.ReLU()(conv3)
gap = keras.layers.GlobalAveragePooling1D()(conv3)
output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)
return keras.models.Model(inputs=input_layer, outputs=output_layer)
model = make_model(input_shape=x_train.shape[1:])
keras.utils.plot_model(model, show_shapes=True)
```

---
## Train the model
```python
epochs = 500
batch_size = 32
callbacks = [
keras.callbacks.ModelCheckpoint(
"best_model.keras", save_best_only=True, monitor="val_loss"
),
keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=20, min_lr=0.0001
),
keras.callbacks.EarlyStopping(monitor="val_loss", patience=50, verbose=1),
]
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
history = model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_split=0.2,
verbose=1,
)
```
<div class="k-default-codeblock">
```
Epoch 1/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 5s 32ms/step - loss: 0.6056 - sparse_categorical_accuracy: 0.6818 - val_loss: 0.9692 - val_sparse_categorical_accuracy: 0.4591 - learning_rate: 0.0010
Epoch 2/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4623 - sparse_categorical_accuracy: 0.7619 - val_loss: 0.9336 - val_sparse_categorical_accuracy: 0.4591 - learning_rate: 0.0010
Epoch 3/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4383 - sparse_categorical_accuracy: 0.7888 - val_loss: 0.6842 - val_sparse_categorical_accuracy: 0.5409 - learning_rate: 0.0010
Epoch 4/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4295 - sparse_categorical_accuracy: 0.7826 - val_loss: 0.6632 - val_sparse_categorical_accuracy: 0.5118 - learning_rate: 0.0010
Epoch 5/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4311 - sparse_categorical_accuracy: 0.7831 - val_loss: 0.5693 - val_sparse_categorical_accuracy: 0.6935 - learning_rate: 0.0010
Epoch 6/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4250 - sparse_categorical_accuracy: 0.7832 - val_loss: 0.5001 - val_sparse_categorical_accuracy: 0.7712 - learning_rate: 0.0010
Epoch 7/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4179 - sparse_categorical_accuracy: 0.8079 - val_loss: 0.5151 - val_sparse_categorical_accuracy: 0.7379 - learning_rate: 0.0010
Epoch 8/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3929 - sparse_categorical_accuracy: 0.8073 - val_loss: 0.3992 - val_sparse_categorical_accuracy: 0.8377 - learning_rate: 0.0010
Epoch 9/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4074 - sparse_categorical_accuracy: 0.7947 - val_loss: 0.4053 - val_sparse_categorical_accuracy: 0.8225 - learning_rate: 0.0010
Epoch 10/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.4067 - sparse_categorical_accuracy: 0.7984 - val_loss: 0.3727 - val_sparse_categorical_accuracy: 0.8377 - learning_rate: 0.0010
Epoch 11/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3910 - sparse_categorical_accuracy: 0.8083 - val_loss: 0.3687 - val_sparse_categorical_accuracy: 0.8363 - learning_rate: 0.0010
Epoch 12/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3872 - sparse_categorical_accuracy: 0.8001 - val_loss: 0.3773 - val_sparse_categorical_accuracy: 0.8169 - learning_rate: 0.0010
Epoch 13/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3684 - sparse_categorical_accuracy: 0.8138 - val_loss: 0.3566 - val_sparse_categorical_accuracy: 0.8474 - learning_rate: 0.0010
Epoch 14/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3843 - sparse_categorical_accuracy: 0.8102 - val_loss: 0.3674 - val_sparse_categorical_accuracy: 0.8322 - learning_rate: 0.0010
Epoch 15/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3774 - sparse_categorical_accuracy: 0.8260 - val_loss: 0.4040 - val_sparse_categorical_accuracy: 0.7614 - learning_rate: 0.0010
Epoch 16/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3547 - sparse_categorical_accuracy: 0.8351 - val_loss: 0.6609 - val_sparse_categorical_accuracy: 0.6671 - learning_rate: 0.0010
Epoch 17/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3797 - sparse_categorical_accuracy: 0.8194 - val_loss: 0.3379 - val_sparse_categorical_accuracy: 0.8599 - learning_rate: 0.0010
Epoch 18/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3544 - sparse_categorical_accuracy: 0.8373 - val_loss: 0.3363 - val_sparse_categorical_accuracy: 0.8613 - learning_rate: 0.0010
Epoch 19/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3372 - sparse_categorical_accuracy: 0.8477 - val_loss: 0.4554 - val_sparse_categorical_accuracy: 0.7545 - learning_rate: 0.0010
Epoch 20/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3509 - sparse_categorical_accuracy: 0.8330 - val_loss: 0.4411 - val_sparse_categorical_accuracy: 0.7490 - learning_rate: 0.0010
Epoch 21/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3771 - sparse_categorical_accuracy: 0.8195 - val_loss: 0.3526 - val_sparse_categorical_accuracy: 0.8225 - learning_rate: 0.0010
Epoch 22/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3448 - sparse_categorical_accuracy: 0.8373 - val_loss: 0.3296 - val_sparse_categorical_accuracy: 0.8669 - learning_rate: 0.0010
Epoch 23/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3400 - sparse_categorical_accuracy: 0.8455 - val_loss: 0.3938 - val_sparse_categorical_accuracy: 0.7656 - learning_rate: 0.0010
Epoch 24/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3243 - sparse_categorical_accuracy: 0.8626 - val_loss: 0.8280 - val_sparse_categorical_accuracy: 0.5534 - learning_rate: 0.0010
Epoch 25/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3263 - sparse_categorical_accuracy: 0.8518 - val_loss: 0.3881 - val_sparse_categorical_accuracy: 0.8031 - learning_rate: 0.0010
Epoch 26/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3424 - sparse_categorical_accuracy: 0.8491 - val_loss: 0.3140 - val_sparse_categorical_accuracy: 0.8766 - learning_rate: 0.0010
Epoch 27/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3236 - sparse_categorical_accuracy: 0.8551 - val_loss: 0.3138 - val_sparse_categorical_accuracy: 0.8502 - learning_rate: 0.0010
Epoch 28/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3161 - sparse_categorical_accuracy: 0.8605 - val_loss: 0.3419 - val_sparse_categorical_accuracy: 0.8294 - learning_rate: 0.0010
Epoch 29/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3077 - sparse_categorical_accuracy: 0.8660 - val_loss: 0.3326 - val_sparse_categorical_accuracy: 0.8460 - learning_rate: 0.0010
Epoch 30/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3257 - sparse_categorical_accuracy: 0.8527 - val_loss: 0.2964 - val_sparse_categorical_accuracy: 0.8932 - learning_rate: 0.0010
Epoch 31/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2990 - sparse_categorical_accuracy: 0.8754 - val_loss: 0.3273 - val_sparse_categorical_accuracy: 0.8405 - learning_rate: 0.0010
Epoch 32/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3046 - sparse_categorical_accuracy: 0.8618 - val_loss: 0.2882 - val_sparse_categorical_accuracy: 0.8641 - learning_rate: 0.0010
Epoch 33/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2998 - sparse_categorical_accuracy: 0.8759 - val_loss: 0.3532 - val_sparse_categorical_accuracy: 0.7989 - learning_rate: 0.0010
Epoch 34/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2750 - sparse_categorical_accuracy: 0.8753 - val_loss: 0.5120 - val_sparse_categorical_accuracy: 0.7365 - learning_rate: 0.0010
Epoch 35/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2784 - sparse_categorical_accuracy: 0.8862 - val_loss: 0.3159 - val_sparse_categorical_accuracy: 0.8752 - learning_rate: 0.0010
Epoch 36/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2661 - sparse_categorical_accuracy: 0.8982 - val_loss: 0.3643 - val_sparse_categorical_accuracy: 0.8433 - learning_rate: 0.0010
Epoch 37/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2769 - sparse_categorical_accuracy: 0.8814 - val_loss: 0.4004 - val_sparse_categorical_accuracy: 0.7947 - learning_rate: 0.0010
Epoch 38/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2963 - sparse_categorical_accuracy: 0.8679 - val_loss: 0.4778 - val_sparse_categorical_accuracy: 0.7323 - learning_rate: 0.0010
Epoch 39/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2688 - sparse_categorical_accuracy: 0.8851 - val_loss: 0.2490 - val_sparse_categorical_accuracy: 0.9043 - learning_rate: 0.0010
Epoch 40/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2696 - sparse_categorical_accuracy: 0.8872 - val_loss: 0.2792 - val_sparse_categorical_accuracy: 0.8821 - learning_rate: 0.0010
Epoch 41/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2880 - sparse_categorical_accuracy: 0.8868 - val_loss: 0.2775 - val_sparse_categorical_accuracy: 0.8752 - learning_rate: 0.0010
Epoch 42/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2884 - sparse_categorical_accuracy: 0.8774 - val_loss: 0.3545 - val_sparse_categorical_accuracy: 0.8128 - learning_rate: 0.0010
Epoch 43/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2840 - sparse_categorical_accuracy: 0.8709 - val_loss: 0.3292 - val_sparse_categorical_accuracy: 0.8350 - learning_rate: 0.0010
Epoch 44/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.3000 - sparse_categorical_accuracy: 0.8569 - val_loss: 1.5013 - val_sparse_categorical_accuracy: 0.5479 - learning_rate: 0.0010
Epoch 45/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2618 - sparse_categorical_accuracy: 0.8896 - val_loss: 0.2766 - val_sparse_categorical_accuracy: 0.8835 - learning_rate: 0.0010
Epoch 46/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2604 - sparse_categorical_accuracy: 0.8955 - val_loss: 0.2397 - val_sparse_categorical_accuracy: 0.9098 - learning_rate: 0.0010
Epoch 47/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2520 - sparse_categorical_accuracy: 0.8975 - val_loss: 0.3794 - val_sparse_categorical_accuracy: 0.7975 - learning_rate: 0.0010
Epoch 48/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2521 - sparse_categorical_accuracy: 0.9067 - val_loss: 0.2871 - val_sparse_categorical_accuracy: 0.8641 - learning_rate: 0.0010
Epoch 49/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2554 - sparse_categorical_accuracy: 0.8904 - val_loss: 0.8962 - val_sparse_categorical_accuracy: 0.7115 - learning_rate: 0.0010
Epoch 50/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2501 - sparse_categorical_accuracy: 0.8989 - val_loss: 0.4592 - val_sparse_categorical_accuracy: 0.7864 - learning_rate: 0.0010
Epoch 51/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2362 - sparse_categorical_accuracy: 0.8944 - val_loss: 0.4599 - val_sparse_categorical_accuracy: 0.7684 - learning_rate: 0.0010
Epoch 52/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2538 - sparse_categorical_accuracy: 0.8986 - val_loss: 0.2748 - val_sparse_categorical_accuracy: 0.8849 - learning_rate: 0.0010
Epoch 53/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2648 - sparse_categorical_accuracy: 0.8934 - val_loss: 0.2725 - val_sparse_categorical_accuracy: 0.9001 - learning_rate: 0.0010
Epoch 54/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2292 - sparse_categorical_accuracy: 0.9117 - val_loss: 0.2617 - val_sparse_categorical_accuracy: 0.8766 - learning_rate: 0.0010
Epoch 55/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2704 - sparse_categorical_accuracy: 0.8826 - val_loss: 0.2929 - val_sparse_categorical_accuracy: 0.8488 - learning_rate: 0.0010
Epoch 56/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2388 - sparse_categorical_accuracy: 0.9022 - val_loss: 0.2365 - val_sparse_categorical_accuracy: 0.9112 - learning_rate: 0.0010
Epoch 57/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2309 - sparse_categorical_accuracy: 0.9087 - val_loss: 1.1993 - val_sparse_categorical_accuracy: 0.5784 - learning_rate: 0.0010
Epoch 58/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2639 - sparse_categorical_accuracy: 0.8893 - val_loss: 0.2410 - val_sparse_categorical_accuracy: 0.9098 - learning_rate: 0.0010
Epoch 59/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2229 - sparse_categorical_accuracy: 0.9104 - val_loss: 0.6126 - val_sparse_categorical_accuracy: 0.7212 - learning_rate: 0.0010
Epoch 60/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2451 - sparse_categorical_accuracy: 0.9084 - val_loss: 0.3189 - val_sparse_categorical_accuracy: 0.8655 - learning_rate: 0.0010
Epoch 61/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2200 - sparse_categorical_accuracy: 0.9169 - val_loss: 0.7695 - val_sparse_categorical_accuracy: 0.7212 - learning_rate: 0.0010
Epoch 62/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2249 - sparse_categorical_accuracy: 0.9149 - val_loss: 0.2900 - val_sparse_categorical_accuracy: 0.8835 - learning_rate: 0.0010
Epoch 63/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2476 - sparse_categorical_accuracy: 0.8988 - val_loss: 0.2863 - val_sparse_categorical_accuracy: 0.8682 - learning_rate: 0.0010
Epoch 64/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2263 - sparse_categorical_accuracy: 0.9010 - val_loss: 0.4034 - val_sparse_categorical_accuracy: 0.7961 - learning_rate: 0.0010
Epoch 65/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2404 - sparse_categorical_accuracy: 0.9041 - val_loss: 0.2965 - val_sparse_categorical_accuracy: 0.8696 - learning_rate: 0.0010
Epoch 66/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2257 - sparse_categorical_accuracy: 0.9051 - val_loss: 0.2227 - val_sparse_categorical_accuracy: 0.9029 - learning_rate: 0.0010
Epoch 67/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2218 - sparse_categorical_accuracy: 0.9088 - val_loss: 0.2274 - val_sparse_categorical_accuracy: 0.9154 - learning_rate: 0.0010
Epoch 68/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2106 - sparse_categorical_accuracy: 0.9159 - val_loss: 0.2703 - val_sparse_categorical_accuracy: 0.8877 - learning_rate: 0.0010
Epoch 69/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1945 - sparse_categorical_accuracy: 0.9278 - val_loss: 0.2688 - val_sparse_categorical_accuracy: 0.8724 - learning_rate: 0.0010
Epoch 70/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2269 - sparse_categorical_accuracy: 0.9108 - val_loss: 0.2003 - val_sparse_categorical_accuracy: 0.9196 - learning_rate: 0.0010
Epoch 71/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.2312 - sparse_categorical_accuracy: 0.9041 - val_loss: 0.3678 - val_sparse_categorical_accuracy: 0.8322 - learning_rate: 0.0010
Epoch 72/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1828 - sparse_categorical_accuracy: 0.9290 - val_loss: 0.2397 - val_sparse_categorical_accuracy: 0.9043 - learning_rate: 0.0010
Epoch 73/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1723 - sparse_categorical_accuracy: 0.9364 - val_loss: 0.2070 - val_sparse_categorical_accuracy: 0.9098 - learning_rate: 0.0010
Epoch 74/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1830 - sparse_categorical_accuracy: 0.9317 - val_loss: 0.3114 - val_sparse_categorical_accuracy: 0.8391 - learning_rate: 0.0010
Epoch 75/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1786 - sparse_categorical_accuracy: 0.9345 - val_loss: 0.7721 - val_sparse_categorical_accuracy: 0.6824 - learning_rate: 0.0010
Epoch 76/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1680 - sparse_categorical_accuracy: 0.9444 - val_loss: 0.1898 - val_sparse_categorical_accuracy: 0.9293 - learning_rate: 0.0010
Epoch 77/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1606 - sparse_categorical_accuracy: 0.9426 - val_loss: 0.1803 - val_sparse_categorical_accuracy: 0.9293 - learning_rate: 0.0010
Epoch 78/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1705 - sparse_categorical_accuracy: 0.9292 - val_loss: 0.6892 - val_sparse_categorical_accuracy: 0.7226 - learning_rate: 0.0010
Epoch 79/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1428 - sparse_categorical_accuracy: 0.9534 - val_loss: 0.2448 - val_sparse_categorical_accuracy: 0.8932 - learning_rate: 0.0010
Epoch 80/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1527 - sparse_categorical_accuracy: 0.9441 - val_loss: 0.3191 - val_sparse_categorical_accuracy: 0.8377 - learning_rate: 0.0010
Epoch 81/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1398 - sparse_categorical_accuracy: 0.9447 - val_loss: 0.9834 - val_sparse_categorical_accuracy: 0.6366 - learning_rate: 0.0010
Epoch 82/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1615 - sparse_categorical_accuracy: 0.9405 - val_loss: 0.3857 - val_sparse_categorical_accuracy: 0.8391 - learning_rate: 0.0010
Epoch 83/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1371 - sparse_categorical_accuracy: 0.9525 - val_loss: 0.1597 - val_sparse_categorical_accuracy: 0.9501 - learning_rate: 0.0010
Epoch 84/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1377 - sparse_categorical_accuracy: 0.9548 - val_loss: 0.4212 - val_sparse_categorical_accuracy: 0.8058 - learning_rate: 0.0010
Epoch 85/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1315 - sparse_categorical_accuracy: 0.9585 - val_loss: 0.3091 - val_sparse_categorical_accuracy: 0.8447 - learning_rate: 0.0010
Epoch 86/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1381 - sparse_categorical_accuracy: 0.9517 - val_loss: 0.1539 - val_sparse_categorical_accuracy: 0.9487 - learning_rate: 0.0010
Epoch 87/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1169 - sparse_categorical_accuracy: 0.9581 - val_loss: 0.1927 - val_sparse_categorical_accuracy: 0.9168 - learning_rate: 0.0010
Epoch 88/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1438 - sparse_categorical_accuracy: 0.9512 - val_loss: 0.1696 - val_sparse_categorical_accuracy: 0.9293 - learning_rate: 0.0010
Epoch 89/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1471 - sparse_categorical_accuracy: 0.9464 - val_loss: 0.2523 - val_sparse_categorical_accuracy: 0.8988 - learning_rate: 0.0010
Epoch 90/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1389 - sparse_categorical_accuracy: 0.9535 - val_loss: 0.2452 - val_sparse_categorical_accuracy: 0.8849 - learning_rate: 0.0010
Epoch 91/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1209 - sparse_categorical_accuracy: 0.9599 - val_loss: 0.3986 - val_sparse_categorical_accuracy: 0.8183 - learning_rate: 0.0010
Epoch 92/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1278 - sparse_categorical_accuracy: 0.9520 - val_loss: 0.2153 - val_sparse_categorical_accuracy: 0.9334 - learning_rate: 0.0010
Epoch 93/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1080 - sparse_categorical_accuracy: 0.9656 - val_loss: 0.1532 - val_sparse_categorical_accuracy: 0.9459 - learning_rate: 0.0010
Epoch 94/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1236 - sparse_categorical_accuracy: 0.9671 - val_loss: 0.1580 - val_sparse_categorical_accuracy: 0.9404 - learning_rate: 0.0010
Epoch 95/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0982 - sparse_categorical_accuracy: 0.9645 - val_loss: 0.1922 - val_sparse_categorical_accuracy: 0.9417 - learning_rate: 0.0010
Epoch 96/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1165 - sparse_categorical_accuracy: 0.9630 - val_loss: 0.3719 - val_sparse_categorical_accuracy: 0.8377 - learning_rate: 0.0010
Epoch 97/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1207 - sparse_categorical_accuracy: 0.9655 - val_loss: 0.2266 - val_sparse_categorical_accuracy: 0.8988 - learning_rate: 0.0010
Epoch 98/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1431 - sparse_categorical_accuracy: 0.9530 - val_loss: 0.1165 - val_sparse_categorical_accuracy: 0.9556 - learning_rate: 0.0010
Epoch 99/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1262 - sparse_categorical_accuracy: 0.9553 - val_loss: 0.1814 - val_sparse_categorical_accuracy: 0.9320 - learning_rate: 0.0010
Epoch 100/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0983 - sparse_categorical_accuracy: 0.9714 - val_loss: 0.1264 - val_sparse_categorical_accuracy: 0.9501 - learning_rate: 0.0010
Epoch 101/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1366 - sparse_categorical_accuracy: 0.9552 - val_loss: 0.1222 - val_sparse_categorical_accuracy: 0.9570 - learning_rate: 0.0010
Epoch 102/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1156 - sparse_categorical_accuracy: 0.9602 - val_loss: 0.3325 - val_sparse_categorical_accuracy: 0.8904 - learning_rate: 0.0010
Epoch 103/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1231 - sparse_categorical_accuracy: 0.9544 - val_loss: 0.7861 - val_sparse_categorical_accuracy: 0.7074 - learning_rate: 0.0010
Epoch 104/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1081 - sparse_categorical_accuracy: 0.9653 - val_loss: 0.1329 - val_sparse_categorical_accuracy: 0.9528 - learning_rate: 0.0010
Epoch 105/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1171 - sparse_categorical_accuracy: 0.9585 - val_loss: 0.1094 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 0.0010
Epoch 106/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1110 - sparse_categorical_accuracy: 0.9633 - val_loss: 0.1403 - val_sparse_categorical_accuracy: 0.9390 - learning_rate: 0.0010
Epoch 107/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1308 - sparse_categorical_accuracy: 0.9523 - val_loss: 0.2915 - val_sparse_categorical_accuracy: 0.8863 - learning_rate: 0.0010
Epoch 108/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1062 - sparse_categorical_accuracy: 0.9662 - val_loss: 0.1033 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 0.0010
Epoch 109/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1167 - sparse_categorical_accuracy: 0.9614 - val_loss: 0.1259 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 0.0010
Epoch 110/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1037 - sparse_categorical_accuracy: 0.9676 - val_loss: 0.1180 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 0.0010
Epoch 111/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1156 - sparse_categorical_accuracy: 0.9626 - val_loss: 0.1534 - val_sparse_categorical_accuracy: 0.9473 - learning_rate: 0.0010
Epoch 112/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1165 - sparse_categorical_accuracy: 0.9559 - val_loss: 0.2067 - val_sparse_categorical_accuracy: 0.9362 - learning_rate: 0.0010
Epoch 113/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1163 - sparse_categorical_accuracy: 0.9574 - val_loss: 0.4253 - val_sparse_categorical_accuracy: 0.8044 - learning_rate: 0.0010
Epoch 114/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1148 - sparse_categorical_accuracy: 0.9601 - val_loss: 0.1323 - val_sparse_categorical_accuracy: 0.9376 - learning_rate: 0.0010
Epoch 115/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1055 - sparse_categorical_accuracy: 0.9627 - val_loss: 0.1076 - val_sparse_categorical_accuracy: 0.9612 - learning_rate: 0.0010
Epoch 116/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0910 - sparse_categorical_accuracy: 0.9700 - val_loss: 0.7235 - val_sparse_categorical_accuracy: 0.6963 - learning_rate: 0.0010
Epoch 117/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1308 - sparse_categorical_accuracy: 0.9597 - val_loss: 0.1575 - val_sparse_categorical_accuracy: 0.9348 - learning_rate: 0.0010
Epoch 118/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1368 - sparse_categorical_accuracy: 0.9433 - val_loss: 0.1076 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 0.0010
Epoch 119/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0995 - sparse_categorical_accuracy: 0.9674 - val_loss: 0.1788 - val_sparse_categorical_accuracy: 0.9196 - learning_rate: 0.0010
Epoch 120/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1221 - sparse_categorical_accuracy: 0.9506 - val_loss: 0.1161 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 0.0010
Epoch 121/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0921 - sparse_categorical_accuracy: 0.9741 - val_loss: 0.1154 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 0.0010
Epoch 122/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1081 - sparse_categorical_accuracy: 0.9618 - val_loss: 0.1153 - val_sparse_categorical_accuracy: 0.9528 - learning_rate: 0.0010
Epoch 123/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0962 - sparse_categorical_accuracy: 0.9667 - val_loss: 0.1808 - val_sparse_categorical_accuracy: 0.9390 - learning_rate: 0.0010
Epoch 124/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1115 - sparse_categorical_accuracy: 0.9634 - val_loss: 0.1017 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 0.0010
Epoch 125/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1032 - sparse_categorical_accuracy: 0.9657 - val_loss: 0.1763 - val_sparse_categorical_accuracy: 0.9390 - learning_rate: 0.0010
Epoch 126/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1088 - sparse_categorical_accuracy: 0.9628 - val_loss: 0.1823 - val_sparse_categorical_accuracy: 0.9307 - learning_rate: 0.0010
Epoch 127/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1095 - sparse_categorical_accuracy: 0.9637 - val_loss: 0.1089 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 0.0010
Epoch 128/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1316 - sparse_categorical_accuracy: 0.9547 - val_loss: 0.1416 - val_sparse_categorical_accuracy: 0.9307 - learning_rate: 0.0010
Epoch 129/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1051 - sparse_categorical_accuracy: 0.9642 - val_loss: 0.2307 - val_sparse_categorical_accuracy: 0.8904 - learning_rate: 0.0010
Epoch 130/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1051 - sparse_categorical_accuracy: 0.9692 - val_loss: 1.0068 - val_sparse_categorical_accuracy: 0.6338 - learning_rate: 0.0010
Epoch 131/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1052 - sparse_categorical_accuracy: 0.9620 - val_loss: 0.2687 - val_sparse_categorical_accuracy: 0.9112 - learning_rate: 0.0010
Epoch 132/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1045 - sparse_categorical_accuracy: 0.9647 - val_loss: 0.0941 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 0.0010
Epoch 133/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0953 - sparse_categorical_accuracy: 0.9701 - val_loss: 0.1996 - val_sparse_categorical_accuracy: 0.9390 - learning_rate: 0.0010
Epoch 134/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1149 - sparse_categorical_accuracy: 0.9612 - val_loss: 0.4479 - val_sparse_categorical_accuracy: 0.8044 - learning_rate: 0.0010
Epoch 135/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0913 - sparse_categorical_accuracy: 0.9715 - val_loss: 0.0993 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 0.0010
Epoch 136/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1211 - sparse_categorical_accuracy: 0.9586 - val_loss: 0.1036 - val_sparse_categorical_accuracy: 0.9570 - learning_rate: 0.0010
Epoch 137/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0910 - sparse_categorical_accuracy: 0.9700 - val_loss: 0.1525 - val_sparse_categorical_accuracy: 0.9279 - learning_rate: 0.0010
Epoch 138/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0986 - sparse_categorical_accuracy: 0.9633 - val_loss: 0.1699 - val_sparse_categorical_accuracy: 0.9251 - learning_rate: 0.0010
Epoch 139/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0886 - sparse_categorical_accuracy: 0.9722 - val_loss: 0.0957 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 0.0010
Epoch 140/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1050 - sparse_categorical_accuracy: 0.9652 - val_loss: 1.6603 - val_sparse_categorical_accuracy: 0.6366 - learning_rate: 0.0010
Epoch 141/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0922 - sparse_categorical_accuracy: 0.9676 - val_loss: 0.1741 - val_sparse_categorical_accuracy: 0.9209 - learning_rate: 0.0010
Epoch 142/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1383 - sparse_categorical_accuracy: 0.9476 - val_loss: 0.2704 - val_sparse_categorical_accuracy: 0.8821 - learning_rate: 0.0010
Epoch 143/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1104 - sparse_categorical_accuracy: 0.9576 - val_loss: 0.3363 - val_sparse_categorical_accuracy: 0.8447 - learning_rate: 0.0010
Epoch 144/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1037 - sparse_categorical_accuracy: 0.9666 - val_loss: 0.4437 - val_sparse_categorical_accuracy: 0.8169 - learning_rate: 0.0010
Epoch 145/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0939 - sparse_categorical_accuracy: 0.9688 - val_loss: 0.2474 - val_sparse_categorical_accuracy: 0.9029 - learning_rate: 0.0010
Epoch 146/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1130 - sparse_categorical_accuracy: 0.9564 - val_loss: 0.1531 - val_sparse_categorical_accuracy: 0.9362 - learning_rate: 0.0010
Epoch 147/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1022 - sparse_categorical_accuracy: 0.9626 - val_loss: 0.1573 - val_sparse_categorical_accuracy: 0.9348 - learning_rate: 0.0010
Epoch 148/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0815 - sparse_categorical_accuracy: 0.9774 - val_loss: 0.1416 - val_sparse_categorical_accuracy: 0.9390 - learning_rate: 0.0010
Epoch 149/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0937 - sparse_categorical_accuracy: 0.9701 - val_loss: 0.2065 - val_sparse_categorical_accuracy: 0.9112 - learning_rate: 0.0010
Epoch 150/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0955 - sparse_categorical_accuracy: 0.9672 - val_loss: 0.1146 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 0.0010
Epoch 151/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1097 - sparse_categorical_accuracy: 0.9560 - val_loss: 0.3142 - val_sparse_categorical_accuracy: 0.8599 - learning_rate: 0.0010
Epoch 152/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1017 - sparse_categorical_accuracy: 0.9636 - val_loss: 0.3406 - val_sparse_categorical_accuracy: 0.8433 - learning_rate: 0.0010
Epoch 153/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0930 - sparse_categorical_accuracy: 0.9684 - val_loss: 0.0928 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 154/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0969 - sparse_categorical_accuracy: 0.9685 - val_loss: 0.2657 - val_sparse_categorical_accuracy: 0.8904 - learning_rate: 5.0000e-04
Epoch 155/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1045 - sparse_categorical_accuracy: 0.9634 - val_loss: 0.1027 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 5.0000e-04
Epoch 156/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0915 - sparse_categorical_accuracy: 0.9699 - val_loss: 0.1175 - val_sparse_categorical_accuracy: 0.9542 - learning_rate: 5.0000e-04
Epoch 157/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0949 - sparse_categorical_accuracy: 0.9634 - val_loss: 0.1001 - val_sparse_categorical_accuracy: 0.9612 - learning_rate: 5.0000e-04
Epoch 158/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0830 - sparse_categorical_accuracy: 0.9733 - val_loss: 0.0899 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 5.0000e-04
Epoch 159/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0827 - sparse_categorical_accuracy: 0.9758 - val_loss: 0.1171 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 5.0000e-04
Epoch 160/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0903 - sparse_categorical_accuracy: 0.9686 - val_loss: 0.1056 - val_sparse_categorical_accuracy: 0.9612 - learning_rate: 5.0000e-04
Epoch 161/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0765 - sparse_categorical_accuracy: 0.9777 - val_loss: 0.1604 - val_sparse_categorical_accuracy: 0.9376 - learning_rate: 5.0000e-04
Epoch 162/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0848 - sparse_categorical_accuracy: 0.9707 - val_loss: 0.0911 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 163/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0891 - sparse_categorical_accuracy: 0.9684 - val_loss: 0.0882 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 5.0000e-04
Epoch 164/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0796 - sparse_categorical_accuracy: 0.9721 - val_loss: 0.0989 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 5.0000e-04
Epoch 165/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0810 - sparse_categorical_accuracy: 0.9720 - val_loss: 0.2738 - val_sparse_categorical_accuracy: 0.8655 - learning_rate: 5.0000e-04
Epoch 166/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0903 - sparse_categorical_accuracy: 0.9712 - val_loss: 0.0985 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 5.0000e-04
Epoch 167/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0835 - sparse_categorical_accuracy: 0.9712 - val_loss: 0.1081 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 5.0000e-04
Epoch 168/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.1182 - sparse_categorical_accuracy: 0.9519 - val_loss: 0.1212 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 5.0000e-04
Epoch 169/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0909 - sparse_categorical_accuracy: 0.9666 - val_loss: 0.0909 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 5.0000e-04
Epoch 170/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0882 - sparse_categorical_accuracy: 0.9708 - val_loss: 0.0912 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 5.0000e-04
Epoch 171/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0863 - sparse_categorical_accuracy: 0.9735 - val_loss: 0.1391 - val_sparse_categorical_accuracy: 0.9487 - learning_rate: 5.0000e-04
Epoch 172/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0853 - sparse_categorical_accuracy: 0.9692 - val_loss: 0.0941 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 173/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0922 - sparse_categorical_accuracy: 0.9679 - val_loss: 0.0924 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 174/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0954 - sparse_categorical_accuracy: 0.9699 - val_loss: 0.0898 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 5.0000e-04
Epoch 175/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0823 - sparse_categorical_accuracy: 0.9701 - val_loss: 0.1449 - val_sparse_categorical_accuracy: 0.9431 - learning_rate: 5.0000e-04
Epoch 176/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0853 - sparse_categorical_accuracy: 0.9692 - val_loss: 0.0877 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 177/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0834 - sparse_categorical_accuracy: 0.9692 - val_loss: 0.2338 - val_sparse_categorical_accuracy: 0.8974 - learning_rate: 5.0000e-04
Epoch 178/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0940 - sparse_categorical_accuracy: 0.9639 - val_loss: 0.1609 - val_sparse_categorical_accuracy: 0.9431 - learning_rate: 5.0000e-04
Epoch 179/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0965 - sparse_categorical_accuracy: 0.9628 - val_loss: 0.5213 - val_sparse_categorical_accuracy: 0.7947 - learning_rate: 5.0000e-04
Epoch 180/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0926 - sparse_categorical_accuracy: 0.9720 - val_loss: 0.0898 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 181/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0854 - sparse_categorical_accuracy: 0.9732 - val_loss: 0.0949 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 5.0000e-04
Epoch 182/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0691 - sparse_categorical_accuracy: 0.9764 - val_loss: 0.0841 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 183/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0768 - sparse_categorical_accuracy: 0.9766 - val_loss: 0.1021 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 5.0000e-04
Epoch 184/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0842 - sparse_categorical_accuracy: 0.9692 - val_loss: 0.1105 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 185/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0731 - sparse_categorical_accuracy: 0.9760 - val_loss: 0.1487 - val_sparse_categorical_accuracy: 0.9404 - learning_rate: 5.0000e-04
Epoch 186/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0762 - sparse_categorical_accuracy: 0.9724 - val_loss: 0.1126 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 187/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0783 - sparse_categorical_accuracy: 0.9723 - val_loss: 0.0954 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 188/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0960 - sparse_categorical_accuracy: 0.9671 - val_loss: 0.1957 - val_sparse_categorical_accuracy: 0.9085 - learning_rate: 5.0000e-04
Epoch 189/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0831 - sparse_categorical_accuracy: 0.9695 - val_loss: 0.1711 - val_sparse_categorical_accuracy: 0.9431 - learning_rate: 5.0000e-04
Epoch 190/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0881 - sparse_categorical_accuracy: 0.9693 - val_loss: 0.0861 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 5.0000e-04
Epoch 191/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0735 - sparse_categorical_accuracy: 0.9769 - val_loss: 0.1154 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 5.0000e-04
Epoch 192/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0877 - sparse_categorical_accuracy: 0.9708 - val_loss: 0.0845 - val_sparse_categorical_accuracy: 0.9736 - learning_rate: 5.0000e-04
Epoch 193/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0899 - sparse_categorical_accuracy: 0.9709 - val_loss: 0.0977 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 5.0000e-04
Epoch 194/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0843 - sparse_categorical_accuracy: 0.9739 - val_loss: 0.0969 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 5.0000e-04
Epoch 195/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0755 - sparse_categorical_accuracy: 0.9765 - val_loss: 0.1345 - val_sparse_categorical_accuracy: 0.9473 - learning_rate: 5.0000e-04
Epoch 196/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0768 - sparse_categorical_accuracy: 0.9733 - val_loss: 0.0844 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 197/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0751 - sparse_categorical_accuracy: 0.9801 - val_loss: 0.2736 - val_sparse_categorical_accuracy: 0.8793 - learning_rate: 5.0000e-04
Epoch 198/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0860 - sparse_categorical_accuracy: 0.9719 - val_loss: 0.0843 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 5.0000e-04
Epoch 199/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0835 - sparse_categorical_accuracy: 0.9712 - val_loss: 0.1799 - val_sparse_categorical_accuracy: 0.9209 - learning_rate: 5.0000e-04
Epoch 200/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0760 - sparse_categorical_accuracy: 0.9745 - val_loss: 0.1790 - val_sparse_categorical_accuracy: 0.9112 - learning_rate: 5.0000e-04
Epoch 201/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0714 - sparse_categorical_accuracy: 0.9742 - val_loss: 0.0918 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 5.0000e-04
Epoch 202/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0734 - sparse_categorical_accuracy: 0.9748 - val_loss: 0.1168 - val_sparse_categorical_accuracy: 0.9515 - learning_rate: 5.0000e-04
Epoch 203/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0654 - sparse_categorical_accuracy: 0.9823 - val_loss: 0.0825 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 2.5000e-04
Epoch 204/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0717 - sparse_categorical_accuracy: 0.9796 - val_loss: 0.1186 - val_sparse_categorical_accuracy: 0.9556 - learning_rate: 2.5000e-04
Epoch 205/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0935 - sparse_categorical_accuracy: 0.9679 - val_loss: 0.0847 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 206/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0897 - sparse_categorical_accuracy: 0.9687 - val_loss: 0.0820 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 2.5000e-04
Epoch 207/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0661 - sparse_categorical_accuracy: 0.9763 - val_loss: 0.0790 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 2.5000e-04
Epoch 208/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0683 - sparse_categorical_accuracy: 0.9739 - val_loss: 0.0991 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 209/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0744 - sparse_categorical_accuracy: 0.9792 - val_loss: 0.1057 - val_sparse_categorical_accuracy: 0.9570 - learning_rate: 2.5000e-04
Epoch 210/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0715 - sparse_categorical_accuracy: 0.9747 - val_loss: 0.0858 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 211/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0715 - sparse_categorical_accuracy: 0.9764 - val_loss: 0.0856 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 212/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0783 - sparse_categorical_accuracy: 0.9708 - val_loss: 0.0835 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 213/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0680 - sparse_categorical_accuracy: 0.9761 - val_loss: 0.0894 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 214/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0661 - sparse_categorical_accuracy: 0.9800 - val_loss: 0.0788 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 215/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0736 - sparse_categorical_accuracy: 0.9744 - val_loss: 0.1047 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 216/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0655 - sparse_categorical_accuracy: 0.9819 - val_loss: 0.1158 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 217/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0722 - sparse_categorical_accuracy: 0.9777 - val_loss: 0.0940 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 2.5000e-04
Epoch 218/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0750 - sparse_categorical_accuracy: 0.9761 - val_loss: 0.0966 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 2.5000e-04
Epoch 219/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0695 - sparse_categorical_accuracy: 0.9753 - val_loss: 0.1727 - val_sparse_categorical_accuracy: 0.9293 - learning_rate: 2.5000e-04
Epoch 220/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0748 - sparse_categorical_accuracy: 0.9760 - val_loss: 0.1067 - val_sparse_categorical_accuracy: 0.9570 - learning_rate: 2.5000e-04
Epoch 221/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0848 - sparse_categorical_accuracy: 0.9740 - val_loss: 0.0818 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 222/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0675 - sparse_categorical_accuracy: 0.9808 - val_loss: 0.0931 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 223/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0695 - sparse_categorical_accuracy: 0.9760 - val_loss: 0.0785 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 2.5000e-04
Epoch 224/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0680 - sparse_categorical_accuracy: 0.9822 - val_loss: 0.0820 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 225/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0637 - sparse_categorical_accuracy: 0.9772 - val_loss: 0.1084 - val_sparse_categorical_accuracy: 0.9612 - learning_rate: 2.5000e-04
Epoch 226/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0703 - sparse_categorical_accuracy: 0.9797 - val_loss: 0.1029 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 227/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0821 - sparse_categorical_accuracy: 0.9704 - val_loss: 0.1545 - val_sparse_categorical_accuracy: 0.9431 - learning_rate: 2.5000e-04
Epoch 228/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0826 - sparse_categorical_accuracy: 0.9714 - val_loss: 0.0819 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 229/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0755 - sparse_categorical_accuracy: 0.9750 - val_loss: 0.0788 - val_sparse_categorical_accuracy: 0.9723 - learning_rate: 2.5000e-04
Epoch 230/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0702 - sparse_categorical_accuracy: 0.9776 - val_loss: 0.1514 - val_sparse_categorical_accuracy: 0.9445 - learning_rate: 2.5000e-04
Epoch 231/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0749 - sparse_categorical_accuracy: 0.9775 - val_loss: 0.1150 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 232/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0732 - sparse_categorical_accuracy: 0.9794 - val_loss: 0.1110 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 233/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0667 - sparse_categorical_accuracy: 0.9781 - val_loss: 0.1451 - val_sparse_categorical_accuracy: 0.9445 - learning_rate: 2.5000e-04
Epoch 234/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0812 - sparse_categorical_accuracy: 0.9793 - val_loss: 0.0954 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 235/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0629 - sparse_categorical_accuracy: 0.9844 - val_loss: 0.0982 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 2.5000e-04
Epoch 236/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0661 - sparse_categorical_accuracy: 0.9750 - val_loss: 0.0843 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 237/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0722 - sparse_categorical_accuracy: 0.9775 - val_loss: 0.1315 - val_sparse_categorical_accuracy: 0.9542 - learning_rate: 2.5000e-04
Epoch 238/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0802 - sparse_categorical_accuracy: 0.9744 - val_loss: 0.0969 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 239/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0697 - sparse_categorical_accuracy: 0.9795 - val_loss: 0.0890 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 240/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0640 - sparse_categorical_accuracy: 0.9811 - val_loss: 0.0812 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 2.5000e-04
Epoch 241/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0637 - sparse_categorical_accuracy: 0.9852 - val_loss: 0.0750 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 2.5000e-04
Epoch 242/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0645 - sparse_categorical_accuracy: 0.9772 - val_loss: 0.0864 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 2.5000e-04
Epoch 243/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0776 - sparse_categorical_accuracy: 0.9746 - val_loss: 0.0885 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 2.5000e-04
Epoch 244/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0635 - sparse_categorical_accuracy: 0.9835 - val_loss: 0.1270 - val_sparse_categorical_accuracy: 0.9515 - learning_rate: 2.5000e-04
Epoch 245/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0669 - sparse_categorical_accuracy: 0.9761 - val_loss: 0.0803 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 2.5000e-04
Epoch 246/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0635 - sparse_categorical_accuracy: 0.9796 - val_loss: 0.0791 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 247/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0622 - sparse_categorical_accuracy: 0.9801 - val_loss: 0.0928 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 248/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0715 - sparse_categorical_accuracy: 0.9756 - val_loss: 0.0817 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 2.5000e-04
Epoch 249/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0652 - sparse_categorical_accuracy: 0.9821 - val_loss: 0.0804 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 2.5000e-04
Epoch 250/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0689 - sparse_categorical_accuracy: 0.9788 - val_loss: 0.0765 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 2.5000e-04
Epoch 251/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0720 - sparse_categorical_accuracy: 0.9773 - val_loss: 0.1128 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 2.5000e-04
Epoch 252/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0670 - sparse_categorical_accuracy: 0.9762 - val_loss: 0.0896 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 2.5000e-04
Epoch 253/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0743 - sparse_categorical_accuracy: 0.9776 - val_loss: 0.1141 - val_sparse_categorical_accuracy: 0.9556 - learning_rate: 2.5000e-04
Epoch 254/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0648 - sparse_categorical_accuracy: 0.9783 - val_loss: 0.1578 - val_sparse_categorical_accuracy: 0.9362 - learning_rate: 2.5000e-04
Epoch 255/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0554 - sparse_categorical_accuracy: 0.9862 - val_loss: 0.0835 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 2.5000e-04
Epoch 256/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0645 - sparse_categorical_accuracy: 0.9796 - val_loss: 0.0930 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 2.5000e-04
Epoch 257/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0645 - sparse_categorical_accuracy: 0.9838 - val_loss: 0.0784 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 2.5000e-04
Epoch 258/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0733 - sparse_categorical_accuracy: 0.9757 - val_loss: 0.0867 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 2.5000e-04
Epoch 259/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0601 - sparse_categorical_accuracy: 0.9836 - val_loss: 0.1279 - val_sparse_categorical_accuracy: 0.9528 - learning_rate: 2.5000e-04
Epoch 260/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0795 - sparse_categorical_accuracy: 0.9742 - val_loss: 0.1646 - val_sparse_categorical_accuracy: 0.9445 - learning_rate: 2.5000e-04
Epoch 261/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0755 - sparse_categorical_accuracy: 0.9755 - val_loss: 0.0781 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 2.5000e-04
Epoch 262/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0620 - sparse_categorical_accuracy: 0.9798 - val_loss: 0.0775 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 1.2500e-04
Epoch 263/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0671 - sparse_categorical_accuracy: 0.9777 - val_loss: 0.1033 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 1.2500e-04
Epoch 264/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0580 - sparse_categorical_accuracy: 0.9831 - val_loss: 0.0797 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 1.2500e-04
Epoch 265/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0620 - sparse_categorical_accuracy: 0.9828 - val_loss: 0.0770 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 1.2500e-04
Epoch 266/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0653 - sparse_categorical_accuracy: 0.9795 - val_loss: 0.0834 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 1.2500e-04
Epoch 267/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0646 - sparse_categorical_accuracy: 0.9808 - val_loss: 0.0911 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.2500e-04
Epoch 268/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0690 - sparse_categorical_accuracy: 0.9796 - val_loss: 0.0795 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.2500e-04
Epoch 269/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0727 - sparse_categorical_accuracy: 0.9737 - val_loss: 0.0812 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 1.2500e-04
Epoch 270/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0613 - sparse_categorical_accuracy: 0.9843 - val_loss: 0.0905 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.2500e-04
Epoch 271/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0624 - sparse_categorical_accuracy: 0.9782 - val_loss: 0.1130 - val_sparse_categorical_accuracy: 0.9542 - learning_rate: 1.2500e-04
Epoch 272/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0654 - sparse_categorical_accuracy: 0.9794 - val_loss: 0.0784 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 1.2500e-04
Epoch 273/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0693 - sparse_categorical_accuracy: 0.9804 - val_loss: 0.0980 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 1.2500e-04
Epoch 274/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0627 - sparse_categorical_accuracy: 0.9842 - val_loss: 0.0864 - val_sparse_categorical_accuracy: 0.9639 - learning_rate: 1.2500e-04
Epoch 275/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0713 - sparse_categorical_accuracy: 0.9778 - val_loss: 0.0956 - val_sparse_categorical_accuracy: 0.9598 - learning_rate: 1.2500e-04
Epoch 276/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0631 - sparse_categorical_accuracy: 0.9812 - val_loss: 0.0805 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 1.2500e-04
Epoch 277/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0613 - sparse_categorical_accuracy: 0.9797 - val_loss: 0.0982 - val_sparse_categorical_accuracy: 0.9584 - learning_rate: 1.2500e-04
Epoch 278/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0649 - sparse_categorical_accuracy: 0.9818 - val_loss: 0.0857 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 1.2500e-04
Epoch 279/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0668 - sparse_categorical_accuracy: 0.9788 - val_loss: 0.0845 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.2500e-04
Epoch 280/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0679 - sparse_categorical_accuracy: 0.9762 - val_loss: 0.0835 - val_sparse_categorical_accuracy: 0.9681 - learning_rate: 1.2500e-04
Epoch 281/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0766 - sparse_categorical_accuracy: 0.9734 - val_loss: 0.0810 - val_sparse_categorical_accuracy: 0.9695 - learning_rate: 1.2500e-04
Epoch 282/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0589 - sparse_categorical_accuracy: 0.9815 - val_loss: 0.0829 - val_sparse_categorical_accuracy: 0.9626 - learning_rate: 1.0000e-04
Epoch 283/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0676 - sparse_categorical_accuracy: 0.9771 - val_loss: 0.0856 - val_sparse_categorical_accuracy: 0.9653 - learning_rate: 1.0000e-04
Epoch 284/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0607 - sparse_categorical_accuracy: 0.9832 - val_loss: 0.0850 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.0000e-04
Epoch 285/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0723 - sparse_categorical_accuracy: 0.9782 - val_loss: 0.0844 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.0000e-04
Epoch 286/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0620 - sparse_categorical_accuracy: 0.9789 - val_loss: 0.1347 - val_sparse_categorical_accuracy: 0.9515 - learning_rate: 1.0000e-04
Epoch 287/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0641 - sparse_categorical_accuracy: 0.9802 - val_loss: 0.0765 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 1.0000e-04
Epoch 288/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0654 - sparse_categorical_accuracy: 0.9797 - val_loss: 0.1081 - val_sparse_categorical_accuracy: 0.9528 - learning_rate: 1.0000e-04
Epoch 289/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0690 - sparse_categorical_accuracy: 0.9785 - val_loss: 0.1734 - val_sparse_categorical_accuracy: 0.9362 - learning_rate: 1.0000e-04
Epoch 290/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0771 - sparse_categorical_accuracy: 0.9750 - val_loss: 0.0821 - val_sparse_categorical_accuracy: 0.9667 - learning_rate: 1.0000e-04
Epoch 291/500
90/90 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 0.0605 - sparse_categorical_accuracy: 0.9839 - val_loss: 0.0770 - val_sparse_categorical_accuracy: 0.9709 - learning_rate: 1.0000e-04
Epoch 291: early stopping
```
</div>
---
## Evaluate model on test data
```python
model = keras.models.load_model("best_model.keras")
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Test accuracy", test_acc)
print("Test loss", test_loss)
```
<div class="k-default-codeblock">
```
42/42 ━━━━━━━━━━━━━━━━━━━━ 1s 13ms/step - loss: 0.0997 - sparse_categorical_accuracy: 0.9687
Test accuracy 0.9696969985961914
Test loss 0.09916326403617859
```
</div>
---
## Plot the model's training and validation accuracy
```python
metric = "sparse_categorical_accuracy"
plt.figure()
plt.plot(history.history[metric])
plt.plot(history.history["val_" + metric])
plt.title("model " + metric)
plt.ylabel(metric, fontsize="large")
plt.xlabel("epoch", fontsize="large")
plt.legend(["train", "val"], loc="best")
plt.show()
plt.close()
```

We can see that the training accuracy reaches almost 0.95 after 100 epochs.
However, the validation accuracy shows that the network still benefits from further
training: both the training and the validation accuracy reach almost 0.97 after about
200 epochs. Beyond the 200th epoch, if we continue training, the validation accuracy
starts decreasing while the training accuracy keeps increasing: the model starts
overfitting.
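
The learning-rate drops and the early stop recorded in the log above come from the
callbacks passed to `model.fit()` earlier in this example. As a reminder, here is a
minimal sketch of that kind of callback setup; the monitored quantity, patience values
and reduction factor shown here are assumptions for illustration rather than a verbatim
copy of the values used above.

```python
callbacks = [
    keras.callbacks.ModelCheckpoint(
        "best_model.keras", save_best_only=True, monitor="val_loss"
    ),
    keras.callbacks.ReduceLROnPlateau(
        monitor="val_loss", factor=0.5, patience=20, min_lr=0.0001
    ),
    keras.callbacks.EarlyStopping(monitor="val_loss", patience=50, verbose=1),
]
```

Passing these to `model.fit(..., callbacks=callbacks, ...)` produces the behavior seen
in the log: the learning rate is halved whenever the validation loss plateaus (down to
the `min_lr` floor), and training stops once the validation loss no longer improves.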
| keras-io/examples/timeseries/md/timeseries_classification_from_scratch.md/0 | {
"file_path": "keras-io/examples/timeseries/md/timeseries_classification_from_scratch.md",
"repo_id": "keras-io",
"token_count": 29983
} | 97 |
<jupyter_start><jupyter_text>Classification using Attention-based Deep Multiple Instance Learning (MIL).**Author:** [Mohamad Jaber](https://www.linkedin.com/in/mohamadjaber1/)**Date created:** 2021/08/16**Last modified:** 2021/11/25**Description:** MIL approach to classify bags of instances and get their individual instance score. Introduction What is Multiple Instance Learning (MIL)? Usually, with supervised learning algorithms, the learner receives labels for a set of instances. In the case of MIL, the learner receives labels for a set of bags, each of which contains a set of instances. The bag is labeled positive if it contains at least one positive instance, and negative if it does not contain any. Motivation It is often assumed in image classification tasks that each image clearly represents a class label. In medical imaging (e.g. computational pathology, etc.) an *entire image* is represented by a single class label (cancerous/non-cancerous), or a region of interest could be given. However, one will be interested in knowing which patterns in the image are actually causing it to belong to that class. In this context, the image(s) will be divided and the subimages will form the bag of instances. Therefore, the goals are to: 1. Learn a model to predict a class label for a bag of instances. 2. Find out which instances within the bag caused a positive class label prediction. Implementation The following steps describe how the model works: 1. The feature extractor layers extract feature embeddings. 2. The embeddings are fed into the MIL attention layer to get the attention scores. The layer is designed as permutation-invariant. 3. Input features and their corresponding attention scores are multiplied together. 4. The resulting output is passed to a softmax function for classification. References - [Attention-based Deep Multiple Instance Learning](https://arxiv.org/abs/1802.04712). - Some of the attention operator code implementation was inspired by https://github.com/utayao/Atten_Deep_MIL. - Imbalanced data [tutorial](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data) by TensorFlow. Setup<jupyter_code>import numpy as np
import keras
from keras import layers
from keras import ops
from tqdm import tqdm
from matplotlib import pyplot as plt
plt.style.use("ggplot")<jupyter_output><empty_output><jupyter_text>Create datasetWe will create a set of bags and assign their labels according to their contents.If at least one positive instanceis available in a bag, the bag is considered as a positive bag. If it does not contain anypositive instance, the bag will be considered as negative. Configuration parameters- `POSITIVE_CLASS`: The desired class to be kept in the positive bag.- `BAG_COUNT`: The number of training bags.- `VAL_BAG_COUNT`: The number of validation bags.- `BAG_SIZE`: The number of instances in a bag.- `PLOT_SIZE`: The number of bags to plot.- `ENSEMBLE_AVG_COUNT`: The number of models to create and average together. (Optional:often results in better performance - set to 1 for single model)<jupyter_code>POSITIVE_CLASS = 1
BAG_COUNT = 1000
VAL_BAG_COUNT = 300
BAG_SIZE = 3
PLOT_SIZE = 3
ENSEMBLE_AVG_COUNT = 1<jupyter_output><empty_output><jupyter_text>Prepare bags Since the attention operator is a permutation-invariant operator, an instance with a positive class label is randomly placed among the instances in the positive bag.<jupyter_code>def create_bags(input_data, input_labels, positive_class, bag_count, instance_count):
# Set up bags.
bags = []
bag_labels = []
# Normalize input data.
input_data = np.divide(input_data, 255.0)
# Count positive samples.
count = 0
for _ in range(bag_count):
# Pick a fixed size random subset of samples.
index = np.random.choice(input_data.shape[0], instance_count, replace=False)
instances_data = input_data[index]
instances_labels = input_labels[index]
# By default, all bags are labeled as 0.
bag_label = 0
# Check if there is at least a positive class in the bag.
if positive_class in instances_labels:
# Positive bag will be labeled as 1.
bag_label = 1
count += 1
bags.append(instances_data)
bag_labels.append(np.array([bag_label]))
print(f"Positive bags: {count}")
print(f"Negative bags: {bag_count - count}")
return (list(np.swapaxes(bags, 0, 1)), np.array(bag_labels))
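# Note on the returned format: `bags` is a list of BAG_SIZE arrays, each of shape
# (bag_count, 28, 28), i.e. one array per instance position. This matches the list of
# BAG_SIZE separate inputs expected by the model built further below.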
# Load the MNIST dataset.
(x_train, y_train), (x_val, y_val) = keras.datasets.mnist.load_data()
# Create training data.
train_data, train_labels = create_bags(
x_train, y_train, POSITIVE_CLASS, BAG_COUNT, BAG_SIZE
)
# Create validation data.
val_data, val_labels = create_bags(
x_val, y_val, POSITIVE_CLASS, VAL_BAG_COUNT, BAG_SIZE
)<jupyter_output><empty_output><jupyter_text>Create the model We will now build the attention layer, prepare some utilities, then build and train the entire model. Attention operator implementation The output size of this layer is decided by the size of a single bag. The attention mechanism uses a weighted average of the instances in a bag, in which the sum of the weights must equal 1 (invariant of the bag size). The weight matrices (parameters) are **w** and **v**. To include positive and negative values, a hyperbolic tangent element-wise non-linearity is utilized. A **Gated attention mechanism** can be used to deal with complex relations. Another weight matrix, **u**, is added to the computation. A sigmoid non-linearity is used to overcome the approximately linear behavior of the hyperbolic tangent for *x* ∈ [−1, 1].<jupyter_code>class MILAttentionLayer(layers.Layer):
"""Implementation of the attention-based Deep MIL layer.
Args:
weight_params_dim: Positive Integer. Dimension of the weight matrix.
kernel_initializer: Initializer for the `kernel` matrix.
kernel_regularizer: Regularizer function applied to the `kernel` matrix.
use_gated: Boolean, whether or not to use the gated mechanism.
Returns:
List of 2D tensors with BAG_SIZE length.
The tensors are the attention scores after softmax with shape `(batch_size, 1)`.
"""
def __init__(
self,
weight_params_dim,
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
use_gated=False,
**kwargs,
):
super().__init__(**kwargs)
self.weight_params_dim = weight_params_dim
self.use_gated = use_gated
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
self.v_init = self.kernel_initializer
self.w_init = self.kernel_initializer
self.u_init = self.kernel_initializer
self.v_regularizer = self.kernel_regularizer
self.w_regularizer = self.kernel_regularizer
self.u_regularizer = self.kernel_regularizer
def build(self, input_shape):
# Input shape.
# List of 2D tensors with shape: (batch_size, input_dim).
input_dim = input_shape[0][1]
self.v_weight_params = self.add_weight(
shape=(input_dim, self.weight_params_dim),
initializer=self.v_init,
name="v",
regularizer=self.v_regularizer,
trainable=True,
)
self.w_weight_params = self.add_weight(
shape=(self.weight_params_dim, 1),
initializer=self.w_init,
name="w",
regularizer=self.w_regularizer,
trainable=True,
)
if self.use_gated:
self.u_weight_params = self.add_weight(
shape=(input_dim, self.weight_params_dim),
initializer=self.u_init,
name="u",
regularizer=self.u_regularizer,
trainable=True,
)
else:
self.u_weight_params = None
self.input_built = True
def call(self, inputs):
# Assigning variables from the number of inputs.
instances = [self.compute_attention_scores(instance) for instance in inputs]
# Stack instances into a single tensor.
instances = ops.stack(instances)
# Apply softmax over instances such that the output summation is equal to 1.
alpha = ops.softmax(instances, axis=0)
# Split to recreate the same array of tensors we had as inputs.
return [alpha[i] for i in range(alpha.shape[0])]
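    # The per-instance score computed below follows the attention mechanism of the
    # referenced paper:
    #   e_k = w^T tanh(V h_k)                       (plain attention)
    #   e_k = w^T (tanh(V h_k) * sigmoid(U h_k))    (gated attention)
    # `call` above then turns the scores into weights alpha_k = softmax_k(e_k),
    # so the weights over the instances of a bag sum to 1.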
def compute_attention_scores(self, instance):
# Reserve in-case "gated mechanism" used.
original_instance = instance
# tanh(v*h_k^T)
instance = ops.tanh(ops.tensordot(instance, self.v_weight_params, axes=1))
# for learning non-linear relations efficiently.
if self.use_gated:
instance = instance * ops.sigmoid(
ops.tensordot(original_instance, self.u_weight_params, axes=1)
)
# w^T*(tanh(v*h_k^T)) / w^T*(tanh(v*h_k^T)*sigmoid(u*h_k^T))
        return ops.tensordot(instance, self.w_weight_params, axes=1)<jupyter_output><empty_output><jupyter_text>Visualizer tool Plot the number of bags (given by `PLOT_SIZE`) with respect to the class. Moreover, if activated, the class label prediction with its associated instance score for each bag (after the model has been trained) can be seen.<jupyter_code>def plot(data, labels, bag_class, predictions=None, attention_weights=None):
""" "Utility for plotting bags and attention weights.
Args:
data: Input data that contains the bags of instances.
labels: The associated bag labels of the input data.
bag_class: String name of the desired bag class.
The options are: "positive" or "negative".
predictions: Class labels model predictions.
If you don't specify anything, ground truth labels will be used.
attention_weights: Attention weights for each instance within the input data.
If you don't specify anything, the values won't be displayed.
"""
labels = np.array(labels).reshape(-1)
if bag_class == "positive":
if predictions is not None:
labels = np.where(predictions.argmax(1) == 1)[0]
bags = np.array(data)[:, labels[0:PLOT_SIZE]]
else:
labels = np.where(labels == 1)[0]
bags = np.array(data)[:, labels[0:PLOT_SIZE]]
elif bag_class == "negative":
if predictions is not None:
labels = np.where(predictions.argmax(1) == 0)[0]
bags = np.array(data)[:, labels[0:PLOT_SIZE]]
else:
labels = np.where(labels == 0)[0]
bags = np.array(data)[:, labels[0:PLOT_SIZE]]
else:
print(f"There is no class {bag_class}")
return
print(f"The bag class label is {bag_class}")
for i in range(PLOT_SIZE):
figure = plt.figure(figsize=(8, 8))
print(f"Bag number: {labels[i]}")
for j in range(BAG_SIZE):
image = bags[j][i]
figure.add_subplot(1, BAG_SIZE, j + 1)
plt.grid(False)
if attention_weights is not None:
plt.title(np.around(attention_weights[labels[i]][j], 2))
plt.imshow(image)
plt.show()
# Plot some of validation data bags per class.
plot(val_data, val_labels, "positive")
plot(val_data, val_labels, "negative")<jupyter_output><empty_output><jupyter_text>Create modelFirst we will create some embeddings per instance, invoke the attention operator and thenuse the softmax function to output the class probabilities.<jupyter_code>def create_model(instance_shape):
# Extract features from inputs.
inputs, embeddings = [], []
shared_dense_layer_1 = layers.Dense(128, activation="relu")
shared_dense_layer_2 = layers.Dense(64, activation="relu")
for _ in range(BAG_SIZE):
inp = layers.Input(instance_shape)
flatten = layers.Flatten()(inp)
dense_1 = shared_dense_layer_1(flatten)
dense_2 = shared_dense_layer_2(dense_1)
inputs.append(inp)
embeddings.append(dense_2)
# Invoke the attention layer.
alpha = MILAttentionLayer(
weight_params_dim=256,
kernel_regularizer=keras.regularizers.L2(0.01),
use_gated=True,
name="alpha",
)(embeddings)
# Multiply attention weights with the input layers.
multiply_layers = [
layers.multiply([alpha[i], embeddings[i]]) for i in range(len(alpha))
]
# Concatenate layers.
concat = layers.concatenate(multiply_layers, axis=1)
# Classification output node.
output = layers.Dense(2, activation="softmax")(concat)
    return keras.Model(inputs, output)<jupyter_output><empty_output><jupyter_text>Class weightsSince this kind of problem could simply turn into an imbalanced data classification problem,class weighting should be considered.Let's say there are 1000 bags. There often could be cases where ~90 % of the bags do notcontain any positive label and ~10 % do.Such data can be referred to as **Imbalanced data**.Using class weights, the model will tend to give a higher weight to the rare class.<jupyter_code>def compute_class_weights(labels):
    # Count the number of positive and negative bags.
negative_count = len(np.where(labels == 0)[0])
positive_count = len(np.where(labels == 1)[0])
total_count = negative_count + positive_count
# Build class weight dictionary.
return {
0: (1 / negative_count) * (total_count / 2),
1: (1 / positive_count) * (total_count / 2),
}<jupyter_output><empty_output><jupyter_text>Build and train modelThe model is built and trained in this section.<jupyter_code>def train(train_data, train_labels, val_data, val_labels, model):
# Train model.
# Prepare callbacks.
    # Path where the best weights will be saved.
file_path = "/tmp/best_model.weights.h5"
# Initialize model checkpoint callback.
model_checkpoint = keras.callbacks.ModelCheckpoint(
file_path,
monitor="val_loss",
verbose=0,
mode="min",
save_best_only=True,
save_weights_only=True,
)
# Initialize early stopping callback.
    # Model performance is monitored on the validation data and training stops
    # when the validation loss ceases to decrease.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=10, mode="min"
)
# Compile model.
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Fit model.
model.fit(
train_data,
train_labels,
validation_data=(val_data, val_labels),
epochs=20,
class_weight=compute_class_weights(train_labels),
batch_size=1,
callbacks=[early_stopping, model_checkpoint],
verbose=0,
)
# Load best weights.
model.load_weights(file_path)
return model
# Building model(s).
instance_shape = train_data[0][0].shape
models = [create_model(instance_shape) for _ in range(ENSEMBLE_AVG_COUNT)]
# Show single model architecture.
print(models[0].summary())
# Training model(s).
trained_models = [
train(train_data, train_labels, val_data, val_labels, model)
for model in tqdm(models)
]<jupyter_output><empty_output><jupyter_text>Model evaluationThe models are now ready for evaluation.With each model we also create an associated intermediate model to get theweights from the attention layer.We will compute a prediction for each of our `ENSEMBLE_AVG_COUNT` models, andaverage them together for our final prediction.<jupyter_code>def predict(data, labels, trained_models):
# Collect info per model.
models_predictions = []
models_attention_weights = []
models_losses = []
models_accuracies = []
for model in trained_models:
# Predict output classes on data.
predictions = model.predict(data)
models_predictions.append(predictions)
# Create intermediate model to get MIL attention layer weights.
intermediate_model = keras.Model(model.input, model.get_layer("alpha").output)
# Predict MIL attention layer weights.
intermediate_predictions = intermediate_model.predict(data)
attention_weights = np.squeeze(np.swapaxes(intermediate_predictions, 1, 0))
models_attention_weights.append(attention_weights)
loss, accuracy = model.evaluate(data, labels, verbose=0)
models_losses.append(loss)
models_accuracies.append(accuracy)
print(
f"The average loss and accuracy are {np.sum(models_losses, axis=0) / ENSEMBLE_AVG_COUNT:.2f}"
f" and {100 * np.sum(models_accuracies, axis=0) / ENSEMBLE_AVG_COUNT:.2f} % resp."
)
return (
np.sum(models_predictions, axis=0) / ENSEMBLE_AVG_COUNT,
np.sum(models_attention_weights, axis=0) / ENSEMBLE_AVG_COUNT,
)
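# Note: the values returned above are plain averages of the per-model class
# probabilities and attention weights across the ENSEMBLE_AVG_COUNT models,
# which smooths out the variance of individual training runs.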
# Evaluate and predict classes and attention scores on validation data.
class_predictions, attention_params = predict(val_data, val_labels, trained_models)
# Plot some results from our validation data.
plot(
val_data,
val_labels,
"positive",
predictions=class_predictions,
attention_weights=attention_params,
)
plot(
val_data,
val_labels,
"negative",
predictions=class_predictions,
attention_weights=attention_params,
)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/attention_mil_classification.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/attention_mil_classification.ipynb",
"repo_id": "keras-io",
"token_count": 6431
} | 98 |
<jupyter_start><jupyter_text>Enhanced Deep Residual Networks for single-image super-resolution**Author:** Gitesh Chawda**Date created:** 2022/04/07**Last modified:** 2022/04/07**Description:** Training an EDSR model on the DIV2K Dataset. IntroductionIn this example, we implement[Enhanced Deep Residual Networks for Single Image Super-Resolution (EDSR)](https://arxiv.org/abs/1707.02921)by Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee.The EDSR architecture is based on the SRResNet architecture and consists of multipleresidual blocks. It uses constant scaling layers instead of batch normalization layers toproduce consistent results (input and output have similar distributions, thusnormalizing intermediate features may not be desirable). Instead of using a L2 loss (mean squared error),the authors employed an L1 loss (mean absolute error), which performs better empirically.Our implementation only includes 16 residual blocks with 64 channels.Alternatively, as shown in the Keras example[Image Super-Resolution using an Efficient Sub-Pixel CNN](https://keras.io/examples/vision/super_resolution_sub_pixel/image-superresolution-using-an-efficient-subpixel-cnn),you can do super-resolution using an ESPCN Model. According to the survey paper, EDSR is one of the top-fivebest-performing super-resolution methods based on PSNR scores. However, it has moreparameters and requires more computational power than other approaches.It has a PSNR value (≈34db) that is slightly higher than ESPCN (≈32db).As per the survey paper, EDSR performs better than ESPCN.Paper:[A comprehensive review of deep learning based single image super-resolution](https://arxiv.org/abs/2102.09351)Comparison Graph: Imports<jupyter_code>import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
AUTOTUNE = tf.data.AUTOTUNE<jupyter_output><empty_output><jupyter_text>Download the training datasetWe use the DIV2K Dataset, a prominent single-image super-resolution dataset with 1,000images of scenes with various sorts of degradations,divided into 800 images for training, 100 images for validation, and 100images for testing. We use 4x bicubic downsampled images as our "low quality" reference.<jupyter_code># Download DIV2K from TF Datasets
# Using bicubic 4x degradation type
div2k_data = tfds.image.Div2k(config="bicubic_x4")
div2k_data.download_and_prepare()
# Taking train data from div2k_data object
train = div2k_data.as_dataset(split="train", as_supervised=True)
train_cache = train.cache()
# Validation data
val = div2k_data.as_dataset(split="validation", as_supervised=True)
val_cache = val.cache()<jupyter_output><empty_output><jupyter_text>Flip, crop and resize images<jupyter_code>def flip_left_right(lowres_img, highres_img):
"""Flips Images to left and right."""
    # Outputs random values from a uniform distribution between 0 and 1
rn = tf.random.uniform(shape=(), maxval=1)
# If rn is less than 0.5 it returns original lowres_img and highres_img
# If rn is greater than 0.5 it returns flipped image
return tf.cond(
rn < 0.5,
lambda: (lowres_img, highres_img),
lambda: (
tf.image.flip_left_right(lowres_img),
tf.image.flip_left_right(highres_img),
),
)
def random_rotate(lowres_img, highres_img):
"""Rotates Images by 90 degrees."""
    # Outputs random values from a uniform distribution between 0 and 4
rn = tf.random.uniform(shape=(), maxval=4, dtype=tf.int32)
# Here rn signifies number of times the image(s) are rotated by 90 degrees
return tf.image.rot90(lowres_img, rn), tf.image.rot90(highres_img, rn)
def random_crop(lowres_img, highres_img, hr_crop_size=96, scale=4):
"""Crop images.
low resolution images: 24x24
high resolution images: 96x96
"""
lowres_crop_size = hr_crop_size // scale # 96//4=24
lowres_img_shape = tf.shape(lowres_img)[:2] # (height,width)
lowres_width = tf.random.uniform(
shape=(), maxval=lowres_img_shape[1] - lowres_crop_size + 1, dtype=tf.int32
)
lowres_height = tf.random.uniform(
shape=(), maxval=lowres_img_shape[0] - lowres_crop_size + 1, dtype=tf.int32
)
highres_width = lowres_width * scale
highres_height = lowres_height * scale
lowres_img_cropped = lowres_img[
lowres_height : lowres_height + lowres_crop_size,
lowres_width : lowres_width + lowres_crop_size,
] # 24x24
highres_img_cropped = highres_img[
highres_height : highres_height + hr_crop_size,
highres_width : highres_width + hr_crop_size,
] # 96x96
return lowres_img_cropped, highres_img_cropped<jupyter_output><empty_output><jupyter_text>Prepare a `tf.data.Dataset` objectWe augment the training data with random horizontal flips and 90 rotations.As low resolution images, we use 24x24 RGB input patches.<jupyter_code>def dataset_object(dataset_cache, training=True):
ds = dataset_cache
ds = ds.map(
lambda lowres, highres: random_crop(lowres, highres, scale=4),
num_parallel_calls=AUTOTUNE,
)
if training:
ds = ds.map(random_rotate, num_parallel_calls=AUTOTUNE)
ds = ds.map(flip_left_right, num_parallel_calls=AUTOTUNE)
# Batching Data
ds = ds.batch(16)
if training:
        # Repeat the data so that the cardinality of the dataset becomes infinite
ds = ds.repeat()
# prefetching allows later images to be prepared while the current image is being processed
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
train_ds = dataset_object(train_cache, training=True)
val_ds = dataset_object(val_cache, training=False)<jupyter_output><empty_output><jupyter_text>Visualize the dataLet's visualize a few sample images:<jupyter_code>lowres, highres = next(iter(train_ds))
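# Quick sanity check: each batch pairs 16 low-resolution crops of shape (24, 24, 3)
# with the corresponding 16 high-resolution crops of shape (96, 96, 3).
print("Low-res batch shape:", lowres.shape)
print("High-res batch shape:", highres.shape)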
# High Resolution Images
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(highres[i].numpy().astype("uint8"))
plt.title(highres[i].shape)
plt.axis("off")
# Low Resolution Images
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(lowres[i].numpy().astype("uint8"))
plt.title(lowres[i].shape)
plt.axis("off")
def PSNR(super_resolution, high_resolution):
"""Compute the peak signal-to-noise ratio, measures quality of image."""
# Max value of pixel is 255
psnr_value = tf.image.psnr(high_resolution, super_resolution, max_val=255)[0]
return psnr_value<jupyter_output><empty_output><jupyter_text>Build the modelIn the paper, the authors train three models: EDSR, MDSR, and a baseline model. In this code example,we only train the baseline model. Comparison with model with three residual blocksThe residual block design of EDSR differs from that of ResNet. Batch normalizationlayers have been removed (together with the final ReLU activation): since batch normalizationlayers normalize the features, they hurt output value range flexibility.It is thus better to remove them. Further, it also helps reduce theamount of GPU RAM required by the model, since the batch normalization layers consume the same amount ofmemory as the preceding convolutional layers.<jupyter_code>class EDSRModel(tf.keras.Model):
def train_step(self, data):
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
def predict_step(self, x):
# Adding dummy dimension using tf.expand_dims and converting to float32 using tf.cast
x = tf.cast(tf.expand_dims(x, axis=0), tf.float32)
# Passing low resolution image to model
super_resolution_img = self(x, training=False)
# Clips the tensor from min(0) to max(255)
super_resolution_img = tf.clip_by_value(super_resolution_img, 0, 255)
# Rounds the values of a tensor to the nearest integer
super_resolution_img = tf.round(super_resolution_img)
# Removes dimensions of size 1 from the shape of a tensor and converting to uint8
super_resolution_img = tf.squeeze(
tf.cast(super_resolution_img, tf.uint8), axis=0
)
return super_resolution_img
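# Note: `predict_step` above post-processes the raw network output so it can be
# displayed directly: values are clipped to [0, 255], rounded, cast to uint8,
# and the dummy batch dimension is removed.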
# Residual Block
def ResBlock(inputs):
x = layers.Conv2D(64, 3, padding="same", activation="relu")(inputs)
x = layers.Conv2D(64, 3, padding="same")(x)
x = layers.Add()([inputs, x])
return x
# Upsampling Block
def Upsampling(inputs, factor=2, **kwargs):
x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(inputs)
x = tf.nn.depth_to_space(x, block_size=factor)
x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(x)
x = tf.nn.depth_to_space(x, block_size=factor)
return x
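# `tf.nn.depth_to_space` performs the sub-pixel ("pixel shuffle") rearrangement:
# a (H, W, C * factor**2) tensor becomes (H * factor, W * factor, C). Applying the
# Conv2D + depth_to_space pair twice with factor=2 yields the overall 4x upscaling.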
def make_model(num_filters, num_of_residual_blocks):
# Flexible Inputs to input_layer
input_layer = layers.Input(shape=(None, None, 3))
# Scaling Pixel Values
x = layers.Rescaling(scale=1.0 / 255)(input_layer)
x = x_new = layers.Conv2D(num_filters, 3, padding="same")(x)
# 16 residual blocks
for _ in range(num_of_residual_blocks):
x_new = ResBlock(x_new)
x_new = layers.Conv2D(num_filters, 3, padding="same")(x_new)
x = layers.Add()([x, x_new])
x = Upsampling(x)
x = layers.Conv2D(3, 3, padding="same")(x)
output_layer = layers.Rescaling(scale=255)(x)
return EDSRModel(input_layer, output_layer)
model = make_model(num_filters=64, num_of_residual_blocks=16)<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code># Using adam optimizer with initial learning rate as 1e-4, changing learning rate after 5000 steps to 5e-5
optim_edsr = keras.optimizers.Adam(
learning_rate=keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5000], values=[1e-4, 5e-5]
)
)
# Compiling model with loss as mean absolute error(L1 Loss) and metric as psnr
model.compile(optimizer=optim_edsr, loss="mae", metrics=[PSNR])
# Training for more epochs will improve results
model.fit(train_ds, epochs=100, steps_per_epoch=200, validation_data=val_ds)<jupyter_output><empty_output><jupyter_text>Run inference on new images and plot the results<jupyter_code>def plot_results(lowres, preds):
"""
Displays low resolution image and super resolution image
"""
plt.figure(figsize=(24, 14))
plt.subplot(132), plt.imshow(lowres), plt.title("Low resolution")
plt.subplot(133), plt.imshow(preds), plt.title("Prediction")
plt.show()
for lowres, highres in val.take(10):
lowres = tf.image.random_crop(lowres, (150, 150, 3))
preds = model.predict_step(lowres)
plot_results(lowres, preds)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/edsr.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/edsr.ipynb",
"repo_id": "keras-io",
"token_count": 4220
} | 99 |
<jupyter_start><jupyter_text>Knowledge Distillation**Author:** [Kenneth Borup](https://twitter.com/Kennethborup)**Date created:** 2020/09/01**Last modified:** 2020/09/01**Description:** Implementation of classical Knowledge Distillation. Introduction to Knowledge DistillationKnowledge Distillation is a procedure for modelcompression, in which a small (student) model is trained to match a large pre-trained(teacher) model. Knowledge is transferred from the teacher model to the studentby minimizing a loss function, aimed at matching softened teacher logits as well asground-truth labels.The logits are softened by applying a "temperature" scaling function in the softmax,effectively smoothing out the probability distribution and revealinginter-class relationships learned by the teacher.**Reference:**- [Hinton et al. (2015)](https://arxiv.org/abs/1503.02531) Setup<jupyter_code>import os
import keras
from keras import layers
from keras import ops
import numpy as np<jupyter_output><empty_output><jupyter_text>Construct `Distiller()` classThe custom `Distiller()` class, overrides the `Model` methods `compile`, `compute_loss`,and `call`. In order to use the distiller, we need:- A trained teacher model- A student model to train- A student loss function on the difference between student predictions and ground-truth- A distillation loss function, along with a `temperature`, on the difference between thesoft student predictions and the soft teacher labels- An `alpha` factor to weight the student and distillation loss- An optimizer for the student and (optional) metrics to evaluate performanceIn the `compute_loss` method, we perform a forward pass of both the teacher and student,calculate the loss with weighting of the `student_loss` and `distillation_loss` by `alpha`and `1 - alpha`, respectively. Note: only the student weights are updated.<jupyter_code>class Distiller(keras.Model):
def __init__(self, student, teacher):
super().__init__()
self.teacher = teacher
self.student = student
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
alpha=0.1,
temperature=3,
):
"""Configure the distiller.
Args:
optimizer: Keras optimizer for the student weights
metrics: Keras metrics for evaluation
student_loss_fn: Loss function of difference between student
predictions and ground-truth
distillation_loss_fn: Loss function of difference between soft
student predictions and soft teacher predictions
alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn
temperature: Temperature for softening probability distributions.
Larger temperature gives softer distributions.
"""
super().compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.alpha = alpha
self.temperature = temperature
def compute_loss(
self, x=None, y=None, y_pred=None, sample_weight=None, allow_empty=False
):
teacher_pred = self.teacher(x, training=False)
student_loss = self.student_loss_fn(y, y_pred)
distillation_loss = self.distillation_loss_fn(
ops.softmax(teacher_pred / self.temperature, axis=1),
ops.softmax(y_pred / self.temperature, axis=1),
) * (self.temperature**2)
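        # Scaling the distillation term by temperature**2 (as suggested by
        # Hinton et al., 2015) keeps its gradient magnitude comparable to the
        # hard-label loss when the logits are softened.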
loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
return loss
def call(self, x):
        return self.student(x)<jupyter_output><empty_output><jupyter_text>Create student and teacher modelsInitially, we create a teacher model and a smaller student model. Both models areconvolutional neural networks and created using `Sequential()`,but could be any Keras model.<jupyter_code># Create the teacher
teacher = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="teacher",
)
# Create the student
student = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="student",
)
# Clone student for later comparison
student_scratch = keras.models.clone_model(student)<jupyter_output><empty_output><jupyter_text>Prepare the datasetThe dataset used for training the teacher and distilling the teacher is[MNIST](https://keras.io/api/datasets/mnist/), and the procedure would be equivalent forany otherdataset, e.g. [CIFAR-10](https://keras.io/api/datasets/cifar10/), with a suitable choiceof models. Both the student and teacher are trained on the training set and evaluated onthe test set.<jupyter_code># Prepare the train and test dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize data
x_train = x_train.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = x_test.astype("float32") / 255.0
x_test = np.reshape(x_test, (-1, 28, 28, 1))<jupyter_output><empty_output><jupyter_text>Train the teacherIn knowledge distillation we assume that the teacher is trained and fixed. Thus, we startby training the teacher model on the training set in the usual way.<jupyter_code># Train teacher as usual
teacher.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate teacher on data.
teacher.fit(x_train, y_train, epochs=5)
teacher.evaluate(x_test, y_test)<jupyter_output><empty_output><jupyter_text>Distill teacher to studentWe have already trained the teacher model, and we only need to initialize a`Distiller(student, teacher)` instance, `compile()` it with the desired losses,hyperparameters and optimizer, and distill the teacher to the student.<jupyter_code># Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=keras.losses.KLDivergence(),
alpha=0.1,
temperature=10,
)
# Distill teacher to student
distiller.fit(x_train, y_train, epochs=3)
# Evaluate student on test dataset
distiller.evaluate(x_test, y_test)<jupyter_output><empty_output><jupyter_text>Train student from scratch for comparisonWe can also train an equivalent student model from scratch without the teacher, in orderto evaluate the performance gain obtained by knowledge distillation.<jupyter_code># Train the student from scratch, as usual
student_scratch.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate student trained from scratch.
student_scratch.fit(x_train, y_train, epochs=3)
student_scratch.evaluate(x_test, y_test)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/knowledge_distillation.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/knowledge_distillation.ipynb",
"repo_id": "keras-io",
"token_count": 2669
} | 100 |
<jupyter_start><jupyter_text>Augmenting convnets with aggregated attention**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)**Date created:** 2022/01/22**Last modified:** 2022/01/22**Description:** Building a patch-convnet architecture and visualizing its attention maps. IntroductionVision transformers ([Dosovitskiy et. al](https://arxiv.org/abs/2010.11929))have emerged as a powerful alternative to Convolutional Neural Networks.ViTs process the images in a patch-based manner. The image informationis then aggregated into a `CLASS` token. This token correlates to themost important patches of the image for a particular classification decision.The interaction between the `CLASS` token and the patches can be visualizedto help explain a classification decision. In the academic paper[Augmenting convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692)by Touvron et. al, the authors propose to set up an equivalent visualization forconvnets. They propose to substitute the global average pooling layerof a convnet with a Transformer layer. The self-attention layer of theTransformer would produce attention maps that correspond to themost attended patches of the image for the classification decision.In this example, we minimally implement the ideas of[Augmenting Convolutional networks with attention-based aggregation](https://arxiv.org/abs/2112.13692).The main goal of this example is to cover the following ideas, withminor modifications (to adjust the implementation with CIFAR10):- The simple design for the attention-based pooling layer, such that it explicitly provides the weights (importance) of the different patches.- The novel architecture of convnet is called the **PatchConvNet** which deviates from the age old pyramidal architecture. Setup and ImportsThis example requires TensorFlow Addons, which can be installed usingthe following command:```shellpip install -U tensorflow-addons```<jupyter_code>import math
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import keras
from keras import layers
from keras import ops
from tensorflow import data as tf_data
# Set seed for reproducibiltiy
SEED = 42
keras.utils.set_random_seed(SEED)<jupyter_output><empty_output><jupyter_text>Hyperparameters<jupyter_code># DATA
BATCH_SIZE = 128
BUFFER_SIZE = BATCH_SIZE * 2
AUTO = tf_data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10 # for CIFAR 10
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
# ARCHITECTURE
DIMENSIONS = 256
SE_RATIO = 8
TRUNK_DEPTH = 2
# OPTIMIZER
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 50<jupyter_output><empty_output><jupyter_text>Load the CIFAR10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
train_ds = tf_data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf_data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)<jupyter_output><empty_output><jupyter_text>Augmentation layers<jupyter_code>def get_preprocessing():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="preprocessing",
)
return model
def get_train_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
    return model<jupyter_output><empty_output><jupyter_text>Convolutional stemThe stem of the model is a lightweight preprocessing module thatmaps image pixels to a set of vectors (patches).<jupyter_code>def build_convolutional_stem(dimensions):
"""Build the convolutional stem.
Args:
dimensions: The embedding dimension of the patches (d in paper).
    Returns:
        The convolutional stem as a keras sequential
        model.
"""
config = {
"kernel_size": (3, 3),
"strides": (2, 2),
"activation": ops.gelu,
"padding": "same",
}
convolutional_stem = keras.Sequential(
[
layers.Conv2D(filters=dimensions // 2, **config),
layers.Conv2D(filters=dimensions, **config),
],
name="convolutional_stem",
)
    return convolutional_stem<jupyter_output><empty_output><jupyter_text>Convolutional trunkThe trunk of the model is the most compute-intensive part. It consistsof `N` stacked residual convolutional blocks.<jupyter_code>class SqueezeExcite(layers.Layer):
"""Applies squeeze and excitation to input feature maps as seen in
https://arxiv.org/abs/1709.01507.
Args:
ratio: The ratio with which the feature map needs to be reduced in
the reduction phase.
Inputs:
Convolutional features.
Outputs:
Attention modified feature maps.
"""
def __init__(self, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
def get_config(self):
config = super().get_config()
config.update({"ratio": self.ratio})
return config
def build(self, input_shape):
filters = input_shape[-1]
self.squeeze = layers.GlobalAveragePooling2D(keepdims=True)
self.reduction = layers.Dense(
units=filters // self.ratio,
activation="relu",
use_bias=False,
)
self.excite = layers.Dense(units=filters, activation="sigmoid", use_bias=False)
self.multiply = layers.Multiply()
def call(self, x):
shortcut = x
x = self.squeeze(x)
x = self.reduction(x)
x = self.excite(x)
x = self.multiply([shortcut, x])
return x
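# Squeeze-Excitation recap: feature maps are globally average-pooled ("squeeze"),
# reduced by a Dense bottleneck of size filters // ratio, expanded back through a
# sigmoid-activated Dense layer ("excite"), and the resulting per-channel weights
# rescale the original feature maps.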
class Trunk(layers.Layer):
"""Convolutional residual trunk as in the https://arxiv.org/abs/2112.13692
Args:
depth: Number of trunk residual blocks
        dimensions: Dimension of the model (denoted by d in the paper)
ratio: The Squeeze-Excitation ratio
Inputs:
Convolutional features extracted from the conv stem.
Outputs:
Flattened patches.
"""
def __init__(self, depth, dimensions, ratio, **kwargs):
super().__init__(**kwargs)
self.ratio = ratio
self.dimensions = dimensions
self.depth = depth
def get_config(self):
config = super().get_config()
config.update(
{
"ratio": self.ratio,
"dimensions": self.dimensions,
"depth": self.depth,
}
)
return config
def build(self, input_shape):
config = {
"filters": self.dimensions,
"activation": ops.gelu,
"padding": "same",
}
trunk_block = [
layers.LayerNormalization(epsilon=1e-6),
layers.Conv2D(kernel_size=(1, 1), **config),
layers.Conv2D(kernel_size=(3, 3), **config),
SqueezeExcite(ratio=self.ratio),
layers.Conv2D(kernel_size=(1, 1), filters=self.dimensions, padding="same"),
]
self.trunk_blocks = [keras.Sequential(trunk_block) for _ in range(self.depth)]
self.add = layers.Add()
self.flatten_spatial = layers.Reshape((-1, self.dimensions))
def call(self, x):
# Remember the input.
shortcut = x
for trunk_block in self.trunk_blocks:
output = trunk_block(x)
shortcut = self.add([output, shortcut])
x = shortcut
# Flatten the patches.
x = self.flatten_spatial(x)
return x<jupyter_output><empty_output><jupyter_text>Attention PoolingThe output of the convolutional trunk is attended with a trainable_query_ class token. The resulting attention map is the weight ofevery patch of the image for a classification decision.<jupyter_code>class AttentionPooling(layers.Layer):
"""Applies attention to the patches extracted form the
trunk with the CLS token.
Args:
dimensions: The dimension of the whole architecture.
num_classes: The number of classes in the dataset.
Inputs:
Flattened patches from the trunk.
Outputs:
        The modified CLS token.
"""
def __init__(self, dimensions, num_classes, **kwargs):
super().__init__(**kwargs)
self.dimensions = dimensions
self.num_classes = num_classes
self.cls = keras.Variable(ops.zeros((1, 1, dimensions)))
def get_config(self):
config = super().get_config()
config.update(
{
"dimensions": self.dimensions,
"num_classes": self.num_classes,
"cls": self.cls.numpy(),
}
)
return config
def build(self, input_shape):
self.attention = layers.MultiHeadAttention(
num_heads=1,
key_dim=self.dimensions,
dropout=0.2,
)
self.layer_norm1 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm2 = layers.LayerNormalization(epsilon=1e-6)
self.layer_norm3 = layers.LayerNormalization(epsilon=1e-6)
self.mlp = keras.Sequential(
[
layers.Dense(units=self.dimensions, activation=ops.gelu),
layers.Dropout(0.2),
layers.Dense(units=self.dimensions, activation=ops.gelu),
]
)
self.dense = layers.Dense(units=self.num_classes)
self.flatten = layers.Flatten()
def call(self, x):
batch_size = ops.shape(x)[0]
        # Repeat the class token once for every sample in the batch.
class_token = ops.repeat(self.cls, repeats=batch_size, axis=0)
# Concat the input with the trainable class token.
x = ops.concatenate([class_token, x], axis=1)
# Apply attention to x.
x = self.layer_norm1(x)
x, viz_weights = self.attention(
query=x[:, 0:1], key=x, value=x, return_attention_scores=True
)
class_token = class_token + x
class_token = self.layer_norm2(class_token)
class_token = self.flatten(class_token)
class_token = self.layer_norm3(class_token)
class_token = class_token + self.mlp(class_token)
# Build the logits
logits = self.dense(class_token)
        return logits, ops.squeeze(viz_weights)[..., 1:]<jupyter_output><empty_output><jupyter_text>Patch convnetThe patch-convnet architecture is illustrated in the original paper ([Source](https://arxiv.org/abs/2112.13692)).All the modules in the architecture are built in the earlier sections.In this section, we stack all of the different modules together.<jupyter_code>class PatchConvNet(keras.Model):
def __init__(
self,
stem,
trunk,
attention_pooling,
preprocessing_model,
train_augmentation_model,
**kwargs,
):
super().__init__(**kwargs)
self.stem = stem
self.trunk = trunk
self.attention_pooling = attention_pooling
self.train_augmentation_model = train_augmentation_model
self.preprocessing_model = preprocessing_model
def get_config(self):
config = super().get_config()
config.update(
{
"stem": self.stem,
"trunk": self.trunk,
"attention_pooling": self.attention_pooling,
"train_augmentation_model": self.train_augmentation_model,
"preprocessing_model": self.preprocessing_model,
}
)
return config
def _calculate_loss(self, inputs, test=False):
images, labels = inputs
# Augment the input images.
if test:
augmented_images = self.preprocessing_model(images)
else:
augmented_images = self.train_augmentation_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, _ = self.attention_pooling(x)
# Compute the total loss.
total_loss = self.compiled_loss(labels, logits)
return total_loss, logits
def train_step(self, inputs):
with tf.GradientTape() as tape:
total_loss, logits = self._calculate_loss(inputs)
# Apply gradients.
train_vars = [
self.stem.trainable_variables,
self.trunk.trainable_variables,
self.attention_pooling.trainable_variables,
]
grads = tape.gradient(total_loss, train_vars)
trainable_variable_list = []
for grad, var in zip(grads, train_vars):
for g, v in zip(grad, var):
trainable_variable_list.append((g, v))
self.optimizer.apply_gradients(trainable_variable_list)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, inputs):
total_loss, logits = self._calculate_loss(inputs, test=True)
# Report progress.
_, labels = inputs
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def call(self, images):
# Augment the input images.
augmented_images = self.preprocessing_model(images)
# Pass through the stem.
x = self.stem(augmented_images)
# Pass through the trunk.
x = self.trunk(x)
# Pass through the attention pooling block.
logits, viz_weights = self.attention_pooling(x)
return logits, viz_weights<jupyter_output><empty_output><jupyter_text>CallbacksThis callback will plot the image and the attention map overlayed onthe image.<jupyter_code># Taking a batch of test inputs to measure model's progress.
test_images, test_labels = next(iter(test_ds))
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 4:
test_augmented_images = self.model.preprocessing_model(test_images)
# Pass through the stem.
test_x = self.model.stem(test_augmented_images)
# Pass through the trunk.
test_x = self.model.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = self.model.attention_pooling(test_x)
            # Reshape the visualization weights.
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
# Take a random image and its attention weights.
index = np.random.randint(low=0, high=ops.shape(test_augmented_images)[0])
selected_image = test_augmented_images[index]
selected_weight = test_viz_weights[index]
# Plot the images and the overlayed attention map.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
ax[0].set_title(f"Original: {epoch:03d}")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(
selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent()
)
ax[1].set_title(f"Attended: {epoch:03d}")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()<jupyter_output><empty_output><jupyter_text>Learning rate schedule<jupyter_code>class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = np.pi
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = ops.cos(
self.pi
* (ops.cast(step, "float32") - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * ops.cast(step, "float32") + self.warmup_learning_rate
learning_rate = ops.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return ops.where(
step > self.total_steps,
0.0,
learning_rate,
)
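# The schedule above ramps the learning rate linearly from warmup_learning_rate to
# learning_rate_base over the warmup steps, then follows a half-cosine decay down
# to zero at total_steps (and stays at zero for any later step).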
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)<jupyter_output><empty_output><jupyter_text>TrainingWe build the model, compile it, and train it.<jupyter_code>train_augmentation_model = get_train_augmentation_model()
preprocessing_model = get_preprocessing()
conv_stem = build_convolutional_stem(dimensions=DIMENSIONS)
conv_trunk = Trunk(depth=TRUNK_DEPTH, dimensions=DIMENSIONS, ratio=SE_RATIO)
attention_pooling = AttentionPooling(dimensions=DIMENSIONS, num_classes=NUM_CLASSES)
patch_conv_net = PatchConvNet(
stem=conv_stem,
trunk=conv_trunk,
attention_pooling=attention_pooling,
train_augmentation_model=train_augmentation_model,
preprocessing_model=preprocessing_model,
)
# Assemble the callbacks.
train_callbacks = [TrainMonitor(epoch_interval=5)]
# Get the optimizer.
optimizer = keras.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY
)
# Compile and pretrain the model.
patch_conv_net.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = patch_conv_net.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=train_callbacks,
)
# Evaluate the model with the test dataset.
loss, acc_top1, acc_top5 = patch_conv_net.evaluate(test_ds)
print(f"Loss: {loss:0.2f}")
print(f"Top 1 test accuracy: {acc_top1*100:0.2f}%")
print(f"Top 5 test accuracy: {acc_top5*100:0.2f}%")<jupyter_output><empty_output><jupyter_text>InferenceHere, we use the trained model to plot the attention map.<jupyter_code>def plot_attention(image):
"""Plots the attention map on top of the image.
Args:
image: A numpy image of arbitrary size.
"""
# Resize the image to a (32, 32) dim.
image = ops.image.resize(image, (32, 32))
image = image[np.newaxis, ...]
test_augmented_images = patch_conv_net.preprocessing_model(image)
# Pass through the stem.
test_x = patch_conv_net.stem(test_augmented_images)
# Pass through the trunk.
test_x = patch_conv_net.trunk(test_x)
# Pass through the attention pooling block.
_, test_viz_weights = patch_conv_net.attention_pooling(test_x)
test_viz_weights = test_viz_weights[np.newaxis, ...]
    # Reshape the visualization weights.
num_patches = ops.shape(test_viz_weights)[-1]
height = width = int(math.sqrt(num_patches))
test_viz_weights = layers.Reshape((height, width))(test_viz_weights)
selected_image = test_augmented_images[0]
selected_weight = test_viz_weights[0]
# Plot the images.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(selected_image)
ax[0].set_title(f"Original")
ax[0].axis("off")
img = ax[1].imshow(selected_image)
ax[1].imshow(selected_weight, cmap="inferno", alpha=0.6, extent=img.get_extent())
ax[1].set_title(f"Attended")
ax[1].axis("off")
plt.axis("off")
plt.show()
plt.close()
url = "http://farm9.staticflickr.com/8017/7140384795_385b1f48df_z.jpg"
image_name = keras.utils.get_file(fname="image.jpg", origin=url)
image = keras.utils.load_img(image_name)
image = keras.utils.img_to_array(image)
plot_attention(image)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/patch_convnet.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/patch_convnet.ipynb",
"repo_id": "keras-io",
"token_count": 9230
} | 101 |
<jupyter_start><jupyter_text>Image Super-Resolution using an Efficient Sub-Pixel CNN**Author:** [Xingyu Long](https://github.com/xingyu-long)**Date created:** 2020/07/28**Last modified:** 2020/08/27**Description:** Implementing Super-Resolution using Efficient sub-pixel model on BSDS500. IntroductionESPCN (Efficient Sub-Pixel CNN), proposed by [Shi, 2016](https://arxiv.org/abs/1609.05158)is a model that reconstructs a high-resolution version of an image given a low-resolutionversion.It leverages efficient "sub-pixel convolution" layers, which learn an array ofimage upscaling filters.In this code example, we will implement the model from the paper and train it on a smalldataset,[BSDS500](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html). Setup<jupyter_code>import keras
from keras import layers
from keras import ops
from keras.utils import load_img
from keras.utils import array_to_img
from keras.utils import img_to_array
from keras.preprocessing import image_dataset_from_directory
import tensorflow as tf # only for data preprocessing
import os
import math
import numpy as np
from IPython.display import display<jupyter_output><empty_output><jupyter_text>Load data: BSDS500 dataset Download datasetWe use the built-in `keras.utils.get_file` utility to retrieve the dataset.<jupyter_code>dataset_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
data_dir = keras.utils.get_file(origin=dataset_url, fname="BSR", untar=True)
root_dir = os.path.join(data_dir, "BSDS500/data")<jupyter_output><empty_output><jupyter_text>We create training and validation datasets via `image_dataset_from_directory`.<jupyter_code>crop_size = 300
upscale_factor = 3
input_size = crop_size // upscale_factor
batch_size = 8
train_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="training",
seed=1337,
label_mode=None,
)
valid_ds = image_dataset_from_directory(
root_dir,
batch_size=batch_size,
image_size=(crop_size, crop_size),
validation_split=0.2,
subset="validation",
seed=1337,
label_mode=None,
)<jupyter_output><empty_output><jupyter_text>We rescale the images to take values in the range [0, 1].<jupyter_code>def scaling(input_image):
input_image = input_image / 255.0
return input_image
# Scale from (0, 255) to (0, 1)
train_ds = train_ds.map(scaling)
valid_ds = valid_ds.map(scaling)<jupyter_output><empty_output><jupyter_text>Let's visualize a few sample images:<jupyter_code>for batch in train_ds.take(1):
for img in batch:
display(array_to_img(img))<jupyter_output><empty_output><jupyter_text>We prepare a dataset of test image paths that we will use forvisual evaluation at the end of this example.<jupyter_code>dataset = os.path.join(root_dir, "images")
test_path = os.path.join(dataset, "test")
test_img_paths = sorted(
[
os.path.join(test_path, fname)
for fname in os.listdir(test_path)
if fname.endswith(".jpg")
]
)<jupyter_output><empty_output><jupyter_text>Crop and resize imagesLet's process image data.First, we convert our images from the RGB color space to the[YUV colour space](https://en.wikipedia.org/wiki/YUV).For the input data (low-resolution images),we crop the image, retrieve the `y` channel (luninance),and resize it with the `area` method (use `BICUBIC` if you use PIL).We only consider the luminance channelin the YUV color space because humans are more sensitive toluminance change.For the target data (high-resolution images), we just crop the imageand retrieve the `y` channel.<jupyter_code># Use TF Ops to process.
def process_input(input, input_size, upscale_factor):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return tf.image.resize(y, [input_size, input_size], method="area")
def process_target(input):
input = tf.image.rgb_to_yuv(input)
last_dimension_axis = len(input.shape) - 1
y, u, v = tf.split(input, 3, axis=last_dimension_axis)
return y
train_ds = train_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
train_ds = train_ds.prefetch(buffer_size=32)
valid_ds = valid_ds.map(
lambda x: (process_input(x, input_size, upscale_factor), process_target(x))
)
valid_ds = valid_ds.prefetch(buffer_size=32)<jupyter_output><empty_output><jupyter_text>Let's take a look at the input and target data.<jupyter_code>for batch in train_ds.take(1):
for img in batch[0]:
display(array_to_img(img))
for img in batch[1]:
display(array_to_img(img))<jupyter_output><empty_output><jupyter_text>Build a modelCompared to the paper, we add one more layer and we use the `relu` activation functioninstead of `tanh`.It achieves better performance even though we train the model for fewer epochs.<jupyter_code>class DepthToSpace(layers.Layer):
def __init__(self, block_size):
super().__init__()
self.block_size = block_size
def call(self, input):
batch, height, width, depth = ops.shape(input)
depth = depth // (self.block_size**2)
x = ops.reshape(
input, [batch, height, width, self.block_size, self.block_size, depth]
)
x = ops.transpose(x, [0, 1, 3, 2, 4, 5])
x = ops.reshape(
x, [batch, height * self.block_size, width * self.block_size, depth]
)
return x
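# DepthToSpace implements the "sub-pixel convolution" rearrangement: a tensor of
# shape (batch, H, W, channels * block_size**2) is reshaped into
# (batch, H * block_size, W * block_size, channels), trading channel depth for
# spatial resolution without any extra learned parameters.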
def get_model(upscale_factor=3, channels=1):
conv_args = {
"activation": "relu",
"kernel_initializer": "orthogonal",
"padding": "same",
}
inputs = keras.Input(shape=(None, None, channels))
x = layers.Conv2D(64, 5, **conv_args)(inputs)
x = layers.Conv2D(64, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(channels * (upscale_factor**2), 3, **conv_args)(x)
outputs = DepthToSpace(upscale_factor)(x)
return keras.Model(inputs, outputs)<jupyter_output><empty_output><jupyter_text>Define utility functionsWe need to define several utility functions to monitor our results:- `plot_results` to plot an save an image.- `get_lowres_image` to convert an image to its low-resolution version.- `upscale_image` to turn a low-resolution image toa high-resolution version reconstructed by the model.In this function, we use the `y` channel from the YUV color spaceas input to the model and then combine the output with theother channels to obtain an RGB image.<jupyter_code>import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import PIL
def plot_results(img, prefix, title):
"""Plot the result with zoom-in area."""
img_array = img_to_array(img)
img_array = img_array.astype("float32") / 255.0
# Create a new figure with a default 111 subplot.
fig, ax = plt.subplots()
im = ax.imshow(img_array[::-1], origin="lower")
plt.title(title)
# zoom-factor: 2.0, location: upper-left
axins = zoomed_inset_axes(ax, 2, loc=2)
axins.imshow(img_array[::-1], origin="lower")
# Specify the limits.
x1, x2, y1, y2 = 200, 300, 100, 200
# Apply the x-limits.
axins.set_xlim(x1, x2)
# Apply the y-limits.
axins.set_ylim(y1, y2)
plt.yticks(visible=False)
plt.xticks(visible=False)
# Make the line.
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="blue")
plt.savefig(str(prefix) + "-" + title + ".png")
plt.show()
def get_lowres_image(img, upscale_factor):
"""Return low-resolution image to use as model input."""
return img.resize(
(img.size[0] // upscale_factor, img.size[1] // upscale_factor),
PIL.Image.BICUBIC,
)
def upscale_image(model, img):
"""Predict the result based on input image and restore the image as RGB."""
ycbcr = img.convert("YCbCr")
y, cb, cr = ycbcr.split()
y = img_to_array(y)
y = y.astype("float32") / 255.0
input = np.expand_dims(y, axis=0)
out = model.predict(input)
out_img_y = out[0]
out_img_y *= 255.0
# Restore the image in RGB color space.
out_img_y = out_img_y.clip(0, 255)
out_img_y = out_img_y.reshape((np.shape(out_img_y)[0], np.shape(out_img_y)[1]))
out_img_y = PIL.Image.fromarray(np.uint8(out_img_y), mode="L")
out_img_cb = cb.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, PIL.Image.BICUBIC)
out_img = PIL.Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert(
"RGB"
)
return out_img<jupyter_output><empty_output><jupyter_text>Define callbacks to monitor trainingThe `ESPCNCallback` object will compute and displaythe [PSNR](https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio) metric.This is the main metric we use to evaluate super-resolution performance.<jupyter_code>class ESPCNCallback(keras.callbacks.Callback):
def __init__(self):
super().__init__()
self.test_img = get_lowres_image(load_img(test_img_paths[0]), upscale_factor)
# Store PSNR value in each epoch.
def on_epoch_begin(self, epoch, logs=None):
self.psnr = []
def on_epoch_end(self, epoch, logs=None):
print("Mean PSNR for epoch: %.2f" % (np.mean(self.psnr)))
if epoch % 20 == 0:
prediction = upscale_image(self.model, self.test_img)
plot_results(prediction, "epoch-" + str(epoch), "prediction")
def on_test_batch_end(self, batch, logs=None):
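        # PSNR = 10 * log10(MAX_I**2 / MSE); pixel values are scaled to [0, 1],
        # so MAX_I = 1 and the validation MSE loss can be used directly.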
self.psnr.append(10 * math.log10(1 / logs["loss"]))<jupyter_output><empty_output><jupyter_text>Define `ModelCheckpoint` and `EarlyStopping` callbacks.<jupyter_code>early_stopping_callback = keras.callbacks.EarlyStopping(monitor="loss", patience=10)
checkpoint_filepath = "/tmp/checkpoint.keras"
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=False,
monitor="loss",
mode="min",
save_best_only=True,
)
model = get_model(upscale_factor=upscale_factor, channels=1)
model.summary()
callbacks = [ESPCNCallback(), early_stopping_callback, model_checkpoint_callback]
loss_fn = keras.losses.MeanSquaredError()
optimizer = keras.optimizers.Adam(learning_rate=0.001)<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>epochs = 100
model.compile(
optimizer=optimizer,
loss=loss_fn,
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=valid_ds, verbose=2
)
# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)<jupyter_output><empty_output><jupyter_text>Run model prediction and plot the resultsLet's compute the reconstructed version of a few images and save the results.<jupyter_code>total_bicubic_psnr = 0.0
total_test_psnr = 0.0
for index, test_img_path in enumerate(test_img_paths[50:60]):
img = load_img(test_img_path)
lowres_input = get_lowres_image(img, upscale_factor)
w = lowres_input.size[0] * upscale_factor
h = lowres_input.size[1] * upscale_factor
highres_img = img.resize((w, h))
prediction = upscale_image(model, lowres_input)
lowres_img = lowres_input.resize((w, h))
lowres_img_arr = img_to_array(lowres_img)
highres_img_arr = img_to_array(highres_img)
predict_img_arr = img_to_array(prediction)
bicubic_psnr = tf.image.psnr(lowres_img_arr, highres_img_arr, max_val=255)
test_psnr = tf.image.psnr(predict_img_arr, highres_img_arr, max_val=255)
total_bicubic_psnr += bicubic_psnr
total_test_psnr += test_psnr
print(
"PSNR of low resolution image and high resolution image is %.4f" % bicubic_psnr
)
print("PSNR of predict and high resolution is %.4f" % test_psnr)
plot_results(lowres_img, index, "lowres")
plot_results(highres_img, index, "highres")
plot_results(prediction, index, "prediction")
print("Avg. PSNR of lowres images is %.4f" % (total_bicubic_psnr / 10))
print("Avg. PSNR of reconstructions is %.4f" % (total_test_psnr / 10))<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/super_resolution_sub_pixel.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/super_resolution_sub_pixel.ipynb",
"repo_id": "keras-io",
"token_count": 4788
} | 102 |
"""
Title: Masked image modeling with Autoencoders
Author: [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/12/20
Last modified: 2021/12/21
Description: Implementing Masked Autoencoders for self-supervised pretraining.
Accelerator: GPU
"""
"""
## Introduction
In deep learning, models with growing **capacity** and **capability** can easily overfit
on large datasets (ImageNet-1K). In the field of natural language processing, the
appetite for data has been **successfully addressed** by self-supervised pretraining.
In the academic paper
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
by He et al., the authors propose a simple yet effective method to pretrain large
vision models (here [ViT Huge](https://arxiv.org/abs/2010.11929)). Inspired by
the pretraining algorithm of BERT ([Devlin et al.](https://arxiv.org/abs/1810.04805)),
they mask patches of an image and, through an autoencoder, predict the masked patches.
In the spirit of "masked language modeling", this pretraining task could be referred
to as "masked image modeling".
In this example, we implement
[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)
with the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. After
pretraining a scaled down version of ViT, we also implement the linear evaluation
pipeline on CIFAR-10.
This implementation covers (MAE refers to Masked Autoencoder):
- The masking algorithm
- MAE encoder
- MAE decoder
- Evaluation with linear probing
As a reference, we reuse some of the code presented in
[this example](https://keras.io/examples/vision/image_classification_with_vision_transformer/).
"""
"""
## Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
import matplotlib.pyplot as plt
import numpy as np
import random
# Setting seeds for reproducibility.
SEED = 42
keras.utils.set_random_seed(SEED)
"""
## Hyperparameters for pretraining
Please feel free to change the hyperparameters and check your results. The best way to
get an intuition about the architecture is to experiment with it. Our hyperparameters are
heavily inspired by the design guidelines laid out by the authors in
[the original paper](https://arxiv.org/abs/2111.06377).
"""
# DATA
BUFFER_SIZE = 1024
BATCH_SIZE = 256
AUTO = tf.data.AUTOTUNE
INPUT_SHAPE = (32, 32, 3)
NUM_CLASSES = 10
# OPTIMIZER
LEARNING_RATE = 5e-3
WEIGHT_DECAY = 1e-4
# PRETRAINING
EPOCHS = 100
# AUGMENTATION
IMAGE_SIZE = 48 # We will resize input images to this size.
PATCH_SIZE = 6 # Size of the patches to be extracted from the input images.
NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2
MASK_PROPORTION = 0.75 # We have found 75% masking to give us the best results.
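# With IMAGE_SIZE = 48 and PATCH_SIZE = 6, NUM_PATCHES = (48 // 6) ** 2 = 64,
# so a 75% mask proportion hides 48 patches and leaves 16 visible to the encoder.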
# ENCODER and DECODER
LAYER_NORM_EPS = 1e-6
ENC_PROJECTION_DIM = 128
DEC_PROJECTION_DIM = 64
ENC_NUM_HEADS = 4
ENC_LAYERS = 6
DEC_NUM_HEADS = 4
DEC_LAYERS = (
2 # The decoder is lightweight but should be reasonably deep for reconstruction.
)
ENC_TRANSFORMER_UNITS = [
ENC_PROJECTION_DIM * 2,
ENC_PROJECTION_DIM,
] # Size of the transformer layers.
DEC_TRANSFORMER_UNITS = [
DEC_PROJECTION_DIM * 2,
DEC_PROJECTION_DIM,
]
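# Note the deliberate asymmetry: the encoder (128-dim, 6 layers) is larger than the
# decoder (64-dim, 2 layers). A lightweight decoder is one of the key MAE design
# choices, since the decoder is only needed during pretraining.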
"""
## Load and prepare the CIFAR-10 dataset
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
train_ds = tf.data.Dataset.from_tensor_slices(x_train)
train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO)
test_ds = tf.data.Dataset.from_tensor_slices(x_test)
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
"""
## Data augmentation
In previous self-supervised pretraining methodologies
(such as [SimCLR](https://arxiv.org/abs/2002.05709)), we have noticed that the data
augmentation pipeline plays an important role. On the other hand, the authors of this
paper point out that Masked Autoencoders **do not** rely on augmentations. They propose a
simple augmentation pipeline of:
- Resizing
- Random cropping (fixed-sized or random sized)
- Random horizontal flipping
"""
def get_train_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
layers.RandomFlip("horizontal"),
],
name="train_data_augmentation",
)
return model
def get_test_augmentation_model():
model = keras.Sequential(
[
layers.Rescaling(1 / 255.0),
layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
],
name="test_data_augmentation",
)
return model
"""
## A layer for extracting patches from images
This layer takes images as input and divides them into patches. The layer also includes
two utility methods:
- `show_patched_image` -- Takes a batch of images and its corresponding patches to plot a
random pair of image and patches.
- `reconstruct_from_patch` -- Takes a single instance of patches and stitches them
together into the original image.
"""
class Patches(layers.Layer):
def __init__(self, patch_size=PATCH_SIZE, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
# Assuming the image has three channels each patch would be
# of size (patch_size, patch_size, 3).
self.resize = layers.Reshape((-1, patch_size * patch_size * 3))
def call(self, images):
# Create patches from the input images
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
# Reshape the patches to (batch, num_patches, patch_area) and return it.
patches = self.resize(patches)
return patches
def show_patched_image(self, images, patches):
# This is a utility function which accepts a batch of images and its
        # corresponding patches and helps visualize one image and its patches
# side by side.
idx = np.random.choice(patches.shape[0])
print(f"Index selected: {idx}.")
plt.figure(figsize=(4, 4))
plt.imshow(keras.utils.array_to_img(images[idx]))
plt.axis("off")
plt.show()
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[idx]):
ax = plt.subplot(n, n, i + 1)
patch_img = tf.reshape(patch, (self.patch_size, self.patch_size, 3))
plt.imshow(keras.utils.img_to_array(patch_img))
plt.axis("off")
plt.show()
# Return the index chosen to validate it outside the method.
return idx
# taken from https://stackoverflow.com/a/58082878/10319735
def reconstruct_from_patch(self, patch):
# This utility function takes patches from a *single* image and
# reconstructs it back into the image. This is useful for the train
# monitor callback.
num_patches = patch.shape[0]
n = int(np.sqrt(num_patches))
patch = tf.reshape(patch, (num_patches, self.patch_size, self.patch_size, 3))
rows = tf.split(patch, n, axis=0)
rows = [tf.concat(tf.unstack(x), axis=1) for x in rows]
reconstructed = tf.concat(rows, axis=0)
return reconstructed
"""
Let's visualize the image patches.
"""
# Get a batch of images.
image_batch = next(iter(train_ds))
# Augment the images.
augmentation_model = get_train_augmentation_model()
augmented_images = augmentation_model(image_batch)
# Define the patch layer.
patch_layer = Patches()
# Get the patches from the batched images.
patches = patch_layer(images=augmented_images)
# Now pass the images and the corresponding patches
# to the `show_patched_image` method.
random_index = patch_layer.show_patched_image(images=augmented_images, patches=patches)
# Choose the same image and try reconstructing the patches
# into the original image.
image = patch_layer.reconstruct_from_patch(patches[random_index])
plt.imshow(image)
plt.axis("off")
plt.show()
"""
## Patch encoding with masking
Quoting the paper
> Following ViT, we divide an image into regular non-overlapping patches. Then we sample
a subset of patches and mask (i.e., remove) the remaining ones. Our sampling strategy is
straightforward: we sample random patches without replacement, following a uniform
distribution. We simply refer to this as “random sampling”.
This layer includes masking and encoding the patches.
The utility methods of the layer are:
- `get_random_indices` -- Provides the mask and unmask indices.
- `generate_masked_image` -- Takes patches and unmask indices, and returns a random masked
image. This is an essential utility method for our training monitor callback (defined
later).
"""
class PatchEncoder(layers.Layer):
def __init__(
self,
patch_size=PATCH_SIZE,
projection_dim=ENC_PROJECTION_DIM,
mask_proportion=MASK_PROPORTION,
downstream=False,
**kwargs,
):
super().__init__(**kwargs)
self.patch_size = patch_size
self.projection_dim = projection_dim
self.mask_proportion = mask_proportion
self.downstream = downstream
# This is a trainable mask token initialized randomly from a normal
# distribution.
self.mask_token = tf.Variable(
tf.random.normal([1, patch_size * patch_size * 3]), trainable=True
)
def build(self, input_shape):
(_, self.num_patches, self.patch_area) = input_shape
# Create the projection layer for the patches.
self.projection = layers.Dense(units=self.projection_dim)
# Create the positional embedding layer.
self.position_embedding = layers.Embedding(
input_dim=self.num_patches, output_dim=self.projection_dim
)
# Number of patches that will be masked.
self.num_mask = int(self.mask_proportion * self.num_patches)
def call(self, patches):
# Get the positional embeddings.
batch_size = tf.shape(patches)[0]
positions = tf.range(start=0, limit=self.num_patches, delta=1)
pos_embeddings = self.position_embedding(positions[tf.newaxis, ...])
pos_embeddings = tf.tile(
pos_embeddings, [batch_size, 1, 1]
) # (B, num_patches, projection_dim)
# Embed the patches.
patch_embeddings = (
self.projection(patches) + pos_embeddings
) # (B, num_patches, projection_dim)
if self.downstream:
return patch_embeddings
else:
mask_indices, unmask_indices = self.get_random_indices(batch_size)
# The encoder input is the unmasked patch embeddings. Here we gather
# all the patches that should be unmasked.
unmasked_embeddings = tf.gather(
patch_embeddings, unmask_indices, axis=1, batch_dims=1
) # (B, unmask_numbers, projection_dim)
# Get the unmasked and masked position embeddings. We will need them
# for the decoder.
unmasked_positions = tf.gather(
pos_embeddings, unmask_indices, axis=1, batch_dims=1
) # (B, unmask_numbers, projection_dim)
masked_positions = tf.gather(
pos_embeddings, mask_indices, axis=1, batch_dims=1
) # (B, mask_numbers, projection_dim)
# Repeat the mask token number of mask times.
# Mask tokens replace the masks of the image.
mask_tokens = tf.repeat(self.mask_token, repeats=self.num_mask, axis=0)
mask_tokens = tf.repeat(
mask_tokens[tf.newaxis, ...], repeats=batch_size, axis=0
)
# Get the masked embeddings for the tokens.
masked_embeddings = self.projection(mask_tokens) + masked_positions
return (
unmasked_embeddings, # Input to the encoder.
masked_embeddings, # First part of input to the decoder.
unmasked_positions, # Added to the encoder outputs.
mask_indices, # The indices that were masked.
                unmask_indices,  # The indices that were unmasked.
)
def get_random_indices(self, batch_size):
# Create random indices from a uniform distribution and then split
# it into mask and unmask indices.
rand_indices = tf.argsort(
tf.random.uniform(shape=(batch_size, self.num_patches)), axis=-1
)
mask_indices = rand_indices[:, : self.num_mask]
unmask_indices = rand_indices[:, self.num_mask :]
return mask_indices, unmask_indices
def generate_masked_image(self, patches, unmask_indices):
        # Choose a random patch and its corresponding unmask index.
idx = np.random.choice(patches.shape[0])
patch = patches[idx]
unmask_index = unmask_indices[idx]
# Build a numpy array of same shape as patch.
new_patch = np.zeros_like(patch)
        # Iterate over the new_patch and plug in the unmasked patches.
count = 0
for i in range(unmask_index.shape[0]):
new_patch[unmask_index[i]] = patch[unmask_index[i]]
return new_patch, idx
"""
Let's see the masking process in action on a sample image.
"""
# Create the patch encoder layer.
patch_encoder = PatchEncoder()
# Get the embeddings and positions.
(
unmasked_embeddings,
masked_embeddings,
unmasked_positions,
mask_indices,
unmask_indices,
) = patch_encoder(patches=patches)
# Show a masked patch image.
new_patch, random_index = patch_encoder.generate_masked_image(patches, unmask_indices)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
img = patch_layer.reconstruct_from_patch(new_patch)
plt.imshow(keras.utils.array_to_img(img))
plt.axis("off")
plt.title("Masked")
plt.subplot(1, 2, 2)
img = augmented_images[random_index]
plt.imshow(keras.utils.array_to_img(img))
plt.axis("off")
plt.title("Original")
plt.show()
"""
## MLP
This serves as the fully connected feed forward network of the transformer architecture.
"""
def mlp(x, dropout_rate, hidden_units):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
"""
## MAE encoder
The MAE encoder is ViT. The only point to note here is that the encoder outputs a layer
normalized output.
"""
def create_encoder(num_heads=ENC_NUM_HEADS, num_layers=ENC_LAYERS):
inputs = layers.Input((None, ENC_PROJECTION_DIM))
x = inputs
for _ in range(num_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=ENC_PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, x])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP.
x3 = mlp(x3, hidden_units=ENC_TRANSFORMER_UNITS, dropout_rate=0.1)
# Skip connection 2.
x = layers.Add()([x3, x2])
outputs = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
return keras.Model(inputs, outputs, name="mae_encoder")
"""
## MAE decoder
The authors point out that they use an **asymmetric** autoencoder model. They use a
lightweight decoder that takes "<10% computation per token vs. the encoder". We are not
specific with the "<10% computation" in our implementation but have used a smaller
decoder (both in terms of depth and projection dimensions).
"""
def create_decoder(
num_layers=DEC_LAYERS, num_heads=DEC_NUM_HEADS, image_size=IMAGE_SIZE
):
inputs = layers.Input((NUM_PATCHES, ENC_PROJECTION_DIM))
x = layers.Dense(DEC_PROJECTION_DIM)(inputs)
for _ in range(num_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=DEC_PROJECTION_DIM, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, x])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2)
# MLP.
x3 = mlp(x3, hidden_units=DEC_TRANSFORMER_UNITS, dropout_rate=0.1)
# Skip connection 2.
x = layers.Add()([x3, x2])
x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x)
x = layers.Flatten()(x)
pre_final = layers.Dense(units=image_size * image_size * 3, activation="sigmoid")(x)
outputs = layers.Reshape((image_size, image_size, 3))(pre_final)
return keras.Model(inputs, outputs, name="mae_decoder")
"""
## MAE trainer
This is the trainer module. We wrap the encoder and decoder inside of a `tf.keras.Model`
subclass. This allows us to customize what happens in the `model.fit()` loop.
"""
class MaskedAutoencoder(keras.Model):
def __init__(
self,
train_augmentation_model,
test_augmentation_model,
patch_layer,
patch_encoder,
encoder,
decoder,
**kwargs,
):
super().__init__(**kwargs)
self.train_augmentation_model = train_augmentation_model
self.test_augmentation_model = test_augmentation_model
self.patch_layer = patch_layer
self.patch_encoder = patch_encoder
self.encoder = encoder
self.decoder = decoder
def calculate_loss(self, images, test=False):
# Augment the input images.
if test:
augmented_images = self.test_augmentation_model(images)
else:
augmented_images = self.train_augmentation_model(images)
# Patch the augmented images.
patches = self.patch_layer(augmented_images)
# Encode the patches.
(
unmasked_embeddings,
masked_embeddings,
unmasked_positions,
mask_indices,
unmask_indices,
) = self.patch_encoder(patches)
        # Pass the unmasked patches to the encoder.
encoder_outputs = self.encoder(unmasked_embeddings)
# Create the decoder inputs.
encoder_outputs = encoder_outputs + unmasked_positions
decoder_inputs = tf.concat([encoder_outputs, masked_embeddings], axis=1)
# Decode the inputs.
decoder_outputs = self.decoder(decoder_inputs)
decoder_patches = self.patch_layer(decoder_outputs)
loss_patch = tf.gather(patches, mask_indices, axis=1, batch_dims=1)
loss_output = tf.gather(decoder_patches, mask_indices, axis=1, batch_dims=1)
# Compute the total loss.
total_loss = self.compute_loss(y=loss_patch, y_pred=loss_output)
return total_loss, loss_patch, loss_output
def train_step(self, images):
with tf.GradientTape() as tape:
total_loss, loss_patch, loss_output = self.calculate_loss(images)
# Apply gradients.
train_vars = [
self.train_augmentation_model.trainable_variables,
self.patch_layer.trainable_variables,
self.patch_encoder.trainable_variables,
self.encoder.trainable_variables,
self.decoder.trainable_variables,
]
grads = tape.gradient(total_loss, train_vars)
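        # `grads` and `train_vars` are nested lists (one sub-list per
        # sub-model), so we flatten them into (gradient, variable) pairs
        # before applying them.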
tv_list = []
for grad, var in zip(grads, train_vars):
for g, v in zip(grad, var):
tv_list.append((g, v))
self.optimizer.apply_gradients(tv_list)
# Report progress.
results = {}
for metric in self.metrics:
metric.update_state(loss_patch, loss_output)
results[metric.name] = metric.result()
return results
def test_step(self, images):
total_loss, loss_patch, loss_output = self.calculate_loss(images, test=True)
# Update the trackers.
results = {}
for metric in self.metrics:
metric.update_state(loss_patch, loss_output)
results[metric.name] = metric.result()
return results
"""
## Model initialization
"""
train_augmentation_model = get_train_augmentation_model()
test_augmentation_model = get_test_augmentation_model()
patch_layer = Patches()
patch_encoder = PatchEncoder()
encoder = create_encoder()
decoder = create_decoder()
mae_model = MaskedAutoencoder(
train_augmentation_model=train_augmentation_model,
test_augmentation_model=test_augmentation_model,
patch_layer=patch_layer,
patch_encoder=patch_encoder,
encoder=encoder,
decoder=decoder,
)
"""
## Training callbacks
"""
"""
### Visualization callback
"""
# Taking a batch of test inputs to measure model's progress.
test_images = next(iter(test_ds))
class TrainMonitor(keras.callbacks.Callback):
def __init__(self, epoch_interval=None):
self.epoch_interval = epoch_interval
def on_epoch_end(self, epoch, logs=None):
if self.epoch_interval and epoch % self.epoch_interval == 0:
test_augmented_images = self.model.test_augmentation_model(test_images)
test_patches = self.model.patch_layer(test_augmented_images)
(
test_unmasked_embeddings,
test_masked_embeddings,
test_unmasked_positions,
test_mask_indices,
test_unmask_indices,
) = self.model.patch_encoder(test_patches)
test_encoder_outputs = self.model.encoder(test_unmasked_embeddings)
test_encoder_outputs = test_encoder_outputs + test_unmasked_positions
test_decoder_inputs = tf.concat(
[test_encoder_outputs, test_masked_embeddings], axis=1
)
test_decoder_outputs = self.model.decoder(test_decoder_inputs)
            # Show a masked patch image.
test_masked_patch, idx = self.model.patch_encoder.generate_masked_image(
test_patches, test_unmask_indices
)
print(f"\nIdx chosen: {idx}")
original_image = test_augmented_images[idx]
masked_image = self.model.patch_layer.reconstruct_from_patch(
test_masked_patch
)
reconstructed_image = test_decoder_outputs[idx]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5))
ax[0].imshow(original_image)
ax[0].set_title(f"Original: {epoch:03d}")
ax[1].imshow(masked_image)
ax[1].set_title(f"Masked: {epoch:03d}")
ax[2].imshow(reconstructed_image)
ax[2].set_title(f"Resonstructed: {epoch:03d}")
plt.show()
plt.close()
"""
### Learning rate scheduler
"""
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
def __init__(
self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps
):
super().__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
if self.total_steps < self.warmup_steps:
raise ValueError("Total_steps must be larger or equal to warmup_steps.")
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ float(self.total_steps - self.warmup_steps)
)
learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr)
if self.warmup_steps > 0:
if self.learning_rate_base < self.warmup_learning_rate:
raise ValueError(
"Learning_rate_base must be larger or equal to "
"warmup_learning_rate."
)
slope = (
self.learning_rate_base - self.warmup_learning_rate
) / self.warmup_steps
warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS)
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=LEARNING_RATE,
total_steps=total_steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
lrs = [scheduled_lrs(step) for step in range(total_steps)]
plt.plot(lrs)
plt.xlabel("Step", fontsize=14)
plt.ylabel("LR", fontsize=14)
plt.show()
# Assemble the callbacks.
train_callbacks = [TrainMonitor(epoch_interval=5)]
"""
## Model compilation and training
"""
optimizer = keras.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY
)
# Compile and pretrain the model.
mae_model.compile(
optimizer=optimizer, loss=keras.losses.MeanSquaredError(), metrics=["mae"]
)
history = mae_model.fit(
train_ds,
epochs=EPOCHS,
validation_data=val_ds,
callbacks=train_callbacks,
)
# Measure its performance.
loss, mae = mae_model.evaluate(test_ds)
print(f"Loss: {loss:.2f}")
print(f"MAE: {mae:.2f}")
"""
## Evaluation with linear probing
"""
"""
### Extract the encoder model along with other layers
"""
# Extract the augmentation layers.
train_augmentation_model = mae_model.train_augmentation_model
test_augmentation_model = mae_model.test_augmentation_model
# Extract the patchers.
patch_layer = mae_model.patch_layer
patch_encoder = mae_model.patch_encoder
patch_encoder.downstream = True  # Switch the downstream flag to True.
# Extract the encoder.
encoder = mae_model.encoder
# Pack as a model.
downstream_model = keras.Sequential(
[
layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
patch_layer,
patch_encoder,
encoder,
layers.BatchNormalization(), # Refer to A.1 (Linear probing).
layers.GlobalAveragePooling1D(),
layers.Dense(NUM_CLASSES, activation="softmax"),
],
name="linear_probe_model",
)
# Only the final classification layer of the `downstream_model` should be trainable.
for layer in downstream_model.layers[:-1]:
layer.trainable = False
downstream_model.summary()
"""
We are using average pooling to extract learned representations from the MAE encoder.
Another approach would be to use a learnable dummy token inside the encoder during
pretraining (resembling the [CLS] token). Then we can extract representations from that
token during the downstream tasks.
"""
"""
### Prepare datasets for linear probing
"""
def prepare_data(images, labels, is_train=True):
if is_train:
augmentation_model = train_augmentation_model
else:
augmentation_model = test_augmentation_model
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
if is_train:
dataset = dataset.shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE).map(
lambda x, y: (augmentation_model(x), y), num_parallel_calls=AUTO
)
return dataset.prefetch(AUTO)
train_ds = prepare_data(x_train, y_train)
val_ds = prepare_data(x_val, y_val, is_train=False)
test_ds = prepare_data(x_test, y_test, is_train=False)
"""
### Perform linear probing
"""
linear_probe_epochs = 50
linear_prob_lr = 0.1
warm_epoch_percentage = 0.1
steps = int((len(x_train) // BATCH_SIZE) * linear_probe_epochs)
warmup_steps = int(steps * warm_epoch_percentage)
scheduled_lrs = WarmUpCosine(
learning_rate_base=linear_prob_lr,
total_steps=steps,
warmup_learning_rate=0.0,
warmup_steps=warmup_steps,
)
optimizer = keras.optimizers.SGD(learning_rate=scheduled_lrs, momentum=0.9)
downstream_model.compile(
optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
downstream_model.fit(train_ds, validation_data=val_ds, epochs=linear_probe_epochs)
loss, accuracy = downstream_model.evaluate(test_ds)
accuracy = round(accuracy * 100, 2)
print(f"Accuracy on the test set: {accuracy}%.")
"""
We believe that with a more sophisticated hyperparameter tuning process and a longer
pretraining it is possible to improve this performance further. For comparison, we took
the encoder architecture and
[trained it from scratch](https://github.com/ariG23498/mae-scalable-vision-learners/blob/master/regular-classification.ipynb)
in a fully supervised manner. This gave us ~76% test top-1 accuracy. The authors of
MAE demonstrate strong performance on the ImageNet-1k dataset as well as
other downstream tasks like object detection and semantic segmentation.
"""
"""
## Final notes
We refer the interested readers to other examples on self-supervised learning present on
keras.io:
* [SimCLR](https://keras.io/examples/vision/semisupervised_simclr/)
* [NNCLR](https://keras.io/examples/vision/nnclr)
* [SimSiam](https://keras.io/examples/vision/simsiam)
This idea of using BERT flavored pretraining in computer vision was also explored in
[Selfie](https://arxiv.org/abs/1906.02940), but it could not demonstrate strong results.
Another concurrent work that explores the idea of masked image modeling is
[SimMIM](https://arxiv.org/abs/2111.09886). Finally, as a fun fact, we, the authors of
this example also explored the idea of ["reconstruction as a pretext task"](https://i.ibb.co/k5CpwDX/image.png)
in 2020 but we could not prevent the network from representation collapse, and
hence we did not get strong downstream performance.
We would like to thank [Xinlei Chen](http://xinleic.xyz/)
(one of the authors of MAE) for helpful discussions. We are grateful to
[JarvisLabs](https://jarvislabs.ai/) and
[Google Developers Experts](https://developers.google.com/programs/experts/)
program for helping with GPU credits.
"""
| keras-io/examples/vision/masked_image_modeling.py/0 | {
"file_path": "keras-io/examples/vision/masked_image_modeling.py",
"repo_id": "keras-io",
"token_count": 12553
} | 103 |
# Image similarity estimation using a Siamese Network with a contrastive loss
**Author:** Mehdi<br>
**Date created:** 2021/05/06<br>
**Last modified:** 2022/09/10<br>
**Description:** Similarity learning using a siamese network trained with a contrastive loss.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/siamese_contrastive.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/siamese_contrastive.py)
---
## Introduction
[Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network)
are neural networks which share weights between two or more sister networks,
each producing embedding vectors of its respective inputs.
In supervised similarity learning, the networks are then trained to maximize the
contrast (distance) between embeddings of inputs of different classes, while minimizing the distance between
embeddings of inputs of the same class, resulting in embedding spaces that reflect
the class segmentation of the training inputs.
---
## Setup
```python
import random
import numpy as np
import keras
from keras import ops
import matplotlib.pyplot as plt
```
---
## Hyperparameters
```python
epochs = 10
batch_size = 16
margin = 1 # Margin for contrastive loss.
```
---
## Load the MNIST dataset
```python
(x_train_val, y_train_val), (x_test, y_test) = keras.datasets.mnist.load_data()
# Change the data type to a floating point format
x_train_val = x_train_val.astype("float32")
x_test = x_test.astype("float32")
```
---
## Define training and validation sets
```python
# Keep 50% of train_val in validation set
x_train, x_val = x_train_val[:30000], x_train_val[30000:]
y_train, y_val = y_train_val[:30000], y_train_val[30000:]
del x_train_val, y_train_val
```
---
## Create pairs of images
We will train the model to differentiate between digits of different classes. For
example, digit `0` needs to be differentiated from the rest of the
digits (`1` through `9`), digit `1` - from `0` and `2` through `9`, and so on.
To carry this out, we will select N random images from class A (for example,
for digit `0`) and pair them with N random images from another class B
(for example, for digit `1`). Then, we can repeat this process for all classes
of digits (until digit `9`). Once we have paired digit `0` with other digits,
we can repeat this process for the remaining classes for the rest of the digits
(from `1` until `9`).
```python
def make_pairs(x, y):
"""Creates a tuple containing image pairs with corresponding label.
Arguments:
x: List containing images, each index in this list corresponds to one image.
y: List containing labels, each label with datatype of `int`.
Returns:
Tuple containing two numpy arrays as (pairs_of_samples, labels),
        where pairs_of_samples' shape is (2 * len(x), 2, n_features_dims) and
        labels are a binary array of shape (2 * len(x),).
"""
num_classes = max(y) + 1
digit_indices = [np.where(y == i)[0] for i in range(num_classes)]
pairs = []
labels = []
for idx1 in range(len(x)):
# add a matching example
x1 = x[idx1]
label1 = y[idx1]
idx2 = random.choice(digit_indices[label1])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [0]
# add a non-matching example
label2 = random.randint(0, num_classes - 1)
while label2 == label1:
label2 = random.randint(0, num_classes - 1)
idx2 = random.choice(digit_indices[label2])
x2 = x[idx2]
pairs += [[x1, x2]]
labels += [1]
return np.array(pairs), np.array(labels).astype("float32")
# make train pairs
pairs_train, labels_train = make_pairs(x_train, y_train)
# make validation pairs
pairs_val, labels_val = make_pairs(x_val, y_val)
# make test pairs
pairs_test, labels_test = make_pairs(x_test, y_test)
```
We get:
**pairs_train.shape = (60000, 2, 28, 28)**
- We have 60,000 pairs
- Each pair contains 2 images
- Each image has shape `(28, 28)`
Split the training pairs
```python
x_train_1 = pairs_train[:, 0] # x_train_1.shape is (60000, 28, 28)
x_train_2 = pairs_train[:, 1]
```
Split the validation pairs
```python
x_val_1 = pairs_val[:, 0] # x_val_1.shape = (60000, 28, 28)
x_val_2 = pairs_val[:, 1]
```
Split the test pairs
```python
x_test_1 = pairs_test[:, 0] # x_test_1.shape = (20000, 28, 28)
x_test_2 = pairs_test[:, 1]
```
---
## Visualize pairs and their labels
```python
def visualize(pairs, labels, to_show=6, num_col=3, predictions=None, test=False):
"""Creates a plot of pairs and labels, and prediction if it's test dataset.
Arguments:
pairs: Numpy Array, of pairs to visualize, having shape
(Number of pairs, 2, 28, 28).
to_show: Int, number of examples to visualize (default is 6)
`to_show` must be an integral multiple of `num_col`.
Otherwise it will be trimmed if it is greater than num_col,
            and incremented if it is less than num_col.
num_col: Int, number of images in one row - (default is 3)
For test and train respectively, it should not exceed 3 and 7.
predictions: Numpy Array of predictions with shape (to_show, 1) -
(default is None)
Must be passed when test=True.
test: Boolean telling whether the dataset being visualized is
train dataset or test dataset - (default False).
Returns:
None.
"""
# Define num_row
# If to_show % num_col != 0
# trim to_show,
# to trim to_show limit num_row to the point where
# to_show % num_col == 0
#
# If to_show//num_col == 0
    # then it means num_col is greater than to_show
# increment to_show
# to increment to_show set num_row to 1
num_row = to_show // num_col if to_show // num_col != 0 else 1
# `to_show` must be an integral multiple of `num_col`
# we found num_row and we have num_col
# to increment or decrement to_show
# to make it integral multiple of `num_col`
# simply set it equal to num_row * num_col
to_show = num_row * num_col
# Plot the images
fig, axes = plt.subplots(num_row, num_col, figsize=(5, 5))
for i in range(to_show):
# If the number of rows is 1, the axes array is one-dimensional
if num_row == 1:
ax = axes[i % num_col]
else:
ax = axes[i // num_col, i % num_col]
ax.imshow(ops.concatenate([pairs[i][0], pairs[i][1]], axis=1), cmap="gray")
ax.set_axis_off()
if test:
ax.set_title("True: {} | Pred: {:.5f}".format(labels[i], predictions[i][0]))
else:
ax.set_title("Label: {}".format(labels[i]))
if test:
plt.tight_layout(rect=(0, 0, 1.9, 1.9), w_pad=0.0)
else:
plt.tight_layout(rect=(0, 0, 1.5, 1.5))
plt.show()
```
Inspect training pairs
```python
visualize(pairs_train[:-1], labels_train[:-1], to_show=4, num_col=4)
```

Inspect validation pairs
```python
visualize(pairs_val[:-1], labels_val[:-1], to_show=4, num_col=4)
```

Inspect test pairs
```python
visualize(pairs_test[:-1], labels_test[:-1], to_show=4, num_col=4)
```

---
## Define the model
There are two input layers, each leading to its own network, which
produces embeddings. A `Lambda` layer then merges them using an
[Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) and the
merged output is fed to the final network.
```python
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
"""Find the Euclidean distance between two vectors.
Arguments:
vects: List containing two tensors of same length.
Returns:
Tensor containing euclidean distance
(as floating point value) between vectors.
"""
x, y = vects
sum_square = ops.sum(ops.square(x - y), axis=1, keepdims=True)
return ops.sqrt(ops.maximum(sum_square, keras.backend.epsilon()))
input = keras.layers.Input((28, 28, 1))
x = keras.layers.BatchNormalization()(input)
x = keras.layers.Conv2D(4, (5, 5), activation="tanh")(x)
x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
x = keras.layers.Conv2D(16, (5, 5), activation="tanh")(x)
x = keras.layers.AveragePooling2D(pool_size=(2, 2))(x)
x = keras.layers.Flatten()(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Dense(10, activation="tanh")(x)
embedding_network = keras.Model(input, x)
input_1 = keras.layers.Input((28, 28, 1))
input_2 = keras.layers.Input((28, 28, 1))
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
# same embedding network for both tower networks.
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
merge_layer = keras.layers.Lambda(euclidean_distance, output_shape=(1,))(
[tower_1, tower_2]
)
normal_layer = keras.layers.BatchNormalization()(merge_layer)
output_layer = keras.layers.Dense(1, activation="sigmoid")(normal_layer)
siamese = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
```
---
## Define the contrastive Loss
```python
def loss(margin=1):
"""Provides 'contrastive_loss' an enclosing scope with variable 'margin'.
Arguments:
margin: Integer, defines the baseline for distance for which pairs
should be classified as dissimilar. - (default is 1).
Returns:
'contrastive_loss' function with data ('margin') attached.
"""
# Contrastive loss = mean( (1-true_value) * square(prediction) +
# true_value * square( max(margin-prediction, 0) ))
def contrastive_loss(y_true, y_pred):
"""Calculates the contrastive loss.
Arguments:
y_true: List of labels, each label is of type float32.
y_pred: List of predictions of same length as of y_true,
each label is of type float32.
Returns:
A tensor containing contrastive loss as floating point value.
"""
square_pred = ops.square(y_pred)
margin_square = ops.square(ops.maximum(margin - (y_pred), 0))
return ops.mean((1 - y_true) * square_pred + (y_true) * margin_square)
return contrastive_loss
```
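As a quick, purely illustrative sanity check of the two branches of this loss (the numbers
below are made up): for a *similar* pair (label `0`), the loss grows with the model's
prediction, while for a *dissimilar* pair (label `1`), it falls to zero once the prediction
exceeds the margin.

```python
contrastive = loss(margin=1)
# Similar pair predicted far apart -> penalized.
print(float(contrastive(np.array([[0.0]]), np.array([[0.8]]))))  # ~0.64
# Dissimilar pair predicted beyond the margin -> ~0 loss.
print(float(contrastive(np.array([[1.0]]), np.array([[1.2]]))))  # 0.0
```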
---
## Compile the model with the contrastive loss
```python
siamese.compile(loss=loss(margin=margin), optimizer="RMSprop", metrics=["accuracy"])
siamese.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_3"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ input_layer_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ input_layer_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ functional_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5,318</span> │ input_layer_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ │ │ input_layer_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ lambda (<span style="color: #0087ff; text-decoration-color: #0087ff">Lambda</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ functional_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │
│ │ │ │ functional_1[<span style="color: #00af00; text-decoration-color: #00af00">1</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">4</span> │ lambda[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2</span> │ batch_normalization… │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">5,324</span> (20.80 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">4,808</span> (18.78 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">516</span> (2.02 KB)
</pre>
---
## Train the model
```python
history = siamese.fit(
[x_train_1, x_train_2],
labels_train,
validation_data=([x_val_1, x_val_2], labels_val),
batch_size=batch_size,
epochs=epochs,
)
```
<div class="k-default-codeblock">
```
Epoch 1/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 16s 3ms/step - accuracy: 0.4802 - loss: 0.2768 - val_accuracy: 0.7363 - val_loss: 0.1864
Epoch 2/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.7368 - loss: 0.1827 - val_accuracy: 0.8193 - val_loss: 0.1279
Epoch 3/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.8480 - loss: 0.1117 - val_accuracy: 0.8420 - val_loss: 0.1126
Epoch 4/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.8834 - loss: 0.0871 - val_accuracy: 0.9037 - val_loss: 0.0714
Epoch 5/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.8932 - loss: 0.0797 - val_accuracy: 0.8952 - val_loss: 0.0791
Epoch 6/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.9047 - loss: 0.0721 - val_accuracy: 0.9223 - val_loss: 0.0595
Epoch 7/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.9070 - loss: 0.0704 - val_accuracy: 0.9032 - val_loss: 0.0718
Epoch 8/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.9122 - loss: 0.0680 - val_accuracy: 0.8615 - val_loss: 0.1022
Epoch 9/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.9132 - loss: 0.0664 - val_accuracy: 0.8630 - val_loss: 0.1039
Epoch 10/10
3750/3750 ━━━━━━━━━━━━━━━━━━━━ 11s 3ms/step - accuracy: 0.9187 - loss: 0.0621 - val_accuracy: 0.8117 - val_loss: 0.1401
```
</div>
---
## Visualize results
```python
def plt_metric(history, metric, title, has_valid=True):
"""Plots the given 'metric' from 'history'.
Arguments:
history: history attribute of History object returned from Model.fit.
metric: Metric to plot, a string value present as key in 'history'.
title: A string to be used as title of plot.
has_valid: Boolean, true if valid data was passed to Model.fit else false.
Returns:
None.
"""
plt.plot(history[metric])
if has_valid:
plt.plot(history["val_" + metric])
plt.legend(["train", "validation"], loc="upper left")
plt.title(title)
plt.ylabel(metric)
plt.xlabel("epoch")
plt.show()
# Plot the accuracy
plt_metric(history=history.history, metric="accuracy", title="Model accuracy")
# Plot the contrastive loss
plt_metric(history=history.history, metric="loss", title="Contrastive Loss")
```


---
## Evaluate the model
```python
results = siamese.evaluate([x_test_1, x_test_2], labels_test)
print("test loss, test acc:", results)
```
<div class="k-default-codeblock">
```
625/625 ━━━━━━━━━━━━━━━━━━━━ 1s 1ms/step - accuracy: 0.8068 - loss: 0.1439
test loss, test acc: [0.13836927711963654, 0.8143500089645386]
```
</div>
---
## Visualize the predictions
```python
predictions = siamese.predict([x_test_1, x_test_2])
visualize(pairs_test, labels_test, to_show=3, predictions=predictions, test=True)
```
<div class="k-default-codeblock">
```
625/625 ━━━━━━━━━━━━━━━━━━━━ 1s 619us/step
```
</div>

| keras-io/examples/vision/md/siamese_contrastive.md/0 | {
"file_path": "keras-io/examples/vision/md/siamese_contrastive.md",
"repo_id": "keras-io",
"token_count": 8400
} | 104 |
"""
Title: Point cloud segmentation with PointNet
Author: [Soumik Rakshit](https://github.com/soumik12345), [Sayak Paul](https://github.com/sayakpaul)
Date created: 2020/10/23
Last modified: 2020/10/24
Description: Implementation of a PointNet-based model for segmenting point clouds.
Accelerator: GPU
"""
"""
## Introduction
A "point cloud" is an important type of data structure for storing geometric shape data.
Due to its irregular format, it's often transformed into
regular 3D voxel grids or collections of images before being used in deep learning applications,
a step which makes the data unnecessarily large.
The PointNet family of models solves this problem by directly consuming point clouds, respecting
the permutation-invariance property of the point data. The PointNet family of
models provides a simple, unified architecture
for applications ranging from **object classification** and **part segmentation** to
**scene semantic parsing**.
In this example, we demonstrate the implementation of the PointNet architecture
for shape segmentation.
### References
- [PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation](https://arxiv.org/abs/1612.00593)
- [Point cloud classification with PointNet](https://keras.io/examples/vision/pointnet/)
- [Spatial Transformer Networks](https://arxiv.org/abs/1506.02025)
"""
"""
## Imports
"""
import os
import json
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
import tensorflow as tf # For tf.data
import keras
from keras import layers
import matplotlib.pyplot as plt
"""
## Downloading Dataset
The [ShapeNet dataset](https://shapenet.org/) is an ongoing effort to establish a richly-annotated,
large-scale dataset of 3D shapes. **ShapeNetCore** is a subset of the full ShapeNet
dataset with clean single 3D models and manually verified category and alignment
annotations. It covers 55 common object categories, with about 51,300 unique 3D models.
For this example, we use one of the 12 object categories of
[PASCAL 3D+](http://cvgl.stanford.edu/projects/pascal3d.html),
included as part of the ShapenetCore dataset.
"""
dataset_url = "https://git.io/JiY4i"
dataset_path = keras.utils.get_file(
fname="shapenet.zip",
origin=dataset_url,
cache_subdir="datasets",
hash_algorithm="auto",
extract=True,
archive_format="auto",
cache_dir="datasets",
)
"""
## Loading the dataset
We parse the dataset metadata in order to easily map model categories to their
respective directories and segmentation classes to colors for the purpose of
visualization.
"""
with open("/tmp/.keras/datasets/PartAnnotation/metadata.json") as json_file:
metadata = json.load(json_file)
print(metadata)
"""
In this example, we train PointNet to segment the parts of an `Airplane` model.
"""
points_dir = "/tmp/.keras/datasets/PartAnnotation/{}/points".format(
metadata["Airplane"]["directory"]
)
labels_dir = "/tmp/.keras/datasets/PartAnnotation/{}/points_label".format(
metadata["Airplane"]["directory"]
)
LABELS = metadata["Airplane"]["lables"]
COLORS = metadata["Airplane"]["colors"]
VAL_SPLIT = 0.2
NUM_SAMPLE_POINTS = 1024
BATCH_SIZE = 32
EPOCHS = 60
INITIAL_LR = 1e-3
"""
## Structuring the dataset
We generate the following in-memory data structures from the Airplane point clouds and
their labels:
- `point_clouds` is a list of `np.array` objects that represent the point cloud data in
the form of x, y and z coordinates. Axis 0 represents the number of points in the
point cloud, while axis 1 represents the coordinates. `all_labels` is the list
that represents the label of each coordinate as a string (needed mainly for
visualization purposes).
- `test_point_clouds` is in the same format as `point_clouds`, but doesn't have the
corresponding labels of the point clouds.
- `all_labels` is a list of `np.array` objects that represent the point cloud labels
for each coordinate, corresponding to the `point_clouds` list.
- `point_cloud_labels` is a list of `np.array` objects that represent the point cloud
labels for each coordinate in one-hot encoded form, corresponding to the `point_clouds`
list.
"""
point_clouds, test_point_clouds = [], []
point_cloud_labels, all_labels = [], []
points_files = glob(os.path.join(points_dir, "*.pts"))
for point_file in tqdm(points_files):
point_cloud = np.loadtxt(point_file)
if point_cloud.shape[0] < NUM_SAMPLE_POINTS:
continue
# Get the file-id of the current point cloud for parsing its
# labels.
file_id = point_file.split("/")[-1].split(".")[0]
label_data, num_labels = {}, 0
for label in LABELS:
label_file = os.path.join(labels_dir, label, file_id + ".seg")
if os.path.exists(label_file):
label_data[label] = np.loadtxt(label_file).astype("float32")
num_labels = len(label_data[label])
# Point clouds having labels will be our training samples.
try:
label_map = ["none"] * num_labels
for label in LABELS:
for i, data in enumerate(label_data[label]):
label_map[i] = label if data == 1 else label_map[i]
label_data = [
LABELS.index(label) if label != "none" else len(LABELS)
for label in label_map
]
# Apply one-hot encoding to the dense label representation.
label_data = keras.utils.to_categorical(label_data, num_classes=len(LABELS) + 1)
point_clouds.append(point_cloud)
point_cloud_labels.append(label_data)
all_labels.append(label_map)
except KeyError:
test_point_clouds.append(point_cloud)
"""
Next, we take a look at some samples from the in-memory arrays we just generated:
"""
for _ in range(5):
i = random.randint(0, len(point_clouds) - 1)
print(f"point_clouds[{i}].shape:", point_clouds[0].shape)
print(f"point_cloud_labels[{i}].shape:", point_cloud_labels[0].shape)
for j in range(5):
print(
f"all_labels[{i}][{j}]:",
all_labels[i][j],
f"\tpoint_cloud_labels[{i}][{j}]:",
point_cloud_labels[i][j],
"\n",
)
"""
Now, let's visualize some of the point clouds along with their labels.
"""
def visualize_data(point_cloud, labels):
df = pd.DataFrame(
data={
"x": point_cloud[:, 0],
"y": point_cloud[:, 1],
"z": point_cloud[:, 2],
"label": labels,
}
)
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
for index, label in enumerate(LABELS):
c_df = df[df["label"] == label]
try:
ax.scatter(
c_df["x"], c_df["y"], c_df["z"], label=label, alpha=0.5, c=COLORS[index]
)
except IndexError:
pass
ax.legend()
plt.show()
visualize_data(point_clouds[0], all_labels[0])
visualize_data(point_clouds[300], all_labels[300])
"""
### Preprocessing
Note that all the point clouds that we have loaded consist of a variable number of points,
which makes it difficult for us to batch them together. In order to overcome this problem, we
randomly sample a fixed number of points from each point cloud. We also normalize the
point clouds in order to make the data scale-invariant.
"""
for index in tqdm(range(len(point_clouds))):
current_point_cloud = point_clouds[index]
current_label_cloud = point_cloud_labels[index]
current_labels = all_labels[index]
num_points = len(current_point_cloud)
# Randomly sampling respective indices.
sampled_indices = random.sample(list(range(num_points)), NUM_SAMPLE_POINTS)
# Sampling points corresponding to sampled indices.
sampled_point_cloud = np.array([current_point_cloud[i] for i in sampled_indices])
# Sampling corresponding one-hot encoded labels.
sampled_label_cloud = np.array([current_label_cloud[i] for i in sampled_indices])
# Sampling corresponding labels for visualization.
sampled_labels = np.array([current_labels[i] for i in sampled_indices])
# Normalizing sampled point cloud.
norm_point_cloud = sampled_point_cloud - np.mean(sampled_point_cloud, axis=0)
norm_point_cloud /= np.max(np.linalg.norm(norm_point_cloud, axis=1))
point_clouds[index] = norm_point_cloud
point_cloud_labels[index] = sampled_label_cloud
all_labels[index] = sampled_labels
"""
Let's visualize the sampled and normalized point clouds along with their corresponding
labels.
"""
visualize_data(point_clouds[0], all_labels[0])
visualize_data(point_clouds[300], all_labels[300])
"""
### Creating TensorFlow datasets
We create `tf.data.Dataset` objects for the training and validation data.
We also augment the training point clouds by applying random jitter to them.
"""
def load_data(point_cloud_batch, label_cloud_batch):
point_cloud_batch.set_shape([NUM_SAMPLE_POINTS, 3])
label_cloud_batch.set_shape([NUM_SAMPLE_POINTS, len(LABELS) + 1])
return point_cloud_batch, label_cloud_batch
def augment(point_cloud_batch, label_cloud_batch):
noise = tf.random.uniform(
tf.shape(label_cloud_batch), -0.001, 0.001, dtype=tf.float64
)
point_cloud_batch += noise[:, :, :3]
return point_cloud_batch, label_cloud_batch
def generate_dataset(point_clouds, label_clouds, is_training=True):
dataset = tf.data.Dataset.from_tensor_slices((point_clouds, label_clouds))
dataset = dataset.shuffle(BATCH_SIZE * 100) if is_training else dataset
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size=BATCH_SIZE)
dataset = (
dataset.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
if is_training
else dataset
)
return dataset
split_index = int(len(point_clouds) * (1 - VAL_SPLIT))
train_point_clouds = point_clouds[:split_index]
train_label_cloud = point_cloud_labels[:split_index]
total_training_examples = len(train_point_clouds)
val_point_clouds = point_clouds[split_index:]
val_label_cloud = point_cloud_labels[split_index:]
print("Num train point clouds:", len(train_point_clouds))
print("Num train point cloud labels:", len(train_label_cloud))
print("Num val point clouds:", len(val_point_clouds))
print("Num val point cloud labels:", len(val_label_cloud))
train_dataset = generate_dataset(train_point_clouds, train_label_cloud)
val_dataset = generate_dataset(val_point_clouds, val_label_cloud, is_training=False)
print("Train Dataset:", train_dataset)
print("Validation Dataset:", val_dataset)
"""
## PointNet model
The figure below depicts the internals of the PointNet model family:

Given that PointNet is meant to consume an ***unordered set*** of coordinates as its input data,
its architecture needs to match the following characteristic properties
of point cloud data:
### Permutation invariance
Given the unstructured nature of point cloud data, a scan made up of `n` points has `n!`
permutations. The subsequent data processing must be invariant to the different
representations. In order to make PointNet invariant to input permutations, we use a
symmetric function (such as max-pooling) once the `n` input points are mapped to
higher-dimensional space. The result is a **global feature vector** that aims to capture
an aggregate signature of the `n` input points. The global feature vector is used alongside
local point features for segmentation.

### Transformation invariance
Segmentation outputs should be unchanged if the object undergoes certain transformations,
such as translation or scaling. For a given input point cloud, we apply an appropriate
rigid or affine transformation to achieve pose normalization. Because each of the `n` input
points are represented as a vector and are mapped to the embedding spaces independently,
applying a geometric transformation simply amounts to matrix multiplying each point with
a transformation matrix. This is motivated by the concept of
[Spatial Transformer Networks](https://arxiv.org/abs/1506.02025).
The operations comprising the T-Net are motivated by the higher-level architecture of
PointNet. MLPs (or fully-connected layers) are used to map the input points independently
and identically to a higher-dimensional space; max-pooling is used to encode a global
feature vector whose dimensionality is then reduced with fully-connected layers. The
input-dependent features at the final fully-connected layer are then combined with
globally trainable weights and biases, resulting in a 3-by-3 transformation matrix.

### Point interactions
The interaction between neighboring points often carries useful information (i.e., a
single point should not be treated in isolation). Whereas classification need only make
use of global features, segmentation must be able to leverage local point features along
with global point features.
**Note**: The figures presented in this section have been taken from the
[original paper](https://arxiv.org/abs/1612.00593).
"""
"""
Now that we know the pieces that compose the PointNet model, we can implement the model.
We start by implementing the basic blocks i.e., the convolutional block and the multi-layer
perceptron block.
"""
def conv_block(x, filters, name):
x = layers.Conv1D(filters, kernel_size=1, padding="valid", name=f"{name}_conv")(x)
x = layers.BatchNormalization(name=f"{name}_batch_norm")(x)
return layers.Activation("relu", name=f"{name}_relu")(x)
def mlp_block(x, filters, name):
x = layers.Dense(filters, name=f"{name}_dense")(x)
x = layers.BatchNormalization(name=f"{name}_batch_norm")(x)
return layers.Activation("relu", name=f"{name}_relu")(x)
"""
We implement a regularizer (taken from
[this example](https://keras.io/examples/vision/pointnet/#build-a-model))
to enforce orthogonality in the feature space. This is needed to ensure
that the magnitudes of the transformed features do not vary too much.
"""
class OrthogonalRegularizer(keras.regularizers.Regularizer):
"""Reference: https://keras.io/examples/vision/pointnet/#build-a-model"""
def __init__(self, num_features, l2reg=0.001):
self.num_features = num_features
self.l2reg = l2reg
self.identity = keras.ops.eye(num_features)
def __call__(self, x):
x = keras.ops.reshape(x, (-1, self.num_features, self.num_features))
xxt = keras.ops.tensordot(x, x, axes=(2, 2))
xxt = keras.ops.reshape(xxt, (-1, self.num_features, self.num_features))
return keras.ops.sum(self.l2reg * keras.ops.square(xxt - self.identity))
def get_config(self):
config = super().get_config()
config.update({"num_features": self.num_features, "l2reg_strength": self.l2reg})
return config
"""
The next piece is the transformation network which we explained earlier.
"""
def transformation_net(inputs, num_features, name):
"""
Reference: https://keras.io/examples/vision/pointnet/#build-a-model.
The `filters` values come from the original paper:
https://arxiv.org/abs/1612.00593.
"""
x = conv_block(inputs, filters=64, name=f"{name}_1")
x = conv_block(x, filters=128, name=f"{name}_2")
x = conv_block(x, filters=1024, name=f"{name}_3")
x = layers.GlobalMaxPooling1D()(x)
x = mlp_block(x, filters=512, name=f"{name}_1_1")
x = mlp_block(x, filters=256, name=f"{name}_2_1")
return layers.Dense(
num_features * num_features,
kernel_initializer="zeros",
bias_initializer=keras.initializers.Constant(np.eye(num_features).flatten()),
activity_regularizer=OrthogonalRegularizer(num_features),
name=f"{name}_final",
)(x)
def transformation_block(inputs, num_features, name):
transformed_features = transformation_net(inputs, num_features, name=name)
transformed_features = layers.Reshape((num_features, num_features))(
transformed_features
)
return layers.Dot(axes=(2, 1), name=f"{name}_mm")([inputs, transformed_features])
"""
Finally, we piece the above blocks together and implement the segmentation model.
"""
def get_shape_segmentation_model(num_points, num_classes):
input_points = keras.Input(shape=(None, 3))
# PointNet Classification Network.
transformed_inputs = transformation_block(
input_points, num_features=3, name="input_transformation_block"
)
features_64 = conv_block(transformed_inputs, filters=64, name="features_64")
features_128_1 = conv_block(features_64, filters=128, name="features_128_1")
features_128_2 = conv_block(features_128_1, filters=128, name="features_128_2")
transformed_features = transformation_block(
features_128_2, num_features=128, name="transformed_features"
)
features_512 = conv_block(transformed_features, filters=512, name="features_512")
features_2048 = conv_block(features_512, filters=2048, name="pre_maxpool_block")
global_features = layers.MaxPool1D(pool_size=num_points, name="global_features")(
features_2048
)
global_features = keras.ops.tile(global_features, [1, num_points, 1])
# Segmentation head.
segmentation_input = layers.Concatenate(name="segmentation_input")(
[
features_64,
features_128_1,
features_128_2,
transformed_features,
features_512,
global_features,
]
)
segmentation_features = conv_block(
segmentation_input, filters=128, name="segmentation_features"
)
outputs = layers.Conv1D(
num_classes, kernel_size=1, activation="softmax", name="segmentation_head"
)(segmentation_features)
return keras.Model(input_points, outputs)
"""
## Instantiate the model
"""
x, y = next(iter(train_dataset))
num_points = x.shape[1]
num_classes = y.shape[-1]
segmentation_model = get_shape_segmentation_model(num_points, num_classes)
segmentation_model.summary()
"""
## Training
For training, the authors recommend using a learning rate schedule that decays the
initial learning rate by half every 20 epochs. In this example, we halve it every 5 epochs instead.
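Concretely, with `staircase=True` the schedule below evaluates to
`lr(step) = 0.003 * 0.5 ** (step // (steps_per_epoch * 5))`, i.e. the learning rate is halved
once every 5 epochs (this is just the standard `ExponentialDecay` behavior, spelled out for clarity).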
"""
steps_per_epoch = total_training_examples // BATCH_SIZE
total_training_steps = steps_per_epoch * EPOCHS
print(f"Steps per epoch: {steps_per_epoch}.")
print(f"Total training steps: {total_training_steps}.")
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=0.003,
decay_steps=steps_per_epoch * 5,
decay_rate=0.5,
staircase=True,
)
steps = range(total_training_steps)
lrs = [lr_schedule(step) for step in steps]
plt.plot(lrs)
plt.xlabel("Steps")
plt.ylabel("Learning Rate")
plt.show()
"""
Finally, we implement a utility for running our experiments and launch model training.
"""
def run_experiment(epochs):
segmentation_model = get_shape_segmentation_model(num_points, num_classes)
segmentation_model.compile(
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
loss=keras.losses.CategoricalCrossentropy(),
metrics=["accuracy"],
)
checkpoint_filepath = "checkpoint.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
history = segmentation_model.fit(
train_dataset,
validation_data=val_dataset,
epochs=epochs,
callbacks=[checkpoint_callback],
)
segmentation_model.load_weights(checkpoint_filepath)
return segmentation_model, history
segmentation_model, history = run_experiment(epochs=EPOCHS)
"""
## Visualize the training landscape
"""
def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("loss")
plot_result("accuracy")
"""
## Inference
"""
validation_batch = next(iter(val_dataset))
val_predictions = segmentation_model.predict(validation_batch[0])
print(f"Validation prediction shape: {val_predictions.shape}")
def visualize_single_point_cloud(point_clouds, label_clouds, idx):
label_map = LABELS + ["none"]
point_cloud = point_clouds[idx]
label_cloud = label_clouds[idx]
visualize_data(point_cloud, [label_map[np.argmax(label)] for label in label_cloud])
idx = np.random.choice(len(validation_batch[0]))
print(f"Index selected: {idx}")
# Plotting with ground-truth.
visualize_single_point_cloud(validation_batch[0], validation_batch[1], idx)
# Plotting with predicted labels.
visualize_single_point_cloud(validation_batch[0], val_predictions, idx)
"""
## Final notes
If you are interested in learning more about this topic, you may find
[this repository](https://github.com/soumik12345/point-cloud-segmentation)
useful.
"""
| keras-io/examples/vision/pointnet_segmentation.py/0 | {
"file_path": "keras-io/examples/vision/pointnet_segmentation.py",
"repo_id": "keras-io",
"token_count": 7459
} | 105 |
"""
Title: When Recurrence meets Transformers
Author: [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)
Date created: 2023/03/12
Last modified: 2023/03/12
Description: Image Classification with Temporal Latent Bottleneck Networks.
Accelerator: GPU
"""
"""
## Introduction
A simple Recurrent Neural Network (RNN) displays a strong inductive bias towards learning
**temporally compressed representations**. **Equation 1** shows the recurrence formula,
where `h_t` is the compressed representation (a single vector) of the entire input
sequence `x`.
|  |
| :--: |
| **Equation 1**: The recurrence equation. (Source: Aritra and Suvaditya)|
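In code, this recurrence is just a loop that folds the whole sequence into one state vector.
A minimal sketch (purely illustrative, with an arbitrary `tanh` update rule) could look like this:

```python
# Illustrative sketch of Equation 1: h_t = f(h_{t-1}, x_t)
import numpy as np


def simple_rnn_scan(x_sequence, h0, w_h, w_x):
    h = h0
    for x_t in x_sequence:  # iterate over timesteps
        h = np.tanh(w_h @ h + w_x @ x_t)  # fold everything seen so far into `h`
    return h  # a single vector summarizing the entire sequence
```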
On the other hand, Transformers ([Vaswani et al.](https://arxiv.org/abs/1706.03762)) have
little inductive bias towards learning temporally compressed representations.
The Transformer has achieved SoTA results in Natural Language Processing (NLP)
and Vision tasks with its pairwise attention mechanism.
While the Transformer has the ability to **attend** to different sections of the input
sequence, the computation of attention is quadratic in nature.
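In other words, for a sequence of length `n`, the pairwise attention matrix has `n x n`
entries, so both compute and memory grow quadratically with the sequence length.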
[Didolkar et al.](https://arxiv.org/abs/2205.14794) argue that having a more compressed
representation of a sequence may be beneficial for *generalization*, as it can be easily
**re-used** and **re-purposed** with fewer irrelevant details. While compression is good,
they also notice that too much of it can harm expressiveness.
The authors propose a solution that divides computation into **two streams**. A *slow
stream* that is recurrent in nature and a *fast stream* that is parameterized as a
Transformer. While this method has the novelty of introducing different processing
streams in order to preserve and process latent states, it has parallels drawn in other
works like the [Perceiver Mechanism (by Jaegle et al.)](https://arxiv.org/abs/2103.03206)
and [Grounded Language Learning Fast and Slow (by Hill et al.)](https://arxiv.org/abs/2009.01719).
The following example explores how we can make use of the new Temporal Latent Bottleneck
mechanism to perform image classification on the CIFAR-10 dataset. We implement this
model by making a custom `RNNCell` implementation in order to make a **performant** and
**vectorized** design.
_Note_: This example makes use of `TensorFlow 2.12.0`, which must be installed on your
system (for instance via `pip install tensorflow==2.12.0`).
"""
"""
## Setup imports
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import mixed_precision
from tensorflow.keras.optimizers import AdamW
import random
from matplotlib import pyplot as plt
# Set seed for reproducibility.
keras.utils.set_random_seed(42)
AUTO = tf.data.AUTOTUNE
"""
## Setting required configuration
We set a few configuration parameters that are needed within the pipeline we have
designed. The current parameters are for use with the
[CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
The model also supports a `mixed-precision` setting, which casts most computations to
`16-bit` floats where possible, while keeping some parameters in `32-bit` as needed
for numerical stability. This brings performance benefits, as the memory footprint of the
model decreases significantly while computation speeds up at both training and inference time.
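If you want to confirm which policy is active after the configuration step below, a small
illustrative check (not part of the original configuration) would be:

```python
# Illustrative: inspect the currently active global dtype policy.
print(mixed_precision.global_policy())  # shows the "mixed_float16" policy once it is set below
```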
"""
config = {
"mixed_precision": True,
"dataset": "cifar10",
"train_slice": 40_000,
"batch_size": 2048,
"buffer_size": 2048 * 2,
"input_shape": [32, 32, 3],
"image_size": 48,
"num_classes": 10,
"learning_rate": 1e-4,
"weight_decay": 1e-4,
"epochs": 30,
"patch_size": 4,
"embed_dim": 64,
"chunk_size": 8,
"r": 2,
"num_layers": 4,
"ffn_drop": 0.2,
"attn_drop": 0.2,
"num_heads": 1,
}
if config["mixed_precision"]:
policy = mixed_precision.Policy("mixed_float16")
mixed_precision.set_global_policy(policy)
"""
## Loading the CIFAR-10 dataset
We are going to use the CIFAR10 dataset for running our experiments. This dataset
contains a training set of `50,000` images for `10` classes with the standard image size
of `(32, 32, 3)`.
It also has a separate set of `10,000` images with similar characteristics. More
information about the dataset may be found at the official site for the dataset, as well
as in the [`keras.datasets.cifar10`](https://keras.io/api/datasets/cifar10/) API reference.
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[: config["train_slice"]], y_train[: config["train_slice"]]),
(x_train[config["train_slice"] :], y_train[config["train_slice"] :]),
)
"""
## Define data augmentation for the training and validation/test pipelines
We define separate pipelines for performing image augmentation on our data. This step is
important to make the model more robust to changes, helping it generalize better.
The preprocessing and augmentation steps we perform are as follows:
- `Rescaling` (training, test): This step is performed to normalize all image pixel
values from the `[0,255]` range to `[0,1)`. This helps in maintaining numerical stability
during training.
- `Resizing` (training, test): We resize the image from its original size of (32, 32) to
(52, 52). This is done to account for the Random Crop, as well as to comply with the
specifications of the data given in the paper.
- `RandomCrop` (training): This layer randomly selects a crop/sub-region of the image
with size `(48, 48)`.
- `RandomFlip` (training): This layer randomly flips all the images horizontally,
keeping image sizes the same.
"""
# Build the `train` augmentation pipeline.
train_augmentation = keras.Sequential(
[
layers.Rescaling(1 / 255.0, dtype="float32"),
layers.Resizing(
config["input_shape"][0] + 20,
config["input_shape"][0] + 20,
dtype="float32",
),
layers.RandomCrop(config["image_size"], config["image_size"], dtype="float32"),
layers.RandomFlip("horizontal", dtype="float32"),
],
name="train_data_augmentation",
)
# Build the `val` and `test` data pipeline.
test_augmentation = keras.Sequential(
[
layers.Rescaling(1 / 255.0, dtype="float32"),
layers.Resizing(config["image_size"], config["image_size"], dtype="float32"),
],
name="test_data_augmentation",
)
# We define functions in place of simple lambda functions to run through the
# `keras.Sequential` in order to solve this warning:
# (https://github.com/tensorflow/tensorflow/issues/56089)
def train_map_fn(image, label):
return train_augmentation(image), label
def test_map_fn(image, label):
return test_augmentation(image), label
"""
## Load dataset into `tf.data.Dataset` object
- We take the `np.ndarray` instance of the datasets and move them into a
`tf.data.Dataset` instance
- Apply augmentations using
[`.map()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)
- Shuffle the dataset using
[`.shuffle()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle)
- Batch the dataset using
[`.batch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#batch)
- Enable pre-fetching of batches using
[`.prefetch()`](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#prefetch)
"""
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = (
train_ds.map(train_map_fn, num_parallel_calls=AUTO)
.shuffle(config["buffer_size"])
.batch(config["batch_size"], num_parallel_calls=AUTO)
.prefetch(AUTO)
)
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = (
val_ds.map(test_map_fn, num_parallel_calls=AUTO)
.batch(config["batch_size"], num_parallel_calls=AUTO)
.prefetch(AUTO)
)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = (
test_ds.map(test_map_fn, num_parallel_calls=AUTO)
.batch(config["batch_size"], num_parallel_calls=AUTO)
.prefetch(AUTO)
)
"""
## Temporal Latent Bottleneck
An excerpt from the paper:
> In the brain, short-term and long-term memory have developed in a specialized way.
Short-term memory is allowed to change very quickly to react to immediate sensory inputs
and perception. By contrast, long-term memory changes slowly, is highly selective and
involves repeated consolidation.
Inspired by short-term and long-term memory, the authors introduce the fast stream
and slow stream computation. The fast stream has a short-term memory with a high capacity
that reacts quickly to sensory input (Transformers). The slow stream has long-term memory
which updates at a slower rate and summarizes the most relevant information (Recurrence).
To implement this idea we need to:
- Take a sequence of data.
- Divide the sequence into fixed-size chunks.
- Fast stream operates within each chunk. It provides fine-grained local information.
- Slow stream consolidates and aggregates information across chunks. It provides
coarse-grained distant information.
The fast and slow streams induce what is called **information asymmetry**. The two streams
interact with each other through a bottleneck of attention. **Figure 1** shows the
architecture of the model.
|  |
| :--: |
| Figure 1: Architecture of the model. (Source: https://arxiv.org/abs/2205.14794) |
A PyTorch-style pseudocode is also proposed by the authors as shown in **Algorithm 1**.
|  |
| :--: |
| Algorithm 1: PyTorch style pseudocode. (Source: https://arxiv.org/abs/2205.14794) |
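To make the structure of this computation concrete before diving into the Keras
implementation, here is a rough sketch of the two streams in plain Python (illustrative
only; the function and argument names are placeholders, not the actual API used below):

```python
# Illustrative pseudocode of the two-stream computation over chunks.
def temporal_latent_bottleneck(chunks, slow_state, perceptual_layers, tlb_layer, r):
    for chunk in chunks:
        fast = chunk
        for idx, layer in enumerate(perceptual_layers):
            # Self attention within the chunk (fine-grained, local information).
            fast = layer(query=fast, key=fast, value=fast)
            if idx % r == 0:
                # Periodically read from the slow stream.
                fast = layer(query=fast, key=slow_state, value=slow_state)
        # The slow stream is updated once per chunk (coarse, distant information).
        slow_state = tlb_layer(query=slow_state, key=fast, value=fast)
    return slow_state
```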
"""
"""
### `PatchEmbedding` layer
This custom `keras.layers.Layer` is useful for generating patches from the image and
transforming them into a higher-dimensional embedding space using `keras.layers.Embedding`.
The patching operation is done using a `keras.layers.Conv2D` instance instead of a
traditional `tf.image.extract_patches` to allow for vectorization.
Once the patching of images is complete, we reshape the image patches in order to get a
flattened representation where the number of dimensions is the embedding dimension. At
this stage, we also inject positional information to the tokens.
After we obtain the tokens we chunk them. The chunking operation involves taking
fixed-size sequences from the embedding output to create 'chunks', which will then be
used as the final input to the model.
"""
class PatchEmbedding(layers.Layer):
"""Image to Patch Embedding.
Args:
image_size (`Tuple[int]`): Size of the input image.
patch_size (`Tuple[int]`): Size of the patch.
embed_dim (`int`): Dimension of the embedding.
chunk_size (`int`): Number of patches to be chunked.
"""
def __init__(
self,
image_size,
patch_size,
embed_dim,
chunk_size,
**kwargs,
):
super().__init__(**kwargs)
# Compute the patch resolution.
patch_resolution = [
image_size[0] // patch_size[0],
image_size[1] // patch_size[1],
]
# Store the parameters.
self.image_size = image_size
self.patch_size = patch_size
self.embed_dim = embed_dim
self.patch_resolution = patch_resolution
self.num_patches = patch_resolution[0] * patch_resolution[1]
# Define the positions of the patches.
self.positions = tf.range(start=0, limit=self.num_patches, delta=1)
# Create the layers.
self.projection = layers.Conv2D(
filters=embed_dim,
kernel_size=patch_size,
strides=patch_size,
name="projection",
)
self.flatten = layers.Reshape(
target_shape=(-1, embed_dim),
name="flatten",
)
self.position_embedding = layers.Embedding(
input_dim=self.num_patches,
output_dim=embed_dim,
name="position_embedding",
)
self.layernorm = keras.layers.LayerNormalization(
epsilon=1e-5,
name="layernorm",
)
self.chunking_layer = layers.Reshape(
target_shape=(self.num_patches // chunk_size, chunk_size, embed_dim),
name="chunking_layer",
)
def call(self, inputs):
# Project the inputs to the embedding dimension.
x = self.projection(inputs)
        # Flatten the patches and add the position embedding.
x = self.flatten(x)
x = x + self.position_embedding(self.positions)
# Normalize the embeddings.
x = self.layernorm(x)
# Chunk the tokens.
x = self.chunking_layer(x)
return x
"""
### `FeedForwardNetwork` Layer
This custom `keras.layers.Layer` instance allows us to define a generic FFN along with a
dropout.
"""
class FeedForwardNetwork(layers.Layer):
"""Feed Forward Network.
Args:
dims (`int`): Number of units in FFN.
dropout (`float`): Dropout probability for FFN.
"""
def __init__(self, dims, dropout, **kwargs):
super().__init__(**kwargs)
# Create the layers.
self.ffn = keras.Sequential(
[
layers.Dense(units=4 * dims, activation=tf.nn.gelu),
layers.Dense(units=dims),
layers.Dropout(rate=dropout),
],
name="ffn",
)
self.layernorm = layers.LayerNormalization(
epsilon=1e-5,
name="layernorm",
)
def call(self, inputs):
# Apply the FFN.
x = self.layernorm(inputs)
x = inputs + self.ffn(x)
return x
"""
### `BaseAttention` layer
This custom `keras.layers.Layer` instance is a `super`/`base` class that wraps a
`keras.layers.MultiHeadAttention` layer along with some other components. This gives us
basic common denominator functionality for all the Attention layers/modules in our model.
"""
class BaseAttention(layers.Layer):
"""Base Attention Module.
Args:
num_heads (`int`): Number of attention heads.
key_dim (`int`): Size of each attention head for key.
dropout (`float`): Dropout probability for attention module.
"""
def __init__(self, num_heads, key_dim, dropout, **kwargs):
super().__init__(**kwargs)
self.multi_head_attention = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=key_dim,
dropout=dropout,
name="mha",
)
self.query_layernorm = layers.LayerNormalization(
epsilon=1e-5,
name="q_layernorm",
)
self.key_layernorm = layers.LayerNormalization(
epsilon=1e-5,
name="k_layernorm",
)
self.value_layernorm = layers.LayerNormalization(
epsilon=1e-5,
name="v_layernorm",
)
self.attention_scores = None
def call(self, input_query, key, value):
# Apply the attention module.
query = self.query_layernorm(input_query)
key = self.key_layernorm(key)
value = self.value_layernorm(value)
(attention_outputs, attention_scores) = self.multi_head_attention(
query=query,
key=key,
value=value,
return_attention_scores=True,
)
# Save the attention scores for later visualization.
self.attention_scores = attention_scores
# Add the input to the attention output.
x = input_query + attention_outputs
return x
"""
### `Attention` with `FeedForwardNetwork` layer
This custom `keras.layers.Layer` implementation combines the `BaseAttention` and
`FeedForwardNetwork` components to develop one block which will be used repeatedly within
the model. This module is highly customizable and flexible, allowing for changes within
the internal layers.
"""
class AttentionWithFFN(layers.Layer):
"""Attention with Feed Forward Network.
Args:
ffn_dims (`int`): Number of units in FFN.
ffn_dropout (`float`): Dropout probability for FFN.
num_heads (`int`): Number of attention heads.
key_dim (`int`): Size of each attention head for key.
attn_dropout (`float`): Dropout probability for attention module.
"""
def __init__(
self,
ffn_dims,
ffn_dropout,
num_heads,
key_dim,
attn_dropout,
**kwargs,
):
super().__init__(**kwargs)
# Create the layers.
self.attention = BaseAttention(
num_heads=num_heads,
key_dim=key_dim,
dropout=attn_dropout,
name="base_attn",
)
self.ffn = FeedForwardNetwork(
dims=ffn_dims,
dropout=ffn_dropout,
name="ffn",
)
self.attention_scores = None
def call(self, query, key, value):
# Apply the attention module.
x = self.attention(query, key, value)
# Save the attention scores for later visualization.
self.attention_scores = self.attention.attention_scores
# Apply the FFN.
x = self.ffn(x)
return x
"""
### Custom RNN Cell for **Temporal Latent Bottleneck** and **Perceptual Module**
**Algorithm 1** (the pseudocode) depicts recurrence with the help of for loops. Looping
does make the implementation simpler, but it harms the training time. In this section we wrap
the custom recurrence logic inside of the `CustomRecurrentCell`. This custom cell will
then be wrapped with the [Keras RNN API](https://keras.io/api/layers/recurrent_layers/rnn/)
that makes the entire code vectorizable.
This custom cell, implemented as a `keras.layers.Layer`, is the integral part of the
logic for the model.
The cell's functionality can be divided into 2 parts:
- **Slow Stream (Temporal Latent Bottleneck):**
- This module consists of a single `AttentionWithFFN` layer that parses the output of the
previous Slow Stream, an intermediate hidden representation (which is the *latent* in
Temporal Latent Bottleneck) as the Query, and the output of the latest Fast Stream as Key
and Value. This layer can also be construed as a *CrossAttention* layer.
- **Fast Stream (Perceptual Module):**
- This module consists of intertwined `AttentionWithFFN` layers. This stream consists of
*n* layers of `SelfAttention` and `CrossAttention` in a sequential manner.
- Here, some layers take the chunked input as the Query, Key and Value (Also referred to
as the *SelfAttention* layer).
- The other layers take the intermediate state outputs from within the Temporal Latent
Bottleneck module as the Query while using the output of the previous Self-Attention
layers before it as the Key and Value.
"""
class CustomRecurrentCell(layers.Layer):
"""Custom Recurrent Cell.
Args:
chunk_size (`int`): Number of tokens in a chunk.
r (`int`): One Cross Attention per **r** Self Attention.
num_layers (`int`): Number of layers.
ffn_dims (`int`): Number of units in FFN.
ffn_dropout (`float`): Dropout probability for FFN.
num_heads (`int`): Number of attention heads.
key_dim (`int`): Size of each attention head for key.
attn_dropout (`float`): Dropout probability for attention module.
"""
def __init__(
self,
chunk_size,
r,
num_layers,
ffn_dims,
ffn_dropout,
num_heads,
key_dim,
attn_dropout,
**kwargs,
):
super().__init__(**kwargs)
# Save the arguments.
self.chunk_size = chunk_size
self.r = r
self.num_layers = num_layers
self.ffn_dims = ffn_dims
        self.ffn_dropout = ffn_dropout
self.num_heads = num_heads
self.key_dim = key_dim
self.attn_dropout = attn_dropout
# Create the state_size and output_size. This is important for
# custom recurrence logic.
self.state_size = tf.TensorShape([chunk_size, ffn_dims])
self.output_size = tf.TensorShape([chunk_size, ffn_dims])
self.get_attention_scores = False
self.attention_scores = []
# Perceptual Module
perceptual_module = list()
for layer_idx in range(num_layers):
perceptual_module.append(
AttentionWithFFN(
ffn_dims=ffn_dims,
ffn_dropout=ffn_dropout,
num_heads=num_heads,
key_dim=key_dim,
attn_dropout=attn_dropout,
name=f"pm_self_attn_{layer_idx}",
)
)
if layer_idx % r == 0:
perceptual_module.append(
AttentionWithFFN(
ffn_dims=ffn_dims,
ffn_dropout=ffn_dropout,
num_heads=num_heads,
key_dim=key_dim,
attn_dropout=attn_dropout,
name=f"pm_cross_attn_ffn_{layer_idx}",
)
)
self.perceptual_module = perceptual_module
# Temporal Latent Bottleneck Module
self.tlb_module = AttentionWithFFN(
ffn_dims=ffn_dims,
ffn_dropout=ffn_dropout,
num_heads=num_heads,
key_dim=key_dim,
attn_dropout=attn_dropout,
name=f"tlb_cross_attn_ffn",
)
def call(self, inputs, states):
# inputs => (batch, chunk_size, dims)
# states => [(batch, chunk_size, units)]
slow_stream = states[0]
fast_stream = inputs
for layer_idx, layer in enumerate(self.perceptual_module):
fast_stream = layer(query=fast_stream, key=fast_stream, value=fast_stream)
if layer_idx % self.r == 0:
fast_stream = layer(
query=fast_stream, key=slow_stream, value=slow_stream
)
slow_stream = self.tlb_module(
query=slow_stream, key=fast_stream, value=fast_stream
)
# Save the attention scores for later visualization.
if self.get_attention_scores:
self.attention_scores.append(self.tlb_module.attention_scores)
return fast_stream, [slow_stream]
"""
### `TemporalLatentBottleneckModel` to encapsulate full model
Here, we just wrap the full model so as to expose it for training.
"""
class TemporalLatentBottleneckModel(keras.Model):
"""Model Trainer.
Args:
patch_layer (`keras.layers.Layer`): Patching layer.
custom_cell (`keras.layers.Layer`): Custom Recurrent Cell.
"""
def __init__(self, patch_layer, custom_cell, **kwargs):
super().__init__(**kwargs)
self.patch_layer = patch_layer
self.rnn = layers.RNN(custom_cell, name="rnn")
self.gap = layers.GlobalAveragePooling1D(name="gap")
self.head = layers.Dense(10, activation="softmax", dtype="float32", name="head")
def call(self, inputs):
x = self.patch_layer(inputs)
x = self.rnn(x)
x = self.gap(x)
outputs = self.head(x)
return outputs
"""
## Build the model
To begin training, we now define the components individually and pass them as arguments
to our wrapper class, which will prepare the final model for training. We define a
`PatchEmbed` layer, and the `CustomCell`-based RNN.
"""
# Build the model.
patch_layer = PatchEmbedding(
image_size=(config["image_size"], config["image_size"]),
patch_size=(config["patch_size"], config["patch_size"]),
embed_dim=config["embed_dim"],
chunk_size=config["chunk_size"],
)
custom_rnn_cell = CustomRecurrentCell(
chunk_size=config["chunk_size"],
r=config["r"],
num_layers=config["num_layers"],
ffn_dims=config["embed_dim"],
ffn_dropout=config["ffn_drop"],
num_heads=config["num_heads"],
key_dim=config["embed_dim"],
attn_dropout=config["attn_drop"],
)
model = TemporalLatentBottleneckModel(
patch_layer=patch_layer,
custom_cell=custom_rnn_cell,
)
"""
## Metrics and Callbacks
We use the `AdamW` optimizer since it has been shown to perform very well on several benchmark
tasks from an optimization perspective. It is a version of the `keras.optimizers.Adam`
optimizer with decoupled Weight Decay.
For the loss function, we use `keras.losses.SparseCategoricalCrossentropy`, which computes the
cross-entropy between the predicted class probabilities and the integer class labels. We
also track accuracy on our data as a sanity check.
"""
optimizer = AdamW(
learning_rate=config["learning_rate"], weight_decay=config["weight_decay"]
)
model.compile(
optimizer=optimizer,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
"""
## Train the model with `model.fit()`
We pass the training dataset and run training.
"""
history = model.fit(
train_ds,
epochs=config["epochs"],
validation_data=val_ds,
)
"""
## Visualize training metrics
The call to `model.fit()` returns a `history` object, which stores the values of the metrics
generated during the training run (note that it is ephemeral and needs to be saved manually
if you want to keep it).
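For example, a minimal way to persist it (illustrative only; the file name is arbitrary) is to
dump the underlying dictionary to JSON:

```python
import json

# Illustrative: persist the metric history so it survives the Python session.
# Values are cast to plain floats to keep them JSON-serializable.
with open("tlb_history.json", "w") as f:
    json.dump({k: [float(v) for v in vals] for k, vals in history.history.items()}, f)
```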
We now display the Loss and Accuracy curves for the training and validation sets.
"""
plt.plot(history.history["loss"], label="loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
plt.plot(history.history["accuracy"], label="accuracy")
plt.plot(history.history["val_accuracy"], label="val_accuracy")
plt.legend()
plt.show()
"""
## Visualize attention maps from the Temporal Latent Bottleneck
Now that we have trained our model, it is time for some visualizations. The Fast Stream
(Transformers) processes a chunk of tokens. The Slow Stream processes each chunk and
attends to tokens that are useful for the task.
In this section we visualize the attention map of the Slow Stream. This is done by
extracting the attention scores from the TLB layer for each chunk and storing them on the
RNN cell while the sequence is processed. These scores are then reshaped back to the spatial
patch grid ('ballooned' up) and overlaid on the original image.
"""
def score_to_viz(chunk_score):
# get the most attended token
chunk_viz = tf.math.reduce_max(chunk_score, axis=-2)
# get the mean across heads
chunk_viz = tf.math.reduce_mean(chunk_viz, axis=1)
return chunk_viz
# Get a batch of images and labels from the testing dataset
images, labels = next(iter(test_ds))
# Set the get_attn_scores flag to True
model.rnn.cell.get_attention_scores = True
# Run the model with the testing images and grab the
# attention scores.
outputs = model(images)
list_chunk_scores = model.rnn.cell.attention_scores
# Process the attention scores in order to visualize them
list_chunk_viz = [score_to_viz(x) for x in list_chunk_scores]
chunk_viz = tf.concat(list_chunk_viz[1:], axis=-1)
chunk_viz = tf.reshape(
chunk_viz,
(
config["batch_size"],
config["image_size"] // config["patch_size"],
config["image_size"] // config["patch_size"],
1,
),
)
upsampled_heat_map = layers.UpSampling2D(
size=(4, 4), interpolation="bilinear", dtype="float32"
)(chunk_viz)
"""
Run the following code snippet to get different images and their attention maps.
"""
# Sample a random image
index = random.randint(0, len(images) - 1)  # `randint` is inclusive, so stay within the batch
orig_image = images[index]
overlay_image = upsampled_heat_map[index, ..., 0]
# Plot the visualization
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].imshow(orig_image)
ax[0].set_title("Original:")
ax[0].axis("off")
image = ax[1].imshow(orig_image)
ax[1].imshow(
overlay_image,
cmap="inferno",
alpha=0.6,
extent=image.get_extent(),
)
ax[1].set_title("TLB Attention:")
plt.show()
"""
## Conclusion
This example has demonstrated an implementation of the Temporal Latent Bottleneck
mechanism. It highlights the compression and storage of historical states in the form of a
Temporal Latent Bottleneck, with regular updates from a Perceptual Module, as an effective
way to summarize long sequences.
In the original paper, the authors have conducted highly extensive tests around different
modalities ranging from Supervised Image Classification to applications in Reinforcement
Learning.
While we have only displayed a method to apply this mechanism to Image Classification, it
can be extended to other modalities too with minimal changes.
*Note*: While building this example we did not have the official code to refer to. This
means that our implementation is inspired by the paper with no claims of being a
complete reproduction. For more details on the training process one can head over to
[our GitHub repository](https://github.com/suvadityamuk/Temporal-Latent-Bottleneck-TF).
"""
"""
## Acknowledgement
Thanks to [Aniket Didolkar](https://www.aniketdidolkar.in/) (the first author) and
[Anirudh Goyal](https://anirudh9119.github.io/) (the third author)
for reviewing our work.
We would like to thank
[PyImageSearch](https://pyimagesearch.com/) for a Colab Pro account and
[JarvisLabs.ai](https://cloud.jarvislabs.ai/) for the GPU credits.
"""
| keras-io/examples/vision/temporal_latent_bottleneck.py/0 | {
"file_path": "keras-io/examples/vision/temporal_latent_bottleneck.py",
"repo_id": "keras-io",
"token_count": 11063
} | 106 |
<jupyter_start><jupyter_text>SimSiam Training with TensorFlow Similarity and KerasCV. **Author:** [lukewood](https://lukewood.xyz), Ian Stenbit, Owen Vallis. **Date created:** 2023/01/22. **Last modified:** 2023/01/22. **Description:** Train a KerasCV model using unlabelled data with SimSiam. Overview: [TensorFlow similarity](https://github.com/tensorflow/similarity) makes it easy to train KerasCV models on unlabelled corpuses of data using contrastive learning algorithms such as SimCLR, SimSiam, and Barlow Twins. In this guide, we will train a KerasCV model using the SimSiam implementation from TensorFlow Similarity. Background: Self-supervised learning is an approach to pre-training models using unlabeled data. This approach drastically increases accuracy when you have very few labeled examples but a lot of unlabelled data. The key insight is that you can train a self-supervised model to learn data representations by contrasting multiple augmented views of the same example. These learned representations capture data invariants, e.g., object translation, color jitter, noise, etc. Training a simple linear classifier on top of the frozen representations is easier and requires fewer labels because the pre-trained model already produces meaningful and generally useful features. Overall, self-supervised pre-training learns representations which are [more generic and robust than other approaches to augmented training and pre-training](https://arxiv.org/abs/2002.05709). An overview of the general contrastive learning process is shown below: In this tutorial, we will use the [SimSiam](https://arxiv.org/abs/2011.10566) algorithm for contrastive learning. As of 2022, SimSiam is the state of the art algorithm for contrastive learning, allowing for unprecedented scores on CIFAR-100 and other datasets. You may need to install: ```pip -q install tensorflow_similarity``` and ```pip -q install keras-cv```. To get started, we will sort out some imports.<jupyter_code>import resource
import gc
import os
import random
import time
import tensorflow_addons as tfa
import keras_cv
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tabulate import tabulate
import tensorflow_similarity as tfsim # main package
import tensorflow as tf
from keras_cv import layers as cv_layers
import tensorflow_datasets as tfds<jupyter_output><empty_output><jupyter_text>Let's sort out some high level config issues and define some constants. The resource limit increase is required to load STL-10, `tfsim.utils.tf_cap_memory()` prevents TensorFlow from hogging the GPU memory in a cluster, and `tfds.disable_progress_bar()` makes tfds less noisy.<jupyter_code>low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
tfsim.utils.tf_cap_memory() # Avoid GPU memory blow up
tfds.disable_progress_bar()
BATCH_SIZE = 512
PRE_TRAIN_EPOCHS = 50
VAL_STEPS_PER_EPOCH = 20
WEIGHT_DECAY = 5e-4
INIT_LR = 3e-2 * int(BATCH_SIZE / 256)
WARMUP_LR = 0.0
WARMUP_STEPS = 0
DIM = 2048<jupyter_output><empty_output><jupyter_text>Data loading: Next, we will load the STL-10 dataset. STL-10 is a dataset consisting of 100k unlabelled images, 5k labelled training images, and 10k labelled test images. Due to this distribution, STL-10 is commonly used as a benchmark for contrastive learning models. First let's load our unlabelled data.<jupyter_code>train_ds = tfds.load("stl10", split="unlabelled")
train_ds = train_ds.map(
lambda entry: entry["image"], num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.map(
lambda image: tf.cast(image, tf.float32), num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.shuffle(buffer_size=8 * BATCH_SIZE, reshuffle_each_iteration=True)<jupyter_output><empty_output><jupyter_text>Next, we need to prepare some labelled samples. This is done so that TensorFlow similarity can probe the learned embedding to ensure that the model is learning appropriately.<jupyter_code>(x_raw_train, y_raw_train), ds_info = tfds.load(
"stl10", split="train", as_supervised=True, batch_size=-1, with_info=True
)
x_raw_train, y_raw_train = tf.cast(x_raw_train, tf.float32), tf.cast(
y_raw_train, tf.float32
)
x_test, y_test = tfds.load(
"stl10",
split="test",
as_supervised=True,
batch_size=-1,
)
x_test, y_test = tf.cast(x_test, tf.float32), tf.cast(y_test, tf.float32)<jupyter_output><empty_output><jupyter_text>In self-supervised learning, queries and indexes are labeled subset datasets used to evaluate the quality of the produced latent embedding. The following code assembles these datasets:<jupyter_code># Compute the indices for query, index, val, and train splits
query_idxs, index_idxs, val_idxs, train_idxs = [], [], [], []
for cid in range(ds_info.features["label"].num_classes):
idxs = tf.random.shuffle(tf.where(y_raw_train == cid))
idxs = tf.reshape(idxs, (-1,))
    query_idxs.extend(idxs[:100])  # 100 query examples per class
    index_idxs.extend(idxs[100:200])  # 100 index examples per class
    val_idxs.extend(idxs[200:300])  # 100 validation examples per class
train_idxs.extend(idxs[300:]) # The remaining are used for training
random.shuffle(query_idxs)
random.shuffle(index_idxs)
random.shuffle(val_idxs)
random.shuffle(train_idxs)
def create_split(idxs: list) -> tuple:
x, y = [], []
for idx in idxs:
x.append(x_raw_train[int(idx)])
y.append(y_raw_train[int(idx)])
return tf.convert_to_tensor(np.array(x), dtype=tf.float32), tf.convert_to_tensor(
np.array(y), dtype=tf.int64
)
x_query, y_query = create_split(query_idxs)
x_index, y_index = create_split(index_idxs)
x_val, y_val = create_split(val_idxs)
x_train, y_train = create_split(train_idxs)
PRE_TRAIN_STEPS_PER_EPOCH = tf.data.experimental.cardinality(train_ds) // BATCH_SIZE
PRE_TRAIN_STEPS_PER_EPOCH = int(PRE_TRAIN_STEPS_PER_EPOCH.numpy())
print(
tabulate(
[
["train", tf.data.experimental.cardinality(train_ds), None],
["val", x_val.shape, y_val.shape],
["query", x_query.shape, y_query.shape],
["index", x_index.shape, y_index.shape],
["test", x_test.shape, y_test.shape],
],
headers=["# of Examples", "Labels"],
)
)<jupyter_output><empty_output><jupyter_text>Augmentations: Self-supervised networks require at least two augmented "views" of each example. This can be created using a dataset and an augmentation function. The dataset treats each example in the batch as its own class and then the augment function produces two separate views for each example. This means the resulting batch will yield tuples containing the two views, i.e., Tuple[(BATCH_SIZE, 32, 32, 3), (BATCH_SIZE, 32, 32, 3)]. Using KerasCV, it is trivial to construct an augmenter that performs as the one described in the original SimSiam paper. Let's do that below.<jupyter_code>target_size = (96, 96)
crop_area_factor = (0.08, 1)
aspect_ratio_factor = (3 / 4, 4 / 3)
grayscale_rate = 0.2
color_jitter_rate = 0.8
brightness_factor = 0.2
contrast_factor = 0.8
saturation_factor = (0.3, 0.7)
hue_factor = 0.2
augmenter = keras.Sequential(
[
cv_layers.RandomFlip("horizontal"),
cv_layers.RandomCropAndResize(
target_size,
crop_area_factor=crop_area_factor,
aspect_ratio_factor=aspect_ratio_factor,
),
cv_layers.RandomApply(
cv_layers.Grayscale(output_channels=3), rate=grayscale_rate
),
cv_layers.RandomApply(
cv_layers.RandomColorJitter(
value_range=(0, 255),
brightness_factor=brightness_factor,
contrast_factor=contrast_factor,
saturation_factor=saturation_factor,
hue_factor=hue_factor,
),
rate=color_jitter_rate,
),
],
)<jupyter_output><empty_output><jupyter_text>Next, let's pass our images through this pipeline. Note that KerasCV supports batched augmentation, so batching before augmentation dramatically improves performance.<jupyter_code>@tf.function()
def process(img):
return augmenter(img), augmenter(img)
def prepare_dataset(dataset):
dataset = dataset.repeat()
dataset = dataset.shuffle(1024)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(process, num_parallel_calls=tf.data.AUTOTUNE)
return dataset.prefetch(tf.data.AUTOTUNE)
train_ds = prepare_dataset(train_ds)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = prepare_dataset(val_ds)
print("train_ds", train_ds)
print("val_ds", val_ds)<jupyter_output><empty_output><jupyter_text>Lets visualize our pairs using the `tfsim.visualization` utility package.<jupyter_code>display_imgs = next(train_ds.as_numpy_iterator())
max_pixel = np.max([display_imgs[0].max(), display_imgs[1].max()])
min_pixel = np.min([display_imgs[0].min(), display_imgs[1].min()])
tfsim.visualization.visualize_views(
views=display_imgs,
num_imgs=16,
views_per_col=8,
max_pixel_value=max_pixel,
min_pixel_value=min_pixel,
)<jupyter_output><empty_output><jupyter_text>Model Creation: Now that our data and augmentation pipeline is set up, we can move on to constructing the contrastive learning pipeline. First, let's produce a backbone. For this task, we will use a KerasCV ResNet18 model as the backbone.<jupyter_code>def get_backbone(input_shape):
inputs = layers.Input(shape=input_shape)
x = inputs
x = keras_cv.models.ResNet18(
input_shape=input_shape,
include_rescaling=True,
include_top=False,
pooling="avg",
)(x)
return tfsim.models.SimilarityModel(inputs, x)
backbone = get_backbone((96, 96, 3))
backbone.summary()<jupyter_output><empty_output><jupyter_text>This MLP is common to all the self-supervised models and is typically a stack of 3 layers of the same size. However, SimSiam only uses 2 layers for the smaller CIFAR images. Having too much capacity in the models can make it difficult for the loss to stabilize and converge. Note: This is the model output that is returned by `ContrastiveModel.predict()` and represents the distance-based embedding. This embedding can be used for the KNN lookups and matching classification metrics. However, when using the pre-trained model for downstream tasks, only the `ContrastiveModel.backbone` is used.<jupyter_code>def get_projector(input_dim, dim, activation="relu", num_layers: int = 3):
inputs = tf.keras.layers.Input((input_dim,), name="projector_input")
x = inputs
for i in range(num_layers - 1):
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name=f"projector_layer_{i}",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name=f"batch_normalization_{i}"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_{i}")(
x
)
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="projector_output",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5,
center=False, # Page:5, Paragraph:2 of SimSiam paper
scale=False, # Page:5, Paragraph:2 of SimSiam paper
name=f"batch_normalization_ouput",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="proj_std")(x)
projector = tf.keras.Model(inputs, o, name="projector")
return projector
projector = get_projector(input_dim=backbone.output.shape[-1], dim=DIM, num_layers=2)
projector.summary()<jupyter_output><empty_output><jupyter_text>Finally, we must construct the predictor. The predictor is used in SimSiam, and is a simple stack of two MLP layers, containing a bottleneck in the hidden layer.<jupyter_code>def get_predictor(input_dim, hidden_dim=512, activation="relu"):
inputs = tf.keras.layers.Input(shape=(input_dim,), name="predictor_input")
x = inputs
x = tf.keras.layers.Dense(
hidden_dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_layer_0",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name="batch_normalization_0"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_0")(x)
x = tf.keras.layers.Dense(
input_dim,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_output",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="pred_std")(x)
predictor = tf.keras.Model(inputs, o, name="predictor")
return predictor
predictor = get_predictor(input_dim=DIM, hidden_dim=512)
predictor.summary()<jupyter_output><empty_output><jupyter_text>Training: First, we need to initialize our training model, loss, and optimizer.<jupyter_code>loss = tfsim.losses.SimSiamLoss(projection_type="cosine_distance", name="simsiam")
contrastive_model = tfsim.models.ContrastiveModel(
backbone=backbone,
projector=projector,
predictor=predictor, # NOTE: simiam requires predictor model.
algorithm="simsiam",
name="simsiam",
)
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=INIT_LR,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
wd_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=WEIGHT_DECAY,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
optimizer = tfa.optimizers.SGDW(
learning_rate=lr_decayed_fn, weight_decay=wd_decayed_fn, momentum=0.9
)<jupyter_output><empty_output><jupyter_text>Next we can compile the model the same way you compile any other Keras model.<jupyter_code>contrastive_model.compile(
optimizer=optimizer,
loss=loss,
)<jupyter_output><empty_output><jupyter_text>We track the training using `EvalCallback`. `EvalCallback` creates an index at the end of each epoch and provides a proxy for the nearest neighbor matching classification using `binary_accuracy`. This metric calculates how often the query label matches the derived lookup label. Accuracy is technically (TP+TN)/(TP+FP+TN+FN), but here we filter all queries above the distance threshold. In the case of binary matching, this makes all the TPs and FPs below the distance threshold and all the TNs and FNs above the distance threshold. As we are only concerned with the matches below the distance threshold, the accuracy simplifies to TP/(TP+FP) and is equivalent to the precision with respect to the unfiltered queries. However, we also want to consider the query coverage at the distance threshold, i.e., the percentage of queries that return a match, computed as (TP+FP)/(TP+FP+TN+FN). Therefore, we can take $precision \times query\_coverage$ to produce a measure that captures the precision scaled by the query coverage. This simplifies down to the binary accuracy presented here, giving TP/(TP+FP+TN+FN).<jupyter_code>DATA_PATH = Path("./")
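# Note (illustrative, added for clarity): the binary accuracy described above is just
# precision scaled by query coverage:
#   TP/(TP+FP) * (TP+FP)/(TP+FP+TN+FN) = TP/(TP+FP+TN+FN)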
log_dir = DATA_PATH / "models" / "logs" / f"{loss.name}_{time.time()}"
chkpt_dir = DATA_PATH / "models" / "checkpoints" / f"{loss.name}_{time.time()}"
callbacks = [
tfsim.callbacks.EvalCallback(
tf.cast(x_query, tf.float32),
y_query,
tf.cast(x_index, tf.float32),
y_index,
metrics=["binary_accuracy"],
k=1,
tb_logdir=log_dir,
),
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=1,
update_freq=100,
),
tf.keras.callbacks.ModelCheckpoint(
filepath=chkpt_dir,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=True,
),
]<jupyter_output><empty_output><jupyter_text>All that is left to do is run fit()!<jupyter_code>print(train_ds)
print(val_ds)
history = contrastive_model.fit(
train_ds,
epochs=PRE_TRAIN_EPOCHS,
steps_per_epoch=PRE_TRAIN_STEPS_PER_EPOCH,
validation_data=val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
callbacks=callbacks,
)<jupyter_output><empty_output><jupyter_text>Plotting and Evaluation<jupyter_code>plt.figure(figsize=(15, 4))
plt.subplot(1, 3, 1)
plt.plot(history.history["loss"])
plt.grid()
plt.title(f"{loss.name} - loss")
plt.subplot(1, 3, 2)
plt.plot(history.history["proj_std"], label="proj")
if "pred_std" in history.history:
plt.plot(history.history["pred_std"], label="pred")
plt.grid()
plt.title(f"{loss.name} - std metrics")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(history.history["binary_accuracy"], label="acc")
plt.grid()
plt.title(f"{loss.name} - match metrics")
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>Fine Tuning on the Labelled Data: As a final step we will fine-tune a classifier on 10% of the training data. This will allow us to evaluate the quality of our learned representation. First, we handle data loading:<jupyter_code>eval_augmenter = keras.Sequential(
[
keras_cv.layers.RandomCropAndResize(
(96, 96), crop_area_factor=(0.8, 1.0), aspect_ratio_factor=(1.0, 1.0)
),
keras_cv.layers.RandomFlip(mode="horizontal"),
]
)
eval_train_ds = tf.data.Dataset.from_tensor_slices(
(x_raw_train, tf.keras.utils.to_categorical(y_raw_train, 10))
)
eval_train_ds = eval_train_ds.repeat()
eval_train_ds = eval_train_ds.shuffle(1024)
eval_train_ds = eval_train_ds.map(lambda x, y: (eval_augmenter(x), y), tf.data.AUTOTUNE)
eval_train_ds = eval_train_ds.batch(BATCH_SIZE)
eval_train_ds = eval_train_ds.prefetch(tf.data.AUTOTUNE)
eval_val_ds = tf.data.Dataset.from_tensor_slices(
(x_test, tf.keras.utils.to_categorical(y_test, 10))
)
eval_val_ds = eval_val_ds.repeat()
eval_val_ds = eval_val_ds.shuffle(1024)
eval_val_ds = eval_val_ds.batch(BATCH_SIZE)
eval_val_ds = eval_val_ds.prefetch(tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Benchmark Against a Naive Model: Finally, let's set up a naive model that does not leverage the unlabeled data corpus.<jupyter_code>TEST_EPOCHS = 50
TEST_STEPS_PER_EPOCH = x_raw_train.shape[0] // BATCH_SIZE
def get_eval_model(img_size, backbone, total_steps, trainable=True, lr=1.8):
backbone.trainable = trainable
inputs = tf.keras.layers.Input((img_size, img_size, 3), name="eval_input")
x = backbone(inputs, training=trainable)
o = tf.keras.layers.Dense(10, activation="softmax")(x)
model = tf.keras.Model(inputs, o)
cosine_decayed_lr = tf.keras.experimental.CosineDecay(
initial_learning_rate=lr, decay_steps=total_steps
)
opt = tf.keras.optimizers.SGD(cosine_decayed_lr, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
return model
no_pt_eval_model = get_eval_model(
img_size=96,
backbone=get_backbone((96, 96, 3)),
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=True,
lr=1e-3,
)
no_pt_history = no_pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)<jupyter_output><empty_output><jupyter_text>Pretty bad results! Let's try fine-tuning our SimSiam pretrained model:<jupyter_code>pt_eval_model = get_eval_model(
img_size=96,
backbone=contrastive_model.backbone,
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=False,
lr=30.0,
)
pt_eval_model.summary()
pt_history = pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)<jupyter_output><empty_output><jupyter_text>All that is left to do is evaluate the models:<jupyter_code>print(
"no pretrain",
no_pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)
print(
"pretrained",
pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_cv/simsiam_with_kerascv.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_cv/simsiam_with_kerascv.ipynb",
"repo_id": "keras-io",
"token_count": 7968
} | 107 |
"""
Title: SimSiam Training with TensorFlow Similarity and KerasCV
Author: [lukewood](https://lukewood.xyz), Ian Stenbit, Owen Vallis
Date created: 2023/01/22
Last modified: 2023/01/22
Description: Train a KerasCV model using unlabelled data with SimSiam.
"""
"""
## Overview
[TensorFlow similarity](https://github.com/tensorflow/similarity) makes it easy to train
KerasCV models on unlabelled corpuses of data using contrastive learning algorithms such
as SimCLR, SimSiam, and Barlow Twins. In this guide, we will train a KerasCV model
using the SimSiam implementation from TensorFlow Similarity.
## Background
Self-supervised learning is an approach to pre-training models using unlabeled data.
This approach drastically increases accuracy when you have very few labeled examples but
a lot of unlabelled data.
The key insight is that you can train a self-supervised model to learn data
representations by contrasting multiple augmented views of the same example.
These learned representations capture data invariants, e.g., object translation, color
jitter, noise, etc. Training a simple linear classifier on top of the frozen
representations is easier and requires fewer labels because the pre-trained model
already produces meaningful and generally useful features.
Overall, self-supervised pre-training learns representations which are [more generic and
robust than other approaches to augmented training and pre-training](https://arxiv.org/abs/2002.05709).
An overview of the general contrastive learning process is shown below:

In this tutorial, we will use the [SimSiam](https://arxiv.org/abs/2011.10566) algorithm
for contrastive learning. As of 2022, SimSiam is the state of the art algorithm for
contrastive learning; allowing for unprecedented scores on CIFAR-100 and other datasets.
You may need to install:
```
pip -q install tensorflow_similarity
pip -q install keras-cv
```
To get started, we will sort out some imports.
"""
import resource
import gc
import os
import random
import time
import tensorflow_addons as tfa
import keras_cv
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tabulate import tabulate
import tensorflow_similarity as tfsim # main package
import tensorflow as tf
from keras_cv import layers as cv_layers
import tensorflow_datasets as tfds
"""
Let's sort out some high level config issues and define some constants.
The resource limit increase is required to load STL-10, `tfsim.utils.tf_cap_memory()`
prevents TensorFlow from hogging the GPU memory in a cluster, and
`tfds.disable_progress_bar()` makes tfds less noisy.
"""
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
tfsim.utils.tf_cap_memory() # Avoid GPU memory blow up
tfds.disable_progress_bar()
BATCH_SIZE = 512
PRE_TRAIN_EPOCHS = 50
VAL_STEPS_PER_EPOCH = 20
WEIGHT_DECAY = 5e-4
INIT_LR = 3e-2 * int(BATCH_SIZE / 256)
WARMUP_LR = 0.0
WARMUP_STEPS = 0
DIM = 2048
"""
## Data loading
Next, we will load the STL-10 dataset. STL-10 is a dataset consisting of 100k unlabelled
images, 5k labelled training images, and 10k labelled test images. Due to this distribution,
STL-10 is commonly used as a benchmark for contrastive learning models.
First let's load our unlabelled data
"""
train_ds = tfds.load("stl10", split="unlabelled")
train_ds = train_ds.map(
lambda entry: entry["image"], num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.map(
lambda image: tf.cast(image, tf.float32), num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.shuffle(buffer_size=8 * BATCH_SIZE, reshuffle_each_iteration=True)
"""
Next, we need to prepare some labelled samples.
This is done so that TensorFlow similarity can probe the learned embedding to ensure
that the model is learning appropriately.
"""
(x_raw_train, y_raw_train), ds_info = tfds.load(
"stl10", split="train", as_supervised=True, batch_size=-1, with_info=True
)
x_raw_train, y_raw_train = tf.cast(x_raw_train, tf.float32), tf.cast(
y_raw_train, tf.float32
)
x_test, y_test = tfds.load(
"stl10",
split="test",
as_supervised=True,
batch_size=-1,
)
x_test, y_test = tf.cast(x_test, tf.float32), tf.cast(y_test, tf.float32)
"""
In self-supervised learning, queries and indexes are labeled subset datasets used to
evaluate the quality of the produced latent embedding. The following code assembles
these datasets:
"""
# Compute the indices for query, index, val, and train splits
query_idxs, index_idxs, val_idxs, train_idxs = [], [], [], []
for cid in range(ds_info.features["label"].num_classes):
idxs = tf.random.shuffle(tf.where(y_raw_train == cid))
idxs = tf.reshape(idxs, (-1,))
    query_idxs.extend(idxs[:100])  # 100 query examples per class
    index_idxs.extend(idxs[100:200])  # 100 index examples per class
    val_idxs.extend(idxs[200:300])  # 100 validation examples per class
train_idxs.extend(idxs[300:]) # The remaining are used for training
random.shuffle(query_idxs)
random.shuffle(index_idxs)
random.shuffle(val_idxs)
random.shuffle(train_idxs)
def create_split(idxs: list) -> tuple:
x, y = [], []
for idx in idxs:
x.append(x_raw_train[int(idx)])
y.append(y_raw_train[int(idx)])
return tf.convert_to_tensor(np.array(x), dtype=tf.float32), tf.convert_to_tensor(
np.array(y), dtype=tf.int64
)
x_query, y_query = create_split(query_idxs)
x_index, y_index = create_split(index_idxs)
x_val, y_val = create_split(val_idxs)
x_train, y_train = create_split(train_idxs)
PRE_TRAIN_STEPS_PER_EPOCH = tf.data.experimental.cardinality(train_ds) // BATCH_SIZE
PRE_TRAIN_STEPS_PER_EPOCH = int(PRE_TRAIN_STEPS_PER_EPOCH.numpy())
print(
tabulate(
[
["train", tf.data.experimental.cardinality(train_ds), None],
["val", x_val.shape, y_val.shape],
["query", x_query.shape, y_query.shape],
["index", x_index.shape, y_index.shape],
["test", x_test.shape, y_test.shape],
],
headers=["# of Examples", "Labels"],
)
)
"""
## Augmentations
Self-supervised networks require at least two augmented "views" of each example.
This can be created using a dataset and an augmentation function.
The dataset treats each example in the batch as its own class and then the augment
function produces two separate views for each example.
This means the resulting batch will yield tuples containing the two views, i.e.,
Tuple[(BATCH_SIZE, 32, 32, 3), (BATCH_SIZE, 32, 32, 3)].
Using KerasCV, it is trivial to construct an augmenter that performs as the one
described in the original SimSiam paper. Let's do that below.
"""
target_size = (96, 96)
crop_area_factor = (0.08, 1)
aspect_ratio_factor = (3 / 4, 4 / 3)
grayscale_rate = 0.2
color_jitter_rate = 0.8
brightness_factor = 0.2
contrast_factor = 0.8
saturation_factor = (0.3, 0.7)
hue_factor = 0.2
augmenter = keras.Sequential(
[
cv_layers.RandomFlip("horizontal"),
cv_layers.RandomCropAndResize(
target_size,
crop_area_factor=crop_area_factor,
aspect_ratio_factor=aspect_ratio_factor,
),
cv_layers.RandomApply(
cv_layers.Grayscale(output_channels=3), rate=grayscale_rate
),
cv_layers.RandomApply(
cv_layers.RandomColorJitter(
value_range=(0, 255),
brightness_factor=brightness_factor,
contrast_factor=contrast_factor,
saturation_factor=saturation_factor,
hue_factor=hue_factor,
),
rate=color_jitter_rate,
),
],
)
"""
Next, let's pass our images through this pipeline.
Note that KerasCV supports batched augmentation, so batching before
augmentation dramatically improves performance.
"""
@tf.function()
def process(img):
return augmenter(img), augmenter(img)
def prepare_dataset(dataset):
dataset = dataset.repeat()
dataset = dataset.shuffle(1024)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(process, num_parallel_calls=tf.data.AUTOTUNE)
return dataset.prefetch(tf.data.AUTOTUNE)
train_ds = prepare_dataset(train_ds)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = prepare_dataset(val_ds)
print("train_ds", train_ds)
print("val_ds", val_ds)
"""
Let's visualize our pairs using the `tfsim.visualization` utility package.
"""
display_imgs = next(train_ds.as_numpy_iterator())
max_pixel = np.max([display_imgs[0].max(), display_imgs[1].max()])
min_pixel = np.min([display_imgs[0].min(), display_imgs[1].min()])
tfsim.visualization.visualize_views(
views=display_imgs,
num_imgs=16,
views_per_col=8,
max_pixel_value=max_pixel,
min_pixel_value=min_pixel,
)
"""
## Model Creation
Now that our data and augmentation pipeline are set up, we can move on to
constructing the contrastive learning pipeline. First, let's produce a backbone.
For this task, we will use a KerasCV ResNet18 model as the backbone.
"""
def get_backbone(input_shape):
inputs = layers.Input(shape=input_shape)
x = inputs
x = keras_cv.models.ResNet18(
input_shape=input_shape,
include_rescaling=True,
include_top=False,
pooling="avg",
)(x)
return tfsim.models.SimilarityModel(inputs, x)
backbone = get_backbone((96, 96, 3))
backbone.summary()
"""
This MLP is common to all the self-supervised models and is typically a stack of 3
layers of the same size. However, SimSiam only uses 2 layers for the smaller CIFAR
images. Having too much capacity in the models can make it difficult for the loss to
stabilize and converge.
Note: This is the model output that is returned by `ContrastiveModel.predict()` and
represents the distance based embedding. This embedding can be used for the KNN
lookups and matching classification metrics. However, when using the pre-train
model for downstream tasks, only the `ContrastiveModel.backbone` is used.
"""
def get_projector(input_dim, dim, activation="relu", num_layers: int = 3):
inputs = tf.keras.layers.Input((input_dim,), name="projector_input")
x = inputs
for i in range(num_layers - 1):
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name=f"projector_layer_{i}",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name=f"batch_normalization_{i}"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_{i}")(
x
)
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="projector_output",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5,
center=False, # Page:5, Paragraph:2 of SimSiam paper
scale=False, # Page:5, Paragraph:2 of SimSiam paper
name=f"batch_normalization_ouput",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="proj_std")(x)
projector = tf.keras.Model(inputs, o, name="projector")
return projector
projector = get_projector(input_dim=backbone.output.shape[-1], dim=DIM, num_layers=2)
projector.summary()
"""
Finally, we must construct the predictor. The predictor is used in SimSiam, and is a
simple stack of two MLP layers, containing a bottleneck in the hidden layer.
"""
def get_predictor(input_dim, hidden_dim=512, activation="relu"):
inputs = tf.keras.layers.Input(shape=(input_dim,), name="predictor_input")
x = inputs
x = tf.keras.layers.Dense(
hidden_dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_layer_0",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name="batch_normalization_0"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_0")(x)
x = tf.keras.layers.Dense(
input_dim,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_output",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="pred_std")(x)
predictor = tf.keras.Model(inputs, o, name="predictor")
return predictor
predictor = get_predictor(input_dim=DIM, hidden_dim=512)
predictor.summary()
"""
## Training
First, we need to initialize our training model, loss, and optimizer.
"""
loss = tfsim.losses.SimSiamLoss(projection_type="cosine_distance", name="simsiam")
contrastive_model = tfsim.models.ContrastiveModel(
backbone=backbone,
projector=projector,
    predictor=predictor,  # NOTE: SimSiam requires a predictor model.
algorithm="simsiam",
name="simsiam",
)
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=INIT_LR,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
wd_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=WEIGHT_DECAY,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
optimizer = tfa.optimizers.SGDW(
learning_rate=lr_decayed_fn, weight_decay=wd_decayed_fn, momentum=0.9
)
"""
Next we can compile the model the same way you compile any other Keras model.
"""
contrastive_model.compile(
optimizer=optimizer,
loss=loss,
)
"""
We track the training using `EvalCallback`.
`EvalCallback` creates an index at the end of each epoch and provides a proxy for the
nearest neighbor matching classification using `binary_accuracy`.
This metric measures how often the query label matches the label of the retrieved
nearest neighbor.
Accuracy is technically (TP+TN)/(TP+FP+TN+FN), but here we filter out all
queries above the distance threshold. In the case of binary matching, this
places all the TPs and FPs below the distance threshold and all the TNs and
FNs above the distance threshold.
As we are only concerned with the matches below the distance threshold, the
accuracy simplifies to TP/(TP+FP) and is equivalent to the precision with
respect to the unfiltered queries. However, we also want to consider the
query coverage at the distance threshold, i.e., the percentage of queries
that return a match, computed as (TP+FP)/(TP+FP+TN+FN). Therefore, we can
take precision * query_coverage to produce a measure that captures
the precision scaled by the query coverage. This simplifies down to the
binary accuracy presented here, giving TP/(TP+FP+TN+FN).
A tiny numeric sketch of this simplification follows below.
"""
DATA_PATH = Path("./")
log_dir = DATA_PATH / "models" / "logs" / f"{loss.name}_{time.time()}"
chkpt_dir = DATA_PATH / "models" / "checkpoints" / f"{loss.name}_{time.time()}"
callbacks = [
tfsim.callbacks.EvalCallback(
tf.cast(x_query, tf.float32),
y_query,
tf.cast(x_index, tf.float32),
y_index,
metrics=["binary_accuracy"],
k=1,
tb_logdir=log_dir,
),
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=1,
update_freq=100,
),
tf.keras.callbacks.ModelCheckpoint(
filepath=chkpt_dir,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=True,
),
]
"""
All that is left to do is run fit()!
"""
print(train_ds)
print(val_ds)
history = contrastive_model.fit(
train_ds,
epochs=PRE_TRAIN_EPOCHS,
steps_per_epoch=PRE_TRAIN_STEPS_PER_EPOCH,
validation_data=val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
callbacks=callbacks,
)
"""
## Plotting and Evaluation
"""
plt.figure(figsize=(15, 4))
plt.subplot(1, 3, 1)
plt.plot(history.history["loss"])
plt.grid()
plt.title(f"{loss.name} - loss")
plt.subplot(1, 3, 2)
plt.plot(history.history["proj_std"], label="proj")
if "pred_std" in history.history:
plt.plot(history.history["pred_std"], label="pred")
plt.grid()
plt.title(f"{loss.name} - std metrics")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(history.history["binary_accuracy"], label="acc")
plt.grid()
plt.title(f"{loss.name} - match metrics")
plt.legend()
plt.show()
"""
## Fine Tuning on the Labelled Data
As a final step, we will fine-tune a classifier on 10% of the training data. This will
allow us to evaluate the quality of our learned representation. First, we handle data
loading:
"""
eval_augmenter = keras.Sequential(
[
keras_cv.layers.RandomCropAndResize(
(96, 96), crop_area_factor=(0.8, 1.0), aspect_ratio_factor=(1.0, 1.0)
),
keras_cv.layers.RandomFlip(mode="horizontal"),
]
)
eval_train_ds = tf.data.Dataset.from_tensor_slices(
(x_raw_train, tf.keras.utils.to_categorical(y_raw_train, 10))
)
eval_train_ds = eval_train_ds.repeat()
eval_train_ds = eval_train_ds.shuffle(1024)
eval_train_ds = eval_train_ds.map(lambda x, y: (eval_augmenter(x), y), tf.data.AUTOTUNE)
eval_train_ds = eval_train_ds.batch(BATCH_SIZE)
eval_train_ds = eval_train_ds.prefetch(tf.data.AUTOTUNE)
eval_val_ds = tf.data.Dataset.from_tensor_slices(
(x_test, tf.keras.utils.to_categorical(y_test, 10))
)
eval_val_ds = eval_val_ds.repeat()
eval_val_ds = eval_val_ds.shuffle(1024)
eval_val_ds = eval_val_ds.batch(BATCH_SIZE)
eval_val_ds = eval_val_ds.prefetch(tf.data.AUTOTUNE)
"""
## Benchmark Against a Naive Model
Finally, let's set up a naive model that does not leverage the unlabeled data corpus.
"""
TEST_EPOCHS = 50
TEST_STEPS_PER_EPOCH = x_raw_train.shape[0] // BATCH_SIZE
def get_eval_model(img_size, backbone, total_steps, trainable=True, lr=1.8):
backbone.trainable = trainable
inputs = tf.keras.layers.Input((img_size, img_size, 3), name="eval_input")
x = backbone(inputs, training=trainable)
o = tf.keras.layers.Dense(10, activation="softmax")(x)
model = tf.keras.Model(inputs, o)
cosine_decayed_lr = tf.keras.experimental.CosineDecay(
initial_learning_rate=lr, decay_steps=total_steps
)
opt = tf.keras.optimizers.SGD(cosine_decayed_lr, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
return model
no_pt_eval_model = get_eval_model(
img_size=96,
backbone=get_backbone((96, 96, 3)),
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=True,
lr=1e-3,
)
no_pt_history = no_pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)
"""
Pretty bad results! Let's try fine-tuning our SimSiam pretrained model:
"""
pt_eval_model = get_eval_model(
img_size=96,
backbone=contrastive_model.backbone,
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=False,
lr=30.0,
)
pt_eval_model.summary()
pt_history = pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)
"""
All that is left to do is evaluate the models:
"""
print(
"no pretrain",
no_pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)
print(
"pretrained",
pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)
"""
Awesome! Our pretrained model stomped the non-pretrained model.
This accuracy is quite good for a ResNet18 on the STL-10 dataset.
For better results, try using an EfficientNetV2B0 instead.
Unfortunately, this will require a higher end graphics card as
SimSiam has a minimum batch size of 512.
## Conclusion
TensorFlow Similarity can be used to easily train KerasCV models using
contrastive algorithms such as SimCLR, SimSiam and BarlowTwins.
This allows you to leverage large corpora of unlabelled data in your
model training pipeline.
Some follow-up exercises to this tutorial:
- Train a [`keras_cv.models.EfficientNetV2B0`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/efficientnet_v2.py)
on STL-10
- Experiment with other data augmentation techniques in pretraining
- Train a model using the [BarlowTwins implementation](https://github.com/tensorflow/similarity/blob/master/examples/unsupervised_hello_world.ipynb) in TensorFlow Similarity
- Try pretraining on your own dataset
"""
| keras-io/guides/keras_cv/simsiam_with_kerascv.py/0 | {
"file_path": "keras-io/guides/keras_cv/simsiam_with_kerascv.py",
"repo_id": "keras-io",
"token_count": 8009
} | 108 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/modeling_layers/fnet_encoder'" />
| keras-io/redirects/api/keras_nlp/layers/fnet_encoder/index.html/0 | {
"file_path": "keras-io/redirects/api/keras_nlp/layers/fnet_encoder/index.html",
"repo_id": "keras-io",
"token_count": 46
} | 109 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/applications/'" />
| keras-io/redirects/applications/index.html/0 | {
"file_path": "keras-io/redirects/applications/index.html",
"repo_id": "keras-io",
"token_count": 33
} | 110 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/activation_layers/'" />
| keras-io/redirects/layers/advanced-activations/index.html/0 | {
"file_path": "keras-io/redirects/layers/advanced-activations/index.html",
"repo_id": "keras-io",
"token_count": 37
} | 111 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/models/sequential/'" />
| keras-io/redirects/models/sequential/index.html/0 | {
"file_path": "keras-io/redirects/models/sequential/index.html",
"repo_id": "keras-io",
"token_count": 34
} | 112 |
"""Lightweight fork of Keras-Autodocs.
"""
import warnings
import black
import re
import inspect
import importlib
import itertools
import render_tags
class KerasDocumentationGenerator:
def __init__(self, project_url=None):
self.project_url = project_url
def process_docstring(self, docstring):
docstring = docstring.replace("Args:", "# Arguments")
docstring = docstring.replace("Arguments:", "# Arguments")
docstring = docstring.replace("Attributes:", "# Attributes")
docstring = docstring.replace("Returns:", "# Returns")
docstring = docstring.replace("Raises:", "# Raises")
docstring = docstring.replace("Input shape:", "# Input shape")
docstring = docstring.replace("Output shape:", "# Output shape")
docstring = docstring.replace("Call arguments:", "# Call arguments")
docstring = docstring.replace("Returns:", "# Returns")
docstring = docstring.replace("Example:", "# Example\n")
docstring = docstring.replace("Examples:", "# Examples\n")
docstring = re.sub(r"\nReference:\n\s*", "\n**Reference**\n\n", docstring)
docstring = re.sub(r"\nReferences:\n\s*", "\n**References**\n\n", docstring)
        # De-indent doctest prompts so they are detected below.
docstring = docstring.replace("\n >>> ", "\n>>> ")
lines = docstring.split("\n")
doctest_lines = []
usable_lines = []
        def flush_doctest(usable_lines, doctest_lines):
usable_lines.append("```python")
usable_lines += doctest_lines
usable_lines.append("```")
usable_lines.append("")
for line in lines:
if doctest_lines:
if not line or set(line) == {" "}:
                    flush_doctest(usable_lines, doctest_lines)
doctest_lines = []
else:
doctest_lines.append(line)
else:
if line.startswith(">>>"):
doctest_lines.append(line)
else:
usable_lines.append(line)
if doctest_lines:
            flush_doctest(usable_lines, doctest_lines)
docstring = "\n".join(usable_lines)
return process_docstring(docstring)
def process_signature(self, signature):
signature = signature.replace("tensorflow.keras", "tf.keras")
signature = signature.replace("*args, **kwargs", "")
return signature
def render(self, element):
if isinstance(element, str):
object_ = import_object(element)
if ismethod(object_):
# we remove the modules when displaying the methods
signature_override = ".".join(element.split(".")[-2:])
else:
signature_override = element
else:
signature_override = None
object_ = element
return self.render_from_object(object_, signature_override, element)
def render_from_object(self, object_, signature_override: str, element):
subblocks = []
source_link = make_source_link(object_, self.project_url)
if source_link is not None:
subblocks.append(source_link)
signature = get_signature(object_, signature_override)
signature = self.process_signature(signature)
subblocks.append(f"### `{get_name(object_)}` {get_type(object_)}\n")
subblocks.append(code_snippet(signature))
docstring = inspect.getdoc(object_)
if docstring:
docstring = self.process_docstring(docstring)
subblocks.append(docstring)
# Render preset table for KerasCV and KerasNLP
if element.endswith("from_preset"):
table = render_tags.render_table(import_object(element.rsplit(".", 1)[0]))
if table is not None:
subblocks.append(table)
return "\n\n".join(subblocks) + "\n\n----\n\n"
def ismethod(function):
return get_class_from_method(function) is not None
def import_object(string: str):
"""Import an object from a string.
The object can be a function, class or method.
For example: `'keras.layers.Dense.get_weights'` is valid.
"""
last_object_got = None
seen_names = []
for name in string.split("."):
seen_names.append(name)
try:
last_object_got = importlib.import_module(".".join(seen_names))
except ModuleNotFoundError:
assert last_object_got is not None, f"Failed to import path {string}"
last_object_got = getattr(last_object_got, name)
return last_object_got
def make_source_link(cls, project_url):
if not hasattr(cls, "__module__"):
return None
if not project_url:
return None
base_module = cls.__module__.split(".")[0]
project_url = project_url[base_module]
assert project_url.endswith("/"), f"{base_module} not found"
project_url_version = project_url.split("/")[-2].removeprefix("v")
module_version = importlib.import_module(base_module).__version__
if module_version != project_url_version:
raise RuntimeError(
f"For project {base_module}, URL {project_url} "
f"has version number {project_url_version} which does not match the "
f"current imported package version {module_version}"
)
path = cls.__module__.replace(".", "/")
if base_module in ("keras_nlp", "keras_cv", "keras", "tf_keras"):
path = path.replace("/src/", "/")
line = inspect.getsourcelines(cls)[-1]
return (
f'<span style="float:right;">'
f"[[source]]({project_url}/{path}.py#L{line})"
f"</span>"
)
def code_snippet(snippet):
return f"```python\n{snippet}\n```\n"
def get_type(object_) -> str:
if inspect.isclass(object_):
return "class"
elif ismethod(object_):
return "method"
elif inspect.isfunction(object_):
return "function"
elif hasattr(object_, "fget"):
return "property"
else:
raise TypeError(
f"{object_} is detected as not a class, a method, "
f"a property, nor a function."
)
def get_name(object_) -> str:
if hasattr(object_, "fget"):
return object_.fget.__name__
return object_.__name__
def get_function_name(function):
if hasattr(function, "__wrapped__"):
return get_function_name(function.__wrapped__)
return function.__name__
def get_signature_start(function):
"""For the Dense layer, it should return the string 'keras.layers.Dense'"""
if ismethod(function):
prefix = f"{get_class_from_method(function).__name__}."
else:
try:
prefix = f"{function.__module__}."
except AttributeError:
warnings.warn(
f"function {function} has no module. "
f"It will not be included in the signature."
)
prefix = ""
return f"{prefix}{get_function_name(function)}"
def get_signature_end(function):
params = inspect.signature(function).parameters.values()
signature_end = "(" + ", ".join([str(x) for x in params]) + ")"
if ismethod(function):
signature_end = signature_end.replace("(self, ", "(")
signature_end = signature_end.replace("(self)", "()")
# work around case-specific bug
signature_end = signature_end.replace(
"synchronization=<VariableSynchronization.AUTO: 0>, aggregation=<VariableAggregationV2.NONE: 0>",
"synchronization=tf.VariableSynchronization.AUTO, aggregation=tf.VariableSynchronization.NONE",
)
return signature_end
def get_function_signature(function, override=None):
if override is None:
signature_start = get_signature_start(function)
else:
signature_start = override
signature_end = get_signature_end(function)
return format_signature(signature_start, signature_end)
def get_class_signature(cls, override=None):
if override is None:
signature_start = f"{cls.__module__}.{cls.__name__}"
else:
signature_start = override
signature_end = get_signature_end(cls.__init__)
return format_signature(signature_start, signature_end)
def get_signature(object_, override):
if inspect.isclass(object_):
return get_class_signature(object_, override)
elif inspect.isfunction(object_) or inspect.ismethod(object_):
return get_function_signature(object_, override)
elif hasattr(object_, "fget"):
# properties
if override:
return override
return get_function_signature(object_.fget)
raise ValueError(f"Not able to retrieve signature for object {object_}")
def format_signature(signature_start: str, signature_end: str):
"""pretty formatting to avoid long signatures on one single line"""
# first, we make it look like a real function declaration.
fake_signature_start = "x" * len(signature_start)
fake_signature = fake_signature_start + signature_end
fake_python_code = f"def {fake_signature}:\n pass\n"
# we format with black
mode = black.FileMode(line_length=90)
formatted_fake_python_code = black.format_str(fake_python_code, mode=mode)
# we make the final, multiline signature
new_signature_end = extract_signature_end(formatted_fake_python_code)
return signature_start + new_signature_end
def extract_signature_end(function_definition):
start = function_definition.find("(")
stop = function_definition.rfind(")")
return function_definition[start : stop + 1]
def get_code_blocks(docstring):
code_blocks = {}
tmp = docstring[:]
while "```" in tmp:
tmp = tmp[tmp.find("```") :]
index = tmp[3:].find("```") + 6
snippet = tmp[:index]
# Place marker in docstring for later reinjection.
        # Format the index with 4 digits so the marker token stays unique.
token = f"$KERAS_AUTODOC_CODE_BLOCK_{len(code_blocks):04d}"
docstring = docstring.replace(snippet, token)
code_blocks[token] = snippet
tmp = tmp[index:]
return code_blocks, docstring
def get_section_end(docstring, section_start):
regex_indented_sections_end = re.compile(r"\S\n+(\S|$)")
end = re.search(regex_indented_sections_end, docstring[section_start:])
section_end = section_start + end.end()
if section_end == len(docstring):
return section_end
else:
return section_end - 2
def get_google_style_sections_without_code(docstring):
regex_indented_sections_start = re.compile(r"\n# .+?\n")
google_style_sections = {}
for i in itertools.count():
match = re.search(regex_indented_sections_start, docstring)
if match is None:
break
section_start = match.start() + 1
section_end = get_section_end(docstring, section_start)
google_style_section = docstring[section_start:section_end]
token = f"KERAS_AUTODOC_GOOGLE_STYLE_SECTION_{i}"
google_style_sections[token] = google_style_section
docstring = insert_in_string(docstring, token, section_start, section_end)
return google_style_sections, docstring
def get_google_style_sections(docstring):
# First, extract code blocks and process them.
# The parsing is easier if the #, : and other symbols aren't there.
code_blocks, docstring = get_code_blocks(docstring)
google_style_sections, docstring = get_google_style_sections_without_code(docstring)
docstring = reinject_strings(docstring, code_blocks)
    for section_token, section in google_style_sections.items():
        google_style_sections[section_token] = reinject_strings(section, code_blocks)
return google_style_sections, docstring
def to_markdown(google_style_section: str) -> str:
end_first_line = google_style_section.find("\n")
section_title = google_style_section[2:end_first_line]
section_body = google_style_section[end_first_line:]
section_body = remove_indentation(section_body)
if section_title in (
"Arguments",
"Attributes",
"Raises",
"Call arguments",
"Returns",
):
section_body = format_as_markdown_list(section_body)
if section_body:
return f"__{section_title}__\n\n{section_body}\n"
else:
return f"__{section_title}__\n"
def format_as_markdown_list(section_body):
section_body = re.sub(r"\n([^ ].*?):", r"\n- __\1__:", section_body)
section_body = re.sub(r"^([^ ].*?):", r"- __\1__:", section_body)
# Switch to 2-space indent so we can render nested lists.
section_body = section_body.replace("\n ", "\n ")
return section_body
def reinject_strings(target, strings_to_inject):
for token, string_to_inject in strings_to_inject.items():
target = target.replace(token, string_to_inject)
return target
def process_docstring(docstring):
if docstring[-1] != "\n":
docstring += "\n"
google_style_sections, docstring = get_google_style_sections(docstring)
for token, google_style_section in google_style_sections.items():
markdown_section = to_markdown(google_style_section)
docstring = docstring.replace(token, markdown_section)
return docstring
def get_class_from_method(meth):
if inspect.ismethod(meth):
for cls in inspect.getmro(meth.__self__.__class__):
if cls.__dict__.get(meth.__name__) is meth:
return cls
meth = meth.__func__ # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(
inspect.getmodule(meth),
meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0],
)
if isinstance(cls, type):
return cls
return getattr(meth, "__objclass__", None) # handle special descriptor objects
def insert_in_string(target, string_to_insert, start, end):
target_start_cut = target[:start]
target_end_cut = target[end:]
return target_start_cut + string_to_insert + target_end_cut
def remove_indentation(string):
lines = string.split("\n")
leading_spaces = [count_leading_spaces(l) for l in lines if l]
if leading_spaces:
min_leading_spaces = min(leading_spaces)
string = "\n".join(l[min_leading_spaces:] for l in lines)
return string.strip() # Drop leading/closing empty lines
def count_leading_spaces(s):
ws = re.search(r"\S", s)
if ws:
return ws.start()
return 0
| keras-io/scripts/docstrings.py/0 | {
"file_path": "keras-io/scripts/docstrings.py",
"repo_id": "keras-io",
"token_count": 6006
} | 113 |
# Data loading
Keras data loading utilities, located in `keras.utils`,
help you go from raw data on disk to a `tf.data.Dataset` object that can be
used to efficiently train a model.
These loading utilities can be combined with
[preprocessing layers](https://keras.io/api/layers/preprocessing_layers/) to
further transform your input dataset before training (a short example of this is
shown after the quick-start snippet below).
Here's a quick example: let's say you have 10 folders, each containing
10,000 images from a different category, and you want to train a
classifier that maps an image to its category.
Your training data folder would look like this:
```
training_data/
...class_a/
......a_image_1.jpg
......a_image_2.jpg
...class_b/
......b_image_1.jpg
......b_image_2.jpg
etc.
```
You may also have a validation data folder `validation_data/` structured in the
same way.
You could simply do:
```python
import keras
train_ds = keras.utils.image_dataset_from_directory(
directory='training_data/',
labels='inferred',
label_mode='categorical',
batch_size=32,
image_size=(256, 256))
validation_ds = keras.utils.image_dataset_from_directory(
directory='validation_data/',
labels='inferred',
label_mode='categorical',
batch_size=32,
image_size=(256, 256))
model = keras.applications.Xception(
weights=None, input_shape=(256, 256, 3), classes=10)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit(train_ds, epochs=10, validation_data=validation_ds)
```
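These datasets can then be chained with preprocessing layers. For instance, you
could rescale the loaded images with a `Rescaling` layer (a minimal sketch
reusing the `train_ds` defined above):
```python
rescale = keras.layers.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda images, labels: (rescale(images), labels))
```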
## Available dataset loading utilities
{{toc}}
| keras-io/templates/api/data_loading/index.md/0 | {
"file_path": "keras-io/templates/api/data_loading/index.md",
"repo_id": "keras-io",
"token_count": 505
} | 114 |
# KerasNLP Tokenizers
Tokenizers convert raw string input into integer input suitable for a Keras `Embedding` layer.
They can also convert back from predicted integer sequences to raw string output.
All tokenizers subclass `keras_nlp.tokenizers.Tokenizer`, which in turn
subclasses `keras.layers.Layer`. Tokenizers should generally be applied inside a
[tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map)
for training, and can be included inside a `keras.Model` for inference.
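For instance, a `WordPieceTokenizer` can be mapped over a `tf.data.Dataset` of
strings. Below is a minimal sketch with a toy vocabulary (the vocabulary and
sample sentence are illustrative only):
```python
import tensorflow as tf

import keras_nlp

vocab = ["[UNK]", "the", "qu", "##ick", "fox"]
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
    vocabulary=vocab, sequence_length=8
)

ds = tf.data.Dataset.from_tensor_slices(["the quick fox"])
ds = ds.map(tokenizer, num_parallel_calls=tf.data.AUTOTUNE)
```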
{{toc}}
| keras-io/templates/api/keras_nlp/tokenizers/index.md/0 | {
"file_path": "keras-io/templates/api/keras_nlp/tokenizers/index.md",
"repo_id": "keras-io",
"token_count": 150
} | 115 |
# Mixed precision
## What is mixed precision training?
Mixed precision training is the use of lower-precision operations (`float16` and `bfloat16`) in a model
during training to make it run faster and use less memory.
Using mixed precision can improve performance by more than 3 times on modern GPUs and 60% on TPUs.
Today, most models use the `float32` dtype, which takes 32 bits of memory.
However, there are two lower-precision dtypes, `float16` and `bfloat16`,
each of which takes 16 bits of memory instead. Modern accelerators like Google TPUs and NVIDIA GPUs
can run operations faster in the 16-bit dtypes,
as they have specialized hardware to run 16-bit computations and 16-bit dtypes can be read from memory faster.
Therefore, these lower-precision dtypes should be used whenever possible on those devices.
However, variable storage (as well as certain sensitive computations) should still be in `float32`
to preserve numerical stability. By using 16-bit precision whenever possible and keeping certain critical
parts of the model in `float32`, the model will run faster,
while training just as well as it would with 32-bit precision.
## Using mixed precision training in Keras
The precision policy used by Keras layers or models is controlled by a `keras.mixed_precision.DTypePolicy` instance.
Each layer has its own `DTypePolicy`. You can either set it on an individual layer via the `dtype` argument
(e.g. `MyLayer(..., dtype="mixed_float16")`), or you can set a global value to be used by all layers by
default, via the utility `keras.mixed_precision.set_global_policy`.
Typically, to start using mixed precision on GPU, you would simply call `keras.mixed_precision.set_global_policy("mixed_float16")`
at the start of your program. On TPU, you would call `keras.mixed_precision.set_global_policy("mixed_bfloat16")`.
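For example (a minimal sketch; the layer sizes are arbitrary):
```python
import keras

# Set the global policy once, at program startup.
keras.mixed_precision.set_global_policy("mixed_float16")

# Layers created afterwards compute in float16 while keeping float32 variables.
hidden = keras.layers.Dense(64, activation="relu")

# Numerically sensitive layers, such as the final softmax, can be kept in
# float32 by overriding the dtype on that layer.
outputs = keras.layers.Dense(10, activation="softmax", dtype="float32")
```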
## API documentation
{{toc}}
## Supported hardware
While mixed precision will run on most hardware, it will only speed up models on recent NVIDIA GPUs and Google TPUs.
NVIDIA GPUs support using a mix of float16 and float32, while TPUs support a mix of bfloat16 and float32.
Among NVIDIA GPUs, those with compute capability 7.0 or higher will see the greatest performance benefit
from mixed precision because they have special hardware units, called Tensor Cores,
to accelerate float16 matrix multiplications and convolutions. Older GPUs offer no math
performance benefit for using mixed precision; however, memory and bandwidth savings can enable some speedups.
You can look up the compute capability for your GPU at NVIDIA's [CUDA GPU web page](https://developer.nvidia.com/cuda-gpus).
Examples of GPUs that will benefit most from mixed precision include RTX GPUs, the V100, and the A100.
Even on CPUs and older GPUs, where no speedup is expected, mixed precision APIs can still be used for unit testing,
debugging, or just to try out the API. On CPUs, mixed precision will run significantly slower, however.
You can check your GPU type with the following command:
```
nvidia-smi -L
``` | keras-io/templates/api/mixed_precision/index.md/0 | {
"file_path": "keras-io/templates/api/mixed_precision/index.md",
"repo_id": "keras-io",
"token_count": 765
} | 116 |
# KerasCV
These guides cover the [KerasCV](/keras_cv/) library.
## Available guides
{{toc}}
| keras-io/templates/guides/keras_cv/index.md/0 | {
"file_path": "keras-io/templates/guides/keras_cv/index.md",
"repo_id": "keras-io",
"token_count": 35
} | 117 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import sys
import tensorflow as tf
from absl import app
from absl import flags
from absl import logging
from tensorflow import keras
import keras_nlp
from examples.bert_pretraining.bert_config import MODEL_CONFIGS
from examples.bert_pretraining.bert_config import PREPROCESSING_CONFIG
from examples.bert_pretraining.bert_config import TRAINING_CONFIG
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input_directory",
None,
"The directory of training data. It can be a local disk path, or the URL "
"of Google cloud storage bucket.",
)
flags.DEFINE_string(
"saved_model_output",
None,
"Output directory to save the model to.",
)
flags.DEFINE_string(
"checkpoint_save_directory",
None,
"Output directory to save checkpoints to.",
)
flags.DEFINE_bool(
"skip_restore",
False,
"Skip restoring from checkpoint if True",
)
flags.DEFINE_string(
"tpu_name",
None,
"The TPU to connect to. If None, TPU will not be used.",
)
flags.DEFINE_bool(
"enable_cloud_logging",
False,
"If True, the script will use cloud logging.",
)
flags.DEFINE_string(
"tensorboard_log_path",
None,
"The path to save tensorboard log to.",
)
flags.DEFINE_string(
"model_size",
"tiny",
"One of: tiny, mini, small, medium, base, or large.",
)
flags.DEFINE_string(
"vocab_file",
None,
"The vocabulary file for tokenization.",
)
flags.DEFINE_integer(
"num_train_steps",
None,
"Override the pre-configured number of train steps..",
)
class MaskedLMHead(keras.layers.Layer):
"""Masked language model network head for BERT.
This layer implements a masked language model based on the provided
transformer based encoder. It assumes that the encoder network being passed
has a "get_embedding_table()" method.
Example:
```python
encoder = keras_nlp.models.BertBackbone(
vocabulary_size=30552,
num_layers=12,
num_heads=12,
hidden_dim=768,
intermediate_dim=3072,
max_sequence_length=12,
)
lm_layer = MaskedLMHead(embedding_table=encoder.get_embedding_table())
```
Args:
embedding_table: The embedding table from encoder network.
intermediate_activation: The activation, if any, for the inner dense
layer.
initializer: The initializer for the dense layer. Defaults to a Glorot
uniform initializer.
output: The output style for this layer. Can be either 'logits' or
'predictions'.
"""
def __init__(
self,
embedding_table,
intermediate_activation="gelu",
initializer="glorot_uniform",
**kwargs,
):
super().__init__(**kwargs)
self.embedding_table = embedding_table
self.intermediate_activation = keras.activations.get(
intermediate_activation
)
self.initializer = initializer
def build(self, input_shape):
self._vocab_size, hidden_dim = self.embedding_table.shape
self.dense = keras.layers.Dense(
hidden_dim,
activation=self.intermediate_activation,
kernel_initializer=self.initializer,
name="transform/dense",
)
self.layer_norm = keras.layers.LayerNormalization(
axis=-1, epsilon=1e-12, name="transform/LayerNorm"
)
self.bias = self.add_weight(
name="output_bias/bias",
shape=(self._vocab_size,),
initializer="zeros",
trainable=True,
)
super().build(input_shape)
def call(self, sequence_data, masked_positions):
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
lm_data = self.dense(masked_lm_input)
lm_data = self.layer_norm(lm_data)
lm_data = tf.matmul(lm_data, self.embedding_table, transpose_b=True)
logits = tf.nn.bias_add(lm_data, self.bias)
masked_positions_length = (
masked_positions.shape.as_list()[1] or tf.shape(masked_positions)[1]
)
return tf.reshape(
logits, [-1, masked_positions_length, self._vocab_size]
)
def _gather_indexes(self, sequence_tensor, positions):
"""Gathers the vectors at the specific positions, for performance.
Args:
sequence_tensor: Sequence output of shape
(`batch_size`, `seq_length`, `hidden_dim`) where `hidden_dim`
is number of hidden units.
positions: Positions ids of tokens in sequence to mask for
pretraining of with dimension (batch_size, num_predictions)
where `num_predictions` is maximum number of tokens to mask out
and predict per each sequence.
Returns:
Masked out sequence tensor of shape (batch_size * num_predictions,
`hidden_dim`).
"""
sequence_shape = tf.shape(sequence_tensor)
batch_size, seq_length = sequence_shape[0], sequence_shape[1]
width = sequence_tensor.shape.as_list()[2] or sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype="int32") * seq_length, [-1, 1]
)
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(
sequence_tensor, [batch_size * seq_length, width]
)
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
class BertPretrainingModel(keras.Model):
"""MaskedLM + NSP model with Bert encoder."""
def __init__(self, encoder, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
# TODO(jbischof): replace with keras_nlp.layers.MaskedLMHead (Issue #166)
self.masked_lm_head = MaskedLMHead(
embedding_table=encoder.token_embedding.embeddings,
initializer=keras.initializers.TruncatedNormal(stddev=0.02),
name="mlm_layer",
)
self.next_sentence_head = keras.layers.Dense(
encoder.num_segments,
kernel_initializer=keras.initializers.TruncatedNormal(stddev=0.02),
name="nsp_layer",
)
def call(self, data):
encoder_output = self.encoder(
{
"token_ids": data["token_ids"],
"segment_ids": data["segment_ids"],
"padding_mask": data["padding_mask"],
}
)
sequence_output, pooled_output = (
encoder_output["sequence_output"],
encoder_output["pooled_output"],
)
lm_preds = self.masked_lm_head(
sequence_output, data["masked_lm_positions"]
)
nsp_preds = self.next_sentence_head(pooled_output)
return {"mlm": lm_preds, "nsp": nsp_preds}
class LinearDecayWithWarmup(keras.optimizers.schedules.LearningRateSchedule):
"""
A learning rate schedule with linear warmup and decay.
This schedule implements a linear warmup for the first `num_warmup_steps`
and a linear ramp down until `num_train_steps`.
"""
def __init__(self, learning_rate, num_warmup_steps, num_train_steps):
self.learning_rate = learning_rate
self.warmup_steps = num_warmup_steps
self.train_steps = num_train_steps
def __call__(self, step):
peak_lr = tf.cast(self.learning_rate, dtype="float32")
warmup = tf.cast(self.warmup_steps, dtype="float32")
training = tf.cast(self.train_steps, dtype="float32")
step = tf.cast(step, dtype="float32")
is_warmup = step < warmup
# Linear Warmup will be implemented if current step is less than
# `num_warmup_steps` else Linear Decay will be implemented.
return tf.cond(
is_warmup,
lambda: peak_lr * (step / warmup),
lambda: tf.math.maximum(
0.0, peak_lr * (training - step) / (training - warmup)
),
)
def get_config(self):
return {
"learning_rate": self.learning_rate,
"num_warmup_steps": self.warmup_steps,
"num_train_steps": self.train_steps,
}
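# For illustration (values assumed, not taken from TRAINING_CONFIG): with
# learning_rate=1e-4, num_warmup_steps=10 and num_train_steps=100, the schedule
# returns 5e-5 at step 5 (linear warmup), 1e-4 at step 10 (peak), and roughly
# 5.6e-5 at step 50 (linear decay toward 0 at step 100).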
def decode_record(record):
"""Decodes a record to a TensorFlow example."""
seq_length = PREPROCESSING_CONFIG["max_seq_length"]
lm_length = PREPROCESSING_CONFIG["max_predictions_per_seq"]
name_to_features = {
"token_ids": tf.io.FixedLenFeature([seq_length], "int64"),
"padding_mask": tf.io.FixedLenFeature([seq_length], "int64"),
"segment_ids": tf.io.FixedLenFeature([seq_length], "int64"),
"masked_lm_positions": tf.io.FixedLenFeature([lm_length], "int64"),
"masked_lm_ids": tf.io.FixedLenFeature([lm_length], "int64"),
"masked_lm_weights": tf.io.FixedLenFeature([lm_length], "float32"),
"next_sentence_labels": tf.io.FixedLenFeature([1], "int64"),
}
# tf.Example only supports "int64", but the TPU only supports "int32".
# So cast all int64 to int32.
example = tf.io.parse_single_example(record, name_to_features)
for name in list(example.keys()):
value = example[name]
if value.dtype == "int64":
value = tf.cast(value, "int32")
example[name] = value
inputs = {
"token_ids": example["token_ids"],
"padding_mask": example["padding_mask"],
"segment_ids": example["segment_ids"],
"masked_lm_positions": example["masked_lm_positions"],
}
labels = {
"mlm": example["masked_lm_ids"],
"nsp": example["next_sentence_labels"],
}
sample_weights = {"mlm": example["masked_lm_weights"], "nsp": tf.ones((1,))}
sample = (inputs, labels, sample_weights)
return sample
def get_checkpoint_callback():
if tf.io.gfile.exists(FLAGS.checkpoint_save_directory):
if not tf.io.gfile.isdir(FLAGS.checkpoint_save_directory):
raise ValueError(
"`checkpoint_save_directory` should be a directory, "
f"but {FLAGS.checkpoint_save_directory} is not a "
"directory. Please set `checkpoint_save_directory` as "
"a directory."
)
elif FLAGS.skip_restore:
# Clear up the directory if users want to skip restoring.
tf.io.gfile.rmtree(FLAGS.checkpoint_save_directory)
checkpoint_path = FLAGS.checkpoint_save_directory
return keras.callbacks.BackupAndRestore(
backup_dir=checkpoint_path,
)
def get_tensorboard_callback():
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = FLAGS.tensorboard_log_path + timestamp
return keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
def main(_):
if FLAGS.enable_cloud_logging:
# If the job is on cloud, we will use cloud logging.
import google.cloud.logging
keras.utils.disable_interactive_logging()
client = google.cloud.logging.Client()
client.setup_logging()
logging.info(f"Reading input data from {FLAGS.input_directory}")
if not tf.io.gfile.isdir(FLAGS.input_directory):
raise ValueError(
"`input_directory` should be a directory, "
f"but {FLAGS.input_directory} is not a directory. Please "
"set `input_directory` flag as a directory."
)
files = tf.io.gfile.listdir(FLAGS.input_directory)
input_filenames = [FLAGS.input_directory + "/" + file for file in files]
if not input_filenames:
logging.info("No input files found. Check `input_directory` flag.")
sys.exit(1)
vocab = []
with tf.io.gfile.GFile(FLAGS.vocab_file) as vocab_file:
for line in vocab_file:
vocab.append(line.strip())
model_config = MODEL_CONFIGS[FLAGS.model_size]
if FLAGS.tpu_name is None:
# Use default strategy if not using TPU.
strategy = tf.distribute.get_strategy()
else:
# Connect to TPU and create TPU strategy.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver.connect(
tpu=FLAGS.tpu_name
)
strategy = tf.distribute.TPUStrategy(resolver)
# Decode and batch data.
dataset = tf.data.TFRecordDataset(input_filenames)
dataset = dataset.map(
lambda record: decode_record(record),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
dataset = dataset.batch(TRAINING_CONFIG["batch_size"], drop_remainder=True)
dataset = dataset.repeat()
with strategy.scope():
# Create a Bert model the input config.
encoder = keras_nlp.models.BertBackbone(
vocabulary_size=len(vocab), **model_config
)
# Make sure model has been called.
encoder(encoder.inputs)
encoder.summary()
# Allow overriding train steps from the command line for quick testing.
if FLAGS.num_train_steps is not None:
num_train_steps = FLAGS.num_train_steps
else:
num_train_steps = TRAINING_CONFIG["num_train_steps"]
num_warmup_steps = int(
num_train_steps * TRAINING_CONFIG["warmup_percentage"]
)
learning_rate_schedule = LinearDecayWithWarmup(
learning_rate=TRAINING_CONFIG["learning_rate"],
num_warmup_steps=num_warmup_steps,
num_train_steps=num_train_steps,
)
optimizer = keras.optimizers.Adam(learning_rate=learning_rate_schedule)
lm_loss = keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
name="lm_loss",
)
nsp_loss = keras.losses.SparseCategoricalCrossentropy(
from_logits=True,
name="nsp_loss",
)
lm_accuracy = keras.metrics.SparseCategoricalAccuracy(name="accuracy")
nsp_accuracy = keras.metrics.SparseCategoricalAccuracy(name="accuracy")
pretraining_model = BertPretrainingModel(encoder)
pretraining_model.compile(
optimizer=optimizer,
loss={"mlm": lm_loss, "nsp": nsp_loss},
weighted_metrics={"mlm": lm_accuracy, "nsp": nsp_accuracy},
)
epochs = TRAINING_CONFIG["epochs"]
steps_per_epoch = num_train_steps // epochs
callbacks = []
if FLAGS.checkpoint_save_directory:
callbacks.append(get_checkpoint_callback())
if FLAGS.tensorboard_log_path:
callbacks.append(get_tensorboard_callback())
pretraining_model.fit(
dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
)
model_path = FLAGS.saved_model_output
logging.info(f"Saving to {FLAGS.saved_model_output}")
encoder.save(model_path)
if __name__ == "__main__":
flags.mark_flag_as_required("input_directory")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("saved_model_output")
app.run(main)
| keras-nlp/examples/bert_pretraining/bert_pretrain.py/0 | {
"file_path": "keras-nlp/examples/bert_pretraining/bert_pretrain.py",
"repo_id": "keras-nlp",
"token_count": 6759
} | 118 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for data handling."""
import os
import tensorflow as tf
from google import protobuf
def preview_tfrecord(filepath):
"""Pretty prints a single record from a tfrecord file."""
dataset = tf.data.TFRecordDataset(os.path.expanduser(filepath))
example = tf.train.Example()
example.ParseFromString(next(iter(dataset)).numpy())
formatted = protobuf.text_format.MessageToString(
example, use_short_repeated_primitives=True
)
print(formatted)
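# Example usage (the path below is hypothetical, shown only for illustration):
#   preview_tfrecord("~/pretraining_data/shard_0.tfrecord")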
| keras-nlp/examples/utils/data_utils.py/0 | {
"file_path": "keras-nlp/examples/utils/data_utils.py",
"repo_id": "keras-nlp",
"token_count": 324
} | 119 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.alibi_bias import AlibiBias
from keras_nlp.tests.test_case import TestCase
class AlibiBiasTest(TestCase):
def test_layer_behaviors(self):
alibi_bias_max = 8
batch_size = 4
num_heads = 8
query_length = 10
key_length = 10
self.run_layer_test(
cls=AlibiBias,
init_kwargs={
"alibi_bias_max": alibi_bias_max,
},
input_data=random.uniform(
shape=(batch_size, num_heads, query_length, key_length)
),
expected_output_shape=(
batch_size,
num_heads,
query_length,
key_length,
),
)
def test_float16_dtype(self):
# Create a 4-dimensional input (the first dimension is implicit).
alibi_bias_max = 8
num_heads = 8
query_length = 5
key_length = 10
test_layer = AlibiBias(alibi_bias_max=alibi_bias_max, dtype="float16")
input_tensor = keras.Input(shape=(num_heads, query_length, key_length))
output_tensor = test_layer(input_tensor)
        # The output is expected to have the same shape as the input in all
        # dimensions. Here, the first dimension is implicit (the batch).
expected_output_shape = (None, num_heads, query_length, key_length)
self.assertEqual(expected_output_shape, output_tensor.shape)
        # The output dtype should match the "float16" dtype passed to the layer.
self.assertEqual("float16", output_tensor.dtype)
def test_dynamic_layer_output_shape(self):
query_length = 10
key_length = 10
num_heads = 4
test_layer = AlibiBias()
# Create a 4-dimensional input (the first dimension is implicit).
input_tensor = keras.Input(shape=(num_heads, query_length, key_length))
output_tensor = test_layer(input_tensor)
# the output is expected to be the same as the input shape in all
# dimensions.
expected_output_shape = (
None,
num_heads,
query_length,
key_length,
)
self.assertEqual(expected_output_shape, output_tensor.shape)
def test_value_error_when_inputs_shape_is_not_4(self):
with self.assertRaises(ValueError):
AlibiBias()(random.uniform(shape=(12, 12)))
def test_num_heads_is_not_power_of_two(self):
inputs_shape = (1, 12, 12, 12)
inputs = random.uniform(shape=inputs_shape)
layer = AlibiBias()
outputs = layer(inputs)
self.assertEqual(inputs_shape, outputs.shape)
def test_correct_output(self):
batch_size = 1
num_heads = 8
query_length = 1
key_length = 3
input_shape = (batch_size, num_heads, query_length, key_length)
input_tensor = ops.zeros(input_shape)
layer = AlibiBias()
output_tensor = layer(input_tensor)
print(output_tensor)
self.assertAllClose(
output_tensor,
ops.convert_to_tensor(
[
[
[[-1.0, -0.5, 0.0]],
[[-0.5, -0.25, 0.0]],
[[-0.25, -0.125, 0.0]],
[[-0.125, -0.0625, 0.0]],
[[-0.0625, -0.03125, 0.0]],
[[-0.03125, -0.015625, 0.0]],
[[-0.015625, -0.0078125, 0.0]],
[[-0.0078125, -0.00390625, 0.0]],
]
]
),
)
def test_correct_output_num_heads_not_power_of_two(self):
batch_size = 1
num_heads = 14
query_length = 1
key_length = 3
input_shape = (batch_size, num_heads, query_length, key_length)
input_tensor = ops.zeros(input_shape)
layer = AlibiBias()
output_tensor = layer(input_tensor)
print(output_tensor)
self.assertAllClose(
output_tensor,
ops.convert_to_tensor(
[
[
[[-1.0, -0.5, 0.0]],
[[-0.5, -0.25, 0.0]],
[[-0.25, -0.125, 0.0]],
[[-0.125, -0.0625, 0.0]],
[[-0.0625, -0.03125, 0.0]],
[[-0.03125, -0.015625, 0.0]],
[[-0.015625, -0.0078125, 0.0]],
[[-0.0078125, -0.00390625, 0.0]],
[[-1.4142135, -0.70710677, 0.0]],
[[-0.70710677, -0.35355338, 0.0]],
[[-0.35355338, -0.17677669, 0.0]],
[[-0.17677669, -0.08838835, 0.0]],
[[-0.08838835, -0.04419417, 0.0]],
[[-0.04419417, -0.02209709, 0.0]],
]
]
),
)
def test_correct_output_alibi_bias_max(self):
alibi_bias_max = 12
batch_size = 1
num_heads = 2
query_length = 1
key_length = 3
input_shape = (batch_size, num_heads, query_length, key_length)
input_tensor = ops.zeros(input_shape)
layer = AlibiBias(alibi_bias_max=alibi_bias_max)
output_tensor = layer(input_tensor)
print(output_tensor)
self.assertAllClose(
output_tensor,
ops.convert_to_tensor(
[
[
[[-0.03125, -0.015625, 0.0]],
[[-0.00048828, -0.00024414, 0.0]],
]
]
),
)
def test_correct_output_alibi_bias_max_num_heads_not_power_of_two(
self,
):
alibi_bias_max = 6
batch_size = 1
num_heads = 3
query_length = 1
key_length = 3
input_shape = (batch_size, num_heads, query_length, key_length)
input_tensor = ops.zeros(input_shape)
layer = AlibiBias(alibi_bias_max=alibi_bias_max)
output_tensor = layer(input_tensor)
print(output_tensor)
self.assertAllClose(
output_tensor,
ops.convert_to_tensor(
[
[
[[-0.25, -0.125, 0.0]],
[[-0.03125, -0.015625, 0.0]],
[[-0.70710677, -0.35355338, 0.0]],
]
]
),
)
| keras-nlp/keras_nlp/layers/modeling/alibi_bias_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/alibi_bias_test.py",
"repo_id": "keras-nlp",
"token_count": 3970
} | 120 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.token_and_position_embedding import (
TokenAndPositionEmbedding,
)
from keras_nlp.tests.test_case import TestCase
class TokenAndPositionEmbeddingTest(TestCase):
def test_layer_behaviors(self):
self.run_layer_test(
cls=TokenAndPositionEmbedding,
init_kwargs={
"vocabulary_size": 5,
"sequence_length": 4,
"embedding_dim": 3,
"embeddings_initializer": "ones",
},
input_data=random.randint(minval=0, maxval=5, shape=(2, 4)),
expected_output_shape=(2, 4, 3),
expected_output_data=ops.ones((2, 4, 3)) * 2,
expected_num_trainable_weights=2,
)
def test_mask_propagation(self):
test_layer = TokenAndPositionEmbedding(
vocabulary_size=5,
sequence_length=4,
embedding_dim=3,
mask_zero=True,
)
input_data = np.array([[1, 0], [1, 0]])
mask = input_data != 0
outputs = test_layer(input_data)
self.assertAllEqual(outputs._keras_mask, mask)
| keras-nlp/keras_nlp/layers/modeling/token_and_position_embedding_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/token_and_position_embedding_test.py",
"repo_id": "keras-nlp",
"token_count": 758
} | 121 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.layers.preprocessing.random_swap import RandomSwap
from keras_nlp.tests.test_case import TestCase
class RandomSwapTest(TestCase):
def test_shape_and_output_from_word_swap(self):
keras.utils.set_random_seed(1337)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
augmenter = RandomSwap(rate=0.7, max_swaps=3, seed=42)
augmented = augmenter(split)
output = [
tf.strings.reduce_join(x, separator=" ", axis=-1) for x in augmented
]
exp_output = ["like I Hey", "Tensorflow Keras and"]
self.assertAllEqual(output, exp_output)
def test_shape_and_output_from_character_swap(self):
keras.utils.set_random_seed(1337)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.unicode_split(inputs, "UTF-8")
augmenter = RandomSwap(rate=0.7, max_swaps=6, seed=42)
augmented = augmenter(split)
output = [tf.strings.reduce_join(x, axis=-1) for x in augmented]
exp_output = ["yli I eHke", "seaad rnK Tensolrfow"]
self.assertAllEqual(output, exp_output)
def test_with_integer_tokens(self):
keras.utils.set_random_seed(1337)
inputs = tf.constant([[1, 2, 3], [4, 5, 6]])
augmenter = RandomSwap(rate=0.7, max_swaps=6, seed=42)
output = augmenter(inputs)
exp_output = [[3, 2, 1], [6, 4, 5]]
self.assertAllEqual(output, exp_output)
def test_skip_options(self):
keras.utils.set_random_seed(1337)
augmenter = RandomSwap(
rate=0.9, max_swaps=3, seed=11, skip_list=["Tensorflow", "like"]
)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["I Hey like", "Keras and Tensorflow"]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
if word == "Tensorflow" or word == "like":
return True
return False
augmenter = RandomSwap(rate=0.9, max_swaps=3, seed=11, skip_fn=skip_fn)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["I Hey like", "Keras and Tensorflow"]
self.assertAllEqual(output, exp_output)
def skip_py_fn(word):
if word == "Tensorflow" or word == "like":
return True
return False
augmenter = RandomSwap(
rate=0.9, max_swaps=3, seed=11, skip_py_fn=skip_py_fn
)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["I Hey like", "Keras and Tensorflow"]
self.assertAllEqual(output, exp_output)
def test_get_config_and_from_config(self):
augmenter = RandomSwap(rate=0.4, max_swaps=3, seed=42)
expected_config_subset = {"rate": 0.4, "max_swaps": 3, "seed": 42}
config = augmenter.get_config()
self.assertEqual(config, {**config, **expected_config_subset})
restored_augmenter = RandomSwap.from_config(
config,
)
self.assertEqual(
restored_augmenter.get_config(),
{**config, **expected_config_subset},
)
def test_augment_first_batch_second(self):
keras.utils.set_random_seed(1337)
augmenter = RandomSwap(rate=0.7, max_swaps=3, seed=42)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [
["like", "I", "Hey"],
["and", "Tensorflow", "Keras"],
]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
# Regex to match words starting with I or a
return tf.strings.regex_full_match(word, r"[I, a].*")
def skip_py_fn(word):
return len(word) < 2
augmenter = RandomSwap(rate=0.7, max_swaps=5, seed=11, skip_fn=skip_fn)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [
["like", "I", "Hey"],
["Keras", "and", "Tensorflow"],
]
self.assertAllEqual(output, exp_output)
augmenter = RandomSwap(
rate=0.7, max_swaps=2, seed=42, skip_py_fn=skip_py_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [
["Hey", "I", "like"],
["Tensorflow", "Keras", "and"],
]
self.assertAllEqual(output, exp_output)
def test_batch_first_augment_second(self):
keras.utils.set_random_seed(1337)
augmenter = RandomSwap(rate=0.7, max_swaps=2, seed=42)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(2).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [
["like", "I", "Hey"],
["Tensorflow", "Keras", "and"],
]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
# Regex to match words starting with I
return tf.strings.regex_full_match(word, r"[I].*")
def skip_py_fn(word):
return len(word) < 2
augmenter = RandomSwap(rate=0.7, max_swaps=2, seed=42, skip_fn=skip_fn)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(2).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [
["Hey", "I", "like"],
["and", "Keras", "Tensorflow"],
]
self.assertAllEqual(output, exp_output)
augmenter = RandomSwap(
rate=0.7, max_swaps=2, seed=42, skip_py_fn=skip_py_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(2).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [
["Hey", "I", "like"],
["and", "Keras", "Tensorflow"],
]
self.assertAllEqual(output, exp_output)
| keras-nlp/keras_nlp/layers/preprocessing/random_swap_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/preprocessing/random_swap_test.py",
"repo_id": "keras-nlp",
"token_count": 3506
} | 122 |
# Copyright 2022 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder
from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
from keras_nlp.models.backbone import Backbone
from keras_nlp.models.bart.bart_presets import backbone_presets
from keras_nlp.utils.python_utils import classproperty
def bart_kernel_initializer(stddev=0.02):
return keras.initializers.TruncatedNormal(stddev=stddev)
@keras_nlp_export("keras_nlp.models.BartBackbone")
class BartBackbone(Backbone):
"""BART encoder-decoder network.
This class implements a Transformer-based encoder-decoder model as
described in
["BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension"](https://arxiv.org/abs/1910.13461).
The default constructor gives a fully customizable, randomly initialized BART
model with any number of layers, heads, and embedding dimensions. To load
preset architectures and weights, use the `from_preset` constructor.
Disclaimer: Pre-trained models are provided on an "as is" basis, without
warranties or conditions of any kind. The underlying model is provided by a
third party and subject to a separate license, available
[here](https://github.com/facebookresearch/fairseq/).
Args:
vocabulary_size: int. The size of the token vocabulary.
num_layers: int. The number of transformer encoder layers and
transformer decoder layers.
num_heads: int. The number of attention heads for each transformer.
The hidden size must be divisible by the number of attention heads.
hidden_dim: int. The size of the transformer encoding and pooler layers.
intermediate_dim: int. The output dimension of the first Dense layer in
a two-layer feedforward network for each transformer.
dropout: float. Dropout probability for the Transformer encoder.
max_sequence_length: int. The maximum sequence length that this encoder
can consume. If None, `max_sequence_length` uses the value from
sequence length. This determines the variable shape for positional
embeddings.
dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use
for model computations and weights. Note that some computations,
such as softmax and layer normalization, will always be done at
float32 precision regardless of dtype.
Examples:
```python
input_data = {
"encoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
"encoder_padding_mask": np.array(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]
),
"decoder_token_ids": np.ones(shape=(1, 12), dtype="int32"),
"decoder_padding_mask": np.array(
[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]
),
}
# Pretrained BART encoder.
model = keras_nlp.models.BartBackbone.from_preset("bart_base_en")
model(input_data)
# Randomly initialized BART encoder-decoder model with a custom config
model = keras_nlp.models.BartBackbone(
vocabulary_size=50265,
num_layers=6,
num_heads=12,
hidden_dim=768,
intermediate_dim=3072,
max_sequence_length=12,
)
output = model(input_data)
```
"""
def __init__(
self,
vocabulary_size,
num_layers,
num_heads,
hidden_dim,
intermediate_dim,
dropout=0.1,
max_sequence_length=1024,
dtype=None,
**kwargs,
):
# === Layers ===
self.token_embedding = ReversibleEmbedding(
input_dim=vocabulary_size,
output_dim=hidden_dim,
embeddings_initializer=bart_kernel_initializer(),
dtype=dtype,
name="token_embedding",
)
self.encoder_position_embedding = PositionEmbedding(
initializer=bart_kernel_initializer(),
sequence_length=max_sequence_length,
dtype=dtype,
name="encoder_position_embedding",
)
self.encoder_embeddings_add = keras.layers.Add(
dtype=dtype,
name="encoder_embeddings_add",
)
self.encoder_embeddings_layer_norm = keras.layers.LayerNormalization(
axis=-1,
epsilon=1e-5,
dtype=dtype,
name="encoder_embeddings_layer_norm",
)
self.encoder_embeddings_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="encoder_embeddings_dropout",
)
self.encoder_transformer_layers = []
for i in range(num_layers):
layer = TransformerEncoder(
num_heads=num_heads,
intermediate_dim=intermediate_dim,
activation=keras.activations.gelu,
dropout=dropout,
layer_norm_epsilon=1e-5,
kernel_initializer=bart_kernel_initializer(),
dtype=dtype,
name=f"transformer_encoder_layer_{i}",
)
self.encoder_transformer_layers.append(layer)
self.decoder_position_embedding = PositionEmbedding(
initializer=bart_kernel_initializer(),
sequence_length=max_sequence_length,
dtype=dtype,
name="decoder_position_embedding",
)
self.decoder_embeddings_add = keras.layers.Add(
dtype=dtype,
name="decoder_embeddings_add",
)
self.decoder_embeddings_layer_norm = keras.layers.LayerNormalization(
axis=-1,
epsilon=1e-5,
dtype=dtype,
name="decoder_embeddings_layer_norm",
)
self.decoder_embeddings_dropout = keras.layers.Dropout(
dropout,
dtype=dtype,
name="decoder_embeddings_dropout",
)
self.decoder_transformer_layers = []
for i in range(num_layers):
layer = TransformerDecoder(
intermediate_dim=intermediate_dim,
num_heads=num_heads,
dropout=dropout,
activation=keras.activations.gelu,
layer_norm_epsilon=1e-5,
kernel_initializer=bart_kernel_initializer(),
dtype=dtype,
name=f"transformer_decoder_layer_{i}",
)
self.decoder_transformer_layers.append(layer)
# === Functional Model ===
encoder_token_id_input = keras.Input(
shape=(None,), dtype="int32", name="encoder_token_ids"
)
encoder_padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="encoder_padding_mask"
)
decoder_token_id_input = keras.Input(
shape=(None,), dtype="int32", name="decoder_token_ids"
)
decoder_padding_mask_input = keras.Input(
shape=(None,), dtype="int32", name="decoder_padding_mask"
)
# Encoder.
tokens = self.token_embedding(encoder_token_id_input)
positions = self.encoder_position_embedding(tokens)
x = self.encoder_embeddings_add((tokens, positions))
x = self.encoder_embeddings_layer_norm(x)
x = self.encoder_embeddings_dropout(x)
for transformer_layer in self.encoder_transformer_layers:
x = transformer_layer(x, padding_mask=encoder_padding_mask_input)
encoder_output = x
# Decoder.
tokens = self.token_embedding(decoder_token_id_input)
positions = self.decoder_position_embedding(tokens)
x = self.decoder_embeddings_add((tokens, positions))
x = self.decoder_embeddings_layer_norm(x)
x = self.decoder_embeddings_dropout(x)
for transformer_layer in self.decoder_transformer_layers:
x = transformer_layer(
decoder_sequence=x,
encoder_sequence=encoder_output,
decoder_padding_mask=decoder_padding_mask_input,
encoder_padding_mask=encoder_padding_mask_input,
)
decoder_output = x
# Instantiate using Functional API Model constructor
super().__init__(
inputs={
"encoder_token_ids": encoder_token_id_input,
"encoder_padding_mask": encoder_padding_mask_input,
"decoder_token_ids": decoder_token_id_input,
"decoder_padding_mask": decoder_padding_mask_input,
},
outputs={
"encoder_sequence_output": encoder_output,
"decoder_sequence_output": decoder_output,
},
**kwargs,
)
# === Config ===
self.vocabulary_size = vocabulary_size
self.num_layers = num_layers
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.max_sequence_length = max_sequence_length
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"num_layers": self.num_layers,
"num_heads": self.num_heads,
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"max_sequence_length": self.max_sequence_length,
}
)
return config
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/bart/bart_backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bart/bart_backbone.py",
"repo_id": "keras-nlp",
"token_count": 4724
} | 123 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DeBERTa model preset configurations."""
backbone_presets = {
"deberta_v3_extra_small_en": {
"metadata": {
"description": (
"12-layer DeBERTaV3 model where case is maintained. "
"Trained on English Wikipedia, BookCorpus and OpenWebText."
),
"params": 70682112,
"official_name": "DeBERTaV3",
"path": "deberta_v3",
"model_card": "https://huggingface.co/microsoft/deberta-v3-xsmall",
},
"kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_extra_small_en/2",
},
"deberta_v3_small_en": {
"metadata": {
"description": (
"6-layer DeBERTaV3 model where case is maintained. "
"Trained on English Wikipedia, BookCorpus and OpenWebText."
),
"params": 141304320,
"official_name": "DeBERTaV3",
"path": "deberta_v3",
"model_card": "https://huggingface.co/microsoft/deberta-v3-small",
},
"kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_small_en/2",
},
"deberta_v3_base_en": {
"metadata": {
"description": (
"12-layer DeBERTaV3 model where case is maintained. "
"Trained on English Wikipedia, BookCorpus and OpenWebText."
),
"params": 183831552,
"official_name": "DeBERTaV3",
"path": "deberta_v3",
"model_card": "https://huggingface.co/microsoft/deberta-v3-base",
},
"kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_base_en/2",
},
"deberta_v3_large_en": {
"metadata": {
"description": (
"24-layer DeBERTaV3 model where case is maintained. "
"Trained on English Wikipedia, BookCorpus and OpenWebText."
),
"params": 434012160,
"official_name": "DeBERTaV3",
"path": "deberta_v3",
"model_card": "https://huggingface.co/microsoft/deberta-v3-large",
},
"kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_large_en/2",
},
"deberta_v3_base_multi": {
"metadata": {
"description": (
"12-layer DeBERTaV3 model where case is maintained. "
"Trained on the 2.5TB multilingual CC100 dataset."
),
"params": 278218752,
"official_name": "DeBERTaV3",
"path": "deberta_v3",
"model_card": "https://huggingface.co/microsoft/mdeberta-v3-base",
},
"kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_base_multi/2",
},
}
| keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_presets.py",
"repo_id": "keras-nlp",
"token_count": 1610
} | 124 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.f_net.f_net_masked_lm_preprocessor import (
FNetMaskedLMPreprocessor,
)
from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
from keras_nlp.tests.test_case import TestCase
class FNetMaskedLMPreprocessorTest(TestCase):
def setUp(self):
self.tokenizer = FNetTokenizer(
# Generated using create_f_net_test_proto.py
proto=os.path.join(self.get_test_data_dir(), "f_net_test_vocab.spm")
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
# Simplify our testing by masking every available token.
"mask_selection_rate": 1.0,
"mask_token_rate": 1.0,
"random_token_rate": 0.0,
"mask_selection_length": 4,
"sequence_length": 12,
}
self.input_data = ["the quick brown fox"]
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=FNetMaskedLMPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]],
"segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
"mask_positions": [[1, 2, 3, 4]],
},
[[5, 10, 6, 8]],
[[1.0, 1.0, 1.0, 1.0]],
),
)
def test_no_masking_zero_rate(self):
no_mask_preprocessor = FNetMaskedLMPreprocessor(
self.tokenizer,
mask_selection_rate=0.0,
mask_selection_length=4,
sequence_length=12,
)
input_data = ["the quick brown fox"]
self.assertAllClose(
no_mask_preprocessor(input_data),
(
{
"token_ids": [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]],
"segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
"mask_positions": [[0, 0, 0, 0]],
},
[[0, 0, 0, 0]],
[[0.0, 0.0, 0.0, 0.0]],
),
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in FNetMaskedLMPreprocessor.presets:
self.run_preset_test(
cls=FNetMaskedLMPreprocessor,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1547
} | 125 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from keras_nlp.models.gemma.gemma_backbone import GemmaBackbone
from keras_nlp.tests.test_case import TestCase
@pytest.mark.keras_3_only
class GemmaLoraTest(TestCase):
def setUp(self):
self._init_kwargs = {
"vocabulary_size": 50,
"num_layers": 2,
"num_query_heads": 2,
"num_key_value_heads": 2,
"hidden_dim": 32,
"intermediate_dim": 16,
"head_dim": 16,
"layer_norm_epsilon": 1e-6,
}
def test_lora_fine_tuning(self):
# Set up backbone and preprocessor.
backbone = GemmaBackbone(**self._init_kwargs)
backbone.enable_lora(4)
# 4 layers, 2 weights per layer
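        # (Assuming `enable_lora` targets the query and value projections by
        # default, the 2 transformer blocks give 4 LoRA-enabled layers, each
        # adding a lora_kernel_a / lora_kernel_b pair.)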
self.assertLen(backbone.trainable_weights, 4 * 2)
self.assertLen(backbone.non_trainable_weights, 20)
input_data = {
"token_ids": np.ones((2, 5), dtype="int32"),
"padding_mask": np.ones((2, 5), dtype="int32"),
}
targets = np.random.normal(size=(2, 5, self._init_kwargs["hidden_dim"]))
# Test fine-tuning
backbone.compile(optimizer="sgd", loss="mse")
backbone.fit(input_data, targets, epochs=1)
# Test saving and reloading.
temp_filepath = os.path.join(
self.get_temp_dir(), "lora_model.weights.h5"
)
backbone.save_weights(temp_filepath)
new_backbone = GemmaBackbone(**self._init_kwargs)
new_backbone.load_weights(temp_filepath)
ref_out = backbone(input_data)
new_out = new_backbone(input_data)
self.assertAllClose(ref_out, new_out)
def test_lora_saving_and_reloading(self):
backbone = GemmaBackbone(**self._init_kwargs)
initial_model_filepath = os.path.join(
self.get_temp_dir(), "base.weights.h5"
)
backbone.save_weights(initial_model_filepath)
backbone.enable_lora(4)
input_data = {
"token_ids": np.ones((2, 5), dtype="int32"),
"padding_mask": np.ones((2, 5), dtype="int32"),
}
targets = np.random.normal(size=(2, 5, self._init_kwargs["hidden_dim"]))
backbone.compile(optimizer="sgd", loss="mse")
backbone.fit(input_data, targets, epochs=1)
lora_filepath = os.path.join(self.get_temp_dir(), "lora_model.lora.h5")
backbone.save_lora_weights(lora_filepath)
# New backbone with same initial weights
new_backbone = GemmaBackbone(**self._init_kwargs)
new_backbone.load_weights(initial_model_filepath)
new_backbone.enable_lora(4)
new_backbone.load_lora_weights(lora_filepath)
ref_out = backbone(input_data)
new_out = new_backbone(input_data)
self.assertAllClose(ref_out, new_out)
# Test exceptions
backbone = GemmaBackbone(**self._init_kwargs)
with self.assertRaisesRegex(ValueError, "no lora-enabled layers"):
backbone.save_lora_weights(lora_filepath)
backbone.enable_lora(5)
with self.assertRaisesRegex(ValueError, "ranks must match"):
backbone.load_lora_weights(lora_filepath)
with self.assertRaisesRegex(ValueError, "filename must end in"):
backbone.save_lora_weights("bad_filepath")
| keras-nlp/keras_nlp/models/gemma/gemma_lora_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_lora_test.py",
"repo_id": "keras-nlp",
"token_count": 1706
} | 126 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import patch
import pytest
from keras_nlp.backend import ops
from keras_nlp.models.mistral.mistral_backbone import MistralBackbone
from keras_nlp.models.mistral.mistral_causal_lm import MistralCausalLM
from keras_nlp.models.mistral.mistral_causal_lm_preprocessor import (
MistralCausalLMPreprocessor,
)
from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer
from keras_nlp.tests.test_case import TestCase
class MistralCausalLMTest(TestCase):
def setUp(self):
self.preprocessor = MistralCausalLMPreprocessor(
MistralTokenizer(
# Generated using create_mistral_test_proto.py
proto=os.path.join(
self.get_test_data_dir(), "mistral_test_vocab.spm"
)
),
sequence_length=8,
)
self.backbone = MistralBackbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_query_heads=4,
num_key_value_heads=2,
hidden_dim=8,
intermediate_dim=16,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = (["the quick brown fox", "the earth is round"],)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_causal_lm_basics(self):
self.run_task_test(
cls=MistralCausalLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 8, 10),
)
def test_generate(self):
causal_lm = MistralCausalLM(**self.init_kwargs)
# String input.
prompt = "the quick brown fox"
output = causal_lm.generate(prompt)
self.assertTrue(prompt in output)
# Int tensor input.
prompt_ids = self.preprocessor.generate_preprocess([prompt])
causal_lm.preprocessor = None
outputs = causal_lm.generate(prompt_ids)
# Assert prompt is in output in token id space.
self.assertAllEqual(
outputs["token_ids"][:, :5],
prompt_ids["token_ids"][:, :5],
)
self.assertAllEqual(
outputs["padding_mask"][:, :5],
prompt_ids["padding_mask"][:, :5],
)
def test_early_stopping(self):
causal_lm = MistralCausalLM(**self.init_kwargs)
call_with_cache = causal_lm.call_with_cache
def wrapper(*args, **kwargs):
"""Modify output logits to always favor end_token_id"""
logits, hidden_states, cache = call_with_cache(*args, **kwargs)
index = self.preprocessor.tokenizer.end_token_id
update = ops.ones_like(logits)[:, :, index] * 1.0e9
update = ops.expand_dims(update, axis=-1)
logits = ops.slice_update(logits, (0, 0, index), update)
return logits, hidden_states, cache
with patch.object(causal_lm, "call_with_cache", wraps=wrapper):
prompt = ["the quick brown fox", "the earth"]
output = causal_lm.generate(prompt)
# We should immediately abort and output the prompt.
self.assertEqual(prompt, output)
def test_generate_compilation(self):
causal_lm = MistralCausalLM(**self.init_kwargs)
# Assert we do not recompile with successive calls.
causal_lm.generate("the quick brown fox")
first_fn = causal_lm.generate_function
causal_lm.generate("the quick brown fox")
second_fn = causal_lm.generate_function
self.assertEqual(first_fn, second_fn)
# Assert we do recompile after compile is called.
causal_lm.compile(sampler="greedy")
self.assertIsNone(causal_lm.generate_function)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=MistralCausalLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in MistralCausalLM.presets:
self.run_preset_test(
cls=MistralCausalLM,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/mistral/mistral_causal_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/mistral/mistral_causal_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 2205
} | 127 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RoBERTa model preset configurations."""
backbone_presets = {
"roberta_base_en": {
"metadata": {
"description": (
"12-layer RoBERTa model where case is maintained."
"Trained on English Wikipedia, BooksCorpus, CommonCraw, and OpenWebText."
),
"params": 124052736,
"official_name": "RoBERTa",
"path": "roberta",
"model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md",
},
"kaggle_handle": "kaggle://keras/roberta/keras/roberta_base_en/2",
},
"roberta_large_en": {
"metadata": {
"description": (
"24-layer RoBERTa model where case is maintained."
"Trained on English Wikipedia, BooksCorpus, CommonCraw, and OpenWebText."
),
"params": 354307072,
"official_name": "RoBERTa",
"path": "roberta",
"model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md",
},
"kaggle_handle": "kaggle://keras/roberta/keras/roberta_large_en/2",
},
}
| keras-nlp/keras_nlp/models/roberta/roberta_presets.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/roberta/roberta_presets.py",
"repo_id": "keras-nlp",
"token_count": 740
} | 128 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.models.whisper.whisper_audio_feature_extractor import (
WhisperAudioFeatureExtractor,
)
from keras_nlp.tests.test_case import TestCase
class WhisperAudioFeatureExtractorTest(TestCase):
def setUp(self):
self.init_kwargs = {
"num_mels": 80,
"num_fft_bins": 400,
"stride": 100,
"sampling_rate": 100,
"max_audio_length": 5,
}
audio_tensor_1 = tf.ones((2,), dtype="float32")
audio_tensor_2 = tf.ones((25,), dtype="float32")
self.input_data = tf.ragged.stack(
[audio_tensor_1, audio_tensor_2],
axis=0,
)
def test_feature_extractor_basics(self):
self.run_preprocessing_layer_test(
cls=WhisperAudioFeatureExtractor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
def test_correctness(self):
audio_tensor = tf.ones((2,), dtype="float32")
outputs = WhisperAudioFeatureExtractor(**self.init_kwargs)(audio_tensor)
# Verify shape.
self.assertEqual(outputs.shape, (5, 80))
# Verify output.
expected = [1.1656, 1.0151, -0.8343, -0.8343, -0.8343]
self.assertAllClose(outputs[:, 0], expected, atol=0.01, rtol=0.01)
| keras-nlp/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py",
"repo_id": "keras-nlp",
"token_count": 803
} | 129 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tree
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import ops
from keras_nlp.samplers.sampler import Sampler
@keras_nlp_export("keras_nlp.samplers.BeamSampler")
class BeamSampler(Sampler):
"""Beam Sampler class.
    This sampler implements the beam search algorithm. At each time-step, beam
search keeps the beams (sequences) of the top `num_beams` highest
accumulated probabilities, and uses each one of the beams to predict
candidate next tokens.
Args:
num_beams: int. The number of beams that should be kept at each
time-step. `num_beams` should be strictly positive.
return_all_beams: bool. When set to `True`, the sampler will return all
beams and their respective probabilities score.
Call arguments:
{{call_args}}
Examples:
```python
causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en")
# Pass by name to compile.
causal_lm.compile(sampler="beam")
causal_lm.generate(["Keras is a"])
# Pass by object to compile.
sampler = keras_nlp.samplers.BeamSampler(num_beams=5)
causal_lm.compile(sampler=sampler)
causal_lm.generate(["Keras is a"])
```
"""
def __init__(
self,
num_beams=5,
return_all_beams=False,
**kwargs,
):
super().__init__(**kwargs)
self.num_beams = num_beams
self.return_all_beams = return_all_beams
def __call__(
self,
next,
prompt,
cache=None,
index=0,
mask=None,
end_token_id=None,
hidden_states=None,
model=None,
):
batch_size, max_length = ops.shape(prompt)[0], ops.shape(prompt)[1]
index = ops.cast(index, "int32")
def create_beams(x):
"""Add initial beam state."""
return ops.repeat(x, self.num_beams, axis=0)
def flatten_beams(x):
"""Combine the beam dim and batch dim."""
flat_shape = (batch_size * self.num_beams,) + ops.shape(x)[2:]
return ops.reshape(x, flat_shape)
def unflatten_beams(x):
"""Separate the beam dim and batch dim."""
unflat_shape = (batch_size, self.num_beams) + ops.shape(x)[1:]
return ops.reshape(x, unflat_shape)
if mask is None:
mask = ops.zeros_like(prompt, dtype="bool")
else:
mask = ops.cast(mask, dtype="bool")
# `ops.while_loop` will not accept `None` as a value for `loop_vars`.
has_cache = cache is not None
cache = cache if has_cache else ()
# Add extra sequences for each beam.
prompt, mask = create_beams(prompt), create_beams(mask)
cache = tree.map_structure(create_beams, cache)
# Setup the initial beam log-likelihoods.
# On the first loop, make sure only the original beam is considered.
log_probs = ops.array(
[[0.0] + [-1e9] * (self.num_beams - 1)], dtype="float32"
)
log_probs = flatten_beams(ops.repeat(log_probs, batch_size, axis=0))
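        # For example, with num_beams=3 and batch_size=2 this is a (6,) tensor
        # [0., -1e9, -1e9, 0., -1e9, -1e9], so only the original beam of each
        # batch element can win the first top-k selection.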
def cond(prompt, cache, index, log_probs):
if end_token_id is None:
return True
# Stop if all sequences have produced a *new* end_token_id.
end_tokens = (prompt == end_token_id) & (~mask)
prompt_done = ops.any(end_tokens, axis=-1)
return ops.logical_not(ops.all(prompt_done))
def body(prompt, cache, index, log_probs):
# Compute the softmax distribution for the next token.
logits, _, cache = next(prompt, cache, index)
vocab_size = ops.shape(logits)[-1]
probs = self.compute_probabilities(logits)
# Compute the running log-likelihood of each new candidate.
next_log_probs = ops.log(probs) + log_probs[..., None]
# Reshape `preds` to shape `(batch_size, num_beams * vocab_size)`.
next_log_probs = ops.reshape(next_log_probs, [batch_size, -1])
# Compute the top beam indices and next tokens.
next_log_probs, indices = ops.top_k(
next_log_probs, k=self.num_beams, sorted=False
)
beam_indices = indices // vocab_size
next_token = flatten_beams(indices % vocab_size)
# We need `ensure_shape` as `top_k` will change the static shape.
next_log_probs = flatten_beams(next_log_probs)
# Work around for top_k output shape on tf backend.
if isinstance(log_probs, tf.Tensor):
log_probs = tf.ensure_shape(next_log_probs, log_probs.shape)
else:
log_probs = next_log_probs
def gather_beams(x):
x = unflatten_beams(x)
indices = beam_indices
for axis in range(2, len(x.shape)):
indices = ops.expand_dims(indices, axis=axis)
x = ops.take_along_axis(x, indices, axis=1)
return flatten_beams(x)
prompt = gather_beams(prompt)
if has_cache:
cache = tree.map_structure(gather_beams, cache)
# Update each beam with the next token.
next_token = ops.cast(next_token, prompt.dtype)
# Don't overwrite anywhere mask is True.
next_token = ops.where(mask[:, index], prompt[:, index], next_token)
# Update the prompt with the next token.
next_token = next_token[:, None]
prompt = ops.slice_update(prompt, [0, index], next_token)
# Return the iteration of the loop state.
return (prompt, cache, index + 1, log_probs)
prompt, _, _, log_probs = self.run_loop(
cond=cond,
body=body,
loop_vars=(prompt, cache, index, log_probs),
maximum_iterations=(max_length - index),
model=model,
)
all_prompts = unflatten_beams(prompt)
all_log_probs = unflatten_beams(log_probs)
if self.return_all_beams:
sorted_indices = ops.argsort(-all_log_probs, axis=-1)
sorted_log_probs = ops.take_along_axis(
all_log_probs,
sorted_indices,
axis=1,
)
sorted_prompts = ops.take_along_axis(
all_prompts,
ops.expand_dims(sorted_indices, -1),
axis=1,
)
return sorted_prompts, sorted_log_probs
else:
# Gather the top beam at each batch index.
top_beams = ops.argmax(all_log_probs, axis=-1)[:, None, None]
prompt = ops.take_along_axis(all_prompts, top_beams, axis=1)
return ops.squeeze(prompt, axis=1)
def get_config(self):
config = super().get_config()
config.update(
{
"num_beams": self.num_beams,
"return_all_beams": self.return_all_beams,
}
)
return config
| keras-nlp/keras_nlp/samplers/beam_sampler.py/0 | {
"file_path": "keras-nlp/keras_nlp/samplers/beam_sampler.py",
"repo_id": "keras-nlp",
"token_count": 3619
} | 130 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Byte-pair encoder implementation.
This file implements the same logic as openai BPE:
https://github.com/openai/gpt-2/blob/master/src/encoder.py,
but is TF graph compatible.
"""
import json
import os
from typing import Iterable
from typing import List
import regex as re
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers import tokenizer
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import is_int_dtype
from keras_nlp.utils.tensor_utils import is_string_dtype
try:
import tensorflow_text as tf_text
except ImportError:
tf_text = None
VOCAB_FILENAME = "vocabulary.json"
MERGES_FILENAME = "merges.txt"
# As python and TF handle special spaces differently, we need to
# manually handle special spaces during string split.
SPECIAL_WHITESPACES = r"\x{a0}\x{2009}\x{202f}\x{3000}"
# String splitting regex pattern.
SPLIT_PATTERN_1 = (
r"'s|'t|'re|'ve|'m|'ll|'d"
+ r"|[\s{special_spaces}]+[\n\r\t\f६{special_spaces}]| ?\p{L}+|"
+ r" ?[\p{N}]+| ?[^\s\p{L}\p{N}{special_spaces}]+"
)
SPLIT_PATTERN_1 = SPLIT_PATTERN_1.replace(
"{special_spaces}", SPECIAL_WHITESPACES
)
SPLIT_PATTERN_2 = rf"""[\s६{SPECIAL_WHITESPACES}]$"""
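# Roughly, SPLIT_PATTERN_1 isolates common English contractions, runs of
# letters or digits (each optionally keeping one leading space), and runs of
# other symbols, while SPLIT_PATTERN_2 peels off a trailing space or the "६"
# marker inserted by `split_strings_for_bpe` below.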
def create_alts_for_unsplittable_tokens(unsplittable_tokens):
    # Create alternates for all special tokens that will not be split during
# tokenization.
alts = []
prefix = "Ĵ"
# Trim out splitters.
replace_pattern = r"'|\s+|[^\p{L}\p{N}]+"
for token in unsplittable_tokens:
token = re.sub(replace_pattern, "", token)
alts.append(prefix + token)
return alts
def bytes_to_unicode():
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
    # Map any byte not already covered above to a codepoint past 255, so that
    # no byte ends up mapped to a whitespace or control character.
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
bs = [n.to_bytes(1, "little") for n in bs]
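    # For example, the printable byte b"!" keeps its own character "!", while
    # unmapped bytes are shifted past 255: the space byte 0x20 maps to
    # chr(288) == "Ġ", the marker GPT-2 uses for a leading space.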
return bs, cs # int to string mapping
def remove_strings_from_inputs(tensor, string_to_remove):
"""Remove certain strings from input tensor."""
non_empty_mask = tensor != string_to_remove
flatten_indexes = tf.where(non_empty_mask)
flatten_result = tf.gather_nd(tensor, flatten_indexes)
row_lengths = tf.reduce_sum(tf.cast(non_empty_mask, "int64"), axis=1)
result = tf.RaggedTensor.from_row_lengths(
values=flatten_result,
row_lengths=row_lengths,
)
return result
def split_strings_for_bpe(inputs, unsplittable_tokens=None):
# We need to recreate the exact behavior of token presplitting in the
    # original gpt2 tokenizer, which uses a lookahead. As re2 does not
    # support lookahead matches, we use an alternative: insert a special
    # token "६" before the leading space of non-space characters and after
    # a trailing space, e.g., " keras" becomes "६ keras".
inputs = tf.strings.regex_replace(
inputs, rf"( )([^\s{SPECIAL_WHITESPACES}])", r"६\1\2"
)
inputs = tf.strings.regex_replace(
inputs, rf"(\s{SPECIAL_WHITESPACES})$", r"\1६"
)
if unsplittable_tokens:
alts = create_alts_for_unsplittable_tokens(unsplittable_tokens)
for token, alt in zip(unsplittable_tokens, alts):
escaped_token = re.escape(token)
inputs = tf_text.regex_split(inputs, escaped_token, escaped_token)
inputs = tf.strings.regex_replace(inputs, escaped_token, alt)
raw_tokens = tf_text.regex_split(inputs, SPLIT_PATTERN_1, SPLIT_PATTERN_1)
    # Second pass splits out the last whitespace char or "६".
raw_tokens = tf_text.regex_split(
raw_tokens, SPLIT_PATTERN_2, SPLIT_PATTERN_2
)
if unsplittable_tokens:
# Replace special tokens alternate with originals.
for token, alt in zip(unsplittable_tokens, alts):
escaped_alt = re.escape(alt)
raw_tokens = tf.strings.regex_replace(
raw_tokens, escaped_alt, token
)
while raw_tokens.shape.rank > 2:
raw_tokens = raw_tokens.merge_dims(1, 2)
return remove_strings_from_inputs(raw_tokens, "६")
class BytePairTokenizerCache(tf.Module):
"""Cache that stores the encoded result of seen tokens.
    The cache key is a string tensor or python strings, and the value is split
tokens joined by whitespace. For example, "dragonfly" => "dragon fly"
Examples:
```
cache = BytePairTokenizerCache()
cache.insert(["butterfly", "dragonfly"], ["but ter fly", "dragon fly"])
cache.lookup(["butterfly"])
```
"""
def __init__(self):
# `tf.lookup.experimental.MutableHashTable` does not support string to
# string mapping. So we first convert to string to an integer key, and
# use the integer key to find the value.
self.factors = tf.pow(
tf.constant(256, dtype="int64"), tf.range(0, 8, dtype="int64")
)
self.id2value = tf.lookup.experimental.MutableHashTable(
"int64", tf.string, ""
)
def _get_key(self, keys):
"""Get the hash key for given inputs."""
# `tf.fingerprint` converts token to a array of uint8 of length 8, we
# need to convert it to a uint64.
return tf.squeeze(
tf.matmul(
tf.cast(tf.fingerprint(keys), dtype="int64"),
self.factors[:, tf.newaxis],
),
-1,
)
def lookup(self, keys):
"""Look up the encoded outputs of given tokens."""
ids = self._get_key(keys)
result = self.id2value.lookup(ids)
# Ensure output shape for graph mode.
result.set_shape([None])
return result
def insert(self, keys, values):
"""Insert token <=> encoded outputs pairs."""
self.id2value.insert(self._get_key(keys), values)
def create_static_hashtable(keys, values, default):
return tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
tf.convert_to_tensor(keys),
tf.convert_to_tensor(values),
),
default_value=default,
)
@keras_nlp_export("keras_nlp.tokenizers.BytePairTokenizer")
class BytePairTokenizer(tokenizer.Tokenizer):
"""Bype-pair encoding tokenizer layer.
This BPE tokenizer provides the same functionality as the official GPT-2
tokenizer. Given the same `vocabulary` which maps tokens to ids, and `merges`
which describes BPE merge rules, it should provide the same output
as OpenAI implementation (https://github.com/openai/gpt-2/blob/master/src/encoder.py).
Different from OpenAI, this implementation is graph-compatible, so you can
use it within a `tf.data` pipeline.
If input is a batch of strings (rank > 0):
By default, the layer will output a `tf.RaggedTensor` where the last
dimension of the output is ragged. If `sequence_length` is set, the layer
will output a dense `tf.Tensor` where all inputs have been padded or
truncated to `sequence_length`.
If input is a scalar string (rank == 0):
By default, the layer will output a dense `tf.Tensor` with static shape
`[None]`. If `sequence_length` is set, the output will be
a dense `tf.Tensor` of shape `[sequence_length]`.
Args:
vocabulary: string or dict, maps token to integer ids. If it is a
string, it should be the file path to a json file.
        merges: string or list, contains the merge rules. If it is a string,
it should be the file path to merge rules. The merge rule file
should have one merge rule per line.
sequence_length: int. If set, the output will be
padded or truncated to the `sequence_length`. Defaults to `None`.
add_prefix_space: bool. Whether to add an
initial space to the input. This tokenizer is whitespace aware,
and will tokenize a word with a leading space differently. Adding
a prefix space to the first word will cause it to be tokenized
equivalently to all subsequent words in the sequence.
Defaults to `False`.
unsplittable_tokens: list. A list of strings that will
never be split during the word-level splitting applied before the
byte-pair encoding. This can be used to ensure special tokens map to
unique indices in the vocabulary, even if these special tokens
contain splittable characters such as punctuation. Special tokens
must still be included in `vocabulary`. Defaults to `None`.
Examples:
Tokenize
>>> vocab = {"butter": 1, "fly": 2}
>>> merge = ["b u", "t t", "e r", "bu tt", "butt er", "f l", "fl y"]
>>> tokenizer = keras_nlp.tokenizers.BytePairTokenizer(vocab, merge)
>>> outputs = tokenizer("butterfly")
>>> np.array(outputs)
array([1, 2], dtype=int32)
>>> seq1, seq2 = tokenizer(["butterfly", "butter"])
>>> np.array(seq1)
array([1, 2], dtype=int32)
>>> np.array(seq2)
array([1], dtype=int32)
>>> tokenizer = keras_nlp.tokenizers.BytePairTokenizer(
... vocab, merge, sequence_length=2)
>>> seq1, seq2 = tokenizer(["butterfly", "butter"])
>>> np.array(seq1)
array([1, 2], dtype=int32)
>>> np.array(seq2)
array([1, 0], dtype=int32)
Detokenize
>>> vocab = {"butter": 1, "fly": 2}
>>> merge = ["b u", "t t", "e r", "bu tt", "butt er", "f l", "fl y"]
>>> tokenizer = keras_nlp.tokenizers.BytePairTokenizer(vocab, merge)
>>> tokenizer.detokenize([[1, 2]])
<tf.Tensor: shape=(1,), dtype=string, numpy=array([b'butterfly'],
dtype=object)>
"""
def __init__(
self,
vocabulary=None,
merges=None,
sequence_length=None,
add_prefix_space=False,
unsplittable_tokens=None,
dtype="int32",
**kwargs,
) -> None:
assert_tf_text_installed(self.__class__.__name__)
if not is_int_dtype(dtype) and not is_string_dtype(dtype):
raise ValueError(
"Output dtype must be an integer type or a string. "
f"Received: dtype={dtype}"
)
super().__init__(dtype=dtype, **kwargs)
self.sequence_length = sequence_length
self.add_prefix_space = add_prefix_space
self.unsplittable_tokens = unsplittable_tokens
# Create byte <=> unicode mapping. This is useful for handling
# whitespace tokens.
byte_list, unicode_list = bytes_to_unicode()
self.byte2unicode = create_static_hashtable(
byte_list, unicode_list, default=""
)
self.unicode2byte = create_static_hashtable(
unicode_list, byte_list, default=""
)
self.set_vocabulary_and_merges(vocabulary, merges)
def save_assets(self, dir_path):
vocab_path = os.path.join(dir_path, VOCAB_FILENAME)
merges_path = os.path.join(dir_path, MERGES_FILENAME)
with open(vocab_path, "w", encoding="utf-8") as file:
file.write(json.dumps(dict(self.vocabulary)))
with open(merges_path, "w", encoding="utf-8") as file:
for merge in self.merges:
file.write(f"{merge}\n")
def load_assets(self, dir_path):
vocab_path = os.path.join(dir_path, VOCAB_FILENAME)
merges_path = os.path.join(dir_path, MERGES_FILENAME)
self.set_vocabulary_and_merges(vocab_path, merges_path)
def set_vocabulary_and_merges(self, vocabulary, merges):
"""Set the vocabulary and merge rules from data or files."""
if vocabulary is None or merges is None:
# Clear vocab related state.
self.vocabulary = None
self.merges = None
self.cache = None
self.id_to_token_map = None
self.token_to_id_map = None
self.merge_ranks_lookup_default = None
self.merge_ranks = None
return
if isinstance(vocabulary, str):
with open(vocabulary, "r", encoding="utf-8") as f:
self.vocabulary = json.load(f)
elif isinstance(vocabulary, dict):
self.vocabulary = vocabulary.copy()
else:
raise ValueError(
"Vocabulary must be an file path or dictionary mapping string "
"token to int ids. Received: "
f"`type(vocabulary)={type(vocabulary)}`."
)
if isinstance(merges, str):
with open(merges, encoding="utf-8") as f:
self.merges = [bp.rstrip() for bp in f]
elif isinstance(merges, Iterable):
self.merges = list(merges)
else:
raise ValueError(
"Merges must be a file path or a list of merge rules. "
f"Received: `type(merges)={type(merges)}`"
)
self.cache = BytePairTokenizerCache()
if self.unsplittable_tokens:
# Put special tokens into cache, so it won't be further split and
# merged.
self.cache.insert(
self.unsplittable_tokens, self.unsplittable_tokens
)
# Create mapping between string tokens to int ids, and vice versa.
byte_pairs = [x[0] for x in self.vocabulary.items()]
byte_pair_encoding_indices = [x[1] for x in self.vocabulary.items()]
self.token_to_id_map = create_static_hashtable(
byte_pairs,
byte_pair_encoding_indices,
default=-1,
)
self.id_to_token_map = create_static_hashtable(
byte_pair_encoding_indices,
byte_pairs,
default="",
)
# Create ranking of merge rules, this is the same as order of merge
# pairs in `self.merges`.
self.merge_ranks_lookup_default = len(self.merges) + 1
self.merge_ranks = create_static_hashtable(
self.merges,
list(range(len(self.merges))),
default=self.merge_ranks_lookup_default,
)
def get_vocabulary(self) -> List[str]:
"""Get the tokenizer vocabulary as a list of strings tokens."""
self._check_vocabulary()
return self.vocabulary.keys()
def vocabulary_size(self) -> int:
"""Get the size of the tokenizer vocabulary."""
self._check_vocabulary()
return len(self.vocabulary)
def id_to_token(self, id: int) -> str:
"""Convert an integer id to a string token."""
# This will be slow, but keep memory usage down compared to building a
# dict. Assuming the main use case is looking up a few special tokens
# early in the vocab, this should be fine.
self._check_vocabulary()
keys = self.get_vocabulary()
for token in keys:
if self.vocabulary[token] == id:
return token
raise ValueError(f"`id` is out of the vocabulary. Received: {id}")
def token_to_id(self, token: str) -> int:
"""Convert a string token to an integer id."""
self._check_vocabulary()
return self.vocabulary[token]
@tf.function
def _bpe_merge_one_step(self, words, mask):
"""Perform one step of byte-pair merge."""
# Get all word pairs.
first, second = words[:, :-1], words[:, 1:]
# Mask empty.
non_empty_mask = second.nested_row_lengths()[0] != 0
mask = mask & non_empty_mask
if not tf.reduce_any(mask):
return [words, mask]
non_empty_indices = tf.boolean_mask(tf.range(tf.shape(mask)[0]), mask)
        filtered_first = tf.ragged.boolean_mask(first, mask)
        filtered_second = tf.ragged.boolean_mask(second, mask)
        # Get byte pair ranking in merge rules.
        pairs = tf.strings.join([filtered_first, filtered_second], separator=" ")
pair_rank = self.merge_ranks.lookup(pairs)
# Get BPE pair ranks.
min_pair_rank = tf.reduce_min(pair_rank, axis=1)
pair_found_mask = min_pair_rank != self.merge_ranks_lookup_default
# Tokens that cannot be further merged are marked as finished.
mask = tf.tensor_scatter_nd_update(
mask, tf.expand_dims(non_empty_indices, axis=1), pair_found_mask
)
if not tf.math.reduce_any(mask):
return [words, mask]
masked_pair_rank = tf.ragged.boolean_mask(pair_rank, pair_found_mask)
min_pair_rank_indices = tf.math.argmin(
masked_pair_rank.to_tensor(self.merge_ranks_lookup_default), axis=1
)
# Get words and pairs to process.
unfinished_words = tf.ragged.boolean_mask(words, mask)
pair_left = tf.gather(
unfinished_words, min_pair_rank_indices, batch_dims=1
)
pair_right = tf.gather(
unfinished_words, min_pair_rank_indices + 1, batch_dims=1
)
merged_pairs = tf.strings.join([pair_left, pair_right])
empty_strs = tf.fill(tf.shape(merged_pairs), "")
unfinished_word_indices = tf.cast(
tf.boolean_mask(tf.range(tf.shape(mask)[0]), mask), dtype="int64"
)
merged_pair_indices = tf.concat(
[
unfinished_word_indices[:, tf.newaxis],
min_pair_rank_indices[:, tf.newaxis],
],
axis=1,
)
empty_string_indices = tf.concat(
[
unfinished_word_indices[:, tf.newaxis],
min_pair_rank_indices[:, tf.newaxis] + 1,
],
axis=1,
)
tensor_words = words.to_tensor(default_value="")
tensor_words = tf.tensor_scatter_nd_update(
tensor_words,
merged_pair_indices,
merged_pairs,
)
words = tf.tensor_scatter_nd_update(
tensor_words,
empty_string_indices,
empty_strs,
)
# Remove empty strings.
words = remove_strings_from_inputs(words, "")
return [words, mask]
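    # Illustrative walk-through with the toy merge table from the class
    # docstring: "butter" starts as ["b", "u", "t", "t", "e", "r"]; each step
    # merges the lowest-ranked adjacent pair, giving ["bu", "t", "t", "e", "r"],
    # then ["bu", "tt", "e", "r"], ["bu", "tt", "er"], ["butt", "er"], and
    # finally ["butter"].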
def _bpe_merge(self, inputs):
"""Perform byte-pair merge for each word in the inputs."""
num_words = tf.shape(inputs)[0]
# Merge bytes.
def loop_condition(_, mask):
return tf.math.reduce_any(mask)
initial_mask = tf.fill((num_words,), True)
merged_words, _ = tf.while_loop(
loop_condition,
self._bpe_merge_one_step,
loop_vars=[
inputs,
initial_mask,
],
shape_invariants=[
tf.TensorShape([None, None]),
tf.TensorShape([None]),
],
)
return merged_words
def _check_vocabulary(self):
if self.vocabulary is None:
raise ValueError(
"No vocabulary has been set for BytePairTokenizer. Make sure "
"to pass `vocabulary` and `merges` arguments when creating the "
"layer."
)
def tokenize(self, inputs):
self._check_vocabulary()
if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)):
inputs = tf.convert_to_tensor(inputs)
if self.add_prefix_space:
inputs = tf.strings.join([" ", inputs])
scalar_input = inputs.shape.rank == 0
if scalar_input:
inputs = tf.expand_dims(inputs, 0)
raw_tokens = split_strings_for_bpe(inputs, self.unsplittable_tokens)
token_row_splits = raw_tokens.row_splits
flat_tokens = raw_tokens.flat_values
# Check cache.
cache_lookup = self.cache.lookup(flat_tokens)
cache_mask = cache_lookup == ""
has_unseen_words = tf.math.reduce_any(
(cache_lookup == "") & (flat_tokens != "")
)
def process_unseen_tokens():
unseen_tokens = tf.boolean_mask(flat_tokens, cache_mask)
self._bpe_merge_and_update_cache(unseen_tokens)
return self.cache.lookup(flat_tokens)
# If `has_unseen_words == True`, it means not all tokens are in cache,
# we will process the unseen tokens. Otherwise return the cache lookup.
tokenized_words = tf.cond(
has_unseen_words,
process_unseen_tokens,
lambda: cache_lookup,
)
tokens = tf.strings.split(tokenized_words, sep=" ")
if self.compute_dtype != tf.string:
# Encode merged tokens.
tokens = self.token_to_id_map.lookup(tokens)
# Unflatten to match input.
tokens = tf.RaggedTensor.from_row_splits(
tokens.flat_values,
tf.gather(tokens.row_splits, token_row_splits),
)
# Convert to a dense output if `sequence_length` is set.
if self.sequence_length:
output_shape = tokens.shape.as_list()
output_shape[-1] = self.sequence_length
tokens = tokens.to_tensor(shape=output_shape)
# Convert to a dense output if input in scalar
if scalar_input:
tokens = tf.squeeze(tokens, 0)
tf.ensure_shape(tokens, shape=[self.sequence_length])
return tokens
def detokenize(self, inputs):
self._check_vocabulary()
inputs, unbatched, _ = convert_to_ragged_batch(inputs)
inputs = tf.cast(inputs, self.dtype)
unicode_text = tf.strings.reduce_join(
self.id_to_token_map.lookup(inputs), axis=-1
)
split_unicode_text = tf.strings.unicode_split(unicode_text, "UTF-8")
outputs = tf.strings.reduce_join(
self.unicode2byte.lookup(split_unicode_text), axis=-1
)
if unbatched:
outputs = tf.squeeze(outputs, 0)
return outputs
def _transform_bytes(self, tokens):
"""Map token bytes to unicode using `byte2unicode`."""
split_bytes = tf.strings.bytes_split(tokens)
split_unicode = self.byte2unicode.lookup(split_bytes)
return split_unicode
def _bpe_merge_and_update_cache(self, tokens):
"""Process unseen tokens and add to cache."""
words = self._transform_bytes(tokens)
tokenized_words = self._bpe_merge(words)
# For each word, join all its token by a whitespace,
# e.g., ["dragon", "fly"] => "dragon fly" for hash purpose.
tokenized_words = tf.strings.reduce_join(
tokenized_words, axis=1, separator=" "
)
self.cache.insert(tokens, tokenized_words)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"add_prefix_space": self.add_prefix_space,
"unsplittable_tokens": self.unsplittable_tokens,
}
)
return config
@classproperty
def presets(cls):
return {}
@classmethod
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate {{model_name}} tokenizer from preset vocabulary.
Args:
preset: string. Must be one of "{{preset_names}}".
Examples:
```python
# Load a preset tokenizer.
tokenizer = {{model_name}}.from_preset("{{example_preset_name}}")
# Tokenize some input.
tokenizer("The quick brown fox tripped.")
# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
config_file = "tokenizer.json"
check_preset_class(preset, cls, config_file=config_file)
return load_from_preset(
preset,
config_file=config_file,
config_overrides=kwargs,
)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = (
BytePairTokenizer.from_preset.__doc__
)
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets), ""),
preset_names='", "'.join(cls.presets),
)(cls.from_preset.__func__)
| keras-nlp/keras_nlp/tokenizers/byte_pair_tokenizer.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/byte_pair_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 11681
} | 131 |
<jupyter_start><jupyter_text>Install deps<jupyter_code>!pip install git+https://github.com/abheesht17/keras-nlp.git@more-bert-variants tensorflow tf-models-official tensorflow_hub --upgrade --quiet
import json
import os
import keras_nlp
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
MODEL_TYPE = "bert_medium"
MODEL_SUFFIX = "uncased"
MODEL_SPEC_STR = "L-8_H-512_A-8"
MODEL_NAME = f"{MODEL_TYPE}_{MODEL_SUFFIX}"
VOCAB_SIZE = 30522
NUM_LAYERS = 8
NUM_ATTN_HEADS = 8
EMBEDDING_SIZE = 512
# BERT ckpt https://github.com/google-research/bert/blob/master/README.md.
zip_path = f"""https://storage.googleapis.com/bert_models/2020_02_20/{MODEL_SUFFIX}_{MODEL_SPEC_STR}.zip"""
zip_file = keras.utils.get_file(
f"""/content/{MODEL_NAME}""",
zip_path,
extract=True,
archive_format="zip",
)
!unzip """{MODEL_NAME}""" -d """{MODEL_SUFFIX}_{MODEL_SPEC_STR}"""
# BERT paths.
extract_dir = f"/content/{MODEL_SUFFIX}_{MODEL_SPEC_STR}"
vocab_path = os.path.join(extract_dir, "vocab.txt")
checkpoint_path = os.path.join(extract_dir, "bert_model.ckpt")
config_path = os.path.join(extract_dir, "bert_config.json")
vars = tf.train.list_variables(checkpoint_path)
weights = {}
for name, shape in vars:
print(name, shape)
weight = tf.train.load_variable(checkpoint_path, name)
weights[name] = weight<jupyter_output>bert/embeddings/LayerNorm/beta [512]
bert/embeddings/LayerNorm/gamma [512]
bert/embeddings/position_embeddings [512, 512]
bert/embeddings/token_type_embeddings [2, 512]
bert/embeddings/word_embeddings [30522, 512]
bert/encoder/layer_0/attention/output/LayerNorm/beta [512]
bert/encoder/layer_0/attention/output/LayerNorm/gamma [512]
bert/encoder/layer_0/attention/output/dense/bias [512]
bert/encoder/layer_0/attention/output/dense/kernel [512, 512]
bert/encoder/layer_0/attention/self/key/bias [512]
bert/encoder/layer_0/attention/self/key/kernel [512, 512]
bert/encoder/layer_0/attention/self/query/bias [512]
bert/encoder/layer_0/attention/self/query/kernel [512, 512]
bert/encoder/layer_0/attention/self/value/bias [512]
bert/encoder/layer_0/attention/self/value/kernel [512, 512]
bert/encoder/layer_0/intermediate/dense/bias [2048]
bert/encoder/layer_0/intermediate/dense/kernel [512, 2048]
bert/encoder/layer_0/output/LayerNorm/beta [512]
bert/encoder/layer_0/output/LayerNorm/gamma [512]
bert/[...]<jupyter_text>Load BertMedium model with KerasNLP.<jupyter_code>model = keras_nlp.models.BertMedium(vocabulary_size=VOCAB_SIZE)
model.summary()<jupyter_output>Model: "bert_custom"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
token_ids (InputLayer) [(None, None)] 0 []
token_embedding (Embedding) (None, None, 512) 15627264 ['token_ids[0][0]']
segment_ids (InputLayer) [(None, None)] 0 []
position_embedding (PositionEm (None, None, 512) 262144 ['token_embedding[0][0][...]<jupyter_text>Convert Weights<jupyter_code>model.get_layer("token_embedding").embeddings.assign(
weights["bert/embeddings/word_embeddings"]
)
model.get_layer("position_embedding").position_embeddings.assign(
weights["bert/embeddings/position_embeddings"]
)
model.get_layer("segment_embedding").embeddings.assign(
weights["bert/embeddings/token_type_embeddings"]
)
model.get_layer("embeddings_layer_norm").gamma.assign(
weights["bert/embeddings/LayerNorm/gamma"]
)
model.get_layer("embeddings_layer_norm").beta.assign(
weights["bert/embeddings/LayerNorm/beta"]
)
for i in range(model.num_layers):
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/key/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/key/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/query/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/query/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/value/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/value/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
weights[
f"bert/encoder/layer_{i}/attention/output/dense/kernel"
].reshape((NUM_ATTN_HEADS, -1, EMBEDDING_SIZE))
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/output/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
weights[f"bert/encoder/layer_{i}/attention/output/LayerNorm/gamma"]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(
weights[f"bert/encoder/layer_{i}/attention/output/LayerNorm/beta"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/intermediate/dense/kernel"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/intermediate/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/output/dense/kernel"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/output/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
weights[f"bert/encoder/layer_{i}/output/LayerNorm/gamma"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(
weights[f"bert/encoder/layer_{i}/output/LayerNorm/beta"]
)
model.get_layer("pooled_dense").kernel.assign(
weights["bert/pooler/dense/kernel"]
)
model.get_layer("pooled_dense").bias.assign(weights["bert/pooler/dense/bias"])
pass<jupyter_output><empty_output><jupyter_text>Load Bert Medium from TF-Hub.These weights have been ratified by the authors of BERT: https://github.com/google-research/bert/blob/master/README.md. BERT README statement:"***** New February 7th, 2019: TfHub Module *****BERT has been uploaded to TensorFlow Hub. See run_classifier_with_tfhub.py for an example of how to use the TF Hub module, or run an example in the browser on Colab."<jupyter_code>text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
preprocessor = hub.load(
"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
)
tokenizer = hub.KerasLayer(preprocessor.tokenize, name="tokenizer")
tokenized_text = tokenizer(text_input)
packer = hub.KerasLayer(
preprocessor.bert_pack_inputs, arguments=dict(seq_length=512), name="packer"
)
encoder_inputs = packer([tokenized_text])
encoder = hub.KerasLayer(
f"https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_{MODEL_SPEC_STR}/2",
trainable=True,
)
outputs = encoder(encoder_inputs)
pooled_output = outputs["pooled_output"] # [batch_size, 1024].
sequence_output = outputs["sequence_output"] # [batch_size, seq_length, 1024].
embedding_model = tf.keras.Model(text_input, (pooled_output, sequence_output))
def preprocess(x):
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab_path, lowercase=False
)
packer = keras_nlp.layers.MultiSegmentPacker(
sequence_length=model.max_sequence_length,
start_value=tokenizer.token_to_id("[CLS]"),
end_value=tokenizer.token_to_id("[SEP]"),
)
return packer(tokenizer(x))
token_ids, segment_ids = preprocess(["the quick brown fox."])
keras_nlp_output = model(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)
hub_pooled_output, hub_sequence_output = embedding_model(
tf.constant(["the quick brown fox."])
)
keras_nlp_output["pooled_output"][0, :10], hub_pooled_output[0, :10]
# Very close! Though not 100% exact.
(
tf.reduce_mean(keras_nlp_output["pooled_output"] - hub_pooled_output),
tf.reduce_mean(keras_nlp_output["sequence_output"] - hub_sequence_output),
)
# Save BertMedium checkpoint
model.save_weights(f"""{MODEL_NAME}.h5""")
model2 = keras_nlp.models.BertMedium(vocabulary_size=VOCAB_SIZE)
model2.load_weights(f"""{MODEL_NAME}.h5""")
# Same output from loaded checkpoint
keras_nlp_output2 = model2(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)
(
tf.reduce_mean(
keras_nlp_output["pooled_output"] - keras_nlp_output2["pooled_output"]
),
tf.reduce_mean(
keras_nlp_output["sequence_output"]
- keras_nlp_output2["sequence_output"]
),
)
# Save vocab file as well
vocab_info = tf.io.gfile.GFile(vocab_path).read()
with open("vocab.txt", "w") as f:
    f.write(vocab_info)
# Get MD5 of model
!md5sum """{MODEL_NAME}.h5"""
# Upload model to drive
# from google.colab import drive
# drive.mount('/content/drive')
# Check uploaded model once added to repo
model_cloud = keras_nlp.models.BertMedium(weights="uncased_en")
# Same output from cloud model
keras_nlp_output_cloud = model_cloud(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
tf.reduce_mean(keras_nlp_output["pooled_output"] - keras_nlp_output_cloud)
keras_nlp_output_cloud[0, :10]<jupyter_output><empty_output> | keras-nlp/tools/checkpoint_conversion/bert_medium_uncased_en.ipynb/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/bert_medium_uncased_en.ipynb",
"repo_id": "keras-nlp",
"token_count": 4995
} | 132 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import shutil
import numpy as np
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
from keras import ops
import keras_nlp
PRESET_MAP = {
"t5_small_multi": "t5-small",
"t5_base_multi": "t5-base",
"t5_large_multi": "t5-large",
"flan_small_multi": "google/flan-t5-small",
"flan_base_multi": "google/flan-t5-base",
"flan_large_multi": "google/flan-t5-large",
}
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", "t5_base_multi", f'Must be one of {",".join(PRESET_MAP.keys())}'
)
os.environ["KERAS_BACKEND"] = "torch"
def extract_vocab(hf_tokenizer):
proto_path = f"./{FLAGS.preset}/vocab.spm"
print(f"\n-> Save KerasNLP vocab to `{proto_path}`.")
    # Huggingface has a save_vocabulary function, but its output is not
    # byte-for-byte identical to the source file. Instead, copy the original
    # downloaded file directly.
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "spiece.model"
),
proto_path,
)
keras_tokenizer = keras_nlp.models.T5Tokenizer(
proto=proto_path,
)
print("-> Print MD5 checksum of the vocab files.")
print(f"`{proto_path}` md5sum: ", get_md5_checksum(proto_path))
return keras_tokenizer
def convert_checkpoints(hf_model):
keras_nlp_model = keras_nlp.models.T5Backbone.from_preset(
FLAGS.preset, load_weights=False
)
hf_wts = hf_model.state_dict()
print("Original weights:")
print(list(hf_wts.keys()))
for i in range(keras_nlp_model.num_layers):
for section in ["encoder", "decoder"]:
n = 0
# Token embedding layer
keras_nlp_model.get_layer("token_embedding").embeddings.assign(
hf_wts[f"{section}.embed_tokens.weight"]
)
if not keras_nlp_model.tie_embedding_weights:
keras_nlp_model.get_layer(
"token_embedding"
).reverse_embeddings.assign(
hf_wts["lm_head.weight"].transpose(1, 0).numpy()
)
# Query, key, value, and output projectors in self-attention
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.query_projector.kernel.assign(
hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.q.weight"]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.key_projector.kernel.assign(
hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.k.weight"]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.value_projector.kernel.assign(
hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.v.weight"]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.output_projector.kernel.assign(
hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.o.weight"]
.transpose(1, 0)
.numpy()
)
# Add relative attention bias
if keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.use_relative_attention_bias:
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention.relative_attention_bias.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.SelfAttention.relative_attention_bias.weight"
].numpy()
)
# Self-attention norm
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).self_attention_layer_norm.weight.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.layer_norm.weight"
].numpy()
)
# Increment for next layer
n += 1
if section == "decoder":
# Cross-attention QKV and output proj (one between encoder and decoder)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).cross_attention.query_projector.kernel.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.EncDecAttention.q.weight"
]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).cross_attention.key_projector.kernel.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.EncDecAttention.k.weight"
]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).cross_attention.value_projector.kernel.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.EncDecAttention.v.weight"
]
.transpose(1, 0)
.numpy()
)
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).cross_attention.output_projector.kernel.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.EncDecAttention.o.weight"
]
.transpose(1, 0)
.numpy()
)
# Cross-attention layer norm
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).cross_attention_layer_norm.weight.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.layer_norm.weight"
].numpy()
)
# Increment for next layer
n += 1
if keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).use_gated_activation:
# Input projection layer
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).input_projector.weights[0].assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.DenseReluDense.wi_0.weight"
]
.transpose(1, 0)
.numpy()
)
# Gated activation layer
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).gate_projector.weights[0].assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.DenseReluDense.wi_1.weight"
]
.transpose(1, 0)
.numpy()
)
else:
# Input projection layer
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).input_projector.weights[0].assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.DenseReluDense.wi.weight"
]
.transpose(1, 0)
.numpy()
)
# Output projection layer
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).output_projector.weights[0].assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.DenseReluDense.wo.weight"
]
.transpose(1, 0)
.numpy()
)
# Layer norm
keras_nlp_model.get_layer(
f"transformer_{section}_layer_{i}"
).layer_norm.weight.assign(
hf_wts[
f"{section}.block.{i}.layer.{n}.layer_norm.weight"
].numpy()
)
# Final normalization
keras_nlp_model.get_layer(f"{section}_output_layer_norm").weights[
-1
].assign(hf_wts[f"{section}.final_layer_norm.weight"].numpy())
return keras_nlp_model
def check_output(
keras_model,
keras_tokenizer,
hf_model,
hf_tokenizer,
):
print("\n-> Compare the outputs.")
encoder_input = ["the quick brown fox jumped."]
decoder_input = ["the quick brown fox fell."]
sequence_length = 12
# KerasNLP Tokenization
packer = keras_nlp.layers.StartEndPacker(
sequence_length=sequence_length,
pad_value=keras_tokenizer.pad_token_id,
end_value=keras_tokenizer.end_token_id,
)
encoder_token_ids = packer(keras_tokenizer(encoder_input))
encoder_padding_mask = encoder_token_ids != keras_tokenizer.pad_token_id
decoder_token_ids = packer(keras_tokenizer(decoder_input))
decoder_padding_mask = decoder_token_ids != keras_tokenizer.pad_token_id
keras_inputs = {
"encoder_token_ids": encoder_token_ids,
"encoder_padding_mask": encoder_padding_mask,
"decoder_token_ids": decoder_token_ids,
"decoder_padding_mask": decoder_padding_mask,
}
# HF Tokenization.
hf_encoder_inputs = hf_tokenizer(
encoder_input,
padding="max_length",
max_length=sequence_length,
return_tensors="pt",
)
hf_decoder_inputs = hf_tokenizer(
decoder_input,
padding="max_length",
max_length=sequence_length,
return_tensors="pt",
)
hf_inputs = {
"input_ids": hf_encoder_inputs["input_ids"],
"attention_mask": hf_encoder_inputs["attention_mask"],
"decoder_input_ids": hf_decoder_inputs["input_ids"],
"decoder_attention_mask": hf_decoder_inputs["attention_mask"],
}
    # Compare tokenized inputs. This should be a complete match.
print("-> KerasNLP inputs:")
for k, v in keras_inputs.items():
print(k, v)
print("-> HF inputs:")
for k, v in hf_inputs.items():
print(k, v)
# Forward pass
keras_out = keras_model(keras_inputs)
hf_out = hf_model(**hf_inputs, output_hidden_states=True)
# Only compare non-padded token ids.
keras_hidden_states = keras_out["decoder_sequence_output"]
hf_hidden_states = hf_out.decoder_hidden_states[-1]
keras_outputs = ops.take_along_axis(
keras_hidden_states, ops.where(decoder_padding_mask)
)
hf_outputs = ops.take_along_axis(
hf_hidden_states, ops.where(decoder_padding_mask)
)
print("-> KerasNLP output:", keras_outputs[0:5])
print("-> HF output:", hf_outputs[0:5])
np.testing.assert_allclose(
keras_outputs.detach().numpy(), hf_outputs.detach().numpy(), atol=1e-5
)
if keras_model.tie_embedding_weights:
keras_hidden_states = keras_hidden_states * (
keras_model.hidden_dim**-0.5
)
keras_logits = keras_model.token_embedding(
keras_hidden_states, reverse=True
)
hf_logits = hf_out.logits
print("-> KerasNLP logits:", keras_logits[0:5])
print("-> HF logits:", hf_logits[0:5])
np.testing.assert_allclose(
keras_logits.detach().numpy(), hf_logits.detach().numpy(), atol=1e-3
)
def count_params(weights):
shapes = [v.shape for v in weights]
return int(sum(math.prod(p) for p in shapes))
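# A quick worked example of `count_params` (hypothetical shapes, not from this
# script): weights with shapes (2, 3) and (3,) contribute 2 * 3 + 3 = 9
# parameters in total.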
def main(_):
hf_id = PRESET_MAP[FLAGS.preset]
shutil.rmtree(f"./{FLAGS.preset}", ignore_errors=True)
os.mkdir(f"./{FLAGS.preset}")
print("\n-> Convert weights.")
hf_model = transformers.T5ForConditionalGeneration.from_pretrained(hf_id)
keras_model = convert_checkpoints(hf_model)
# Save the model.
model_path = f"./{FLAGS.preset}/model.weights.h5"
print(f"\n-> Save KerasNLP model weights to `{model_path}`.")
keras_model.save_weights(model_path)
print("-> Print MD5 checksum of the model weights files.")
print(f"`{model_path}` md5sum: ", get_md5_checksum(model_path))
print(f"-> Param count {count_params(keras_model.weights)}")
print("\n-> Convert vocab.")
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_id)
keras_tokenizer = extract_vocab(hf_tokenizer)
check_output(
keras_model,
keras_tokenizer,
hf_model,
hf_tokenizer,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_t5_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_t5_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 7154
} | 133 |
"""Utilities for real-time data augmentation on image data.
"""
import os
import warnings
from collections import OrderedDict
import numpy as np
from .iterator import BatchFromFilesMixin, Iterator
from .utils import validate_filename
class DataFrameIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk
through a dataframe.
# Arguments
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the
images in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must
include the `y_col` column with the class/es of each image.
Values in column can be string/list/tuple if a single class
or list/tuple if multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include
the given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain
the columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
image_data_generator: Instance of `ImageDataGenerator` to use for
random transformations and normalization. If None, no transformations
and normalizations are made.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
classes: Optional list of strings, classes to use (e.g. `["dogs", "cats"]`).
If None, all classes in `y_col` will be used.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", "sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels.
Supports multi-label output.
- `"input"`: images identical to input images (mainly used to
work with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels,
- `None`, no targets are returned (the generator will only yield
batches of image data, which is useful to use in
`model.predict_generator()`).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
keep_aspect_ratio: Boolean, whether to resize images to a target size
without aspect ratio distortion. The image is cropped in the center
with target aspect ratio before resizing.
dtype: Dtype to use for the generated arrays.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this option
            can speed up the instantiation of this class. Default: `True`.
"""
allowed_class_modes = {
'binary', 'categorical', 'input', 'multi_output', 'raw', 'sparse', None
}
def __new__(cls, *args, **kwargs):
try:
from tensorflow.keras.utils import Sequence as TFSequence
if TFSequence not in cls.__bases__:
cls.__bases__ = cls.__bases__ + (TFSequence,)
except ImportError:
pass
return super(DataFrameIterator, cls).__new__(cls)
def __init__(self,
dataframe,
directory=None,
image_data_generator=None,
x_col="filename",
y_col="class",
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
keep_aspect_ratio=False,
dtype='float32',
validate_filenames=True):
super(DataFrameIterator, self).set_processing_attrs(image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation,
keep_aspect_ratio)
df = dataframe.copy()
self.directory = directory or ''
self.class_mode = class_mode
self.dtype = dtype
# check that inputs match the required class_mode
self._check_params(df, x_col, y_col, weight_col, classes)
if validate_filenames: # check which image files are valid and keep them
df = self._filter_valid_filepaths(df, x_col)
if class_mode not in ["input", "multi_output", "raw", None]:
df, classes = self._filter_classes(df, y_col, classes)
num_classes = len(classes)
# build an index of all the unique classes
self.class_indices = dict(zip(classes, range(len(classes))))
# retrieve only training or validation set
if self.split:
num_files = len(df)
start = int(self.split[0] * num_files)
stop = int(self.split[1] * num_files)
df = df.iloc[start: stop, :]
# get labels for each observation
if class_mode not in ["input", "multi_output", "raw", None]:
self.classes = self.get_classes(df, y_col)
self.filenames = df[x_col].tolist()
self._sample_weight = df[weight_col].values if weight_col else None
if class_mode == "multi_output":
self._targets = [np.array(df[col].tolist()) for col in y_col]
if class_mode == "raw":
self._targets = df[y_col].values
self.samples = len(self.filenames)
validated_string = 'validated' if validate_filenames else 'non-validated'
if class_mode in ["input", "multi_output", "raw", None]:
print('Found {} {} image filenames.'
.format(self.samples, validated_string))
else:
print('Found {} {} image filenames belonging to {} classes.'
.format(self.samples, validated_string, num_classes))
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super(DataFrameIterator, self).__init__(self.samples,
batch_size,
shuffle,
seed)
def _check_params(self, df, x_col, y_col, weight_col, classes):
        # check that class_mode is one of the currently supported modes
if self.class_mode not in self.allowed_class_modes:
raise ValueError('Invalid class_mode: {}; expected one of: {}'
.format(self.class_mode, self.allowed_class_modes))
# check that y_col has several column names if class_mode is multi_output
if (self.class_mode == 'multi_output') and not isinstance(y_col, list):
raise TypeError(
'If class_mode="{}", y_col must be a list. Received {}.'
.format(self.class_mode, type(y_col).__name__)
)
# check that filenames/filepaths column values are all strings
if not all(df[x_col].apply(lambda x: isinstance(x, str))):
raise TypeError('All values in column x_col={} must be strings.'
.format(x_col))
# check labels are string if class_mode is binary or sparse
if self.class_mode in {'binary', 'sparse'}:
if not all(df[y_col].apply(lambda x: isinstance(x, str))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be strings.'
.format(self.class_mode, y_col))
# check that if binary there are only 2 different classes
if self.class_mode == 'binary':
if classes:
classes = set(classes)
if len(classes) != 2:
raise ValueError('If class_mode="binary" there must be 2 '
'classes. {} class/es were given.'
.format(len(classes)))
elif df[y_col].nunique() != 2:
raise ValueError('If class_mode="binary" there must be 2 classes. '
'Found {} classes.'.format(df[y_col].nunique()))
# check values are string, list or tuple if class_mode is categorical
if self.class_mode == 'categorical':
types = (str, list, tuple)
if not all(df[y_col].apply(lambda x: isinstance(x, types))):
raise TypeError('If class_mode="{}", y_col="{}" column '
'values must be type string, list or tuple.'
.format(self.class_mode, y_col))
# raise warning if classes are given but will be unused
if classes and self.class_mode in {"input", "multi_output", "raw", None}:
warnings.warn('`classes` will be ignored given the class_mode="{}"'
.format(self.class_mode))
        # check that, if a weight column is given, its values are numerical
if weight_col and not issubclass(df[weight_col].dtype.type, np.number):
raise TypeError('Column weight_col={} must be numeric.'
.format(weight_col))
def get_classes(self, df, y_col):
labels = []
for label in df[y_col]:
if isinstance(label, (list, tuple)):
labels.append([self.class_indices[lbl] for lbl in label])
else:
labels.append(self.class_indices[label])
return labels
@staticmethod
def _filter_classes(df, y_col, classes):
df = df.copy()
def remove_classes(labels, classes):
if isinstance(labels, (list, tuple)):
labels = [cls for cls in labels if cls in classes]
return labels or None
elif isinstance(labels, str):
return labels if labels in classes else None
else:
raise TypeError(
"Expect string, list or tuple but found {} in {} column "
.format(type(labels), y_col)
)
if classes:
# prepare for membership lookup
classes = list(OrderedDict.fromkeys(classes).keys())
df[y_col] = df[y_col].apply(lambda x: remove_classes(x, classes))
else:
classes = set()
for v in df[y_col]:
if isinstance(v, (list, tuple)):
classes.update(v)
else:
classes.add(v)
classes = sorted(classes)
return df.dropna(subset=[y_col]), classes
def _filter_valid_filepaths(self, df, x_col):
"""Keep only dataframe rows with valid filenames
# Arguments
df: Pandas dataframe containing filenames in a column
x_col: string, column in `df` that contains the filenames or filepaths
# Returns
            the dataframe filtered to rows whose filenames are valid image files
"""
filepaths = df[x_col].map(
lambda fname: os.path.join(self.directory, fname)
)
mask = filepaths.apply(validate_filename, args=(self.white_list_formats,))
n_invalid = (~mask).sum()
if n_invalid:
warnings.warn(
'Found {} invalid image filename(s) in x_col="{}". '
'These filename(s) will be ignored.'
.format(n_invalid, x_col)
)
return df[mask]
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
if self.class_mode in {"multi_output", "raw"}:
return self._targets
else:
return self.classes
@property
def sample_weight(self):
return self._sample_weight
| keras-preprocessing/keras_preprocessing/image/dataframe_iterator.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/dataframe_iterator.py",
"repo_id": "keras-preprocessing",
"token_count": 7013
} | 134 |
from keras_preprocessing import image
def test_api_classes():
expected_exposed_classes = [
'DataFrameIterator',
'DirectoryIterator',
'ImageDataGenerator',
'Iterator',
'NumpyArrayIterator',
]
for _class in expected_exposed_classes:
assert hasattr(image, _class)
def test_api_functions():
expected_exposed_functions = [
'flip_axis',
'random_rotation',
'random_shift',
'random_shear',
'random_zoom',
'apply_channel_shift',
'random_channel_shift',
'apply_brightness_shift',
'random_brightness',
'transform_matrix_offset_center',
'apply_affine_transform',
'validate_filename',
'save_img',
'load_img',
'list_pictures',
'array_to_img',
'img_to_array'
]
for function in expected_exposed_functions:
assert hasattr(image, function)
| keras-preprocessing/tests/image/test_image_api.py/0 | {
"file_path": "keras-preprocessing/tests/image/test_image_api.py",
"repo_id": "keras-preprocessing",
"token_count": 437
} | 135 |
<meta http-equiv="refresh" content="0; URL='https://github.com/keras-team/keras-tuner/blob/master/CONTRIBUTING.md'" />
| keras-tuner/docs/site/contributing/index.html/0 | {
"file_path": "keras-tuner/docs/site/contributing/index.html",
"repo_id": "keras-tuner",
"token_count": 48
} | 136 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hypertunable version of ResNet."""
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.backend import keras
from keras_tuner.backend import ops
from keras_tuner.backend.keras import layers
from keras_tuner.engine import hypermodel
@keras_tuner_export("keras_tuner.applications.HyperResNet")
class HyperResNet(hypermodel.HyperModel):
"""A ResNet hypermodel.
Models built by `HyperResNet` take images with shape (height, width,
    channels) as input. The outputs are one-hot encoded with the length matching
the number of classes specified by the `classes` argument.
Args:
include_top: Boolean, whether to include the fully-connected layer at
the top of the network.
input_shape: Optional shape tuple, e.g. `(256, 256, 3)`. One of
`input_shape` or `input_tensor` must be specified.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model. One of `input_shape` or
`input_tensor` must be specified.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
**kwargs: Additional keyword arguments that apply to all hypermodels.
See `keras_tuner.HyperModel`.
"""
def __init__(
self,
include_top=True,
input_shape=None,
input_tensor=None,
classes=None,
**kwargs,
):
super().__init__(**kwargs)
if include_top and classes is None:
raise ValueError(
"You must specify `classes` when `include_top=True`"
)
if input_shape is None and input_tensor is None:
raise ValueError(
"You must specify either `input_shape` or `input_tensor`."
)
self.include_top = include_top
self.input_shape = input_shape
self.input_tensor = input_tensor
self.classes = classes
def build(self, hp):
version = hp.Choice("version", ["v1", "v2", "next"], default="v2")
conv3_depth = hp.Choice("conv3_depth", [4, 8])
conv4_depth = hp.Choice("conv4_depth", [6, 23, 36])
# Version-conditional fixed parameters
preact = version == "v2"
use_bias = version != "next"
# Model definition.
bn_axis = (
3 if keras.backend.image_data_format() == "channels_last" else 1
)
if self.input_tensor is not None:
inputs = keras.utils.get_source_inputs(self.input_tensor)
x = self.input_tensor
else:
inputs = layers.Input(shape=self.input_shape)
x = inputs
# Initial conv2d block.
x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name="conv1_pad")(x)
x = layers.Conv2D(
64, 7, strides=2, use_bias=use_bias, name="conv1_conv"
)(x)
if not preact:
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name="conv1_bn"
)(x)
x = layers.Activation("relu", name="conv1_relu")(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name="pool1_pad")(x)
x = layers.MaxPooling2D(3, strides=2, name="pool1_pool")(x)
# Middle hypertunable stack.
if version == "v1":
x = stack1(x, 64, 3, stride1=1, name="conv2")
x = stack1(x, 128, conv3_depth, name="conv3")
x = stack1(x, 256, conv4_depth, name="conv4")
x = stack1(x, 512, 3, name="conv5")
elif version == "v2":
x = stack2(x, 64, 3, name="conv2")
x = stack2(x, 128, conv3_depth, name="conv3")
x = stack2(x, 256, conv4_depth, name="conv4")
x = stack2(x, 512, 3, stride1=1, name="conv5")
elif version == "next":
x = stack3(x, 64, 3, name="conv2")
x = stack3(x, 256, conv3_depth, name="conv3")
x = stack3(x, 512, conv4_depth, name="conv4")
x = stack3(x, 1024, 3, stride1=1, name="conv5")
# Top of the model.
if preact:
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name="post_bn"
)(x)
x = layers.Activation("relu", name="post_relu")(x)
pooling = hp.Choice("pooling", ["avg", "max"], default="avg")
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
if not self.include_top:
return keras.Model(inputs, x, name="ResNet")
x = layers.Dense(self.classes, activation="softmax", name="probs")(x)
model = keras.Model(inputs, x, name="ResNet")
optimizer_name = hp.Choice(
"optimizer", ["adam", "rmsprop", "sgd"], default="adam"
)
optimizer = keras.optimizers.get(optimizer_name)
optimizer.learning_rate = hp.Choice(
"learning_rate", [0.1, 0.01, 0.001], default=0.01
)
model.compile(
optimizer=optimizer,
loss="categorical_crossentropy",
metrics=["accuracy"],
)
return model
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if keras.backend.image_data_format() == "channels_last" else 1
if conv_shortcut is True:
shortcut = layers.Conv2D(
4 * filters, 1, strides=stride, name=f"{name}_0_conv"
)(x)
shortcut = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_0_bn"
)(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, strides=stride, name=f"{name}_1_conv")(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_1_relu")(x)
x = layers.Conv2D(
filters, kernel_size, padding="same", name=f"{name}_2_conv"
)(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_2_relu")(x)
x = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_3_bn"
)(x)
x = layers.Add(name=f"{name}_add")([shortcut, x])
x = layers.Activation("relu", name=f"{name}_out")(x)
return x
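# A small shape sketch for `block1` (illustrative; the input tensor below is an
# assumption). With `filters=64`, `stride=1`, and an identity shortcut, a
# (batch, 56, 56, 256) input goes through 1x1 -> 3x3 -> 1x1 convolutions and is
# added back to the shortcut, so the output keeps shape (batch, 56, 56, 256):
#
#   inputs = keras.Input(shape=(56, 56, 256))
#   outputs = block1(inputs, 64, conv_shortcut=False, name="demo_block")
#   # outputs.shape == (None, 56, 56, 256)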
def stack1(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block1(x, filters, stride=stride1, name=f"{name}_block1")
for i in range(2, blocks + 1):
x = block1(
x, filters, conv_shortcut=False, name=f"{name}_block{str(i)}"
)
return x
def block2(x, filters, kernel_size=3, stride=1, conv_shortcut=False, name=None):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
conv_shortcut: default False, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if keras.backend.image_data_format() == "channels_last" else 1
preact = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_preact_bn"
)(x)
preact = layers.Activation("relu", name=f"{name}_preact_relu")(preact)
if conv_shortcut is True:
shortcut = layers.Conv2D(
4 * filters, 1, strides=stride, name=f"{name}_0_conv"
)(preact)
else:
shortcut = (
layers.MaxPooling2D(1, strides=stride)(x) if stride > 1 else x
)
x = layers.Conv2D(
filters, 1, strides=1, use_bias=False, name=f"{name}_1_conv"
)(preact)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_1_relu")(x)
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=f"{name}_2_pad")(x)
x = layers.Conv2D(
filters,
kernel_size,
strides=stride,
use_bias=False,
name=f"{name}_2_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_2_relu")(x)
x = layers.Conv2D(4 * filters, 1, name=f"{name}_3_conv")(x)
x = layers.Add(name=f"{name}_out")([shortcut, x])
return x
def stack2(x, filters, blocks, stride1=2, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block2(x, filters, conv_shortcut=True, name=f"{name}_block1")
for i in range(2, blocks):
x = block2(x, filters, name=f"{name}_block{str(i)}")
x = block2(x, filters, stride=stride1, name=f"{name}_block{str(blocks)}")
return x
def block3(
x,
filters,
kernel_size=3,
stride=1,
groups=32,
conv_shortcut=True,
name=None,
):
"""A residual block.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer.
kernel_size: default 3, kernel size of the bottleneck layer.
stride: default 1, stride of the first layer.
groups: default 32, group size for grouped convolution.
conv_shortcut: default True, use convolution shortcut if True,
otherwise identity shortcut.
name: string, block label.
Returns:
Output tensor for the residual block.
"""
bn_axis = 3 if keras.backend.image_data_format() == "channels_last" else 1
if conv_shortcut is True:
shortcut = layers.Conv2D(
(64 // groups) * filters,
1,
strides=stride,
use_bias=False,
name=f"{name}_0_conv",
)(x)
shortcut = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_0_bn"
)(shortcut)
else:
shortcut = x
x = layers.Conv2D(filters, 1, use_bias=False, name=f"{name}_1_conv")(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_1_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_1_relu")(x)
c = filters // groups
x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=f"{name}_2_pad")(x)
x = layers.DepthwiseConv2D(
kernel_size,
strides=stride,
depth_multiplier=c,
use_bias=False,
name=f"{name}_2_conv",
)(x)
if bn_axis == 3:
x_shape = ops.shape(x)[1:-1]
x = layers.Reshape(x_shape + (groups, c, c))(x)
output_shape = x_shape + (groups, c)
x = layers.Lambda(
lambda x: sum(x[:, :, :, :, i] for i in range(c)),
name=f"{name}_2_reduce",
output_shape=output_shape,
)(x)
x = layers.Reshape(x_shape + (filters,))(x)
else:
x_shape = ops.shape(x)[2:]
x = layers.Reshape((groups, c, c) + x_shape)(x)
output_shape = (groups, c) + x_shape
x = layers.Lambda(
lambda x: sum(x[:, :, i, :, :] for i in range(c)),
name=f"{name}_2_reduce",
output_shape=output_shape,
)(x)
x = layers.Reshape((filters,) + x_shape)(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_2_bn"
)(x)
x = layers.Activation("relu", name=f"{name}_2_relu")(x)
x = layers.Conv2D(
(64 // groups) * filters, 1, use_bias=False, name=f"{name}_3_conv"
)(x)
x = layers.BatchNormalization(
axis=bn_axis, epsilon=1.001e-5, name=f"{name}_3_bn"
)(x)
x = layers.Add(name=f"{name}_add")([shortcut, x])
x = layers.Activation("relu", name=f"{name}_out")(x)
return x
def stack3(x, filters, blocks, stride1=2, groups=32, name=None):
"""A set of stacked residual blocks.
Args:
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
stride1: default 2, stride of the first layer in the first block.
groups: default 32, group size for grouped convolution.
name: string, stack label.
Returns:
Output tensor for the stacked blocks.
"""
x = block3(x, filters, stride=stride1, groups=groups, name=f"{name}_block1")
for i in range(2, blocks + 1):
x = block3(
x,
filters,
groups=groups,
conv_shortcut=False,
name=f"{name}_block{str(i)}",
)
return x
| keras-tuner/keras_tuner/applications/resnet.py/0 | {
"file_path": "keras-tuner/keras_tuner/applications/resnet.py",
"repo_id": "keras-tuner",
"token_count": 6761
} | 137 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distributed tuning."""
import copy
import os
from unittest import mock
import numpy as np
import portpicker
import pytest
import keras_tuner
from keras_tuner.backend import keras
from keras_tuner.distribute import oracle_client
from keras_tuner.distribute import utils as dist_utils
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.test_utils import mock_distribute
from keras_tuner.tuners import randomsearch
class SimpleTuner(keras_tuner.engine.base_tuner.BaseTuner):
def run_trial(self, trial):
score = self.hypermodel.build(trial.hyperparameters)
self.save_model(trial.trial_id, score)
return {"score": score}
def save_model(self, trial_id, score, step=0):
save_path = os.path.join(self.project_dir, trial_id)
with open(save_path, "w") as f:
f.write(str(score))
def load_model(self, trial):
save_path = os.path.join(self.project_dir, trial.trial_id)
with open(save_path, "r") as f:
score = int(f.read())
return score
def test_base_tuner_distribution(tmp_path):
num_workers = 3
def _test_base_tuner():
def build_model(hp):
return hp.Int("a", 1, 100)
tuner = SimpleTuner(
oracle=keras_tuner.oracles.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"), max_trials=10
),
hypermodel=build_model,
directory=tmp_path,
)
tuner.search()
if dist_utils.is_chief_oracle():
# Model is just a score.
scores = tuner.get_best_models(10)
assert len(scores)
assert scores == sorted(copy.copy(scores), reverse=True)
mock_distribute.mock_distribute(
_test_base_tuner, num_workers=num_workers, wait_for_chief=True
)
def test_random_search(tmp_path):
# TensorFlow model building and execution is not thread-safe.
num_workers = 1
x = np.random.uniform(-1, 1, size=(2, 5))
y = np.ones((2,))
def _test_random_search():
def build_model(hp):
model = keras.Sequential()
model.add(
keras.layers.Dense(hp.Int("num_units", 1, 3), input_shape=(5,))
)
model.add(keras.layers.Dense(1))
model.compile(loss="mse")
return model
tuner = keras_tuner.tuners.RandomSearch(
hypermodel=build_model,
objective="val_loss",
max_trials=10,
directory=tmp_path,
)
tuner.search(
x, y, validation_data=(x, y), epochs=1, batch_size=2, verbose=0
)
# Suppress warnings about optimizer state not being restored by
# tf.keras.
if dist_utils.is_chief_oracle():
trials = tuner.oracle.get_best_trials(2)
assert trials[0].score <= trials[1].score
models = tuner.get_best_models(2)
assert models[0].evaluate(x, y, verbose=0) <= models[1].evaluate(
x, y, verbose=0
)
mock_distribute.mock_distribute(
_test_random_search, num_workers, wait_for_chief=True
)
def test_client_no_attribute_error():
with mock.patch.object(os, "environ", mock_distribute.MockEnvVars()):
port = str(portpicker.pick_unused_port())
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
os.environ["KERASTUNER_ORACLE_PORT"] = port
os.environ["KERASTUNER_TUNER_ID"] = "worker0"
hps = keras_tuner.HyperParameters()
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
client = oracle_client.OracleClient(oracle)
with pytest.raises(AttributeError, match="has no attribute"):
client.unknown_attribute
@mock.patch("keras_tuner.distribute.oracle_client.OracleClient.get_space")
def test_should_not_report_update_trial_return_running(get_space):
get_space.return_value = hp_module.HyperParameters()
with mock.patch.object(os, "environ", mock_distribute.MockEnvVars()):
port = str(portpicker.pick_unused_port())
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
os.environ["KERASTUNER_ORACLE_PORT"] = port
os.environ["KERASTUNER_TUNER_ID"] = "worker0"
hps = keras_tuner.HyperParameters()
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
client = oracle_client.OracleClient(oracle)
client.should_report = False
assert client.update_trial("a", {"score": 100}).status == "RUNNING"
| keras-tuner/keras_tuner/distribute/oracle_client_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/distribute/oracle_client_test.py",
"repo_id": "keras-tuner",
"token_count": 2359
} | 138 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_tuner.engine import hyperparameters as hp_module
def test_fixed():
fixed = hp_module.Fixed("fixed", "value")
fixed = hp_module.Fixed.from_config(fixed.get_config())
assert fixed.default == "value"
assert fixed.random_sample() == "value"
fixed = hp_module.Fixed("fixed", True)
assert fixed.default is True
assert fixed.random_sample() is True
fixed = hp_module.Fixed("fixed", False)
fixed = hp_module.Fixed.from_config(fixed.get_config())
assert fixed.default is False
assert fixed.random_sample() is False
fixed = hp_module.Fixed("fixed", 1)
assert fixed.value == 1
assert fixed.random_sample() == 1
fixed = hp_module.Fixed("fixed", 8.2)
assert fixed.value == 8.2
assert fixed.random_sample() == 8.2
assert fixed.value_to_prob(fixed.value) == 0.5
with pytest.raises(ValueError, match="value must be an"):
hp_module.Fixed("fixed", None)
def test_fixed_repr():
assert repr(hp_module.Fixed("fixed", "value")) == repr(
hp_module.Fixed("fixed", "value")
)
def test_fixed_values_property():
assert list(hp_module.Fixed("fixed", 2).values) == [2]
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/fixed_hp_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/fixed_hp_test.py",
"repo_id": "keras-tuner",
"token_count": 583
} | 139 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Oracle base class."
import collections
import hashlib
import os
import random
import threading
import warnings
from datetime import datetime
import numpy as np
from keras_tuner import backend
from keras_tuner import utils
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import objective as obj_module
from keras_tuner.engine import stateful
from keras_tuner.engine import trial as trial_module
# For backward compatibility.
Objective = obj_module.Objective
# Map each `Oracle` instance to its `Lock`.
LOCKS = collections.defaultdict(lambda: threading.Lock())
# Map each `Oracle` instance to the name of the thread that acquired the `Lock`.
THREADS = collections.defaultdict(lambda: None)
@keras_tuner_export("keras_tuner.synchronized")
def synchronized(func, *args, **kwargs):
"""Decorator to synchronize the multi-threaded calls to `Oracle` functions.
In parallel tuning, there may be concurrent gRPC calls from multiple threads
to the `Oracle` methods like `create_trial()`, `update_trial()`, and
`end_trial()`. To avoid concurrent writing to the data, use `@synchronized`
to ensure the calls are synchronized, which only allows one call to run at a
time.
Concurrent calls to different `Oracle` objects would not block one another.
Concurrent calls to the same or different functions of the same `Oracle`
object would block one another.
You can decorate a subclass function, which overrides an already decorated
function in the base class, without worrying about creating a deadlock.
    However, the decorator only supports methods within classes, and cannot be
applied to standalone functions.
You do not need to decorate `Oracle.populate_space()`, which is only
called by `Oracle.create_trial()`, which is decorated.
Example:
```py
class MyOracle(keras_tuner.Oracle):
@keras_tuner.synchronized
def create_trial(self, tuner_id):
super().create_trial(tuner_id)
...
@keras_tuner.synchronized
def update_trial(self, trial_id, metrics, step=0):
super().update_trial(trial_id, metrics, step)
...
@keras_tuner.synchronized
def end_trial(self, trial):
super().end_trial(trial)
...
```
"""
def backward_compatible_end_trial(self, trial_id, status):
trial = trial_module.Trial(self.get_space(), trial_id, status)
return [self, trial], {}
def wrapped_func(*args, **kwargs):
# For backward compatible with the old end_trial signature:
# def end_trial(self, trial_id, status="COMPLETED"):
if func.__name__ == "end_trial" and (
"trial_id" in kwargs
or "status" in kwargs
or isinstance(args[1], str)
):
args, kwargs = backward_compatible_end_trial(*args, **kwargs)
oracle = args[0]
        thread_name = threading.current_thread().name
need_acquire = THREADS[oracle] != thread_name
if need_acquire:
LOCKS[oracle].acquire()
THREADS[oracle] = thread_name
ret_val = func(*args, **kwargs)
if need_acquire:
THREADS[oracle] = None
LOCKS[oracle].release()
return ret_val
return wrapped_func
# TODO: Add more extensive display.
class Display(stateful.Stateful):
def __init__(self, oracle, verbose=1):
self.verbose = verbose
self.oracle = oracle
self.col_width = 18
# Start time for the overall search
self.search_start = None
# Start time of the trials
# {trial_id: start_time}
self.trial_start = {}
# Trial number of the trials, starting from #1.
# {trial_id: trial_number}
self.trial_number = {}
def get_state(self):
return {
"search_start": self.search_start.isoformat()
if self.search_start is not None
else self.search_start,
"trial_start": {
key: value.isoformat()
for key, value in self.trial_start.items()
},
"trial_number": self.trial_number,
}
def set_state(self, state):
self.search_start = (
datetime.fromisoformat(state["search_start"])
if state["search_start"] is not None
else state["search_start"]
)
self.trial_start = {
key: datetime.fromisoformat(value)
for key, value in state["trial_start"].items()
}
self.trial_number = state["trial_number"]
def on_trial_begin(self, trial):
if self.verbose < 1:
return
start_time = datetime.now()
self.trial_start[trial.trial_id] = start_time
if self.search_start is None:
self.search_start = start_time
current_number = len(self.oracle.trials)
self.trial_number[trial.trial_id] = current_number
print()
print(f"Search: Running Trial #{current_number}")
print()
self.show_hyperparameter_table(trial)
print()
def on_trial_end(self, trial):
if self.verbose < 1:
return
utils.try_clear()
time_taken_str = self.format_duration(
datetime.now() - self.trial_start[trial.trial_id]
)
print(
f"Trial {self.trial_number[trial.trial_id]} "
f"Complete [{time_taken_str}]"
)
if trial.score is not None:
print(f"{self.oracle.objective.name}: {trial.score}")
print()
best_trials = self.oracle.get_best_trials()
best_score = best_trials[0].score if len(best_trials) > 0 else None
print(f"Best {self.oracle.objective.name} So Far: {best_score}")
time_elapsed_str = self.format_duration(
datetime.now() - self.search_start
)
print(f"Total elapsed time: {time_elapsed_str}")
def show_hyperparameter_table(self, trial):
template = "{{0:{0}}}|{{1:{0}}}|{{2}}".format(self.col_width)
best_trials = self.oracle.get_best_trials()
best_trial = best_trials[0] if len(best_trials) > 0 else None
if trial.hyperparameters.values:
print(
template.format("Value", "Best Value So Far", "Hyperparameter")
)
for hp, value in trial.hyperparameters.values.items():
best_value = (
best_trial.hyperparameters.values.get(hp)
if best_trial
else "?"
)
print(
template.format(
self.format_value(value),
self.format_value(best_value),
hp,
)
)
else:
print("default configuration")
def format_value(self, val):
if isinstance(val, (int, float)) and not isinstance(val, bool):
return f"{val:.5g}"
val_str = str(val)
if len(val_str) > self.col_width:
val_str = f"{val_str[:self.col_width - 3]}..."
return val_str
def format_duration(self, d):
s = round(d.total_seconds())
d = s // 86400
s %= 86400
h = s // 3600
s %= 3600
m = s // 60
s %= 60
if d > 0:
return f"{d:d}d {h:02d}h {m:02d}m {s:02d}s"
return f"{h:02d}h {m:02d}m {s:02d}s"
@keras_tuner_export(["keras_tuner.Oracle", "keras_tuner.engine.oracle.Oracle"])
class Oracle(stateful.Stateful):
"""Implements a hyperparameter optimization algorithm.
In a parallel tuning setting, there is only one `Oracle` instance. The
workers would communicate with the centralized `Oracle` instance with gPRC
calls to the `Oracle` methods.
`Trial` objects are often used as the communication packet through the gPRC
calls to pass information between the worker `Tuner` instances and the
`Oracle`. For example, `Oracle.create_trial()` returns a `Trial` object, and
`Oracle.end_trial()` accepts a `Trial` in its arguments.
    New copies of the same `Trial` instance are reconstructed as it goes
    through the gRPC calls. The changes to the `Trial` objects in the worker
`Tuner`s are synced to the original copy in the `Oracle` when they are
passed back to the `Oracle` by calling `Oracle.end_trial()`.
Args:
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
            `keras_tuner.Objective`, we will minimize the sum of all the
            objectives to minimize minus the sum of all the objectives to
            maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
            before `max_trials` models have been tested if the search space has
been exhausted.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
seed: Int. Random seed.
max_retries_per_trial: Integer. Defaults to 0. The maximum number of
times to retry a `Trial` if the trial crashed or the results are
invalid.
max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
number of consecutive failed `Trial`s. When this number is reached,
the search will be stopped. A `Trial` is marked as failed when none
of the retries succeeded.
"""
def __init__(
self,
objective=None,
max_trials=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
seed=None,
max_retries_per_trial=0,
max_consecutive_failed_trials=3,
):
self.objective = obj_module.create_objective(objective)
self.max_trials = max_trials
if not hyperparameters:
if not tune_new_entries:
                raise ValueError(
                    "If you set `tune_new_entries=False`, you must "
                    "specify the search space via the "
                    "`hyperparameters` argument."
)
if not allow_new_entries:
                raise ValueError(
                    "If you set `allow_new_entries=False`, you must "
                    "specify the search space via the "
                    "`hyperparameters` argument."
)
self.hyperparameters = hp_module.HyperParameters()
else:
self.hyperparameters = hyperparameters
self.allow_new_entries = allow_new_entries
self.tune_new_entries = tune_new_entries
# trial_id -> Trial
self.trials = {}
# tuner_id -> Trial
self.ongoing_trials = {}
# List of trial_ids in the order of the trials start
self.start_order = []
# List of trial_ids in the order of the trials end
self.end_order = []
# Map trial_id to failed times
self._run_times = collections.defaultdict(lambda: 0)
# Used as a queue of trial_id to retry
self._retry_queue = []
# Client Tuner IDs
self.tuner_ids = set()
self.seed = seed or random.randint(1, 10000)
self._seed_state = self.seed
# Hashes of values in the trials, which only hashes the active values.
self._tried_so_far = set()
        # Dictionary mapping trial_id to the hash of the values.
self._id_to_hash = collections.defaultdict(lambda: None)
# Maximum number of identical values that can be generated
# before we consider the space to be exhausted.
self._max_collisions = 20
# Set in `BaseTuner` via `set_project_dir`.
self.directory = None
self.project_name = None
# In multi-worker mode, only the chief of each cluster should report
        # results. These 2 attributes exist in `Oracle` just to make it
        # consistent with `OracleClient`, in which the attributes are utilized.
self.multi_worker = False
self.should_report = True
# Handling the retries and failed trials.
self.max_retries_per_trial = max_retries_per_trial
self.max_consecutive_failed_trials = max_consecutive_failed_trials
# Print the logs to screen
self._display = Display(oracle=self)
@property
def verbose(self):
return self._display.verbose
@verbose.setter
def verbose(self, value):
if value == "auto":
value = 1
self._display.verbose = value
def _populate_space(self, trial_id):
warnings.warn(
"The `_populate_space` method is deprecated, "
"please use `populate_space`.",
DeprecationWarning,
)
return self.populate_space(trial_id)
def populate_space(self, trial_id):
"""Fill the hyperparameter space with values for a trial.
This method should be overridden in subclasses and called in
`create_trial` in order to populate the hyperparameter space with
values.
Args:
trial_id: A string, the ID for this Trial.
Returns:
A dictionary with keys "values" and "status", where "values" is
a mapping of parameter names to suggested values, and "status"
should be one of "RUNNING" (the trial can start normally), "IDLE"
(the oracle is waiting on something and cannot create a trial), or
"STOPPED" (the oracle has finished searching and no new trial should
be created).
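        Example of a possible return value from a subclass implementation
        (hyperparameter names are illustrative only):
            `{"status": "RUNNING", "values": {"learning_rate": 1e-3}}`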
"""
raise NotImplementedError
def _score_trial(self, trial):
warnings.warn(
"The `_score_trial` method is deprecated, "
"please use `score_trial`.",
DeprecationWarning,
)
self.score_trial(trial)
def score_trial(self, trial):
"""Score a completed `Trial`.
This method can be overridden in subclasses to provide a score for
a set of hyperparameter values. This method is called from `end_trial`
on completed `Trial`s.
Args:
trial: A completed `Trial` object.
"""
trial.score = trial.metrics.get_best_value(self.objective.name)
trial.best_step = trial.metrics.get_best_step(self.objective.name)
@synchronized
def create_trial(self, tuner_id):
"""Create a new `Trial` to be run by the `Tuner`.
A `Trial` corresponds to a unique set of hyperparameters to be run
by `Tuner.run_trial`.
Args:
tuner_id: A string, the ID that identifies the `Tuner` requesting a
`Trial`. `Tuners` that should run the same trial (for instance,
when running a multi-worker model) should have the same ID.
Returns:
A `Trial` object containing a set of hyperparameter values to run
in a `Tuner`.
"""
# Allow for multi-worker DistributionStrategy within a Trial.
if tuner_id in self.ongoing_trials:
return self.ongoing_trials[tuner_id]
# Record all running client Tuner IDs.
self.tuner_ids.add(tuner_id)
# Pick the Trials waiting for retry first.
if len(self._retry_queue) > 0:
trial = self.trials[self._retry_queue.pop()]
trial.status = trial_module.TrialStatus.RUNNING
self.ongoing_trials[tuner_id] = trial
self.save()
self._display.on_trial_begin(trial)
return trial
        # Make the trial_id the current number of trials, pre-padded with 0s
trial_id = f"{{:0{len(str(self.max_trials))}d}}"
trial_id = trial_id.format(len(self.trials))
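        # e.g. with max_trials=100 and 5 existing trials this yields "005".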
if self.max_trials and len(self.trials) >= self.max_trials:
status = trial_module.TrialStatus.STOPPED
values = None
else:
response = self.populate_space(trial_id)
status = response["status"]
values = response["values"] if "values" in response else None
hyperparameters = self.hyperparameters.copy()
hyperparameters.values = values or {}
trial = trial_module.Trial(
hyperparameters=hyperparameters, trial_id=trial_id, status=status
)
if status == trial_module.TrialStatus.RUNNING:
# Record the populated values (active only). Only record when the
# status is RUNNING. If other status, the trial will not run, the
# values are discarded and should not be recorded, in which case,
# the trial_id may appear again in the future.
self._record_values(trial)
self.ongoing_trials[tuner_id] = trial
self.trials[trial_id] = trial
self.start_order.append(trial_id)
self._save_trial(trial)
self.save()
self._display.on_trial_begin(trial)
        # Remove the client Tuner ID when the client is triggered to exit
if status == trial_module.TrialStatus.STOPPED:
self.tuner_ids.remove(tuner_id)
return trial
@synchronized
def update_trial(self, trial_id, metrics, step=0):
"""Used by a worker to report the status of a trial.
Args:
trial_id: A string, a previously seen trial id.
metrics: Dict. The keys are metric names, and the values are this
trial's metric values.
step: Optional float, reporting intermediate results. The current
value in a timeseries representing the state of the trial. This
is the value that `metrics` will be associated with.
Returns:
Trial object.
"""
trial = self.trials[trial_id]
self._check_objective_found(metrics)
for metric_name, metric_value in metrics.items():
if not trial.metrics.exists(metric_name):
direction = _maybe_infer_direction_from_objective(
self.objective, metric_name
)
trial.metrics.register(metric_name, direction=direction)
trial.metrics.update(metric_name, metric_value, step=step)
self._save_trial(trial)
# TODO: To signal early stopping, set Trial.status to "STOPPED".
return trial
def _check_consecutive_failures(self):
# For thread safety, check all trials for consecutive failures.
consecutive_failures = 0
for trial_id in self.end_order:
trial = self.trials[trial_id]
if trial.status == trial_module.TrialStatus.FAILED:
consecutive_failures += 1
else:
consecutive_failures = 0
if consecutive_failures == self.max_consecutive_failed_trials:
raise RuntimeError(
"Number of consecutive failures exceeded the limit "
f"of {self.max_consecutive_failed_trials}.\n"
+ (trial.message or "")
)
@synchronized
def end_trial(self, trial):
"""Logistics when a `Trial` finished running.
Record the `Trial` information and end the trial or send it for retry.
Args:
trial: The Trial to be ended. `trial.status` should be one of
`"COMPLETED"` (the trial finished normally), `"INVALID"` (the
trial has crashed or been deemed infeasible, but subject to
retries), or `"FAILED"` (The Trial is failed. No more retries
needed.). `trial.message` is an optional string, which is the
error message if the trial status is `"INVALID"` or `"FAILED"`.
"""
# To support parallel tuning, the information in the `trial` argument is
# synced back to the `Oracle`. Update the self.trials with the given
# trial.
old_trial = self.trials[trial.trial_id]
old_trial.hyperparameters = trial.hyperparameters
old_trial.status = trial.status
old_trial.message = trial.message
trial = old_trial
self.update_space(trial.hyperparameters)
if trial.status == trial_module.TrialStatus.COMPLETED:
self.score_trial(trial)
if np.isnan(trial.score):
trial.status = trial_module.TrialStatus.INVALID
            # Record the values again in case new hps appeared.
self._record_values(trial)
self._run_times[trial.trial_id] += 1
        # Check whether the trial needs to be retried.
if not self._retry(trial):
self.end_order.append(trial.trial_id)
self._check_consecutive_failures()
self._save_trial(trial)
self.save()
self._display.on_trial_end(trial)
        # Finally, pop the ongoing trial, which notifies the chief server to
        # stop when ongoing_trials is empty.
for tuner_id, ongoing_trial in self.ongoing_trials.items():
if ongoing_trial.trial_id == trial.trial_id:
self.ongoing_trials.pop(tuner_id)
break
def _retry(self, trial):
"""Send the trial for retry if needed.
Args:
trial: Trial. The trial to check.
Returns:
Boolean. Whether the trial should be retried.
"""
if trial.status != trial_module.TrialStatus.INVALID:
return False
trial_id = trial.trial_id
max_run_times = self.max_retries_per_trial + 1
if self._run_times[trial_id] >= max_run_times:
trial.status = trial_module.TrialStatus.FAILED
return False
print(
f"Trial {trial_id} failed {self._run_times[trial_id]} "
"times. "
f"{max_run_times - self._run_times[trial_id]} "
"retries left."
)
self._retry_queue.append(trial_id)
return True
def get_space(self):
"""Returns the `HyperParameters` search space."""
return self.hyperparameters.copy()
def update_space(self, hyperparameters):
"""Add new hyperparameters to the tracking space.
Already recorded parameters get ignored.
Args:
hyperparameters: An updated `HyperParameters` object.
"""
hps = hyperparameters.space
new_hps = [
hp
for hp in hps
if not self.hyperparameters._exists(hp.name, hp.conditions)
]
if new_hps and not self.allow_new_entries:
raise RuntimeError(
"`allow_new_entries` is `False`, "
f"but found new entries {new_hps}"
)
if not self.tune_new_entries:
# New entries should always use the default value.
return
self.hyperparameters.merge(new_hps)
def get_trial(self, trial_id):
"""Returns the `Trial` specified by `trial_id`."""
return self.trials[trial_id]
def get_best_trials(self, num_trials=1):
"""Returns the best `Trial`s."""
trials = [
t
for t in self.trials.values()
if t.status == trial_module.TrialStatus.COMPLETED
]
sorted_trials = sorted(
trials,
key=lambda trial: trial.score,
reverse=self.objective.direction == "max",
)
if len(sorted_trials) < num_trials:
sorted_trials = sorted_trials + [
t
for t in self.trials.values()
if t.status != trial_module.TrialStatus.COMPLETED
]
return sorted_trials[:num_trials]
def remaining_trials(self):
return (
self.max_trials - len(self.trials.items())
if self.max_trials
else None
)
def get_state(self):
# `self.trials` are saved in their own, Oracle-agnostic files.
# Just save the IDs for ongoing trials, since these are in `trials`.
return {
"ongoing_trials": {
tuner_id: trial.trial_id
for tuner_id, trial in self.ongoing_trials.items()
},
# Hyperparameters are part of the state because they can be added to
# during the course of the search.
"hyperparameters": self.hyperparameters.get_config(),
"start_order": self.start_order,
"end_order": self.end_order,
"run_times": self._run_times,
"retry_queue": self._retry_queue,
"seed": self.seed,
"seed_state": self._seed_state,
"tried_so_far": list(self._tried_so_far),
"id_to_hash": self._id_to_hash,
"display": self._display.get_state(),
}
def set_state(self, state):
# `self.trials` are saved in their own, Oracle-agnostic files.
self.ongoing_trials = {
tuner_id: self.trials[trial_id]
for tuner_id, trial_id in state["ongoing_trials"].items()
}
self.hyperparameters = hp_module.HyperParameters.from_config(
state["hyperparameters"]
)
self.start_order = state["start_order"]
self.end_order = state["end_order"]
self._run_times = collections.defaultdict(lambda: 0)
self._run_times.update(state["run_times"])
self._retry_queue = state["retry_queue"]
self.seed = state["seed"]
self._seed_state = state["seed_state"]
self._tried_so_far = set(state["tried_so_far"])
self._id_to_hash = collections.defaultdict(lambda: None)
self._id_to_hash.update(state["id_to_hash"])
self._display.set_state(state["display"])
def _set_project_dir(self, directory, project_name):
"""Sets the project directory and reloads the Oracle."""
self._directory = directory
self._project_name = project_name
@property
def _project_dir(self):
dirname = os.path.join(str(self._directory), self._project_name)
utils.create_directory(dirname)
return dirname
def save(self):
# `self.trials` are saved in their own, Oracle-agnostic files.
super().save(self._get_oracle_fname())
def reload(self):
# Reload trials from their own files.
trial_fnames = backend.io.glob(
os.path.join(self._project_dir, "trial_*", "trial.json")
)
for fname in trial_fnames:
trial = trial_module.Trial.load(fname)
self.trials[trial.trial_id] = trial
try:
super().reload(self._get_oracle_fname())
except KeyError as e:
raise RuntimeError(
"Error reloading `Oracle` from existing project. "
"If you did not mean to reload from an existing project, "
f"change the `project_name` or pass `overwrite=True` "
"when creating the `Tuner`. Found existing "
f"project at: {self._project_dir}"
) from e
# Empty the ongoing_trials and send them for retry.
for _, trial in self.ongoing_trials.items():
self._retry_queue.append(trial.trial_id)
self.ongoing_trials = {}
def _get_oracle_fname(self):
return os.path.join(self._project_dir, "oracle.json")
def _compute_values_hash(self, values):
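        # Hash the sorted "name=value" pairs so that trials with identical
        # active values map to the same key; e.g. {"lr": 0.01, "units": 32}
        # serializes to "lr=0.01units=32" before hashing (names illustrative).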
keys = sorted(values.keys())
s = "".join(f"{str(k)}={str(values[k])}" for k in keys)
return hashlib.sha256(s.encode("utf-8")).hexdigest()[:32]
def _check_objective_found(self, metrics):
if isinstance(self.objective, obj_module.MultiObjective):
objective_names = list(self.objective.name_to_direction.keys())
else:
objective_names = [self.objective.name]
for metric_name in metrics.keys():
if metric_name in objective_names:
objective_names.remove(metric_name)
if objective_names:
raise ValueError(
"Objective value missing in metrics reported to "
f"the Oracle, expected: {objective_names}, "
f"found: {metrics.keys()}"
)
def _get_trial_dir(self, trial_id):
dirname = os.path.join(self._project_dir, f"trial_{str(trial_id)}")
utils.create_directory(dirname)
return dirname
def _save_trial(self, trial):
# Write trial status to trial directory
trial_id = trial.trial_id
trial.save(os.path.join(self._get_trial_dir(trial_id), "trial.json"))
def _random_values(self):
"""Fills the hyperparameter space with random values.
Returns:
A dictionary mapping hyperparameter names to suggested values.
"""
collisions = 0
        while True:
hps = hp_module.HyperParameters()
# Generate a set of random values.
for hp in self.hyperparameters.space:
hps.merge([hp])
if hps.is_active(hp): # Only active params in `values`.
hps.values[hp.name] = hp.random_sample(self._seed_state)
self._seed_state += 1
# Keep trying until the set of values is unique,
# or until we exit due to too many collisions.
if self._duplicate(hps.values):
collisions += 1
if collisions > self._max_collisions:
return None
continue
break
return hps.values
    def _duplicate(self, values):
        """Check if the values have been tried in previous trials.
        Args:
            values: A dictionary mapping hyperparameter names to suggested
                values.
        Returns:
            Boolean. Whether the values have been tried in previous trials.
"""
return self._compute_values_hash(values) in self._tried_so_far
def _record_values(self, trial):
hyperparameters = trial.hyperparameters
hyperparameters.ensure_active_values()
new_hash_value = self._compute_values_hash(hyperparameters.values)
self._tried_so_far.add(new_hash_value)
        # In case a new hp appeared, remove the old hash value.
old_hash_value = self._id_to_hash[trial.trial_id]
if old_hash_value != new_hash_value:
self._id_to_hash[trial.trial_id] = new_hash_value
# Check before removing. If this is a retry run, the old value may
# have been removed already.
if old_hash_value in self._tried_so_far:
self._tried_so_far.remove(old_hash_value)
def _maybe_infer_direction_from_objective(objective, metric_name):
if isinstance(objective, obj_module.Objective):
objective = [objective]
return next(
(obj.direction for obj in objective if obj.name == metric_name), None
)
| keras-tuner/keras_tuner/engine/oracle.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/oracle.py",
"repo_id": "keras-tuner",
"token_count": 14269
} | 140 |
/* Copyright 2019 The KerasTuner Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto3";
package keras_tuner;
enum Sampling {
NONE = 0;
LINEAR = 1;
LOG = 2;
REVERSE_LOG = 3;
}
message Value {
oneof kind {
sint64 int_value = 1;
double float_value = 2;
string string_value = 3;
bool boolean_value = 4;
}
}
message Float {
string name = 1;
double min_value = 2;
double max_value = 3;
double step = 4;
Sampling sampling = 5;
double default = 6;
repeated Condition conditions = 7;
}
message Int {
string name = 1;
sint64 min_value = 2;
sint64 max_value = 3;
sint64 step = 4;
Sampling sampling = 5;
sint64 default = 6;
repeated Condition conditions = 7;
}
message Choice {
string name = 1;
repeated Value values = 2;
Value default = 3;
bool ordered = 4;
repeated Condition conditions = 5;
}
message Boolean {
string name = 1;
bool default = 2;
repeated Condition conditions = 3;
}
message Fixed {
string name = 1;
Value value = 2;
repeated Condition conditions = 3;
}
message HyperParameters {
message Space {
repeated Float float_space = 1;
repeated Int int_space = 2;
repeated Choice choice_space = 3;
repeated Boolean boolean_space = 4;
repeated Fixed fixed_space = 5;
}
Space space = 1;
message Values {
map <string, Value> values = 1;
}
Values values = 2;
}
message MetricObservation {
repeated float value = 1;
int64 step = 2;
}
message MetricHistory {
repeated MetricObservation observations = 1;
bool maximize = 2;
}
message MetricsTracker {
map<string, MetricHistory> metrics = 1;
}
enum TrialStatus {
UNKNOWN = 0;
RUNNING = 1;
IDLE = 2;
INVALID = 3;
STOPPED = 4;
COMPLETED = 5;
FAILED = 6;
}
message Trial {
HyperParameters hyperparameters = 1;
string trial_id = 2;
TrialStatus status = 3;
MetricsTracker metrics = 4;
message Score {
float value = 1;
int64 step = 2;
}
Score score = 5;
}
message Condition {
message Parent {
string name = 1;
repeated Value values = 2;
}
oneof kind {
Parent parent = 1;
}
}
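// Illustrative only: a learning-rate hyperparameter could be encoded as a
// Float message with name "lr", min_value 1e-4, max_value 1e-1, and LOG
// sampling; the message definitions above are the authoritative schema.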
| keras-tuner/keras_tuner/protos/keras_tuner.proto/0 | {
"file_path": "keras-tuner/keras_tuner/protos/keras_tuner.proto",
"repo_id": "keras-tuner",
"token_count": 1063
} | 141 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import sklearn
import sklearn.exceptions
import sklearn.gaussian_process
except ImportError: # pragma: no cover
sklearn = None # pragma: no cover
try:
import scipy
import scipy.optimize
except ImportError: # pragma: no cover
scipy = None # pragma: no cover
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import trial as trial_module
from keras_tuner.engine import tuner as tuner_module
@keras_tuner_export("keras_tuner.oracles.BayesianOptimizationOracle")
class BayesianOptimizationOracle(oracle_module.Oracle):
"""Bayesian optimization oracle.
    It uses Bayesian optimization with an underlying Gaussian process model.
The acquisition function used is upper confidence bound (UCB), which can
be found [here](
https://www.cse.wustl.edu/~garnett/cse515t/spring_2015/files/lecture_notes/12.pdf).
Args:
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
            objectives to minimize minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
            before `max_trials` models have been tested if the search space has
been exhausted. Defaults to 10.
num_initial_points: Optional number of randomly generated samples as
initial training data for Bayesian optimization. If left
unspecified, a value of 3 times the dimensionality of the
hyperparameter space is used.
alpha: Float, the value added to the diagonal of the kernel matrix
during fitting. It represents the expected amount of noise in the
observed performances in Bayesian optimization. Defaults to 1e-4.
beta: Float, the balancing factor of exploration and exploitation. The
larger it is, the more explorative it is. Defaults to 2.6.
seed: Optional integer, the random seed.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
max_retries_per_trial: Integer. Defaults to 0. The maximum number of
times to retry a `Trial` if the trial crashed or the results are
invalid.
max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
number of consecutive failed `Trial`s. When this number is reached,
the search will be stopped. A `Trial` is marked as failed when none
of the retries succeeded.
"""
def __init__(
self,
objective=None,
max_trials=10,
num_initial_points=None,
alpha=1e-4,
beta=2.6,
seed=None,
hyperparameters=None,
allow_new_entries=True,
tune_new_entries=True,
max_retries_per_trial=0,
max_consecutive_failed_trials=3,
):
if scipy is None:
            raise ImportError(
                "Please install scipy before using `BayesianOptimization`, "
                "e.g. via `pip install keras-tuner[bayesian]`."
)
if sklearn is None:
            raise ImportError(
                "Please install scikit-learn (sklearn) before using "
                "`BayesianOptimization`, e.g. via "
                "`pip install keras-tuner[bayesian]`."
)
super().__init__(
objective=objective,
max_trials=max_trials,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
seed=seed,
max_retries_per_trial=max_retries_per_trial,
max_consecutive_failed_trials=max_consecutive_failed_trials,
)
self.num_initial_points = num_initial_points
self.alpha = alpha
self.beta = beta
self._random_state = np.random.RandomState(self.seed)
self.gpr = self._make_gpr()
def _make_gpr(self):
return sklearn.gaussian_process.GaussianProcessRegressor(
kernel=sklearn.gaussian_process.kernels.Matern(nu=2.5),
n_restarts_optimizer=20,
normalize_y=True,
alpha=self.alpha,
random_state=self.seed,
)
def populate_space(self, trial_id):
"""Fill the hyperparameter space with values.
Args:
trial_id: A string, the ID for this Trial.
Returns:
A dictionary with keys "values" and "status", where "values" is
a mapping of parameter names to suggested values, and "status"
should be one of "RUNNING" (the trial can start normally), "IDLE"
(the oracle is waiting on something and cannot create a trial), or
"STOPPED" (the oracle has finished searching and no new trial should
be created).
"""
        # Generate enough samples before training the Gaussian process.
completed_trials = [
t for t in self.trials.values() if t.status == "COMPLETED"
]
# Use 3 times the dimensionality of the space as the default number of
# random points.
dimensions = len(self.hyperparameters.space)
num_initial_points = self.num_initial_points or max(3 * dimensions, 3)
if len(completed_trials) < num_initial_points:
return self._random_populate_space()
# Fit a GPR to the completed trials and return the predicted optimum
# values.
x, y = self._vectorize_trials()
# Ensure no nan, inf in x, y. GPR cannot process nan or inf.
x = np.nan_to_num(x, posinf=0, neginf=0)
y = np.nan_to_num(y, posinf=0, neginf=0)
self.gpr.fit(x, y)
def _upper_confidence_bound(x):
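            # Because scores are negated for "max" objectives before fitting
            # the GP, minimizing mu - beta * sigma favours candidates with a
            # low predicted mean and high predictive uncertainty (a UCB-style
            # exploration/exploitation trade-off).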
x = x.reshape(1, -1)
mu, sigma = self.gpr.predict(x, return_std=True)
return mu - self.beta * sigma
optimal_val = float("inf")
optimal_x = None
num_restarts = 50
bounds = self._get_hp_bounds()
x_seeds = self._random_state.uniform(
bounds[:, 0], bounds[:, 1], size=(num_restarts, bounds.shape[0])
)
for x_try in x_seeds:
# Sign of score is flipped when maximizing.
result = scipy.optimize.minimize(
_upper_confidence_bound,
x0=x_try,
bounds=bounds,
method="L-BFGS-B",
)
result_fun = (
result.fun if np.isscalar(result.fun) else result.fun[0]
)
if result_fun < optimal_val:
optimal_val = result_fun
optimal_x = result.x
values = self._vector_to_values(optimal_x)
return {"status": trial_module.TrialStatus.RUNNING, "values": values}
def _random_populate_space(self):
values = self._random_values()
if values is None:
return {"status": trial_module.TrialStatus.STOPPED, "values": None}
return {"status": trial_module.TrialStatus.RUNNING, "values": values}
def get_state(self):
state = super().get_state()
state.update(
{
"num_initial_points": self.num_initial_points,
"alpha": self.alpha,
"beta": self.beta,
}
)
return state
def set_state(self, state):
super().set_state(state)
self.num_initial_points = state["num_initial_points"]
self.alpha = state["alpha"]
self.beta = state["beta"]
self.gpr = self._make_gpr()
def _vectorize_trials(self):
x = []
y = []
ongoing_trials = set(self.ongoing_trials.values())
for trial in self.trials.values():
# Create a vector representation of each Trial's hyperparameters.
trial_hps = trial.hyperparameters
vector = []
for hp in self._nonfixed_space():
# For hyperparameters not present in the trial (either added
# after the trial or inactive in the trial), set to default
# value.
if (
                    trial_hps.is_active(hp)  # i.e. not inactive in the trial
                    and hp.name in trial_hps.values  # i.e. not added later
):
trial_value = trial_hps.values[hp.name]
else:
trial_value = hp.default
# Embed an HP value into the continuous space [0, 1].
prob = hp.value_to_prob(trial_value)
vector.append(prob)
if trial in ongoing_trials:
# "Hallucinate" the results of ongoing trials. This ensures that
# repeat trials are not selected when running distributed.
x_h = np.array(vector).reshape((1, -1))
y_h_mean, y_h_std = self.gpr.predict(x_h, return_std=True)
# Give a pessimistic estimate of the ongoing trial.
y_h_mean = np.array(y_h_mean).flatten()
score = y_h_mean[0] + y_h_std[0]
elif trial.status == "COMPLETED":
score = trial.score
# Always frame the optimization as a minimization for
# scipy.minimize.
if self.objective.direction == "max":
score = -1 * score
elif trial.status in ["FAILED", "INVALID"]:
# Skip the failed and invalid trials.
continue
x.append(vector)
y.append(score)
x = np.array(x)
y = np.array(y)
return x, y
def _vector_to_values(self, vector):
hps = hp_module.HyperParameters()
vector_index = 0
for hp in self.hyperparameters.space:
hps.merge([hp])
if isinstance(hp, hp_module.Fixed):
value = hp.value
else:
prob = vector[vector_index]
vector_index += 1
value = hp.prob_to_value(prob)
if hps.is_active(hp):
hps.values[hp.name] = value
return hps.values
def _nonfixed_space(self):
return [
hp
for hp in self.hyperparameters.space
if not isinstance(hp, hp_module.Fixed)
]
def _get_hp_bounds(self):
bounds = [[0, 1] for _ in self._nonfixed_space()]
return np.array(bounds)
@keras_tuner_export(
[
"keras_tuner.BayesianOptimization",
"keras_tuner.tuners.BayesianOptimization",
]
)
class BayesianOptimization(tuner_module.Tuner):
"""BayesianOptimization tuning with Gaussian process.
Args:
hypermodel: Instance of `HyperModel` class (or callable that takes
hyperparameters and returns a `Model` instance). It is optional
when `Tuner.run_trial()` is overridden and does not use
`self.hypermodel`.
objective: A string, `keras_tuner.Objective` instance, or a list of
`keras_tuner.Objective`s and strings. If a string, the direction of
the optimization (min or max) will be inferred. If a list of
`keras_tuner.Objective`, we will minimize the sum of all the
            objectives to minimize minus the sum of all the objectives to
maximize. The `objective` argument is optional when
`Tuner.run_trial()` or `HyperModel.fit()` returns a single float as
the objective to minimize.
max_trials: Integer, the total number of trials (model configurations)
to test at most. Note that the oracle may interrupt the search
            before `max_trials` models have been tested if the search space has
been exhausted. Defaults to 10.
num_initial_points: Optional number of randomly generated samples as
initial training data for Bayesian optimization. If left
unspecified, a value of 3 times the dimensionality of the
hyperparameter space is used.
alpha: Float, the value added to the diagonal of the kernel matrix
during fitting. It represents the expected amount of noise in the
observed performances in Bayesian optimization. Defaults to 1e-4.
beta: Float, the balancing factor of exploration and exploitation. The
larger it is, the more explorative it is. Defaults to 2.6.
seed: Optional integer, the random seed.
hyperparameters: Optional `HyperParameters` instance. Can be used to
override (or register in advance) hyperparameters in the search
space.
tune_new_entries: Boolean, whether hyperparameter entries that are
requested by the hypermodel but that were not specified in
`hyperparameters` should be added to the search space, or not. If
not, then the default value for these parameters will be used.
Defaults to True.
allow_new_entries: Boolean, whether the hypermodel is allowed to
request hyperparameter entries not listed in `hyperparameters`.
Defaults to True.
max_retries_per_trial: Integer. Defaults to 0. The maximum number of
times to retry a `Trial` if the trial crashed or the results are
invalid.
max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
number of consecutive failed `Trial`s. When this number is reached,
the search will be stopped. A `Trial` is marked as failed when none
of the retries succeeded.
**kwargs: Keyword arguments relevant to all `Tuner` subclasses. Please
see the docstring for `Tuner`.
"""
def __init__(
self,
hypermodel=None,
objective=None,
max_trials=10,
num_initial_points=None,
alpha=1e-4,
beta=2.6,
seed=None,
hyperparameters=None,
tune_new_entries=True,
allow_new_entries=True,
max_retries_per_trial=0,
max_consecutive_failed_trials=3,
**kwargs
):
oracle = BayesianOptimizationOracle(
objective=objective,
max_trials=max_trials,
num_initial_points=num_initial_points,
alpha=alpha,
beta=beta,
seed=seed,
hyperparameters=hyperparameters,
tune_new_entries=tune_new_entries,
allow_new_entries=allow_new_entries,
max_retries_per_trial=max_retries_per_trial,
max_consecutive_failed_trials=max_consecutive_failed_trials,
)
super().__init__(oracle=oracle, hypermodel=hypermodel, **kwargs)
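# Illustrative usage sketch (assumes a user-defined `build_model(hp)` function
# that returns a compiled Keras model; all names below are examples only):
#
#     tuner = BayesianOptimization(
#         hypermodel=build_model,
#         objective="val_loss",
#         max_trials=20,
#     )
#     tuner.search(x_train, y_train, validation_data=(x_val, y_val))
#     best_model = tuner.get_best_models(num_models=1)[0]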
| keras-tuner/keras_tuner/tuners/bayesian.py/0 | {
"file_path": "keras-tuner/keras_tuner/tuners/bayesian.py",
"repo_id": "keras-tuner",
"token_count": 7169
} | 142 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
if os.path.exists("keras_tuner/version.py"):
VERSION = get_version("keras_tuner/version.py")
else:
VERSION = get_version("keras_tuner/__init__.py")
setup(
name="keras-tuner",
description="A Hyperparameter Tuning Library for Keras",
long_description_content_type="text/markdown",
long_description=README,
url="https://github.com/keras-team/keras-tuner",
author="The KerasTuner authors",
license="Apache License 2.0",
version=VERSION,
install_requires=[
"keras",
"packaging",
"requests",
"kt-legacy",
],
extras_require={
"tensorflow": [
"tensorflow>=2.0",
],
"tensorflow-cpu": [
"tensorflow-cpu>=2.0",
],
"tests": [
"black",
"flake8",
"isort",
"ipython",
"pandas",
"portpicker",
"pytest",
"pytest-cov",
"pytest-xdist",
"namex",
"scikit-learn",
"scipy",
],
"bayesian": [
"scikit-learn",
"scipy",
],
"build": [
"tensorflow-cpu",
"namex",
"build",
],
},
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Operating System :: Unix",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*test*",)),
)
| keras-tuner/setup.py/0 | {
"file_path": "keras-tuner/setup.py",
"repo_id": "keras-tuner",
"token_count": 1341
} | 143 |
import numpy as np
import pytest
import keras
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
from keras import testing
class MyModel(keras.Model):
def __init__(self, hidden_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
@pytest.mark.requires_trainable_backend
class BasicFlowTest(testing.TestCase):
def test_basic_fit(self):
model = MyModel(hidden_dim=2, output_dim=1)
x = np.random.random((128, 4))
y = np.random.random((128, 4))
batch_size = 32
epochs = 3
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
output_before_fit = model(x)
model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
output_after_fit = model(x)
self.assertNotAllClose(output_before_fit, output_after_fit)
| keras/integration_tests/basic_full_flow.py/0 | {
"file_path": "keras/integration_tests/basic_full_flow.py",
"repo_id": "keras",
"token_count": 624
} | 144 |
from keras import backend
from keras.api_export import keras_export
if backend.backend() == "tensorflow":
BackendVariable = backend.tensorflow.core.Variable
backend_name_scope = backend.tensorflow.core.name_scope
elif backend.backend() == "jax":
BackendVariable = backend.jax.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "torch":
BackendVariable = backend.torch.core.Variable
backend_name_scope = backend.common.name_scope.name_scope
elif backend.backend() == "numpy":
from keras.backend.numpy.core import Variable as NumpyVariable
BackendVariable = NumpyVariable
backend_name_scope = backend.common.name_scope.name_scope
else:
raise RuntimeError(f"Invalid backend: {backend.backend()}")
@keras_export("keras.Variable")
class Variable(BackendVariable):
pass
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return backend.device_scope(device_name)
| keras/keras/backend/exports.py/0 | {
"file_path": "keras/keras/backend/exports.py",
"repo_id": "keras",
"token_count": 357
} | 145 |
from keras.backend.numpy import core
from keras.backend.numpy import image
from keras.backend.numpy import linalg
from keras.backend.numpy import math
from keras.backend.numpy import nn
from keras.backend.numpy import numpy
from keras.backend.numpy import random
from keras.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.backend.numpy.core import Variable
from keras.backend.numpy.core import cast
from keras.backend.numpy.core import compute_output_spec
from keras.backend.numpy.core import cond
from keras.backend.numpy.core import convert_to_numpy
from keras.backend.numpy.core import convert_to_tensor
from keras.backend.numpy.core import is_tensor
from keras.backend.numpy.core import shape
from keras.backend.numpy.core import vectorized_map
from keras.backend.numpy.rnn import cudnn_ok
from keras.backend.numpy.rnn import gru
from keras.backend.numpy.rnn import lstm
from keras.backend.numpy.rnn import rnn
| keras/keras/backend/numpy/__init__.py/0 | {
"file_path": "keras/keras/backend/numpy/__init__.py",
"repo_id": "keras",
"token_count": 325
} | 146 |
import tensorflow as tf
from keras.backend.tensorflow.trackable import KerasAutoTrackable
from keras.utils import tf_utils
from keras.utils import tracking
class TFLayer(KerasAutoTrackable):
def __init__(self, *args, **kwargs):
# Export-related attributes
self._saved_model_inputs_spec = None
self._saved_model_arg_spec = None
self._tracked = []
@tf.__internal__.tracking.no_automatic_dependency_tracking
def _set_save_spec(self, inputs, args=None, kwargs=None):
"""Defines the save spec so that serialization can trace layer calls.
The TensorSpecs of the call function `inputs`, `args`, and `kwargs` are
saved into a tuple of `([inputs] + args, kwargs)`.
Args:
inputs: possibly nested inputs passed into the call function.
args: a list of positional arguments passed into call.
kwargs: a dictionary of keyword arguments passed into call.
"""
if self._saved_model_inputs_spec is not None:
return # Already set.
inputs_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, inputs)
args_spec = tf.nest.map_structure(tf_utils.get_tensor_spec, args or [])
kwargs_spec = {}
# Filter out non-tensor arguments from kwargs.
for key, kwarg in kwargs.items():
flat_kwarg = tf.nest.flatten(kwarg)
flat_specs = [tf_utils.get_tensor_spec(x) for x in flat_kwarg]
if any(s is None for s in flat_specs):
continue
kwargs_spec[key] = tf.nest.pack_sequence_as(kwarg, flat_specs)
self._saved_model_inputs_spec = inputs_spec
self._saved_model_arg_spec = (
[inputs_spec] + list(args_spec),
kwargs_spec,
)
def _trackable_children(self, save_type="checkpoint", **kwargs):
if save_type == "savedmodel":
# SavedModel needs to ignore the execution functions.
train_function = getattr(self, "train_function", None)
test_function = getattr(self, "test_function", None)
predict_function = getattr(self, "predict_function", None)
self.train_function = None
self.test_function = None
self.predict_function = None
children = super()._trackable_children(save_type, **kwargs)
if save_type == "savedmodel":
self.train_function = train_function
self.test_function = test_function
self.predict_function = predict_function
for tracked_attr in self._tracked:
tracked_item = getattr(self, tracked_attr)
if isinstance(tracked_item, tracking.TrackedList):
children[tracked_attr] = list(tracked_item)
if isinstance(tracked_item, tracking.TrackedDict):
children[tracked_attr] = dict(tracked_item)
if isinstance(tracked_item, tracking.TrackedSet):
children[tracked_attr] = list(tracked_item)
return children
@property
def _default_save_signature(self):
"""For SavedModel support: returns the default serving signature."""
from keras.models.functional import Functional
from keras.models.model import Model
from keras.models.sequential import Sequential
if not isinstance(self, Model):
return None
inputs = None
if (
isinstance(self, Sequential)
and getattr(self, "_functional", None) is not None
):
inputs = self._functional.input
elif isinstance(self, Functional):
inputs = self.input
if inputs is not None:
input_signature = [
tf.nest.map_structure(
lambda x: tf.TensorSpec(x.shape, self.compute_dtype),
inputs,
)
]
else:
shapes_dict = self._build_shapes_dict
if len(shapes_dict) == 1:
input_shape = tuple(shapes_dict.values())[0]
input_signature = [
tf.TensorSpec(input_shape, self.compute_dtype)
]
else:
input_signature = [
tf.nest.map_structure(
lambda x: tf.TensorSpec(x.shape, self.compute_dtype),
shapes_dict,
)
]
@tf.function(input_signature=input_signature)
def serving_default(inputs):
return self(inputs)
return serving_default
| keras/keras/backend/tensorflow/layer.py/0 | {
"file_path": "keras/keras/backend/tensorflow/layer.py",
"repo_id": "keras",
"token_count": 2168
} | 147 |
import pytest
from keras import backend
from keras import testing
class DeviceTest(testing.TestCase):
@pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
def test_tf_device_scope(self):
import tensorflow as tf
if not tf.config.list_physical_devices("GPU"):
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("CPU:0", t.device)
        # When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertIn("GPU:0", t.device)
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_jax_device_scope(self):
import jax
from jax.lib import xla_bridge
platform = xla_bridge.get_backend().platform
if platform != "gpu":
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("cpu")[0])
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("cpu")[0])
        # When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("gpu")[0])
# Also verify the explicit gpu device
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device(), jax.devices("gpu")[0])
@pytest.mark.skipif(backend.backend() != "jax", reason="jax only")
def test_invalid_jax_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_torch_device_scope(self):
import torch
if not torch.cuda.device_count():
self.skipTest("Need at least one GPU for testing")
with backend.device_scope("cpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
with backend.device_scope("CPU:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cpu"))
        # When leaving the scope, the device should be back to gpu:0
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
# Also verify the explicit gpu -> cuda conversion
with backend.device_scope("gpu:0"):
t = backend.numpy.ones((2, 1))
self.assertEqual(t.device, torch.device("cuda", 0))
@pytest.mark.skipif(backend.backend() != "torch", reason="torch only")
def test_invalid_torch_device(self):
with self.assertRaisesRegex(ValueError, "Received: device_name='123'"):
backend.device_scope(123).__enter__()
| keras/keras/backend/tests/device_scope_test.py/0 | {
"file_path": "keras/keras/backend/tests/device_scope_test.py",
"repo_id": "keras",
"token_count": 1499
} | 148 |
import torch
from keras import ops
from keras import optimizers
from keras.backend.torch import core
from keras.backend.torch.optimizers import torch_parallel_optimizer
class Nadam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Nadam):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
local_step = ops.cast(self.iterations + 1, dtype)
next_step = ops.cast(self.iterations + 2, dtype)
decay = ops.cast(0.96, dtype)
beta_1 = ops.cast(self.beta_1, dtype)
beta_2 = ops.cast(self.beta_2, dtype)
u_t = beta_1 * (1.0 - 0.5 * (ops.power(decay, local_step)))
u_t_1 = beta_1 * (1.0 - 0.5 * (ops.power(decay, next_step)))
u_product_t = self._u_product.value * u_t
u_product_t_1 = u_product_t * u_t_1
beta_2_power = ops.power(beta_2, local_step)
self._u_product.assign(u_product_t)
m_list = [
self._momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
v_list = [
self._velocities[self._get_variable_index(variable)].value
for variable in keras_variables
]
torch._foreach_mul_(m_list, self.beta_1)
torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1)
torch._foreach_mul_(v_list, self.beta_2)
torch._foreach_add_(
v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2
)
m_hat_list = torch._foreach_add(
torch._foreach_div(
torch._foreach_mul(m_list, u_t_1),
1 - core.convert_to_numpy(u_product_t_1),
),
torch._foreach_div(
torch._foreach_mul(grads, 1 - u_t),
1 - core.convert_to_numpy(u_product_t),
),
)
v_hat_list = torch._foreach_div(v_list, 1 - beta_2_power)
torch._foreach_add_(
variables,
torch._foreach_div(
torch._foreach_mul(m_hat_list, lr),
torch._foreach_add(
torch._foreach_sqrt(v_hat_list), self.epsilon
),
),
alpha=-1,
)
| keras/keras/backend/torch/optimizers/torch_nadam.py/0 | {
"file_path": "keras/keras/backend/torch/optimizers/torch_nadam.py",
"repo_id": "keras",
"token_count": 1265
} | 149 |
import warnings
from keras import ops
from keras.api_export import keras_export
from keras.callbacks.callback import Callback
from keras.trainers import compile_utils
from keras.utils import io_utils
@keras_export("keras.callbacks.EarlyStopping")
class EarlyStopping(Callback):
"""Stop training when a monitored metric has stopped improving.
Assuming the goal of a training is to minimize the loss. With this, the
metric to be monitored would be `'loss'`, and mode would be `'min'`. A
`model.fit()` training loop will check at end of every epoch whether
the loss is no longer decreasing, considering the `min_delta` and
`patience` if applicable. Once it's found no longer decreasing,
`model.stop_training` is marked True and the training terminates.
The quantity to be monitored needs to be available in `logs` dict.
To make it so, pass the loss or metrics at `model.compile()`.
Args:
monitor: Quantity to be monitored. Defaults to `"val_loss"`.
min_delta: Minimum change in the monitored quantity to qualify as an
            improvement, i.e. an absolute change of less than min_delta will
count as no improvement. Defaults to `0`.
patience: Number of epochs with no improvement after which training will
be stopped. Defaults to `0`.
verbose: Verbosity mode, 0 or 1. Mode 0 is silent, and mode 1 displays
messages when the callback takes an action. Defaults to `0`.
mode: One of `{"auto", "min", "max"}`. In `min` mode, training will stop
when the quantity monitored has stopped decreasing; in `"max"` mode
it will stop when the quantity monitored has stopped increasing; in
`"auto"` mode, the direction is automatically inferred from the name
of the monitored quantity. Defaults to `"auto"`.
baseline: Baseline value for the monitored quantity. If not `None`,
training will stop if the model doesn't show improvement over the
baseline. Defaults to `None`.
restore_best_weights: Whether to restore model weights from the epoch
with the best value of the monitored quantity. If `False`, the model
weights obtained at the last step of training are used. An epoch
will be restored regardless of the performance relative to the
`baseline`. If no epoch improves on `baseline`, training will run
for `patience` epochs and restore weights from the best epoch in
that set. Defaults to `False`.
start_from_epoch: Number of epochs to wait before starting to monitor
improvement. This allows for a warm-up period in which no
improvement is expected and thus training will not be stopped.
Defaults to `0`.
Example:
>>> callback = keras.callbacks.EarlyStopping(monitor='loss',
... patience=3)
>>> # This callback will stop the training when there is no improvement in
>>> # the loss for three consecutive epochs.
>>> model = keras.models.Sequential([keras.layers.Dense(10)])
>>> model.compile(keras.optimizers.SGD(), loss='mse')
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, batch_size=1, callbacks=[callback],
... verbose=0)
>>> len(history.history['loss']) # Only 4 epochs are run.
4
"""
def __init__(
self,
monitor="val_loss",
min_delta=0,
patience=0,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=False,
start_from_epoch=0,
):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
self.start_from_epoch = start_from_epoch
if mode not in ["auto", "min", "max"]:
warnings.warn(
f"EarlyStopping mode {mode} is unknown, fallback to auto mode.",
stacklevel=2,
)
mode = "auto"
self.mode = mode
self.monitor_op = None
def _set_monitor_op(self):
if self.mode == "min":
self.monitor_op = ops.less
elif self.mode == "max":
self.monitor_op = ops.greater
else:
metric_name = self.monitor.removeprefix("val_")
if metric_name == "loss":
self.monitor_op = ops.less
if hasattr(self.model, "metrics"):
all_metrics = []
for m in self.model.metrics:
if isinstance(
m,
(
compile_utils.CompileMetrics,
compile_utils.MetricsList,
),
):
all_metrics.extend(m.metrics)
for m in all_metrics:
if m.name == metric_name:
if hasattr(m, "_direction"):
if m._direction == "up":
self.monitor_op = ops.greater
else:
self.monitor_op = ops.less
if self.monitor_op is None:
raise ValueError(
f"EarlyStopping callback received monitor={self.monitor} "
"but Keras isn't able to automatically determine whether "
"that metric should be maximized or minimized. "
"Pass `mode='max'` in order to do early stopping based "
"on the highest metric value, or pass `mode='min'` "
"in order to use the lowest value."
)
if self.monitor_op == ops.less:
self.min_delta *= -1
self.best = (
float("inf") if self.monitor_op == ops.less else -float("inf")
)
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
self.best_weights = None
self.best_epoch = 0
def on_epoch_end(self, epoch, logs=None):
if self.monitor_op is None:
# Delay setup until the model's metrics are all built
self._set_monitor_op()
current = self.get_monitor_value(logs)
if current is None or epoch < self.start_from_epoch:
# If no monitor value exists or still in initial warm-up stage.
return
if self.restore_best_weights and self.best_weights is None:
# If best weights were never set,
# then the current weights are the best.
self.best_weights = self.model.get_weights()
self.best_epoch = epoch
self.wait += 1
if self._is_improvement(current, self.best):
self.best = current
self.best_epoch = epoch
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
# Only restart wait if we beat both the baseline and our previous
# best.
if self.baseline is None or self._is_improvement(
current, self.baseline
):
self.wait = 0
return
if self.wait >= self.patience and epoch > 0:
# Patience has been exceeded: stop training
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
io_utils.print_msg(
f"Epoch {self.stopped_epoch + 1}: early stopping"
)
if self.restore_best_weights and self.best_weights is not None:
if self.verbose > 0:
io_utils.print_msg(
"Restoring model weights from "
"the end of the best epoch: "
f"{self.best_epoch + 1}."
)
self.model.set_weights(self.best_weights)
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
warnings.warn(
(
f"Early stopping conditioned on metric `{self.monitor}` "
"which is not available. "
f"Available metrics are: {','.join(list(logs.keys()))}"
),
stacklevel=2,
)
return monitor_value
def _is_improvement(self, monitor_value, reference_value):
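        # `min_delta` was negated in `_set_monitor_op` for the minimizing
        # case, so this single comparison requires the monitored value to
        # beat `reference_value` by at least the original |min_delta|.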
return self.monitor_op(monitor_value - self.min_delta, reference_value)
| keras/keras/callbacks/early_stopping.py/0 | {
"file_path": "keras/keras/callbacks/early_stopping.py",
"repo_id": "keras",
"token_count": 4070
} | 150 |
from keras.api_export import keras_export
from keras.layers.activations.activation import Activation
from keras.layers.activations.elu import ELU
from keras.layers.activations.leaky_relu import LeakyReLU
from keras.layers.activations.prelu import PReLU
from keras.layers.activations.relu import ReLU
from keras.layers.activations.softmax import Softmax
from keras.layers.attention.additive_attention import AdditiveAttention
from keras.layers.attention.attention import Attention
from keras.layers.attention.grouped_query_attention import GroupedQueryAttention
from keras.layers.attention.multi_head_attention import MultiHeadAttention
from keras.layers.convolutional.conv1d import Conv1D
from keras.layers.convolutional.conv1d_transpose import Conv1DTranspose
from keras.layers.convolutional.conv2d import Conv2D
from keras.layers.convolutional.conv2d_transpose import Conv2DTranspose
from keras.layers.convolutional.conv3d import Conv3D
from keras.layers.convolutional.conv3d_transpose import Conv3DTranspose
from keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
from keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
from keras.layers.convolutional.separable_conv1d import SeparableConv1D
from keras.layers.convolutional.separable_conv2d import SeparableConv2D
from keras.layers.core.dense import Dense
from keras.layers.core.einsum_dense import EinsumDense
from keras.layers.core.embedding import Embedding
from keras.layers.core.identity import Identity
from keras.layers.core.input_layer import Input
from keras.layers.core.input_layer import InputLayer
from keras.layers.core.lambda_layer import Lambda
from keras.layers.core.masking import Masking
from keras.layers.core.wrapper import Wrapper
from keras.layers.layer import Layer
from keras.layers.merging.add import Add
from keras.layers.merging.add import add
from keras.layers.merging.average import Average
from keras.layers.merging.average import average
from keras.layers.merging.concatenate import Concatenate
from keras.layers.merging.concatenate import concatenate
from keras.layers.merging.dot import Dot
from keras.layers.merging.dot import dot
from keras.layers.merging.maximum import Maximum
from keras.layers.merging.maximum import maximum
from keras.layers.merging.minimum import Minimum
from keras.layers.merging.minimum import minimum
from keras.layers.merging.multiply import Multiply
from keras.layers.merging.multiply import multiply
from keras.layers.merging.subtract import Subtract
from keras.layers.merging.subtract import subtract
from keras.layers.normalization.batch_normalization import BatchNormalization
from keras.layers.normalization.group_normalization import GroupNormalization
from keras.layers.normalization.layer_normalization import LayerNormalization
from keras.layers.normalization.spectral_normalization import (
SpectralNormalization,
)
from keras.layers.normalization.unit_normalization import UnitNormalization
from keras.layers.pooling.average_pooling1d import AveragePooling1D
from keras.layers.pooling.average_pooling2d import AveragePooling2D
from keras.layers.pooling.average_pooling3d import AveragePooling3D
from keras.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D
from keras.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D
from keras.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D
from keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
from keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
from keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
from keras.layers.pooling.max_pooling1d import MaxPooling1D
from keras.layers.pooling.max_pooling2d import MaxPooling2D
from keras.layers.pooling.max_pooling3d import MaxPooling3D
from keras.layers.preprocessing.category_encoding import CategoryEncoding
from keras.layers.preprocessing.center_crop import CenterCrop
from keras.layers.preprocessing.discretization import Discretization
from keras.layers.preprocessing.hashed_crossing import HashedCrossing
from keras.layers.preprocessing.hashing import Hashing
from keras.layers.preprocessing.index_lookup import IndexLookup
from keras.layers.preprocessing.integer_lookup import IntegerLookup
from keras.layers.preprocessing.normalization import Normalization
from keras.layers.preprocessing.random_brightness import RandomBrightness
from keras.layers.preprocessing.random_contrast import RandomContrast
from keras.layers.preprocessing.random_crop import RandomCrop
from keras.layers.preprocessing.random_flip import RandomFlip
from keras.layers.preprocessing.random_rotation import RandomRotation
from keras.layers.preprocessing.random_translation import RandomTranslation
from keras.layers.preprocessing.random_zoom import RandomZoom
from keras.layers.preprocessing.rescaling import Rescaling
from keras.layers.preprocessing.resizing import Resizing
from keras.layers.preprocessing.string_lookup import StringLookup
from keras.layers.preprocessing.text_vectorization import TextVectorization
from keras.layers.regularization.activity_regularization import (
ActivityRegularization,
)
from keras.layers.regularization.alpha_dropout import AlphaDropout
from keras.layers.regularization.dropout import Dropout
from keras.layers.regularization.gaussian_dropout import GaussianDropout
from keras.layers.regularization.gaussian_noise import GaussianNoise
from keras.layers.regularization.spatial_dropout import SpatialDropout1D
from keras.layers.regularization.spatial_dropout import SpatialDropout2D
from keras.layers.regularization.spatial_dropout import SpatialDropout3D
from keras.layers.reshaping.cropping1d import Cropping1D
from keras.layers.reshaping.cropping2d import Cropping2D
from keras.layers.reshaping.cropping3d import Cropping3D
from keras.layers.reshaping.flatten import Flatten
from keras.layers.reshaping.permute import Permute
from keras.layers.reshaping.repeat_vector import RepeatVector
from keras.layers.reshaping.reshape import Reshape
from keras.layers.reshaping.up_sampling1d import UpSampling1D
from keras.layers.reshaping.up_sampling2d import UpSampling2D
from keras.layers.reshaping.up_sampling3d import UpSampling3D
from keras.layers.reshaping.zero_padding1d import ZeroPadding1D
from keras.layers.reshaping.zero_padding2d import ZeroPadding2D
from keras.layers.reshaping.zero_padding3d import ZeroPadding3D
from keras.layers.rnn.bidirectional import Bidirectional
from keras.layers.rnn.conv_lstm1d import ConvLSTM1D
from keras.layers.rnn.conv_lstm2d import ConvLSTM2D
from keras.layers.rnn.conv_lstm3d import ConvLSTM3D
from keras.layers.rnn.gru import GRU
from keras.layers.rnn.gru import GRUCell
from keras.layers.rnn.lstm import LSTM
from keras.layers.rnn.lstm import LSTMCell
from keras.layers.rnn.rnn import RNN
from keras.layers.rnn.simple_rnn import SimpleRNN
from keras.layers.rnn.simple_rnn import SimpleRNNCell
from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells
from keras.layers.rnn.time_distributed import TimeDistributed
from keras.saving import serialization_lib
@keras_export("keras.layers.serialize")
def serialize(layer):
"""Returns the layer configuration as a Python dict.
Args:
layer: A `keras.layers.Layer` instance to serialize.
Returns:
Python dict which contains the configuration of the layer.
"""
return serialization_lib.serialize_keras_object(layer)
@keras_export("keras.layers.deserialize")
def deserialize(config, custom_objects=None):
"""Returns a Keras layer object via its configuration.
Args:
config: A python dict containing a serialized layer configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras layer instance.
"""
obj = serialization_lib.deserialize_keras_object(
config,
custom_objects=custom_objects,
)
if not isinstance(obj, Layer):
raise ValueError(
"`keras.layers.deserialize` was passed a `config` object that is "
f"not a `keras.layers.Layer`. Received: {config}"
)
return obj
| keras/keras/layers/__init__.py/0 | {
"file_path": "keras/keras/layers/__init__.py",
"repo_id": "keras",
"token_count": 2714
} | 151 |
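# Hedged usage sketch (not part of the source file above): round-tripping a layer
# through the serialize()/deserialize() helpers defined at the end of the module.
# The Dense layer and its arguments are illustrative assumptions.
import keras

layer = keras.layers.Dense(8, activation="relu", name="probe")
config = keras.layers.serialize(layer)       # Python dict describing the layer
restored = keras.layers.deserialize(config)  # new, unbuilt layer instance

assert isinstance(restored, keras.layers.Dense)
assert restored.name == "probe"  # the name is carried through the config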
import os
import numpy as np
import pytest
from keras import backend
from keras import constraints
from keras import layers
from keras import models
from keras import saving
from keras.testing import test_case
class EmbeddingTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_embedding_basics(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 4, "output_dim": 3},
input_shape=(2,),
input_dtype="int32",
expected_output_shape=(2, 3),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4, "mask_zero": True},
input_shape=(2, 3),
input_dtype="int64",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse(self):
self.run_layer_test(
layers.Embedding,
{"input_dim": 5, "output_dim": 4},
input_shape=(2, 3),
input_dtype="int32",
input_sparse=True,
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_correctness(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array([2, 1, 0]))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_correctness_sparse(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
if backend.backend() == "tensorflow":
import tensorflow as tf
x = tf.SparseTensor([[0, 0], [1, 2]], [2, 1], (2, 3))
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
x = jax_sparse.BCOO(([2, 1], [[0, 0], [1, 2]]), shape=(2, 3))
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
self.assertAllClose(
layer(x),
np.array(
[
[[3.0, 3.0], [0.0, 0.0], [0.0, 0.0]],
[[0.0, 0.0], [0.0, 0.0], [2.0, 2.0]],
]
),
)
def test_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=True)
layer.build()
out = layer.compute_mask(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([True, True, False]))
def test_compute_mask_no_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=False)
input_data = np.array([2, 1, 0])
mask = layer.compute_mask(input_data)
self.assertIsNone(mask)
def test_embedding_constraints(self):
layer = layers.Embedding(3, 2, embeddings_constraint="non_neg")
layer.build((None, 2))
self.assertIsInstance(layer.embeddings.constraint, constraints.NonNeg)
@pytest.mark.requires_trainable_backend
def test_enable_lora(self):
layer = layers.Embedding(10, 16)
layer.build()
layer.enable_lora(4)
self.assertLen(layer.trainable_weights, 2)
self.assertLen(layer.non_trainable_weights, 1)
# Try eager call
x = np.random.randint(0, 9, size=(64, 3))
y = np.random.random((64, 3, 16))
_ = layer(x[:2])
init_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
init_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
# Try calling fit()
model = models.Sequential(
[
layer,
]
)
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y)
final_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
final_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
diff_a = np.max(
np.abs(init_lora_a_embeddings_value - final_lora_a_embeddings_value)
)
diff_b = np.max(
np.abs(init_lora_b_embeddings_value - final_lora_b_embeddings_value)
)
self.assertGreater(diff_a, 0.0)
self.assertGreater(diff_b, 0.0)
# Try saving and reloading the model
temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertFalse(new_model.layers[0].lora_enabled)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try saving and reloading the model's weights only
temp_filepath = os.path.join(
self.get_temp_dir(), "lora_model.weights.h5"
)
model.save_weights(temp_filepath)
# Load the file into a fresh, non-lora model
new_model = models.Sequential(
[
layers.Input((3,), dtype="int32"),
layers.Embedding(10, 16),
]
)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try loading a normal checkpoint into a lora model
new_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@pytest.mark.requires_trainable_backend
def test_lora_rank_argument(self):
self.run_layer_test(
layers.Embedding,
init_kwargs={"input_dim": 5, "output_dim": 4, "lora_rank": 2},
input_shape=(2, 3),
input_dtype="int32",
expected_output_shape=(2, 3, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
def test_enable_lora_with_embeddings_constraint(self):
layer = layers.Embedding(
input_dim=10, output_dim=16, embeddings_constraint="max_norm"
)
with self.assertRaisesRegex(
ValueError, "incompatible with embedding constraints"
):
layer.enable_lora(rank=2)
def test_enable_lora_on_unbuilt_layer(self):
layer = layers.Embedding(input_dim=10, output_dim=16)
with self.assertRaisesRegex(
ValueError, "Cannot enable lora on a layer that isn't yet built"
):
layer.enable_lora(rank=2)
def test_enable_lora_when_already_enabled(self):
layer = layers.Embedding(input_dim=10, output_dim=16)
layer.build()
layer.enable_lora(rank=2)
with self.assertRaisesRegex(ValueError, "lora is already enabled"):
layer.enable_lora(rank=2)
| keras/keras/layers/core/embedding_test.py/0 | {
"file_path": "keras/keras/layers/core/embedding_test.py",
"repo_id": "keras",
"token_count": 3860
} | 152 |
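# Hedged usage sketch (not part of the test file above): enabling LoRA on a built
# Embedding layer, as exercised by test_enable_lora(). The dimensions and the
# rank are illustrative assumptions.
import numpy as np
import keras

layer = keras.layers.Embedding(input_dim=10, output_dim=16)
layer.build()              # enable_lora() requires a built layer
layer.enable_lora(rank=4)  # adds trainable lora_embeddings_a / lora_embeddings_b

tokens = np.random.randint(0, 10, size=(2, 3))
out = layer(tokens)        # shape (2, 3, 16); the base embeddings stay frozen
print(out.shape)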
from keras import ops
from keras.api_export import keras_export
from keras.layers.merging.base_merge import Merge
@keras_export("keras.layers.Average")
class Average(Merge):
"""Averages a list of inputs element-wise..
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Average()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.average([x1, x2])`
>>> y = keras.layers.Average()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.add(output, inputs[i])
return output / len(inputs)
@keras_export("keras.layers.average")
def average(inputs, **kwargs):
"""Functional interface to the `keras.layers.Average` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
        A tensor as the element-wise average of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.average([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.average([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Average(**kwargs)(inputs)
| keras/keras/layers/merging/average.py/0 | {
"file_path": "keras/keras/layers/merging/average.py",
"repo_id": "keras",
"token_count": 902
} | 153 |
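# Hedged usage sketch (not part of the source file above): a concrete numeric check
# of the running-sum-then-divide logic in _merge_function(). The input values are
# illustrative assumptions.
import numpy as np
import keras

x1 = np.array([[1.0, 3.0]])
x2 = np.array([[3.0, 5.0]])
y = keras.layers.Average()([x1, x2])
# y is [[2.0, 4.0]]: (x1 + x2) / 2, element-wise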
import numpy as np
import pytest
from keras import layers
from keras import ops
from keras import testing
class Cropping1DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_cropping_1d(self):
inputs = np.random.rand(3, 5, 7)
# Cropping with different values on the left and the right.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 2)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:3, :]),
)
# Same cropping on the left and the right.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 1)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
)
# Same cropping on the left and the right provided as an int.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": 1},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]),
)
# Cropping on the right only.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (0, 1)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 0:4, :]),
)
# Cropping on the left only.
self.run_layer_test(
layers.Cropping1D,
init_kwargs={"cropping": (1, 0)},
input_data=inputs,
expected_output=ops.convert_to_tensor(inputs[:, 1:5, :]),
)
@pytest.mark.requires_trainable_backend
def test_cropping_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 7))
cropped = layers.Cropping1D((1, 2))(input_layer)
self.assertEqual(cropped.shape, (1, None, 7))
def test_cropping_1d_errors_if_cropping_argument_invalid(self):
with self.assertRaises(ValueError):
layers.Cropping1D(cropping=(1,))
with self.assertRaises(ValueError):
layers.Cropping1D(cropping=(1, 2, 3))
with self.assertRaises(ValueError):
layers.Cropping1D(cropping="1")
def test_cropping_1d_errors_if_cropping_more_than_available(self):
with self.assertRaisesRegex(
ValueError,
"`cropping` parameter of `Cropping1D` layer must be smaller than",
):
input_layer = layers.Input(batch_shape=(3, 5, 7))
layers.Cropping1D(cropping=(2, 3))(input_layer)
def test_cropping_1d_error_on_excessive_cropping(self):
inputs = np.random.rand(3, 5, 7)
with self.assertRaisesRegex(
ValueError,
"`cropping` parameter of `Cropping1D` layer must be smaller than",
):
layer = layers.Cropping1D(cropping=(3, 3))
_ = layer(inputs)
| keras/keras/layers/reshaping/cropping1d_test.py/0 | {
"file_path": "keras/keras/layers/reshaping/cropping1d_test.py",
"repo_id": "keras",
"token_count": 1423
} | 154 |
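# Hedged usage sketch (not part of the test file above): how the (left, right)
# cropping tuple maps to the slices asserted in test_cropping_1d(). The array
# shape is an illustrative assumption.
import numpy as np
import keras

x = np.arange(3 * 5 * 7, dtype="float32").reshape(3, 5, 7)
layer = keras.layers.Cropping1D(cropping=(1, 2))
y = layer(x)
# Drops 1 step from the start and 2 from the end of axis 1,
# so y is equivalent to x[:, 1:3, :] with shape (3, 2, 7).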