| text (string, 5-261k chars) | id (string, 16-106 chars) | metadata (dict) | __index_level_0__ (int64, 0-266) |
|---|---|---|---|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.bounding_box.converters import _decode_deltas_to_boxes
from keras_cv.bounding_box.converters import _encode_box_to_deltas
from keras_cv.bounding_box.converters import convert_format
from keras_cv.bounding_box.ensure_tensor import ensure_tensor
from keras_cv.bounding_box.formats import CENTER_XYWH
from keras_cv.bounding_box.formats import REL_XYXY
from keras_cv.bounding_box.formats import REL_YXYX
from keras_cv.bounding_box.formats import XYWH
from keras_cv.bounding_box.formats import XYXY
from keras_cv.bounding_box.formats import YXYX
from keras_cv.bounding_box.iou import compute_ciou
from keras_cv.bounding_box.iou import compute_iou
from keras_cv.bounding_box.mask_invalid_detections import (
mask_invalid_detections,
)
from keras_cv.bounding_box.to_dense import to_dense
from keras_cv.bounding_box.to_ragged import to_ragged
from keras_cv.bounding_box.utils import as_relative
from keras_cv.bounding_box.utils import clip_to_image
from keras_cv.bounding_box.utils import is_relative
from keras_cv.bounding_box.validate_format import validate_format
| keras-cv/keras_cv/bounding_box/__init__.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/__init__.py",
"repo_id": "keras-cv",
"token_count": 548
} | 51 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.bounding_box.validate_format")
def validate_format(bounding_boxes, variable_name="bounding_boxes"):
"""validates that a given set of bounding boxes complies with KerasCV
format.
For a set of bounding boxes to be valid it must satisfy the following
conditions:
- `bounding_boxes` must be a dictionary
- contains keys `"boxes"` and `"classes"`
- each entry must have matching first two dimensions; representing the batch
axis and the number of boxes per image axis.
- either both `"boxes"` and `"classes"` are batched, or both are unbatched.
Additionally, one of the following must be satisfied:
- `"boxes"` and `"classes"` are both Ragged
- `"boxes"` and `"classes"` are both Dense
- `"boxes"` and `"classes"` are unbatched
Args:
bounding_boxes: dictionary of bounding boxes according to KerasCV
format.
variable_name: the name to use in error messages, defaults to
"bounding_boxes".
Raises:
ValueError if any of the above conditions are not met
"""
if not isinstance(bounding_boxes, dict):
raise ValueError(
f"Expected `{variable_name}` to be a dictionary, got "
f"`{variable_name}={bounding_boxes}`."
)
if not all([x in bounding_boxes for x in ["boxes", "classes"]]):
raise ValueError(
f"Expected `{variable_name}` to be a dictionary containing keys "
"`'classes'` and `'boxes'`. Got "
f"`{variable_name}.keys()={bounding_boxes.keys()}`."
)
boxes = bounding_boxes.get("boxes")
classes = bounding_boxes.get("classes")
info = {}
is_batched = len(boxes.shape) == 3
info["is_batched"] = is_batched
info["ragged"] = isinstance(boxes, tf.RaggedTensor)
if not is_batched:
if boxes.shape[:1] != classes.shape[:1]:
raise ValueError(
"Expected `boxes` and `classes` to have matching dimensions "
"on the first axis when operating in unbatched mode. Got "
f"`boxes.shape={boxes.shape}`, `classes.shape={classes.shape}`."
)
info["classes_one_hot"] = len(classes.shape) == 2
# No Ragged checks needed in unbatched mode.
return info
info["classes_one_hot"] = len(classes.shape) == 3
if isinstance(boxes, tf.RaggedTensor) != isinstance(
classes, tf.RaggedTensor
):
raise ValueError(
"Either both `boxes` and `classes` "
"should be Ragged, or neither should be ragged."
f" Got `type(boxes)={type(boxes)}`, type(classes)={type(classes)}."
)
# Batched mode checks
if boxes.shape[:2] != classes.shape[:2]:
raise ValueError(
"Expected `boxes` and `classes` to have matching dimensions "
"on the first two axes when operating in batched mode. "
f"Got `boxes.shape={boxes.shape}`, `classes.shape={classes.shape}`."
)
return info
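# Minimal usage sketch (illustrative only, not part of the library): shows the
# `info` dict that `validate_format` returns for a dense, batched input that
# follows the KerasCV bounding box format described above.
if __name__ == "__main__":
    sample = {
        "boxes": tf.constant([[[0.0, 0.0, 10.0, 10.0]]]),  # shape (1, 1, 4)
        "classes": tf.constant([[0.0]]),  # shape (1, 1)
    }
    info = validate_format(sample)
    # Expected: {'is_batched': True, 'ragged': False, 'classes_one_hot': False}
    print(info)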
| keras-cv/keras_cv/bounding_box/validate_format.py/0 | {
"file_path": "keras-cv/keras_cv/bounding_box/validate_format.py",
"repo_id": "keras-cv",
"token_count": 1371
} | 52 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.core.factor_sampler.factor_sampler import FactorSampler
@keras_cv_export("keras_cv.core.UniformFactorSampler")
class UniformFactorSampler(FactorSampler):
"""UniformFactorSampler samples factors uniformly from a range.
This is useful in cases where a user wants an augmentation layer to sample
the strength of its augmentations uniformly from a range on every call.
Args:
lower: the lower bound of values returned from `__call__()`.
upper: the upper bound of values returned from `__call__()`.
seed: An integer or Tensor used to seed the random number generator.
Must have dtype int32 or int64. (When using XLA, only int32 is
allowed.)
Usage:
```python
uniform_factor = keras_cv.UniformFactorSampler(0, 0.5)
random_sharpness = keras_cv.layers.RandomSharpness(factor=uniform_factor)
# random_sharpness will now sample factors between 0 and 0.5
```
"""
def __init__(self, lower, upper, seed=None):
self.lower = lower
self.upper = upper
self.seed = seed
def __call__(self, shape=(), dtype="float32"):
return tf.random.uniform(
shape,
seed=self.seed,
minval=self.lower,
maxval=self.upper,
dtype=dtype,
)
def get_config(self):
return {
"lower": self.lower,
"upper": self.upper,
"seed": self.seed,
}
@classmethod
def from_config(cls, config):
return cls(**config)
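# Minimal usage sketch (illustrative only, not part of the library): each call
# to the sampler draws a fresh factor uniformly from [lower, upper), and
# `get_config`/`from_config` round-trip the sampler's bounds.
if __name__ == "__main__":
    sampler = UniformFactorSampler(lower=0.0, upper=0.5, seed=42)
    print(sampler(shape=(2,)))  # two factors sampled from [0.0, 0.5)
    clone = UniformFactorSampler.from_config(sampler.get_config())
    print(clone.lower, clone.upper)  # 0.0 0.5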
| keras-cv/keras_cv/core/factor_sampler/uniform_factor_sampler.py/0 | {
"file_path": "keras-cv/keras_cv/core/factor_sampler/uniform_factor_sampler.py",
"repo_id": "keras-cv",
"token_count": 819
} | 53 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import keypoint
from keras_cv.tests.test_case import TestCase
xy_keypoints = np.array(
[[[10, 20], [110, 120], [210, 220]], [[20, 30], [120, 130], [220, 230]]],
dtype="float32",
)
rel_xy_keypoints = np.array(
[
[[0.01, 0.04], [0.11, 0.24], [0.21, 0.44]],
[[0.02, 0.06], [0.12, 0.26], [0.22, 0.46]],
],
dtype="float32",
)
images = np.ones([2, 500, 1000, 3])
keypoints = {
"xy": xy_keypoints,
"rel_xy": rel_xy_keypoints,
}
test_cases = [
(f"{source}_{target}", source, target)
for (source, target) in itertools.permutations(keypoints.keys(), 2)
] + [("xy_xy", "xy", "xy")]
class ConvertersTestCase(TestCase):
@parameterized.named_parameters(*test_cases)
def test_converters(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_unbatched(self, source, target):
source_keypoints = keypoints[source][0]
target_keypoints = keypoints[target][0]
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images[0]
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_ragged_groups(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
def create_ragged_group(ins):
res = []
for b, groups in zip(ins, [[1, 2], [0, 3]]):
res.append(tf.RaggedTensor.from_row_lengths(b, groups))
return tf.stack(res, axis=0)
source_keypoints = create_ragged_group(source_keypoints)
target_keypoints = create_ragged_group(target_keypoints)
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
@parameterized.named_parameters(*test_cases)
def test_converters_with_metadata(self, source, target):
source_keypoints = keypoints[source]
target_keypoints = keypoints[target]
def add_metadata(ins):
return tf.concat([ins, np.ones([2, 3, 5])], axis=-1)
source_keypoints = add_metadata(source_keypoints)
target_keypoints = add_metadata(target_keypoints)
self.assertAllClose(
keypoint.convert_format(
source_keypoints, source=source, target=target, images=images
),
target_keypoints,
)
def test_raise_errors_when_missing_shape(self):
with self.assertRaises(ValueError) as e:
keypoint.convert_format(
keypoints["xy"], source="xy", target="rel_xy"
)
self.assertEqual(
str(e.exception),
"convert_format() must receive `images` when transforming "
"between relative and absolute formats. convert_format() "
"received source=`xy`, target=`rel_xy`, but images=None",
)
@parameterized.named_parameters(
(
"keypoint_rank",
np.ones([2, 3, 4, 2, 1]),
None,
"Expected keypoints rank to be in [2, 4], got "
"len(keypoints.shape)=5.",
),
(
"images_rank",
np.ones([4, 2]),
np.ones([35, 35]),
"Expected images rank to be 3 or 4, got len(images.shape)=2.",
),
(
"batch_mismatch",
np.ones([2, 4, 2]),
np.ones([35, 35, 3]),
"convert_format() expects both `keypoints` and `images` to be "
"batched or both unbatched. Received len(keypoints.shape)=3, "
"len(images.shape)=3. Expected either len(keypoints.shape)=2 and "
"len(images.shape)=3, or len(keypoints.shape)>=3 and "
"len(images.shape)=4.",
),
)
def test_input_format_exception(self, keypoints, images, expected):
with self.assertRaises(ValueError) as e:
keypoint.convert_format(
keypoints, source="xy", target="rel_xy", images=images
)
self.assertEqual(str(e.exception), expected)
| keras-cv/keras_cv/keypoint/converters_test.py/0 | {
"file_path": "keras-cv/keras_cv/keypoint/converters_test.py",
"repo_id": "keras-cv",
"token_count": 2339
} | 54 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from absl.testing import parameterized
from keras_cv import layers as cv_layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class AnchorGeneratorTest(TestCase):
@parameterized.named_parameters(
("unequal_lists", [0, 1, 2], [1]),
("unequal_levels_dicts", {"level_1": [0, 1, 2]}, {"1": [0, 1, 2]}),
)
def test_raises_when_strides_not_equal_to_sizes(self, sizes, strides):
with self.assertRaises(ValueError):
cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
strides=strides,
aspect_ratios=[3 / 4, 1, 4 / 3],
scales=[0.5, 1.0, 1.5],
)
def test_raises_batched_images(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [1.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(4, 8, 8, 3))
with self.assertRaisesRegex(ValueError, "rank"):
_ = anchor_generator(image=image)
@parameterized.parameters(
((640, 480, 3),),
((512, 512, 3),),
((224, 224, 3),),
)
def test_output_shapes_image(self, image_shape):
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=image_shape)
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
boxes = anchor_generator(image=image)
boxes = ops.concatenate(list(boxes.values()), axis=0)
expected_box_shapes = ops.cast(
ops.ceil(image_shape[0] / ops.array(strides))
* ops.ceil(image_shape[1] / ops.array(strides))
* len(scales)
* len(aspect_ratios),
"int32",
)
sum_expected_shape = (ops.sum(expected_box_shapes), 4)
self.assertEqual(boxes.shape, sum_expected_shape)
@parameterized.parameters(
((640, 480, 3),),
((512, 512, 3),),
((224, 224, 3),),
)
def test_output_shapes_image_shape(self, image_shape):
strides = [2**i for i in range(3, 8)]
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [x**2 for x in [32.0, 64.0, 128.0, 256.0, 512.0]]
aspect_ratios = [0.5, 1.0, 2.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
boxes = anchor_generator(image_shape=image_shape)
boxes = ops.concatenate(list(boxes.values()), axis=0)
expected_box_shapes = ops.cast(
ops.ceil(image_shape[0] / ops.array(strides))
* ops.ceil(image_shape[1] / ops.array(strides))
* len(scales)
* len(aspect_ratios),
"int32",
)
sum_expected_shape = (ops.sum(expected_box_shapes), 4)
self.assertEqual(boxes.shape, sum_expected_shape)
def test_hand_crafted_aspect_ratios(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [3 / 4, 1.0, 4 / 3]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(8, 8, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
# (width / 4) * (height / 4) * len(aspect_ratios) = 2 * 2 * 3 = 12 anchors
self.assertAllEqual(level_0.shape, [12, 4])
image = np.random.uniform(size=(4, 4, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
expected_boxes = [
[0.267949224, -0.309401035, 3.7320509, 4.30940104],
[0, 0, 4, 4],
[-0.309401035, 0.267949104, 4.30940104, 3.7320509],
]
self.assertAllClose(level_0, expected_boxes)
def test_hand_crafted_strides(self):
strides = [4]
scales = [1.0]
sizes = [4]
aspect_ratios = [1.0]
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="xyxy",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
)
image = np.random.uniform(size=(8, 8, 3))
boxes = anchor_generator(image=image)
level_0 = boxes[0]
expected_boxes = [
[0, 0, 4, 4],
[4, 0, 8, 4],
[0, 4, 4, 8],
[4, 4, 8, 8],
]
self.assertAllClose(level_0, expected_boxes)
def test_relative_generation(self):
strides = [8, 16, 32]
# 0, 1 / 3, 2 / 3
scales = [2**x for x in [0, 1 / 3, 2 / 3]]
sizes = [32.0, 64.0, 128.0]
aspect_ratios = [0.5, 1.0, 2.0]
image = np.random.uniform(size=(512, 512, 3))
anchor_generator = cv_layers.AnchorGenerator(
bounding_box_format="rel_yxyx",
sizes=sizes,
aspect_ratios=aspect_ratios,
scales=scales,
strides=strides,
clip_boxes=False,
)
boxes = anchor_generator(image=image)
boxes = np.concatenate(
[ops.convert_to_numpy(x) for x in list(boxes.values())], axis=0
)
self.assertAllLessEqual(boxes, 1.5)
self.assertAllGreaterEqual(boxes, -0.50)
| keras-cv/keras_cv/layers/object_detection/anchor_generator_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/anchor_generator_test.py",
"repo_id": "keras-cv",
"token_count": 3331
} | 55 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def balanced_sample(
positive_matches: tf.Tensor,
negative_matches: tf.Tensor,
num_samples: int,
positive_fraction: float,
):
"""
Sampling ops to balance positive and negative samples, deals with both
batched and unbatched inputs.
Args:
positive_matches: [N] or [batch_size, N] boolean Tensor, True for
indicating the index is a positive sample
negative_matches: [N] or [batch_size, N] boolean Tensor, True for
indicating the index is a negative sample
num_samples: int, representing the number of samples to collect
positive_fraction: float. 0.5 means positive samples should be half
of all collected samples.
Returns:
selected_indicators: [N] or [batch_size, N]
integer Tensor, 1 for indicating the index is sampled, 0 for
indicating the index is not sampled.
"""
N = positive_matches.get_shape().as_list()[-1]
if N < num_samples:
raise ValueError(
"passed in {positive_matches.shape} has less element than "
f"{num_samples}"
)
# random_val = tf.random.uniform(tf.shape(positive_matches), minval=0.,
# maxval=1.)
zeros = tf.zeros_like(positive_matches, dtype=tf.float32)
ones = tf.ones_like(positive_matches, dtype=tf.float32)
ones_rand = ones + tf.random.uniform(ones.shape, minval=-0.2, maxval=0.2)
halfs = 0.5 * tf.ones_like(positive_matches, dtype=tf.float32)
halfs_rand = halfs + tf.random.uniform(halfs.shape, minval=-0.2, maxval=0.2)
values = zeros
values = tf.where(positive_matches, ones_rand, values)
values = tf.where(negative_matches, halfs_rand, values)
num_pos_samples = int(num_samples * positive_fraction)
valid_matches = tf.logical_or(positive_matches, negative_matches)
# this might contain negative samples as well
_, positive_indices = tf.math.top_k(values, k=num_pos_samples)
selected_indicators = tf.cast(
tf.reduce_sum(tf.one_hot(positive_indices, depth=N), axis=-2), tf.bool
)
# setting all selected samples to zeros
values = tf.where(selected_indicators, zeros, values)
# setting all excessive positive matches to zeros as well
values = tf.where(positive_matches, zeros, values)
num_neg_samples = num_samples - num_pos_samples
_, negative_indices = tf.math.top_k(values, k=num_neg_samples)
selected_indices = tf.concat([positive_indices, negative_indices], axis=-1)
selected_indicators = tf.reduce_sum(
tf.one_hot(selected_indices, depth=N), axis=-2
)
selected_indicators = tf.minimum(
selected_indicators, tf.ones_like(selected_indicators)
)
selected_indicators = tf.where(
valid_matches, selected_indicators, tf.zeros_like(selected_indicators)
)
return selected_indicators
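# Minimal usage sketch (illustrative only, not part of the library): a single
# row of 8 candidates where the first two are positive matches and the next
# four are negative matches. With num_samples=4 and positive_fraction=0.5, the
# returned indicator marks 2 positive and 2 (randomly chosen) negative indices
# and never the invalid last two slots.
if __name__ == "__main__":
    positives = tf.constant(
        [[True, True, False, False, False, False, False, False]]
    )
    negatives = tf.constant(
        [[False, False, True, True, True, True, False, False]]
    )
    indicators = balanced_sample(
        positives, negatives, num_samples=4, positive_fraction=0.5
    )
    print(indicators)  # e.g. [[1. 1. 0. 1. 1. 0. 0. 0.]]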
| keras-cv/keras_cv/layers/object_detection/sampling.py/0 | {
"file_path": "keras-cv/keras_cv/layers/object_detection/sampling.py",
"repo_id": "keras-cv",
"token_count": 1260
} | 56 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from keras_cv.backend import ops
from keras_cv.layers import preprocessing
from keras_cv.tests.test_case import TestCase
class AutoContrastTest(TestCase):
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype=np.float32,
)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype=np.uint8)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype=np.float32)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = preprocessing.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
| keras-cv/keras_cv/layers/preprocessing/auto_contrast_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/auto_contrast_test.py",
"repo_id": "keras-cv",
"token_count": 1667
} | 57 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv import core
from keras_cv import layers
from keras_cv.backend import ops
from keras_cv.tests.test_case import TestCase
class JitteredResizeTest(TestCase):
batch_size = 4
height = 9
width = 8
seed = 13
target_size = (4, 4)
def test_train_augments_image(self):
# Checks if original and augmented images are different
input_image_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_image_shape, seed=self.seed)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
output = layer(image, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
self.assertNotAllClose(output, input_image_resized)
def test_augment_bounding_box_single(self):
image = tf.zeros([20, 20, 3])
boxes = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
input = {"images": image, "bounding_boxes": boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xywh",
seed=self.seed,
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_output = {
"boxes": tf.convert_to_tensor([[0, 0, 1, 1]], dtype=tf.float32),
"classes": tf.convert_to_tensor([0], dtype=tf.float32),
}
self.assertAllClose(
expected_output["boxes"],
output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_batched_input(self):
image = tf.zeros([20, 20, 3])
bounding_boxes = {
"classes": tf.convert_to_tensor([[0, 0], [0, 0]]),
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
]
),
}
input = {"images": [image, image], "bounding_boxes": bounding_boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xyxy",
seed=self.seed,
)
output = layer(input, training=True)
output["bounding_boxes"] = bounding_box.to_dense(
output["bounding_boxes"]
)
expected_output = {
"classes": tf.convert_to_tensor([[0, 0], [0, 0]], dtype=tf.float32),
"boxes": tf.convert_to_tensor(
[
[[0, 0, 1, 1], [0, 0, 1, 1]],
[[0, 0, 1, 1], [0, 0, 1, 1]],
],
dtype=tf.float32,
),
}
self.assertAllClose(
expected_output["boxes"],
output["bounding_boxes"]["boxes"],
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_augment_boxes_ragged(self):
image = tf.zeros([2, 20, 20, 3])
boxes = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant(
[
[
0,
0,
],
[0],
],
dtype=tf.float32,
),
}
input = {"images": image, "bounding_boxes": boxes}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="rel_xyxy",
seed=self.seed,
)
output = layer(input, training=True)
# the result boxes will still have the entire image in them
expected_output = {
"boxes": tf.ragged.constant(
[[[0, 0, 1, 1], [0, 0, 1, 1]], [[0, 0, 1, 1]]], dtype=tf.float32
),
"classes": tf.ragged.constant(
[
[
0.0,
0.0,
],
[0.0],
],
dtype=tf.float32,
),
}
self.assertAllClose(
expected_output["boxes"].to_tensor(),
output["bounding_boxes"]["boxes"].to_tensor(),
)
self.assertAllClose(
expected_output["classes"], output["bounding_boxes"]["classes"]
)
def test_independence_of_jittered_resize_on_batched_images(self):
image = tf.random.uniform((100, 100, 3))
batched_images = tf.stack((image, image), axis=0)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
results = layer(batched_images)
self.assertNotAllClose(results[0], results[1])
def test_augments_segmentation_masks(self):
input_shape = (self.batch_size, self.height, self.width, 3)
image = tf.random.uniform(shape=input_shape, seed=self.seed)
mask = tf.cast(
2 * tf.random.uniform(shape=input_shape, seed=self.seed),
tf.int32,
)
inputs = {"images": image, "segmentation_masks": mask}
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
seed=self.seed,
)
output = layer(inputs, training=True)
input_image_resized = tf.image.resize(image, self.target_size)
input_mask_resized = tf.image.resize(
mask, self.target_size, method="nearest"
)
self.assertNotAllClose(output["images"], input_image_resized)
self.assertNotAllClose(output["segmentation_masks"], input_mask_resized)
def test_config_with_custom_name(self):
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
name="image_preproc",
)
config = layer.get_config()
layer_1 = layers.JitteredResize.from_config(config)
self.assertEqual(layer_1.name, layer.name)
def test_output_dtypes(self):
inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64")
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
)
self.assertAllEqual(
ops.convert_to_numpy(layer(inputs)).dtype, "float32"
)
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
dtype="uint8",
)
self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
def test_config(self):
layer = layers.JitteredResize(
target_size=self.target_size,
scale_factor=(3 / 4, 4 / 3),
bounding_box_format="xyxy",
)
config = layer.get_config()
self.assertEqual(config["target_size"], self.target_size)
self.assertTrue(
isinstance(config["scale_factor"], core.UniformFactorSampler)
)
self.assertEqual(config["bounding_box_format"], "xyxy")
| keras-cv/keras_cv/layers/preprocessing/jittered_resize_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/jittered_resize_test.py",
"repo_id": "keras-cv",
"token_count": 4239
} | 58 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing as preprocessing_utils
@keras_cv_export("keras_cv.layers.RandomBrightness")
class RandomBrightness(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly adjusts brightness.
This layer will randomly increase/reduce the brightness for the input RGB
images.
Note that different brightness adjustment factors
will be applied to each of the images in the batch.
Args:
factor: Float or a list/tuple of 2 floats between -1.0 and 1.0. The
factor is used to determine the lower bound and upper bound of the
brightness adjustment. A float value will be chosen randomly between
the limits. When -1.0 is chosen, the output image will be black, and
when 1.0 is chosen, the image will be fully white. When only one float
is provided, e.g. 0.2, then -0.2 will be used for the lower bound and 0.2
will be used for the upper bound.
value_range: Optional list/tuple of 2 floats for the lower and upper limit
of the values of the input data, defaults to [0.0, 255.0]. Can be
changed to e.g. [0.0, 1.0] if the image input has been scaled before
this layer. The brightness adjustment will be scaled to this range, and
the output values will be clipped to this range.
seed: optional integer, for fixed RNG behavior.
Inputs: 3D (HWC) or 4D (NHWC) tensor, with float or int dtype. Input pixel
values can be of any range (e.g. `[0., 1.)` or `[0, 255]`)
Output: 3D (HWC) or 4D (NHWC) tensor with brightness adjusted based on the
`factor`. By default, the layer will output floats. The output value will
be clipped to the provided `value_range` (by default `[0, 255]`, the valid
range of RGB colors).
Usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_brightness = keras_cv.layers.preprocessing.RandomBrightness(factor=0.2)
augmented_images = random_brightness(images)
```
"""
def __init__(self, factor, value_range=(0, 255), seed=None, **kwargs):
super().__init__(seed=seed, **kwargs)
if isinstance(factor, float) or isinstance(factor, int):
factor = (-factor, factor)
self.factor = preprocessing_utils.parse_factor(
factor, min_value=-1, max_value=1
)
self.value_range = value_range
self.seed = seed
def get_random_transformation_batch(self, batch_size, **kwargs):
rgb_delta_shape = (batch_size, 1, 1, 1)
random_rgb_deltas = self.factor(shape=rgb_delta_shape)
random_rgb_deltas = random_rgb_deltas * (
self.value_range[1] - self.value_range[0]
)
return random_rgb_deltas
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
transformation = tf.expand_dims(transformation, axis=0)
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def augment_images(self, images, transformations, **kwargs):
rank = images.shape.rank
if rank != 4:
raise ValueError(
"Expected the input image to be rank 4. Got "
f"inputs.shape = {images.shape}"
)
rgb_deltas = tf.cast(transformations, images.dtype)
images += rgb_deltas
return tf.clip_by_value(
images, self.value_range[0], self.value_range[1]
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs):
return bounding_boxes
def get_config(self):
config = {
"factor": self.factor,
"value_range": self.value_range,
"seed": self.seed,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if isinstance(config["factor"], dict):
config["factor"] = keras.utils.deserialize_keras_object(
config["factor"]
)
return cls(**config)
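# Minimal usage sketch (illustrative only; argument values are arbitrary):
# with identical factor bounds (0.5, 0.5), every image receives a brightness
# delta of 0.5 * (255 - 0) = 127.5, which is then clipped to `value_range`.
if __name__ == "__main__":
    layer = RandomBrightness(factor=(0.5, 0.5), value_range=(0, 255))
    images = tf.zeros((2, 4, 4, 3))
    brightened = layer(images, training=True)
    print(float(tf.reduce_max(brightened)))  # 127.5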
| keras-cv/keras_cv/layers/preprocessing/random_brightness.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_brightness.py",
"repo_id": "keras-cv",
"token_count": 2010
} | 59 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import fill_utils
from keras_cv.utils import preprocessing
H_AXIS = -3
W_AXIS = -2
@keras_cv_export("keras_cv.layers.RandomCutout")
class RandomCutout(VectorizedBaseImageAugmentationLayer):
"""Randomly cut out rectangles from images and fill them.
Args:
height_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `height_factor` controls the size of the
cutouts. `height_factor=0.0` means the rectangle will be of size 0%
of the image height, `height_factor=0.1` means the rectangle will
have a size of 10% of the image height, and so forth. Values should
be between `0.0` and `1.0`. If a tuple is used, a `height_factor`
is sampled between the two values for every image augmented. If a
single float is used, a value between `0.0` and the passed float is
sampled. In order to ensure the value is always the same, please
pass a tuple with two identical floats: `(0.5, 0.5)`.
width_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. `width_factor` controls the size of the
cutouts. `width_factor=0.0` means the rectangle will be of size 0%
of the image width, `width_factor=0.1` means the rectangle will
have a size of 10% of the image width, and so forth.
Values should be between `0.0` and `1.0`. If a tuple is used, a
`width_factor` is sampled between the two values for every image
augmented. If a single float is used, a value between `0.0` and the
passed float is sampled. In order to ensure the value is always the
same, please pass a tuple with two identical floats: `(0.5, 0.5)`.
fill_mode: Pixels inside the patches are filled according to the given
mode (one of `{"constant", "gaussian_noise"}`).
- *constant*: Pixels are filled with the same constant value.
- *gaussian_noise*: Pixels are filled with random gaussian noise.
fill_value: a float represents the value to be filled inside the patches
when `fill_mode="constant"`.
seed: Integer. Used to create a random seed.
Sample usage:
```python
(images, labels), _ = keras.datasets.cifar10.load_data()
random_cutout = keras_cv.layers.preprocessing.RandomCutout(0.5, 0.5)
augmented_images = random_cutout(images)
```
"""
def __init__(
self,
height_factor,
width_factor,
fill_mode="constant",
fill_value=0.0,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
self.height_factor = preprocessing.parse_factor(
height_factor, param_name="height_factor", seed=seed
)
self.width_factor = preprocessing.parse_factor(
width_factor, param_name="width_factor", seed=seed
)
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
if fill_mode not in ["gaussian_noise", "constant"]:
raise ValueError(
'`fill_mode` should be "gaussian_noise" '
f'or "constant". Got `fill_mode`={fill_mode}'
)
def get_random_transformation_batch(self, batch_size, images, **kwargs):
centers_x, centers_y = self._compute_rectangle_position(images)
rectangles_height, rectangles_width = self._compute_rectangle_size(
images
)
return {
"centers_x": centers_x,
"centers_y": centers_y,
"rectangles_height": rectangles_height,
"rectangles_width": rectangles_width,
}
def augment_images(self, images, transformations=None, **kwargs):
"""Apply random cutout."""
centers_x, centers_y = (
transformations["centers_x"],
transformations["centers_y"],
)
rectangles_height, rectangles_width = (
transformations["rectangles_height"],
transformations["rectangles_width"],
)
rectangles_fill = self._compute_rectangle_fill(images)
images = fill_utils.fill_rectangle(
images,
centers_x,
centers_y,
rectangles_width,
rectangles_height,
rectangles_fill,
)
return images
def augment_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def augment_labels(self, labels, transformations=None, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
return segmentation_masks
def augment_keypoints(self, keypoints, transformations, **kwargs):
return keypoints
def augment_targets(self, targets, transformations, **kwargs):
return targets
def augment_ragged_image(self, image, transformation, **kwargs):
image = tf.expand_dims(image, axis=0)
centers_x, centers_y = (
transformation["centers_x"],
transformation["centers_y"],
)
rectangles_height, rectangles_width = (
transformation["rectangles_height"],
transformation["rectangles_width"],
)
transformation = {
"centers_x": tf.expand_dims(centers_x, axis=0),
"centers_y": tf.expand_dims(centers_y, axis=0),
"rectangles_height": tf.expand_dims(rectangles_height, axis=0),
"rectangles_width": tf.expand_dims(rectangles_width, axis=0),
}
image = self.augment_images(
images=image, transformations=transformation, **kwargs
)
return tf.squeeze(image, axis=0)
def _get_image_shape(self, images):
if isinstance(images, tf.RaggedTensor):
heights = tf.reshape(images.row_lengths(), (-1,))
widths = tf.reshape(
tf.reduce_max(images.row_lengths(axis=2), 1), (-1,)
)
else:
batch_size = tf.shape(images)[0]
heights = tf.repeat(tf.shape(images)[H_AXIS], repeats=[batch_size])
heights = tf.reshape(heights, shape=(-1,))
widths = tf.repeat(tf.shape(images)[W_AXIS], repeats=[batch_size])
widths = tf.reshape(widths, shape=(-1,))
return tf.cast(heights, dtype=tf.int32), tf.cast(widths, dtype=tf.int32)
def _compute_rectangle_position(self, inputs):
batch_size = tf.shape(inputs)[0]
heights, widths = self._get_image_shape(inputs)
# generate values in float32 and then cast (i.e. round) to int32 because
# random.uniform does not support maxval broadcasting for integer types.
# Needed because maxval is a 1-D tensor to support ragged inputs.
heights = tf.cast(heights, dtype=tf.float32)
widths = tf.cast(widths, dtype=tf.float32)
center_x = self._random_generator.uniform(
(batch_size,), 0, widths, dtype=tf.float32
)
center_y = self._random_generator.uniform(
(batch_size,), 0, heights, dtype=tf.float32
)
center_x = tf.cast(center_x, tf.int32)
center_y = tf.cast(center_y, tf.int32)
return center_x, center_y
def _compute_rectangle_size(self, inputs):
batch_size = tf.shape(inputs)[0]
images_heights, images_widths = self._get_image_shape(inputs)
height = self.height_factor(shape=(batch_size,))
width = self.width_factor(shape=(batch_size,))
height = height * tf.cast(images_heights, tf.float32)
width = width * tf.cast(images_widths, tf.float32)
height = tf.cast(tf.math.ceil(height), tf.int32)
width = tf.cast(tf.math.ceil(width), tf.int32)
height = tf.minimum(height, images_heights)
width = tf.minimum(width, images_widths)
return height, width
def _compute_rectangle_fill(self, inputs):
input_shape = tf.shape(inputs)
if self.fill_mode == "constant":
fill_value = tf.fill(input_shape, self.fill_value)
fill_value = tf.cast(fill_value, dtype=self.compute_dtype)
else:
# gaussian noise
fill_value = tf.random.normal(input_shape, dtype=self.compute_dtype)
# rescale the random noise to the original image range
image_max = tf.reduce_max(inputs)
image_min = tf.reduce_min(inputs)
fill_max = tf.reduce_max(fill_value)
fill_min = tf.reduce_min(fill_value)
fill_value = (image_max - image_min) * (fill_value - fill_min) / (
fill_max - fill_min
) + image_min
return fill_value
def get_config(self):
config = super().get_config()
config.update(
{
"height_factor": self.height_factor,
"width_factor": self.width_factor,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"seed": self.seed,
}
)
return config
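# Minimal usage sketch (illustrative only, not part of the library): with
# identical factor bounds of (0.5, 0.5) the sampled cutout covers
# ceil(0.5 * H) x ceil(0.5 * W) pixels, so on an all-ones 8x8 image at most
# 4 * 4 = 16 pixels per channel are zeroed (fewer when the randomly placed
# rectangle is clipped at the image border).
if __name__ == "__main__":
    layer = RandomCutout(height_factor=(0.5, 0.5), width_factor=(0.5, 0.5))
    images = tf.ones((1, 8, 8, 3))
    out = layer(images, training=True)
    print(int(tf.reduce_sum(tf.cast(out[..., 0] == 0.0, tf.int32))))  # <= 16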
| keras-cv/keras_cv/layers/preprocessing/random_cutout.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_cutout.py",
"repo_id": "keras-cv",
"token_count": 4341
} | 60 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501
VectorizedBaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing
@keras_cv_export("keras_cv.layers.RandomShear")
class RandomShear(VectorizedBaseImageAugmentationLayer):
"""A preprocessing layer which randomly shears images.
This layer will apply random shearings to each image, filling empty space
according to `fill_mode`.
Input pixel values can be of any range and any data type.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format
Args:
x_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, x_factor)`. Values represent a percentage of the
image to shear over. For example, 0.3 shears pixels up to 30% of the
way across the image. All provided values should be positive. If
`None` is passed, no shear occurs on the X axis. Defaults to `None`.
y_factor: A tuple of two floats, a single float or a
`keras_cv.FactorSampler`. For each augmented image a value is
sampled from the provided range. If a float is passed, the range is
interpreted as `(0, y_factor)`. Values represent a percentage of the
image to shear over. For example, 0.3 shears pixels up to 30% of the
way across the image. All provided values should be positive. If
`None` is passed, no shear occurs on the Y axis. Defaults to `None`.
interpolation: interpolation method used in the
`ImageProjectiveTransformV3` op. Supported values are `"nearest"`
and `"bilinear"`, defaults to `"bilinear"`.
fill_mode: fill_mode in the `ImageProjectiveTransformV3` op. Supported
values are `"reflect"`, `"wrap"`, `"constant"`, and `"nearest"`.
Defaults to `"reflect"`.
fill_value: fill_value in the `ImageProjectiveTransformV3` op. A
`Tensor` of type `float32`. The value to be filled when fill_mode is
`"constant"`. Defaults to `0.0`.
bounding_box_format: The format of bounding boxes of input dataset.
Refer to
https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box/converters.py
for more details on supported bounding box formats.
seed: Integer. Used to create a random seed.
"""
def __init__(
self,
x_factor=None,
y_factor=None,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
bounding_box_format=None,
seed=None,
**kwargs,
):
super().__init__(seed=seed, **kwargs)
if x_factor is not None:
self.x_factor = preprocessing.parse_factor(
x_factor, max_value=None, param_name="x_factor", seed=seed
)
else:
self.x_factor = x_factor
if y_factor is not None:
self.y_factor = preprocessing.parse_factor(
y_factor, max_value=None, param_name="y_factor", seed=seed
)
else:
self.y_factor = y_factor
if x_factor is None and y_factor is None:
warnings.warn(
"RandomShear received both `x_factor=None` and `y_factor=None`."
" As a result, the layer will perform no augmentation."
)
self.interpolation = interpolation
self.fill_mode = fill_mode
self.fill_value = fill_value
self.seed = seed
self.bounding_box_format = bounding_box_format
def get_random_transformation_batch(self, batch_size, **kwargs):
transformations = {"shear_x": None, "shear_y": None}
if self.x_factor is not None:
invert = preprocessing.batch_random_inversion(
self._random_generator, batch_size
)
transformations["shear_x"] = (
self.x_factor(shape=(batch_size, 1)) * invert
)
if self.y_factor is not None:
invert = preprocessing.batch_random_inversion(
self._random_generator, batch_size
)
transformations["shear_y"] = (
self.y_factor(shape=(batch_size, 1)) * invert
)
return transformations
def augment_ragged_image(self, image, transformation, **kwargs):
images = tf.expand_dims(image, axis=0)
new_transformation = {"shear_x": None, "shear_y": None}
shear_x = transformation["shear_x"]
if shear_x is not None:
new_transformation["shear_x"] = tf.expand_dims(shear_x, axis=0)
shear_y = transformation["shear_y"]
if shear_y is not None:
new_transformation["shear_y"] = tf.expand_dims(shear_y, axis=0)
output = self.augment_images(images, new_transformation)
return tf.squeeze(output, axis=0)
def augment_images(self, images, transformations, **kwargs):
x, y = transformations["shear_x"], transformations["shear_y"]
if x is not None:
transforms_x = self._build_shear_x_transform_matrix(x)
images = preprocessing.transform(
images=images,
transforms=transforms_x,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
if y is not None:
transforms_y = self._build_shear_y_transform_matrix(y)
images = preprocessing.transform(
images=images,
transforms=transforms_y,
interpolation=self.interpolation,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
return images
@staticmethod
def _build_shear_x_transform_matrix(shear_x):
"""Build transform matrix for horizontal shear.
The transform matrix looks like:
(1, x, 0)
(0, 1, 0)
(0, 0, 1)
where the last entry is implicit.
We flatten the matrix to `[1.0, x, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]` for
use with ImageProjectiveTransformV3.
"""
batch_size = tf.shape(shear_x)[0]
return tf.concat(
values=[
tf.ones((batch_size, 1), tf.float32),
shear_x,
tf.zeros((batch_size, 2), tf.float32),
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 3), tf.float32),
],
axis=1,
)
@staticmethod
def _build_shear_y_transform_matrix(shear_y):
"""Build transform matrix for vertical shear.
The transform matrix looks like:
(1, 0, 0)
(y, 1, 0)
(0, 0, 1)
where the last entry is implicit.
We flatten the matrix to `[1.0, 0.0, 0.0, y, 1.0, 0.0, 0.0, 0.0]` for
use with ImageProjectiveTransformV3.
"""
batch_size = tf.shape(shear_y)[0]
return tf.concat(
values=[
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 2), tf.float32),
shear_y,
tf.ones((batch_size, 1), tf.float32),
tf.zeros((batch_size, 3), tf.float32),
],
axis=1,
)
def augment_labels(self, labels, transformations, **kwargs):
return labels
def augment_segmentation_masks(
self, segmentation_masks, transformations, **kwargs
):
x, y = transformations["shear_x"], transformations["shear_y"]
if x is not None:
transforms_x = self._build_shear_x_transform_matrix(x)
segmentation_masks = preprocessing.transform(
images=segmentation_masks,
transforms=transforms_x,
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
if y is not None:
transforms_y = self._build_shear_y_transform_matrix(y)
segmentation_masks = preprocessing.transform(
images=segmentation_masks,
transforms=transforms_y,
interpolation="nearest",
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
return segmentation_masks
def augment_bounding_boxes(
self, bounding_boxes, transformations, images=None, **kwargs
):
"""Augments bounding boxes after a shear operations.
The algorithm to update (x,y) point coordinates after shearing, tells us
to matrix multiply them with inverted transform matrix. This is:
```
# for shear x # for shear_y
(1.0, -shear_x) (x) (1.0, 0.0) (x)
(0.0, 1.0 ) (y) (-shear_y, 1.0) (y)
```
We can simplify this equation: any new coordinate can be calculated by
`x = x - (shear_x * y)` and `(y = y - (shear_y * x)`
Notice that each coordinate has to be calculated twice, e.g. `x1` will
be affected differently by y1 (top) and y2 (bottom). Therefore, we
calculate both `x1_top` and `x1_bottom` and choose the final x1
depending on the sign of the used shear value.
"""
if self.bounding_box_format is None:
raise ValueError(
"`RandomShear()` was called with bounding boxes,"
"but no `bounding_box_format` was specified in the constructor."
"Please specify a bounding box format in the constructor. i.e."
"`RandomShear(bounding_box_format='xyxy')`"
)
# Edge case: boxes is a tf.RaggedTensor
if isinstance(bounding_boxes["boxes"], tf.RaggedTensor):
bounding_boxes = bounding_box.to_dense(
bounding_boxes, default_value=0
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
images=images,
dtype=self.compute_dtype,
)
shear_x_amount = transformations["shear_x"]
shear_y_amount = transformations["shear_y"]
x1, y1, x2, y2 = tf.split(bounding_boxes["boxes"], 4, axis=-1)
# Squeeze redundant extra dimension as it messes multiplication
# [num_batches, num_boxes, 1] -> [num_batches, num_boxes]
x1 = tf.squeeze(x1, axis=-1)
y1 = tf.squeeze(y1, axis=-1)
x2 = tf.squeeze(x2, axis=-1)
y2 = tf.squeeze(y2, axis=-1)
# Apply horizontal shear
if shear_x_amount is not None:
x1_top = x1 - (shear_x_amount * y1)
x1_bottom = x1 - (shear_x_amount * y2)
x1 = tf.where(shear_x_amount < 0, x1_top, x1_bottom)
x2_top = x2 - (shear_x_amount * y1)
x2_bottom = x2 - (shear_x_amount * y2)
x2 = tf.where(shear_x_amount < 0, x2_bottom, x2_top)
# Apply vertical shear
if shear_y_amount is not None:
y1_left = y1 - (shear_y_amount * x1)
y1_right = y1 - (shear_y_amount * x2)
y1 = tf.where(shear_y_amount > 0, y1_right, y1_left)
y2_left = y2 - (shear_y_amount * x1)
y2_right = y2 - (shear_y_amount * x2)
y2 = tf.where(shear_y_amount > 0, y2_left, y2_right)
# Join the results:
boxes = tf.concat(
[
# Add dummy last axis for concat:
# (num_batches, num_boxes) -> (num_batches, num_boxes, 1)
x1[..., tf.newaxis],
y1[..., tf.newaxis],
x2[..., tf.newaxis],
y2[..., tf.newaxis],
],
axis=-1,
)
bounding_boxes = bounding_boxes.copy()
bounding_boxes["boxes"] = boxes
bounding_boxes = bounding_box.clip_to_image(
bounding_boxes, images=images, bounding_box_format="rel_xyxy"
)
bounding_boxes = bounding_box.convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
images=images,
dtype=self.compute_dtype,
)
return bounding_boxes
@staticmethod
def _format_transform(transform):
transform = tf.convert_to_tensor(transform, dtype=tf.float32)
return transform[tf.newaxis]
def get_config(self):
config = super().get_config()
config.update(
{
"x_factor": self.x_factor,
"y_factor": self.y_factor,
"interpolation": self.interpolation,
"fill_mode": self.fill_mode,
"fill_value": self.fill_value,
"bounding_box_format": self.bounding_box_format,
"seed": self.seed,
}
)
return config
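# Minimal numeric sketch (illustrative only, not part of the library) of the
# box-update rule described in `augment_bounding_boxes`: for a relative box
# (x1, y1, x2, y2) = (0.2, 0.2, 0.4, 0.6) and shear_x = 0.1, each x is shifted
# by -shear_x * y, and the final x1/x2 are chosen from the top/bottom
# candidates depending on the sign of shear_x.
if __name__ == "__main__":
    x1, y1, x2, y2 = 0.2, 0.2, 0.4, 0.6
    shear_x = 0.1
    x1_top, x1_bottom = x1 - shear_x * y1, x1 - shear_x * y2
    x2_top, x2_bottom = x2 - shear_x * y1, x2 - shear_x * y2
    new_x1 = x1_top if shear_x < 0 else x1_bottom  # ~0.14
    new_x2 = x2_bottom if shear_x < 0 else x2_top  # ~0.38
    print(new_x1, new_x2)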
| keras-cv/keras_cv/layers/preprocessing/random_shear.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/random_shear.py",
"repo_id": "keras-cv",
"token_count": 6683
} | 61 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from absl.testing import parameterized
from keras_cv import layers
from keras_cv.tests.test_case import TestCase
TEST_CONFIGURATIONS = [
("AutoContrast", layers.AutoContrast, {"value_range": (0, 255)}),
("ChannelShuffle", layers.ChannelShuffle, {}),
("Equalization", layers.Equalization, {"value_range": (0, 255)}),
(
"RandomCropAndResize",
layers.RandomCropAndResize,
{
"target_size": (224, 224),
"crop_area_factor": (0.8, 1.0),
"aspect_ratio_factor": (3 / 4, 4 / 3),
},
),
(
"Resizing",
layers.Resizing,
{
"height": 224,
"width": 224,
},
),
("Grayscale", layers.Grayscale, {}),
("GridMask", layers.GridMask, {}),
(
"Posterization",
layers.Posterization,
{"bits": 3, "value_range": (0, 255)},
),
("RandomBrightness", layers.RandomBrightness, {"factor": 0.5}),
(
"RandomColorDegeneration",
layers.RandomColorDegeneration,
{"factor": 0.5},
),
(
"RandomCutout",
layers.RandomCutout,
{"height_factor": 0.2, "width_factor": 0.2},
),
(
"RandomHue",
layers.RandomHue,
{"factor": 0.5, "value_range": (0, 255)},
),
(
"RandomChannelShift",
layers.RandomChannelShift,
{"value_range": (0, 255), "factor": 0.5},
),
(
"RandomColorJitter",
layers.RandomColorJitter,
{
"value_range": (0, 255),
"brightness_factor": (-0.2, 0.5),
"contrast_factor": (0.5, 0.9),
"saturation_factor": (0.5, 0.9),
"hue_factor": (0.5, 0.9),
"seed": 1,
},
),
(
"RandomContrast",
layers.RandomContrast,
{"value_range": (0, 255), "factor": 0.5},
),
("RandomFlip", layers.RandomFlip, {"mode": "horizontal"}),
(
"RandomGaussianBlur",
layers.RandomGaussianBlur,
{"kernel_size": 3, "factor": (0.0, 3.0)},
),
("RandomJpegQuality", layers.RandomJpegQuality, {"factor": (75, 100)}),
("RandomRotation", layers.RandomRotation, {"factor": 0.5}),
("RandomSaturation", layers.RandomSaturation, {"factor": 0.5}),
(
"RandomSharpness",
layers.RandomSharpness,
{"factor": 0.5, "value_range": (0, 255)},
),
("RandomShear", layers.RandomShear, {"x_factor": 0.3, "x_factor": 0.3}),
(
"RandomTranslation",
layers.RandomTranslation,
{"height_factor": 0.5, "width_factor": 0.5},
),
("Solarization", layers.Solarization, {"value_range": (0, 255)}),
(
"RandomZoom",
layers.RandomZoom,
{"height_factor": 0.2, "width_factor": 0.5},
),
(
"RandomCrop",
layers.RandomCrop,
{
"height": 100,
"width": 200,
},
),
(
"Rescaling",
layers.Rescaling,
{
"scale": 1,
"offset": 0.5,
},
),
]
class WithLabelsTest(TestCase):
@parameterized.named_parameters(
*TEST_CONFIGURATIONS,
("CutMix", layers.CutMix, {}),
("Mosaic", layers.Mosaic, {}),
)
def test_can_run_with_labels(self, layer_cls, init_args):
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(3, 512, 512, 3), minval=0, maxval=255, dtype=tf.float32
)
labels = tf.ones((3,), dtype=tf.float32)
inputs = {"images": img, "labels": labels}
outputs = layer(inputs)
self.assertIn("labels", outputs)
# this has to be a separate test case to exclude CutMix, MixUp, Mosaic etc.
@parameterized.named_parameters(*TEST_CONFIGURATIONS)
def test_can_run_with_labels_single_image(self, layer_cls, init_args):
layer = layer_cls(**init_args)
img = tf.random.uniform(
shape=(512, 512, 3), minval=0, maxval=1, dtype=tf.float32
)
labels = tf.ones((), dtype=tf.float32)
inputs = {"images": img, "labels": labels}
outputs = layer(inputs)
self.assertIn("labels", outputs)
| keras-cv/keras_cv/layers/preprocessing/with_labels_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing/with_labels_test.py",
"repo_id": "keras-cv",
"token_count": 2235
} | 62 |
# Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.global_random_flip import (
GlobalRandomFlip,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class GlobalRandomFlipTest(TestCase):
def test_augment_random_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_augment_specific_random_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.array(
[[[1, 1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
bounding_boxes = np.array([[[1, 1, 2, 3, 4, 5, 1]] * 2] * 2).astype(
"float32"
)
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
flipped_point_clouds = np.array(
[[[1, -1, 2, 3, 4, 5, 6, 7, 8, 9]] * 2] * 2
).astype("float32")
flipped_bounding_boxes = np.array(
[[[1, -1, 2, 3, 4, 5, -1]] * 2] * 2
).astype("float32")
self.assertAllClose(outputs[POINT_CLOUDS], flipped_point_clouds)
self.assertAllClose(outputs[BOUNDING_BOXES], flipped_bounding_boxes)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = GlobalRandomFlip()
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_noop_raises_error(self):
with self.assertRaisesRegexp(
ValueError, "must flip over at least 1 axis"
):
_ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=False)
def test_flip_x_or_z_raises_error(self):
with self.assertRaisesRegexp(
ValueError, "only supports flipping over the Y"
):
_ = GlobalRandomFlip(flip_x=True, flip_y=False, flip_z=False)
with self.assertRaisesRegexp(
ValueError, "only supports flipping over the Y"
):
_ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=True)
| keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip_test.py/0 | {
"file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_flip_test.py",
"repo_id": "keras-cv",
"token_count": 1325
} | 63 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend import random
@keras_cv_export("keras_cv.layers.DropPath")
class DropPath(keras.layers.Layer):
"""
Implements the DropPath layer. DropPath randomly drops samples during
    training with a probability of `rate`. Note that this layer drops
    individual samples within a batch and not the entire batch: it randomly
    zeroes whole samples, whereas StochasticDepth randomly drops the residual
    branch for the entire batch.
References:
- [FractalNet](https://arxiv.org/abs/1605.07648v4).
- [rwightman/pytorch-image-models](https://github.com/rwightman/pytorch-image-models/blob/7c67d6aca992f039eece0af5f7c29a43d48c00e4/timm/models/layers/drop.py#L135)
Args:
rate: float, the probability of the residual branch being dropped.
seed: (Optional) integer. Used to create a random seed.
Usage:
`DropPath` can be used in any network as follows:
```python
# (...)
input = tf.ones((1, 3, 3, 1), dtype=tf.float32)
residual = keras.layers.Conv2D(1, 1)(input)
    output = input + keras_cv.layers.DropPath()(residual)
# (...)
```
""" # noqa: E501
def __init__(self, rate=0.5, seed=None, **kwargs):
super().__init__(**kwargs)
self.rate = rate
self._seed_val = seed
self.seed = random.SeedGenerator(seed=seed)
def call(self, x, training=None):
if self.rate == 0.0 or not training:
return x
else:
batch_size = x.shape[0] or ops.shape(x)[0]
drop_map_shape = (batch_size,) + (1,) * (len(x.shape) - 1)
drop_map = ops.cast(
random.uniform(drop_map_shape, seed=self.seed) > self.rate,
x.dtype,
)
x = x / (1.0 - self.rate)
x = x * drop_map
return x
def get_config(self):
config = super().get_config()
config.update({"rate": self.rate, "seed": self._seed_val})
return config
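# A minimal, illustrative sketch (not part of the library API) of the
# per-sample behaviour described in the docstring above: with `rate=0.5`,
# each sample in the batch is either zeroed out entirely or rescaled by
# 1 / (1 - rate). The shapes and seed below are arbitrary example values.
if __name__ == "__main__":
    layer = DropPath(rate=0.5, seed=42)
    x = ops.ones((8, 4, 4, 3))
    y = layer(x, training=True)
    # Each entry printed below should be either 0.0 (dropped sample) or 2.0.
    print(ops.max(ops.reshape(y, (8, -1)), axis=1))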
| keras-cv/keras_cv/layers/regularization/drop_path.py/0 | {
"file_path": "keras-cv/keras_cv/layers/regularization/drop_path.py",
"repo_id": "keras-cv",
"token_count": 1065
} | 64 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from tensorflow.keras import layers
from keras_cv.api_export import keras_cv_export
@keras_cv_export("keras_cv.layers.PatchingAndEmbedding")
class PatchingAndEmbedding(layers.Layer):
"""
Layer to patchify images, prepend a class token, positionally embed and
create a projection of patches for Vision Transformers
The layer expects a batch of input images and returns batches of patches,
    flattened as a sequence and projected onto `project_dim`. If the height and
width of the images aren't divisible by the patch size, the supplied padding
type is used (or 'VALID' by default).
Reference:
An Image is Worth 16x16 Words: Transformers for Image Recognition at
Scale by Alexey Dosovitskiy et al. (https://arxiv.org/abs/2010.11929)
Args:
        project_dim: the dimensionality of the patch projection
patch_size: the patch size
padding: default 'VALID', the padding to apply for patchifying images
Returns:
Patchified and linearly projected input images, including a prepended
learnable class token with shape (batch, num_patches+1, project_dim)
Basic usage:
```
images = #... batch of images
encoded_patches = keras_cv.layers.PatchingAndEmbedding(
project_dim=project_dim,
        patch_size=patch_size)(images)
print(encoded_patches.shape) # (1, 197, 1024)
```
"""
def __init__(self, project_dim, patch_size, padding="VALID", **kwargs):
super().__init__(**kwargs)
self.project_dim = project_dim
self.patch_size = patch_size
self.padding = padding
if patch_size < 0:
raise ValueError(
"The patch_size cannot be a negative number. Received "
f"{patch_size}"
)
if padding not in ["VALID", "SAME"]:
raise ValueError(
f"Padding must be either 'SAME' or 'VALID', but {padding} was "
"passed."
)
self.projection = layers.Conv2D(
filters=self.project_dim,
kernel_size=self.patch_size,
strides=self.patch_size,
padding=self.padding,
)
def build(self, input_shape):
self.class_token = self.add_weight(
shape=[1, 1, self.project_dim], name="class_token", trainable=True
)
self.num_patches = (
input_shape[1]
// self.patch_size
* input_shape[2]
// self.patch_size
)
self.position_embedding = layers.Embedding(
input_dim=self.num_patches + 1, output_dim=self.project_dim
)
def call(
self,
images,
interpolate=False,
interpolate_width=None,
interpolate_height=None,
patch_size=None,
):
"""Calls the PatchingAndEmbedding layer on a batch of images.
Args:
images: A `tf.Tensor` of shape [batch, width, height, depth]
interpolate: A `bool` to enable or disable interpolation
interpolate_height: An `int` representing interpolated height
interpolate_width: An `int` representing interpolated width
patch_size: An `int` representing the new patch size if
interpolation is used
Returns:
`A tf.Tensor` of shape [batch, patch_num+1, embedding_dim]
"""
# Turn images into patches and project them onto `project_dim`
patches = self.projection(images)
patch_shapes = tf.shape(patches)
patches_flattened = tf.reshape(
patches,
shape=(
patch_shapes[0],
                patch_shapes[-3] * patch_shapes[-2],
patch_shapes[-1],
),
)
# Add learnable class token before linear projection and positional
# embedding
flattened_shapes = tf.shape(patches_flattened)
class_token_broadcast = tf.cast(
tf.broadcast_to(
self.class_token,
[flattened_shapes[0], 1, flattened_shapes[-1]],
),
dtype=patches_flattened.dtype,
)
patches_flattened = tf.concat(
[class_token_broadcast, patches_flattened], 1
)
positions = tf.range(start=0, limit=self.num_patches + 1, delta=1)
if interpolate and None not in (
interpolate_width,
interpolate_height,
patch_size,
):
(
interpolated_embeddings,
class_token,
) = self.__interpolate_positional_embeddings(
self.position_embedding(positions),
interpolate_width,
interpolate_height,
patch_size,
)
addition = patches_flattened + interpolated_embeddings
encoded = tf.concat([class_token, addition], 1)
elif interpolate and None in (
interpolate_width,
interpolate_height,
patch_size,
):
raise ValueError(
"`None of `interpolate_width`, `interpolate_height` and "
"`patch_size` cannot be None if `interpolate` is True"
)
else:
encoded = patches_flattened + self.position_embedding(positions)
return encoded
def __interpolate_positional_embeddings(
self, embedding, height, width, patch_size
):
"""
Allows for pre-trained position embedding interpolation. This trick
allows you to fine-tune a ViT on higher resolution images than it was
trained on.
Based on:
https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_tf_vit.py
"""
dimensionality = embedding.shape[-1]
class_token = tf.expand_dims(embedding[:1, :], 0)
patch_positional_embeddings = embedding[1:, :]
h0 = height // patch_size
w0 = width // patch_size
new_shape = tf.constant(int(math.sqrt(self.num_patches)))
interpolated_embeddings = tf.image.resize(
images=tf.reshape(
patch_positional_embeddings,
shape=(
1,
new_shape,
new_shape,
dimensionality,
),
),
size=(h0, w0),
method="bicubic",
)
reshaped_embeddings = tf.reshape(
tensor=interpolated_embeddings, shape=(1, -1, dimensionality)
)
# linear_projection = self.linear_projection(reshaped_embeddings)
# addition = linear_projection + reshaped_embeddings
# return tf.concat([class_token, addition], 1)
return reshaped_embeddings, class_token
def get_config(self):
config = {
"project_dim": self.project_dim,
"patch_size": self.patch_size,
"padding": self.padding,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
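# A minimal, illustrative sketch (not part of the library API): for a 224x224
# input with 16x16 patches the layer produces 196 patches plus one prepended
# class token, so the encoded sequence has length 197. The projection width
# (64) is an arbitrary example value.
if __name__ == "__main__":
    patcher = PatchingAndEmbedding(project_dim=64, patch_size=16)
    images = tf.ones((1, 224, 224, 3))
    encoded = patcher(images)
    print(encoded.shape)  # expected: (1, 197, 64)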
| keras-cv/keras_cv/layers/vit_layers.py/0 | {
"file_path": "keras-cv/keras_cv/layers/vit_layers.py",
"repo_id": "keras-cv",
"token_count": 3510
} | 65 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from absl.testing import parameterized
from keras_cv import losses as cv_losses
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import test_utils
class SerializationTest(TestCase):
@parameterized.named_parameters(
(
"FocalLoss",
cv_losses.FocalLoss,
{"alpha": 0.25, "gamma": 2, "from_logits": True},
),
("GIoULoss", cv_losses.GIoULoss, {"bounding_box_format": "xywh"}),
(
"BinaryPenaltyReducedFocalCrossEntropy",
cv_losses.BinaryPenaltyReducedFocalCrossEntropy,
{},
),
("SimCLRLoss", cv_losses.SimCLRLoss, {"temperature": 0.5}),
("SmoothL1Loss", cv_losses.SmoothL1Loss, {}),
)
def test_loss_serialization(self, loss_cls, init_args):
loss = loss_cls(**init_args)
config = loss.get_config()
self.assertAllInitParametersAreInConfig(loss_cls, config)
reconstructed_loss = loss_cls.from_config(config)
self.assertTrue(
test_utils.config_equals(
loss.get_config(), reconstructed_loss.get_config()
)
)
def assertAllInitParametersAreInConfig(self, loss_cls, config):
excluded_name = ["args", "kwargs", "*"]
parameter_names = {
v
for v in inspect.signature(loss_cls).parameters.keys()
if v not in excluded_name
}
intersection_with_config = {
v for v in config.keys() if v in parameter_names
}
self.assertSetEqual(parameter_names, intersection_with_config)
| keras-cv/keras_cv/losses/serialization_test.py/0 | {
"file_path": "keras-cv/keras_cv/losses/serialization_test.py",
"repo_id": "keras-cv",
"token_count": 915
} | 66 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientNetLite model preset configurations."""
backbone_presets_no_weights = {
"efficientnetlite_b0": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.0`."
),
"params": 3414176,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b0",
},
"efficientnetlite_b1": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.0` and `depth_coefficient=1.1`."
),
"params": 4190496,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b1",
},
"efficientnetlite_b2": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.1` and `depth_coefficient=1.2`."
),
"params": 4870320,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b2",
},
"efficientnetlite_b3": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.2` and `depth_coefficient=1.4`."
),
"params": 6994504,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b3",
},
"efficientnetlite_b4": {
"metadata": {
"description": (
"EfficientNet B-style architecture with 7 "
"convolutional blocks. This B-style model has "
"`width_coefficient=1.4` and `depth_coefficient=1.8`."
),
"params": 11840256,
"official_name": "EfficientNetLite",
"path": "EfficientNetLite",
},
"kaggle_handle": "gs://keras-cv-kaggle/efficientnetlite_b4",
},
}
backbone_presets_with_weights = {}
backbone_presets = {
**backbone_presets_no_weights,
**backbone_presets_with_weights,
}
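# A minimal, illustrative sketch (not part of the library API): the registry
# above is a plain dictionary, so preset metadata can be inspected directly,
# e.g. to compare parameter counts across the Lite variants.
if __name__ == "__main__":
    for name, preset in backbone_presets.items():
        print(name, preset["metadata"]["params"])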
| keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets.py",
"repo_id": "keras-cv",
"token_count": 1545
} | 67 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import (
MiTBackbone,
)
from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone_presets import ( # noqa: E501
backbone_presets,
)
from keras_cv.utils.python_utils import classproperty
ALIAS_DOCSTRING = """MiT model.
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/).
Args:
include_rescaling: bool, whether to rescale the inputs. If set to
True, inputs will be passed through a `Rescaling(scale=1 / 255)`
layer. Defaults to True.
input_shape: optional shape tuple, defaults to (None, None, 3).
input_tensor: optional Keras tensor (i.e., output of `layers.Input()`)
to use as image input for the model.
Examples:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Randomly initialized backbone
model = {name}Backbone()
output = model(input_data)
```
""" # noqa: E501
class MiTB0Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b0", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {
"mit_b0_imagenet": copy.deepcopy(
backbone_presets["mit_b0_imagenet"]
),
}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return cls.presets
class MiTB1Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b1", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB2Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b2", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB3Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b3", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB4Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b4", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
class MiTB5Backbone(MiTBackbone):
def __new__(
cls,
include_rescaling=True,
input_shape=(224, 224, 3),
input_tensor=None,
**kwargs,
):
# Pack args in kwargs
kwargs.update(
{
"include_rescaling": include_rescaling,
"input_shape": input_shape,
"input_tensor": input_tensor,
}
)
return MiTBackbone.from_preset("mit_b5", **kwargs)
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return {}
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations."""
return {}
setattr(
MiTB0Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB0"),
)
setattr(
MiTB1Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB1"),
)
setattr(
MiTB2Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB2"),
)
setattr(
MiTB3Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB3"),
)
setattr(
MiTB4Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB4"),
)
setattr(
MiTB5Backbone,
"__doc__",
ALIAS_DOCSTRING.format(name="MiTB5"),
)
| keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_aliases.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_aliases.py",
"repo_id": "keras-cv",
"token_count": 3127
} | 68 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet18Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet50Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet101Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_aliases import (
ResNet152Backbone,
)
from keras_cv.models.backbones.resnet_v1.resnet_v1_backbone import (
ResNetBackbone,
)
from keras_cv.tests.test_case import TestCase
from keras_cv.utils.train import get_feature_extractor
class ResNetBackboneTest(TestCase):
def setUp(self):
self.input_batch = np.ones(shape=(2, 224, 224, 3))
def test_valid_call(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model(self.input_batch)
def test_valid_call_applications_model(self):
model = ResNet50Backbone()
model(self.input_batch)
def test_valid_call_with_rescaling(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=True,
)
model(self.input_batch)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_model(self):
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[2, 2, 2, 2],
stackwise_strides=[1, 2, 2, 2],
include_rescaling=False,
)
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v1_backbone.keras"
)
model.save(save_path)
restored_model = keras.saving.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, ResNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
@pytest.mark.large # Saving is slow, so mark these large.
def test_saved_alias_model(self):
model = ResNet50Backbone()
model_output = model(self.input_batch)
save_path = os.path.join(
self.get_temp_dir(), "resnet_v1_alias_backbone.keras"
)
model.save(save_path)
restored_model = keras.saving.load_model(save_path)
# Check we got the real object back.
# Note that these aliases serialized as the base class
self.assertIsInstance(restored_model, ResNetBackbone)
# Check that output matches.
restored_output = restored_model(self.input_batch)
self.assertAllClose(
ops.convert_to_numpy(model_output),
ops.convert_to_numpy(restored_output),
)
def test_feature_pyramid_inputs(self):
model = ResNet50Backbone()
backbone_model = get_feature_extractor(
model,
model.pyramid_level_inputs.values(),
model.pyramid_level_inputs.keys(),
)
input_size = 256
inputs = keras.Input(shape=[input_size, input_size, 3])
outputs = backbone_model(inputs)
levels = ["P2", "P3", "P4", "P5"]
self.assertEquals(list(outputs.keys()), levels)
self.assertEquals(
outputs["P2"].shape,
(None, input_size // 2**2, input_size // 2**2, 256),
)
self.assertEquals(
outputs["P3"].shape,
(None, input_size // 2**3, input_size // 2**3, 512),
)
self.assertEquals(
outputs["P4"].shape,
(None, input_size // 2**4, input_size // 2**4, 1024),
)
self.assertEquals(
outputs["P5"].shape,
(None, input_size // 2**5, input_size // 2**5, 2048),
)
@parameterized.named_parameters(
("one_channel", 1),
("four_channels", 4),
)
def test_application_variable_input_channels(self, num_channels):
# ResNet50 model
model = ResNetBackbone(
stackwise_filters=[64, 128, 256, 512],
stackwise_blocks=[3, 4, 6, 3],
stackwise_strides=[1, 2, 2, 2],
input_shape=(None, None, num_channels),
include_rescaling=False,
)
self.assertEqual(model.output_shape, (None, None, None, 2048))
@parameterized.named_parameters(
("18", ResNet18Backbone),
("50", ResNet50Backbone),
("101", ResNet101Backbone),
("152", ResNet152Backbone),
)
def test_specific_arch_forward_pass(self, arch_class):
backbone = arch_class()
backbone(tf.random.uniform(shape=[2, 256, 256, 3]))
| keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/backbones/resnet_v1/resnet_v1_backbone_test.py",
"repo_id": "keras-cv",
"token_count": 2570
} | 69 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image classifier model using pooling and dense layers."""
import copy
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.models.backbones.backbone_presets import backbone_presets
from keras_cv.models.backbones.backbone_presets import (
backbone_presets_with_weights,
)
from keras_cv.models.classification.image_classifier_presets import (
classifier_presets,
)
from keras_cv.models.task import Task
from keras_cv.utils.python_utils import classproperty
@keras_cv_export(
[
"keras_cv.models.ImageClassifier",
"keras_cv.models.classification.ImageClassifier",
]
)
class ImageClassifier(Task):
"""Image classifier with pooling and dense layer prediction head.
Args:
backbone: `keras.Model` instance, the backbone architecture of the
classifier called on the inputs. Pooling will be called on the last
dimension of the backbone output.
num_classes: int, number of classes to predict.
pooling: str, type of pooling layer. Must be one of "avg", "max".
activation: Optional `str` or callable, defaults to "softmax". The
activation function to use on the Dense layer. Set `activation=None`
to return the output logits.
Example:
```python
input_data = tf.ones(shape=(8, 224, 224, 3))
# Pretrained classifier (e.g., for imagenet categories)
model = keras_cv.models.ImageClassifier.from_preset(
"resnet50_v2_imagenet_classifier",
)
output = model(input_data)
# Pretrained backbone
backbone = keras_cv.models.ResNet50V2Backbone.from_preset(
"resnet50_v2_imagenet",
)
model = keras_cv.models.ImageClassifier(
backbone=backbone,
num_classes=4,
)
output = model(input_data)
# Randomly initialized backbone with a custom config
model = keras_cv.models.ImageClassifier(
backbone=keras_cv.models.ResNet50V2Backbone(),
num_classes=4,
)
output = model(input_data)
```
"""
def __init__(
self,
backbone,
num_classes,
pooling="avg",
activation="softmax",
**kwargs,
):
if pooling == "avg":
pooling_layer = keras.layers.GlobalAveragePooling2D(name="avg_pool")
elif pooling == "max":
pooling_layer = keras.layers.GlobalMaxPooling2D(name="max_pool")
else:
raise ValueError(
f'`pooling` must be one of "avg", "max". Received: {pooling}.'
)
inputs = backbone.input
x = backbone(inputs)
x = pooling_layer(x)
outputs = keras.layers.Dense(
num_classes,
activation=activation,
name="predictions",
)(x)
# Instantiate using Functional API Model constructor
super().__init__(
inputs=inputs,
outputs=outputs,
**kwargs,
)
# All references to `self` below this line
self.backbone = backbone
self.num_classes = num_classes
self.pooling = pooling
self.activation = activation
def get_config(self):
# Backbone serialized in `super`
config = super().get_config()
config.update(
{
"backbone": keras.layers.serialize(self.backbone),
"num_classes": self.num_classes,
"pooling": self.pooling,
"activation": self.activation,
}
)
return config
@classproperty
def presets(cls):
"""Dictionary of preset names and configurations."""
return copy.deepcopy({**backbone_presets, **classifier_presets})
@classproperty
def presets_with_weights(cls):
"""Dictionary of preset names and configurations that include
weights."""
return copy.deepcopy(
{**backbone_presets_with_weights, **classifier_presets}
)
@classproperty
def backbone_presets(cls):
"""Dictionary of preset names and configurations of compatible
backbones."""
return copy.deepcopy(backbone_presets)
| keras-cv/keras_cv/models/classification/image_classifier.py/0 | {
"file_path": "keras-cv/keras_cv/models/classification/image_classifier.py",
"repo_id": "keras-cv",
"token_count": 1944
} | 70 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for KerasCV model utils."""
from tensorflow import keras
from tensorflow.keras import layers
from keras_cv.models.legacy import utils
from keras_cv.tests.test_case import TestCase
class ModelUtilTestCase(TestCase):
def test_parse_model_inputs(self):
input_shape = (224, 244, 3)
inputs = utils.parse_model_inputs(input_shape, None)
self.assertEqual(inputs.shape.as_list(), list((None,) + input_shape))
input_tensor = layers.Input(shape=input_shape)
self.assertIs(
utils.parse_model_inputs(input_shape, input_tensor), input_tensor
)
def test_as_backbone_missing_backbone_level_outputs(self):
model = keras.models.Sequential()
model.add(layers.Conv2D(64, kernel_size=3, input_shape=(16, 16, 3)))
model.add(
layers.Conv2D(
32,
kernel_size=3,
)
)
model.add(layers.Dense(10))
with self.assertRaises(ValueError):
utils.as_backbone(model)
def test_as_backbone_util(self):
inp = layers.Input((16, 16, 3))
_backbone_level_outputs = {}
x = layers.Conv2D(64, kernel_size=3, input_shape=(16, 16, 3))(inp)
_backbone_level_outputs[2] = x
x = layers.Conv2D(
32,
kernel_size=3,
)(x)
_backbone_level_outputs[3] = x
out = layers.Dense(10)(x)
_backbone_level_outputs[4] = out
model = keras.models.Model(inputs=inp, outputs=out)
# when model has _backbone_level_outputs, it should not raise an error
model._backbone_level_outputs = _backbone_level_outputs
backbone = utils.as_backbone(model)
self.assertEqual(len(backbone.outputs), 3)
| keras-cv/keras_cv/models/legacy/utils_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/legacy/utils_test.py",
"repo_id": "keras-cv",
"token_count": 992
} | 71 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv import bounding_box
from keras_cv.api_export import keras_cv_export
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.layers.object_detection import box_matcher
from keras_cv.utils import target_gather
@keras_cv_export("keras_cv.models.retinanet.LabelEncoder")
class RetinaNetLabelEncoder(keras.layers.Layer):
"""Transforms the raw labels into targets for training.
This class has operations to generate targets for a batch of samples which
is made up of the input images, bounding boxes for the objects present and
their class ids. Targets are always represented in `center_yxwh` format.
This done for numerical reasons, to ensure numerical consistency when
training in any format.
Args:
bounding_box_format: The format of bounding boxes of input dataset.
Refer [to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more
details on supported bounding box formats.
anchor_generator: `keras_cv.layers.AnchorGenerator` instance to produce
anchor boxes. Boxes are then used to encode labels on a per-image
basis.
        positive_threshold: the float IOU threshold above which an anchor is
            considered a positive match to a ground truth box.
        negative_threshold: the float IOU threshold below which an anchor is
            considered a negative match to a ground truth box.
box_variance: The scaling factors used to scale the bounding box
targets, defaults to (0.1, 0.1, 0.2, 0.2).
background_class: (Optional) The class ID used for the background class,
defaults to -1.
ignore_class: (Optional) The class ID used for the ignore class,
defaults to -2.
""" # noqa: E501
def __init__(
self,
bounding_box_format,
anchor_generator,
positive_threshold=0.5,
negative_threshold=0.4,
box_variance=(0.1, 0.1, 0.2, 0.2),
background_class=-1.0,
ignore_class=-2.0,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.anchor_generator = anchor_generator
self.box_variance = ops.array(box_variance, "float32")
self.background_class = background_class
self.ignore_class = ignore_class
self.matched_boxes_metric = MatchedBoxesMetric(
name="percent_boxes_matched_with_anchor"
)
self.positive_threshold = positive_threshold
self.negative_threshold = negative_threshold
self.box_matcher = box_matcher.BoxMatcher(
thresholds=[negative_threshold, positive_threshold],
match_values=[-1, -2, 1],
force_match_for_each_col=False,
)
self.box_variance_tuple = box_variance
self.built = True
def _encode_sample(self, box_labels, anchor_boxes, image_shape):
"""Creates box and classification targets for a batched sample
Matches ground truth boxes to anchor boxes based on IOU.
1. Calculates the pairwise IOU for the M `anchor_boxes` and N `gt_boxes`
to get a `(M, N)` shaped matrix.
        2. The ground truth box with the maximum IOU in each row is assigned to
        the anchor box provided the IOU is greater than `positive_threshold`.
        3. If the maximum IOU in a row is less than `negative_threshold`, the
        anchor box is assigned the background class.
4. The remaining anchor boxes that do not have any class assigned are
ignored during training.
        Args:
            box_labels: A dictionary with keys `"boxes"` and `"classes"`.
            `"boxes"` is a float tensor of shape
            `(batch_size, num_objects, 4)` holding the ground truth boxes in
            `bounding_box_format`, and `"classes"` is a float tensor of shape
            `(batch_size, num_objects, 1)` holding the ground truth class ids.
            anchor_boxes: A float tensor with the shape `(total_anchors, 4)`
            representing all the anchor boxes for a given input image shape,
            in `bounding_box_format`.
            image_shape: The shape of the input image as a
            `(height, width, channels)` tuple.
        Returns:
            A dictionary with keys `"boxes"` (the encoded box regression
            targets for each anchor) and `"classes"` (the per-anchor class
            targets, with unmatched anchors assigned the background class and
            weakly matched anchors assigned the ignore class).
"""
gt_boxes = box_labels["boxes"]
gt_classes = box_labels["classes"]
iou_matrix = bounding_box.compute_iou(
anchor_boxes,
gt_boxes,
bounding_box_format=self.bounding_box_format,
image_shape=image_shape,
)
matched_gt_idx, matched_vals = self.box_matcher(iou_matrix)
matched_vals = ops.expand_dims(matched_vals, axis=-1)
positive_mask = ops.cast(ops.equal(matched_vals, 1), self.dtype)
ignore_mask = ops.cast(ops.equal(matched_vals, -2), self.dtype)
matched_gt_boxes = target_gather._target_gather(
gt_boxes, matched_gt_idx
)
matched_gt_boxes = ops.reshape(
matched_gt_boxes, (-1, ops.shape(matched_gt_boxes)[1], 4)
)
box_target = bounding_box._encode_box_to_deltas(
anchors=anchor_boxes,
boxes=matched_gt_boxes,
anchor_format=self.bounding_box_format,
box_format=self.bounding_box_format,
variance=self.box_variance,
image_shape=image_shape,
)
matched_gt_cls_ids = target_gather._target_gather(
gt_classes, matched_gt_idx
)
cls_target = ops.where(
ops.not_equal(positive_mask, 1.0),
self.background_class,
matched_gt_cls_ids,
)
cls_target = ops.where(
ops.equal(ignore_mask, 1.0), self.ignore_class, cls_target
)
label = ops.concatenate(
[box_target, ops.cast(cls_target, box_target.dtype)], axis=-1
)
# In the case that a box in the corner of an image matches with an all
# -1 box that is outside the image, we should assign the box to the
# ignore class. There are rare cases where a -1 box can be matched,
# resulting in a NaN during training. The unit test passing all -1s to
# the label encoder ensures that we properly handle this edge-case.
label = ops.where(
ops.expand_dims(ops.any(ops.isnan(label), axis=-1), axis=-1),
self.ignore_class,
label,
)
result = {"boxes": label[:, :, :4], "classes": label[:, :, 4]}
box_shape = ops.shape(gt_boxes)
batch_size = box_shape[0]
n_boxes = box_shape[1]
box_ids = ops.arange(n_boxes, dtype=matched_gt_idx.dtype)
matched_ids = ops.expand_dims(matched_gt_idx, axis=-1)
matches = box_ids == matched_ids
matches = ops.any(matches, axis=1)
self.matched_boxes_metric.update_state(
ops.zeros(
(
batch_size,
n_boxes,
),
dtype="int32",
),
ops.cast(matches, "int32"),
)
return result
def call(self, images, box_labels):
"""Creates box and classification targets for a batch
Args:
images: a batched [batch_size, H, W, C] image float `tf.Tensor`.
box_labels: a batched KerasCV style bounding box dictionary containing
bounding boxes and class labels. Should be in `bounding_box_format`.
"""
if isinstance(images, tf.RaggedTensor):
raise ValueError(
"`RetinaNetLabelEncoder`'s `call()` method does not "
"support RaggedTensor inputs for the `images` argument. "
f"Received `type(images)={type(images)}`."
)
image_shape = ops.shape(images)
image_shape = (image_shape[1], image_shape[2], image_shape[3])
box_labels = bounding_box.to_dense(box_labels)
if len(box_labels["classes"].shape) == 2:
box_labels["classes"] = ops.expand_dims(
box_labels["classes"], axis=-1
)
anchor_boxes = self.anchor_generator(image_shape=image_shape)
anchor_boxes = ops.concatenate(list(anchor_boxes.values()), axis=0)
anchor_boxes = bounding_box.convert_format(
anchor_boxes,
source=self.anchor_generator.bounding_box_format,
target=self.bounding_box_format,
image_shape=image_shape,
)
result = self._encode_sample(box_labels, anchor_boxes, image_shape)
encoded_box_targets = result["boxes"]
encoded_box_targets = ops.reshape(
encoded_box_targets, (-1, ops.shape(encoded_box_targets)[1], 4)
)
class_targets = result["classes"]
return encoded_box_targets, class_targets
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"anchor_generator": self.anchor_generator,
"positive_threshold": self.positive_threshold,
"negative_threshold": self.negative_threshold,
"box_variance": self.box_variance_tuple,
"background_class": self.background_class,
"ignore_class": self.ignore_class,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if "anchor_generator" in config and isinstance(
config["anchor_generator"], dict
):
config["anchor_generator"] = keras.layers.deserialize(
config["anchor_generator"]
)
return super().from_config(config)
class MatchedBoxesMetric(keras.metrics.BinaryAccuracy):
# Prevent `load_weights` from accessing metric
def load_own_variables(self, store):
return
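# A minimal, illustrative sketch (not part of the library API) of how the
# label encoder is wired up: an `AnchorGenerator` produces the anchors that
# ground truth boxes are matched against. The anchor configuration, image
# size, and boxes below are arbitrary example values, not recommended
# settings.
if __name__ == "__main__":
    from keras_cv.layers import AnchorGenerator
    anchor_generator = AnchorGenerator(
        bounding_box_format="xywh",
        sizes=[32.0, 64.0, 128.0, 256.0, 512.0],
        scales=[1.0],
        aspect_ratios=[1.0],
        strides=[2**i for i in range(3, 8)],
    )
    encoder = RetinaNetLabelEncoder(
        bounding_box_format="xywh", anchor_generator=anchor_generator
    )
    images = tf.ones((2, 128, 128, 3))
    labels = {
        "boxes": tf.constant(
            [[[10.0, 10.0, 20.0, 20.0]], [[30.0, 30.0, 40.0, 40.0]]]
        ),
        "classes": tf.constant([[0.0], [1.0]]),
    }
    box_targets, class_targets = encoder(images, labels)
    print(box_targets.shape, class_targets.shape)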
| keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_label_encoder.py",
"repo_id": "keras-cv",
"token_count": 4633
} | 72 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tensorflow import keras
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlock,
)
from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import (
DarknetConvBlockDepthwise,
)
class YoloXHead(keras.layers.Layer):
"""The YoloX prediction head.
Arguments:
num_classes: The number of classes to be considered for the
classification head.
bias_initializer: Bias Initializer for the final convolution layer for
the classification and regression heads. Defaults to None.
        width_multiplier: A float value used to calculate the base width of the
            model; this changes based on the detection model being used.
            Defaults to 1.0.
num_level: the number of levels in the FPN output. Defaults to 3.
activation: the activation applied after the BatchNorm layer. One of
"silu", "relu" or "leaky_relu". Defaults to "silu".
use_depthwise: a boolean value used to decide whether a depthwise conv
block should be used over a regular darknet block. Defaults to
`False`.
"""
def __init__(
self,
num_classes,
bias_initializer=None,
width_multiplier=1.0,
num_level=3,
activation="silu",
use_depthwise=False,
**kwargs,
):
super().__init__(**kwargs)
self.stems = []
self.classification_convs = []
self.regression_convs = []
self.classification_preds = []
self.regression_preds = []
self.objectness_preds = []
ConvBlock = (
DarknetConvBlockDepthwise if use_depthwise else DarknetConvBlock
)
for _ in range(num_level):
self.stems.append(
DarknetConvBlock(
filters=int(256 * width_multiplier),
kernel_size=1,
strides=1,
activation=activation,
)
)
self.classification_convs.append(
keras.Sequential(
[
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
]
)
)
self.regression_convs.append(
keras.Sequential(
[
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
ConvBlock(
filters=int(256 * width_multiplier),
kernel_size=3,
strides=1,
activation=activation,
),
]
)
)
self.classification_preds.append(
keras.layers.Conv2D(
filters=num_classes,
kernel_size=1,
strides=1,
padding="same",
bias_initializer=bias_initializer,
)
)
self.regression_preds.append(
keras.layers.Conv2D(
filters=4,
kernel_size=1,
strides=1,
padding="same",
bias_initializer=bias_initializer,
)
)
self.objectness_preds.append(
keras.layers.Conv2D(
filters=1,
kernel_size=1,
strides=1,
padding="same",
)
)
def call(self, inputs, training=False):
outputs = []
for i, p_i in enumerate(inputs):
stem = self.stems[i](p_i)
classes = self.classification_convs[i](stem)
classes = self.classification_preds[i](classes)
boxes_feat = self.regression_convs[i](stem)
boxes = self.regression_preds[i](boxes_feat)
objectness = self.objectness_preds[i](boxes_feat)
output = keras.layers.Concatenate(axis=-1)(
[boxes, objectness, classes]
)
outputs.append(output)
return outputs
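# A minimal, illustrative sketch (not part of the library API): the head is
# called on a list of FPN feature maps (one per level) and returns one
# prediction map per level whose last dimension is 4 box + 1 objectness +
# `num_classes` channels. Feature shapes and `num_classes` below are arbitrary
# example values.
if __name__ == "__main__":
    import tensorflow as tf
    head = YoloXHead(num_classes=20)
    features = [
        tf.random.uniform((2, 52, 52, 256)),
        tf.random.uniform((2, 26, 26, 512)),
        tf.random.uniform((2, 13, 13, 1024)),
    ]
    for output in head(features):
        print(output.shape)  # last dimension expected to be 4 + 1 + 20 = 25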
| keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head.py/0 | {
"file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_head.py",
"repo_id": "keras-cv",
"token_count": 2949
} | 73 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras_cv.backend import keras
from keras_cv.backend import ops
from keras_cv.backend.config import keras_3
from keras_cv.models import BASNet
from keras_cv.models import ResNet18Backbone
from keras_cv.tests.test_case import TestCase
class BASNetTest(TestCase):
def test_basnet_construction(self):
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
@pytest.mark.large
def test_basnet_call(self):
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
images = np.random.uniform(size=(2, 64, 64, 3))
_ = model(images)
_ = model.predict(images)
@pytest.mark.large
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_weights_change(self):
input_size = [64, 64, 3]
target_size = [64, 64, 1]
images = np.ones([1] + input_size)
labels = np.random.uniform(size=[1] + target_size)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
ds = ds.repeat(2)
ds = ds.batch(2)
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
model_metrics = ["accuracy"]
if keras_3():
model_metrics = ["accuracy" for _ in range(8)]
model.compile(
optimizer="adam",
loss=keras.losses.BinaryCrossentropy(),
metrics=model_metrics,
)
original_weights = model.refinement_head.get_weights()
model.fit(ds, epochs=1, batch_size=1)
updated_weights = model.refinement_head.get_weights()
for w1, w2 in zip(original_weights, updated_weights):
self.assertNotAllEqual(w1, w2)
self.assertFalse(ops.any(ops.isnan(w2)))
@pytest.mark.large
def test_with_model_preset_forward_pass(self):
self.skipTest("Skipping preset test until BASNet weights are added.")
model = BASNet.from_preset(
"basnet_resnet34",
)
image = np.ones((1, 288, 288, 3))
output = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
output = output[0]
expected_output = np.zeros((1, 288, 288, 1))
self.assertAllClose(output, expected_output)
@pytest.mark.large
def test_saved_model(self):
target_size = [64, 64, 3]
backbone = ResNet18Backbone()
model = BASNet(
input_shape=[64, 64, 3], backbone=backbone, num_classes=1
)
input_batch = np.ones(shape=[2] + target_size)
model_output = model(input_batch)
save_path = os.path.join(self.get_temp_dir(), "model.keras")
if keras_3():
model.save(save_path)
else:
model.save(save_path, save_format="keras_v3")
# Free up model memory
del model
gc.collect()
restored_model = keras.models.load_model(save_path)
# Check we got the real object back.
self.assertIsInstance(restored_model, BASNet)
# Check that output matches.
restored_output = restored_model(input_batch)
self.assertAllClose(model_output, restored_output)
@pytest.mark.large
class BASNetSmokeTest(TestCase):
@parameterized.named_parameters(
*[(preset, preset) for preset in ["resnet18", "resnet34"]]
)
def test_backbone_preset(self, preset):
model = BASNet.from_preset(
preset,
num_classes=1,
)
xs = np.random.uniform(size=(1, 128, 128, 3))
output = model(xs)[0]
self.assertEqual(output.shape, (1, 128, 128, 1))
| keras-cv/keras_cv/models/segmentation/basnet/basnet_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/basnet/basnet_test.py",
"repo_id": "keras-cv",
"token_count": 2014
} | 74 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAM model preset configurations."""
sam_presets = {
"sam_base_sa1b": {
"metadata": {
"description": "The base SAM model trained on the SA1B dataset.",
"params": 93_735_728,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_base_sa1b/2",
},
"sam_large_sa1b": {
"metadata": {
"description": "The large SAM model trained on the SA1B dataset.",
"params": 312_343_088,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_large_sa1b/2",
},
"sam_huge_sa1b": {
"metadata": {
"description": "The huge SAM model trained on the SA1B dataset.",
"params": 641_090_864,
"official_name": "SAM",
"path": "segment_anything",
},
"kaggle_handle": "kaggle://keras/sam/keras/sam_huge_sa1b/2",
},
}
| keras-cv/keras_cv/models/segmentation/segment_anything/sam_presets.py/0 | {
"file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_presets.py",
"repo_id": "keras-cv",
"token_count": 676
} | 75 |
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tensorflow.keras import mixed_precision
from keras_cv.backend import ops
from keras_cv.backend import random
from keras_cv.models import StableDiffusion
from keras_cv.tests.test_case import TestCase
@pytest.mark.tf_only
class StableDiffusionTest(TestCase):
@pytest.mark.large
def test_end_to_end_golden_value(self):
self.skipTest("TODO: #2246 values differ for Keras2 and Keras3 TF")
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
img = stablediff.text_to_image(prompt, seed=1337, num_steps=5)
self.assertAllEqual(img[0][13:14, 13:14, :][0][0], [66, 38, 185])
# Verify that the step-by-step creation flow creates an identical output
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
img,
stablediff.generate_image(text_encoding, seed=1337, num_steps=5),
atol=1e-4,
)
@pytest.mark.extra_large
def test_image_encoder_golden_value(self):
stablediff = StableDiffusion(128, 128)
outputs = stablediff.image_encoder.predict(ops.ones((1, 128, 128, 3)))
self.assertAllClose(
outputs[0][1:4][0][0],
[2.451568, 1.607522, -0.546311, -1.194388],
atol=5e-4,
)
@pytest.mark.extra_large
def test_text_encoder_golden_value(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
text_encoding[0][1][0:5],
[0.029033, -1.325784, 0.308457, -0.061469, 0.03983],
atol=1e-4,
)
@pytest.mark.extra_large
def test_text_tokenizer_golden_value(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
text_encoding = stablediff.tokenizer.encode(prompt)
self.assertEqual(
text_encoding[0:5],
[49406, 320, 27111, 9038, 320],
)
@pytest.mark.extra_large
def test_mixed_precision(self):
try:
mixed_precision.set_global_policy("mixed_float16")
stablediff = StableDiffusion(128, 128)
_ = stablediff.text_to_image("Testing123 haha!", num_steps=2)
        except Exception:
            raise
finally:
# Clean up global policy
mixed_precision.set_global_policy("float32")
@pytest.mark.extra_large
def test_generate_image_rejects_noise_and_seed(self):
stablediff = StableDiffusion(128, 128)
with self.assertRaisesRegex(
ValueError,
r"`diffusion_noise` and `seed` should not both be passed",
):
_ = stablediff.generate_image(
stablediff.encode_text("thou shall not render"),
diffusion_noise=random.normal((1, 16, 16, 4), seed=42),
seed=1337,
)
@pytest.mark.extra_large
class StableDiffusionMultiFrameworkTest(TestCase):
@pytest.mark.filterwarnings("ignore::UserWarning") # Torch + jit_compile
def test_end_to_end(self):
prompt = "a caterpillar smoking a hookah while sitting on a mushroom"
stablediff = StableDiffusion(128, 128)
img = stablediff.text_to_image(prompt, seed=1337, num_steps=5)
# Verify that the step-by-step creation flow creates an identical output
text_encoding = stablediff.encode_text(prompt)
self.assertAllClose(
img,
stablediff.generate_image(text_encoding, seed=1337, num_steps=5),
atol=1e-4,
)
| keras-cv/keras_cv/models/stable_diffusion/stable_diffusion_test.py/0 | {
"file_path": "keras-cv/keras_cv/models/stable_diffusion/stable_diffusion_test.py",
"repo_id": "keras-cv",
"token_count": 1896
} | 76 |
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import keras_cv # noqa: E402
BUCKET = "keras-cv-kaggle"
def to_snake_case(name):
name = re.sub(r"\W+", "", name)
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
return name
def convert_backbone_presets():
# Save and upload Backbone presets
backbone_models = [
keras_cv.models.ResNetBackbone,
keras_cv.models.ResNet18Backbone,
keras_cv.models.ResNet34Backbone,
keras_cv.models.ResNet50Backbone,
keras_cv.models.ResNet101Backbone,
keras_cv.models.ResNet152Backbone,
keras_cv.models.ResNetV2Backbone,
keras_cv.models.ResNet18V2Backbone,
keras_cv.models.ResNet34V2Backbone,
keras_cv.models.ResNet50V2Backbone,
keras_cv.models.ResNet101V2Backbone,
keras_cv.models.ResNet152V2Backbone,
keras_cv.models.YOLOV8Backbone,
keras_cv.models.MobileNetV3Backbone,
keras_cv.models.MobileNetV3SmallBackbone,
keras_cv.models.MobileNetV3LargeBackbone,
keras_cv.models.EfficientNetV2Backbone,
keras_cv.models.EfficientNetV2B0Backbone,
keras_cv.models.EfficientNetV2B1Backbone,
keras_cv.models.EfficientNetV2B2Backbone,
keras_cv.models.EfficientNetV2B3Backbone,
keras_cv.models.EfficientNetV2SBackbone,
keras_cv.models.EfficientNetV2MBackbone,
keras_cv.models.EfficientNetV2LBackbone,
keras_cv.models.CSPDarkNetBackbone,
keras_cv.models.DenseNetBackbone,
keras_cv.src.models.EfficientNetV1Backbone,
keras_cv.src.models.EfficientNetLiteBackbone,
keras_cv.models.MiTBackbone,
keras_cv.models.ViTDetBackbone,
keras_cv.models.CenterPillarBackbone,
]
for backbone_cls in backbone_models:
for preset in backbone_cls.presets:
backbone = backbone_cls.from_preset(
preset, name=to_snake_case(backbone_cls.__name__)
)
save_weights = preset in backbone_cls.presets_with_weights
save_to_preset(
backbone,
preset,
save_weights=save_weights,
config_filename="config.json",
)
            # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
def convert_task_presets():
# Save and upload task presets
task_models = [
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
keras_cv.models.ImageClassifier,
keras_cv.models.DeepLabV3Plus,
# keras_cv.models.SegFormer,
keras_cv.models.SegmentAnythingModel,
]
for task_cls in task_models:
# Remove backbone-specific keys
task_preset_keys = set(task_cls.presets) ^ set(
task_cls.backbone_presets
)
for preset in task_preset_keys:
save_weights = preset in task_cls.presets_with_weights
kwargs = {"name": to_snake_case(task_cls.__name__)}
if task_cls in [
keras_cv.models.RetinaNet,
keras_cv.models.YOLOV8Detector,
]:
kwargs.update({"bounding_box_format": "xywh"})
task = task_cls.from_preset(preset, **kwargs)
else:
task = task_cls.from_preset(preset, **kwargs)
save_to_preset(
task,
preset,
save_weights=save_weights,
config_filename="config.json",
)
            # Delete first to clean up any existing version.
os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
for root, _, files in os.walk(preset):
for file in files:
path = os.path.join(BUCKET, root, file)
os.system(
f"gcloud storage objects update gs://{path} "
"--add-acl-grant=entity=AllUsers,role=READER"
)
if __name__ == "__main__":
from keras_cv.src.utils.preset_utils import save_to_preset # noqa: E402
convert_backbone_presets()
convert_task_presets()
| keras-cv/keras_cv/tools/convert_presets.py/0 | {
"file_path": "keras-cv/keras_cv/tools/convert_presets.py",
"repo_id": "keras-cv",
"token_count": 2620
} | 77 |
[tool.black]
line-length = 80
[tool.isort]
profile = "black"
force_single_line = "True"
known_first_party = ["keras_cv", "tests"]
default_section = "THIRDPARTY"
line_length = 80
| keras-cv/pyproject.toml/0 | {
"file_path": "keras-cv/pyproject.toml",
"repo_id": "keras-cv",
"token_count": 70
} | 78 |
if [ "$#" -ne 2 ]; then
  echo USAGE: ./upload_weights.sh WEIGHTS_PATH GCS_PATH
exit 1
fi
WEIGHTS=$1
GCS_PATH=$2
echo Checksum: $(shasum -a 256 $WEIGHTS)
gsutil cp $WEIGHTS $GCS_PATH/
gsutil acl ch -u AllUsers:R $GCS_PATH/$WEIGHTS
| keras-cv/shell/weights/upload_weights.sh/0 | {
"file_path": "keras-cv/shell/weights/upload_weights.sh",
"repo_id": "keras-cv",
"token_count": 104
} | 79 |
## コールバックの使い方
コールバックは訓練中で適用される関数集合です.訓練中にモデル内部の状態と統計量を可視化する際に,コールバックを使います.`Sequential`と`Model`クラスの`.fit()`メソッドに(キーワード引数`callbacks`として)コールバックのリストを渡すことができます.コールバックに関連するメソッドは,訓練の各段階で呼び出されます.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L145)</span>
### Callback
```python
keras.callbacks.Callback()
```
この抽象基底クラスは新しいコールバックを構築するために使用されます.
__プロパティ__
- __params__: 辞書.訓練のパラメータ(例: 冗長性,バッチサイズ,エポック数...).
- __model__: `keras.models.Model`のインスタンス.学習されたモデルへの参照.
コールバック関数が引数としてとる辞書の`logs`は,現在のバッチ数かエポック数に関連したデータのキーを含みます.
現在,`Sequential`モデルクラスの`.fit()`メソッドは,そのコールバックに渡す`logs`に以下のデータが含まれます.
- __on_epoch_end__: ログは`acc`と`loss`を含み,オプションとして(`fit`内のバリデーションが有効になっている場合は)`val_loss`,(バリデーションと精度の監視が有効になっている場合は)`val_acc`を含みます.
- __on_batch_begin__: ログは現在のバッチのサンプル数`size`を含みます.
- __on_batch_end__: ログは`loss`と(精度の監視が有効になっている場合は)オプションとして`acc`を含みます.
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L201)</span>
### BaseLogger
```python
keras.callbacks.BaseLogger()
```
監視されている評価値のエポック平均を蓄積するコールバックです.
このコールバックは全Kerasモデルに自動的に適用されます.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L230)</span>
### TerminateOnNaN
```python
keras.callbacks.TerminateOnNaN()
```
損失がNaNになった時に訓練を終了するコールバックです.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L246)</span>
### ProgbarLogger
```python
keras.callbacks.ProgbarLogger(count_mode='samples')
```
標準出力に評価値を出力するコールバックです.
__引数__
- __count_mode__: "steps"か"samples"の一方.サンプルかステップ(バッチ)のどちらをプログレスバーの集計に使うか.
__Raises__
- __ValueError__: `count_mode`の値が不正のとき.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L313)</span>
### History
```python
keras.callbacks.History()
```
`History`オブジェクトにイベントを記録するコールバックです.
このコールバックは全Kerasモデルに自動的に適用されます.`History`オブジェクトはモデルの`fit`メソッドの戻り値として取得します.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L332)</span>
### ModelCheckpoint
```python
keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
```
各エポック終了後にモデルを保存します.
`filepath`は,(`on_epoch_end`で渡された)`epoch`の値と`logs`のキーで埋められた書式設定オプションを含むことができます.
例えば,`filepath`が`weights.{epoch:02d}-{val_loss:.2f}.hdf5`の場合,複数のファイルがエポック数とバリデーションロスの値を付与して保存されます.
__引数__
- __filepath__: 文字列,モデルファイルを保存するパス.
- __monitor__: 監視する値.
- __verbose__: 冗長モード, 0 または 1.
- __save_best_only__: `save_best_only=True`の場合,監視しているデータによって最新の最良モデルが上書きされません.
- __mode__: {auto, min, max}の内の一つが選択されます.`save_best_only=True`ならば,現在保存されているファイルを上書きするかは,監視されている値の最大化か最小化によって決定されます.`val_acc`の場合,この引数は`max`となり,`val_loss`の場合は`min`になります.`auto`モードでは,最大化・最小化のいずれかを監視されている値の名前から自動的に推定します.
- __save_weights_only__: Trueなら,モデルの重みが保存されます (`model.save_weights(filepath)`),そうでないなら,モデルの全体が保存されます (`model.save(filepath)`).
- __period__: チェックポイント間の間隔(エポック数).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L432)</span>
### EarlyStopping
```python
keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='auto')
```
監視する値の変化が停止した時に訓練を終了します.
__引数__
- __monitor__: 監視する値.
- __min_delta__: 監視する値について改善として判定される最小変化値.つまり,min_deltaよりも絶対値の変化が小さければ改善していないとみなします.
- __patience__: ここで指定したエポック数の間(監視する値に)改善がないと,訓練が停止します.
- __verbose__: 冗長モード.
- __mode__: {auto, min, max}の内,一つが選択されます.`min`モードでは,監視する値の減少が停止した際に,訓練を終了します.また,`max`モードでは,監視する値の増加が停止した際に,訓練を終了します.`auto`モードでは,この傾向は自動的に監視されている値から推定します.
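__例__
以下は`EarlyStopping`の簡単な使用例です(`model`や学習データは定義済みと仮定した最小限のスケッチです).
```python
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3, verbose=1)
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100,
          callbacks=[early_stopping])
```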
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L514)</span>
### RemoteMonitor
```python
keras.callbacks.RemoteMonitor(root='http://localhost:9000', path='/publish/epoch/end/', field='data', headers=None)
```
このコールバックはサーバーにイベントをストリームするときに使用されます.
`requests`ライブラリが必要です.イベントはデフォルトで`root + '/publish/epoch/end/'`に送信されます.
コールすることによって,イベントデータをJSONエンコードした辞書の`data`引数をHTTP POSTされます.
__引数__
- __root__: 文字列;対象サーバのルートURL.
- __path__: 文字列;イベントを送る`root`への相対パス.
- __field__: 文字列;データを保存するJSONのフィールド.
- __headers__: 辞書; オプションでカスタムできるHTTPヘッダー.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L559)</span>
### LearningRateScheduler
```python
keras.callbacks.LearningRateScheduler(schedule, verbose=0)
```
学習率のスケジューラ.
__引数__
- __schedule__: この関数はエポックのインデックス(整数, 0から始まるインデックス)を入力とし,新しい学習率(浮動小数点数)を返します.
- __verbose__: 整数.0: 何も表示しない.1: 更新メッセージを表示.
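__例__
以下は,エポックのインデックスに応じて学習率を段階的に減衰させる簡単な例です(`model`と学習データは定義済みと仮定した最小限のスケッチです).
```python
from keras.callbacks import LearningRateScheduler
def step_decay(epoch):
    # 10エポック毎に学習率を半分にします(初期値は0.01).
    return 0.01 * (0.5 ** (epoch // 10))
lr_scheduler = LearningRateScheduler(step_decay, verbose=1)
model.fit(x_train, y_train, epochs=50, callbacks=[lr_scheduler])
```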
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L587)</span>
### TensorBoard
```python
keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=False, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
```
Tensorboardによる基本的な可視化.
[TensorBoard](https://www.tensorflow.org/get_started/summaries_and_tensorboard)はTensorFlowによって提供されている可視化ツールです
このコールバックはTensorBoardのログを出力します.TensorBoardでは,異なる層への活性化ヒストグラムと同様に,訓練とテストの評価値を動的にグラフ化し,可視化できます.
pipからTensorFlowをインストールしているならば,コマンドラインからTensorBoardを起動できます.
```
tensorboard --logdir=/full_path_to_your_logs
```
__引数__
- __log_dir__: TensorBoardによって解析されたログファイルを保存するディレクトリのパス
- __histogram_freq__: モデルの層の活性化ヒストグラムを計算する(エポック中の)頻度.この値を0に設定するとヒストグラムが計算されません.ヒストグラムの可視化にはバリデーションデータを指定しておく必要があります.
- __write_graph__: TensorBoardのグラフを可視化するか.`write_graph`がTrueの場合,ログファイルが非常に大きくなることがあります.
- __write_grads__: TensorBoardに勾配のヒストグラムを可視化するかどうか.`histogram_freq`は0より大きくしなければなりません.
- __batch_size__: ヒストグラム計算のネットワークに渡す入力のバッチサイズ.
- __write_images__: TensorBoardで可視化するモデルの重みを画像として書き出すかどうか.
- __embeddings_freq__: 選択したembeddingsレイヤーを保存する(エポックに対する)頻度.
- __embeddings_layer_names__: 観察するレイヤー名のリスト.もしNoneか空リストなら全embeddingsレイヤーを観察します.
- __embeddings_metadata__: レイヤー名からembeddingsレイヤーに関するメタデータの保存ファイル名へマップする辞書.
メタデータのファイルフォーマットの[詳細](https://www.tensorflow.org/get_started/embedding_viz#metadata_optional).
全embeddingsレイヤーに対して同じメタデータファイルを使う場合は文字列を渡します.
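__例__
以下は`TensorBoard`コールバックの簡単な使用例です(`model`やデータは定義済みと仮定した最小限のスケッチです).
```python
from keras.callbacks import TensorBoard
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=1,
                          write_graph=True)
model.fit(x_train, y_train,
          epochs=20,
          validation_data=(x_val, y_val),  # histogram_freq > 0 の場合バリデーションデータが必要です
          callbacks=[tensorboard])
```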
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L811)</span>
### ReduceLROnPlateau
```python
keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=0, mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)
```
評価値の改善が止まった時に学習率を減らします.
モデルは訓練が停滞した時に学習率を2〜10で割ることで恩恵を受けることがあります.
このコールバックは評価値を監視し,'patience'で指定されたエポック数の間改善が見られなかった場合,学習率を減らします.
__例__
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
__引数__
- __monitor__: 監視する値.
- __factor__: 学習率を減らす割合.new_lr = lr * factor
- __patience__: 何エポック改善が見られなかったら学習率の削減を行うか.
- __verbose__: 整数.0: 何も表示しない.1: 学習率削減時メッセージを表示.
- __mode__: `auto`,`min`,`max`のいずれか.
`min`の場合,監視する値の減少が停止した際に,学習率を更新します.
`max`の場合,監視する値の増加が停止した時に,学習率を更新します.
`auto`の場合,監視する値の名前から自動で判断します.
- __epsilon__: 改善があったと判断する閾値.有意な変化だけに注目するために用います.
- __cooldown__: 学習率を減らした後,通常の学習を再開するまで待機するエポック数.
- __min_lr__: 学習率の下限.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L927)</span>
### CSVLogger
```python
keras.callbacks.CSVLogger(filename, separator=',', append=False)
```
各エポックの結果をcsvファイルに保存するコールバックです.
np.ndarrayのような1次元イテラブルを含む,文字列表現可能な値をサポートしています.
__例__
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
__引数__
- __filename__: csvファイル名.例えば'run/log.csv'.
- __separator__: csvファイルで各要素を区切るために用いられる文字.
- __append__: True: ファイルが存在する場合,追記します.(訓練を続ける場合に便利です)
False: 既存のファイルを上書きします.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/callbacks.py#L1004)</span>
### LambdaCallback
```python
keras.callbacks.LambdaCallback(on_epoch_begin=None, on_epoch_end=None, on_batch_begin=None, on_batch_end=None, on_train_begin=None, on_train_end=None)
```
シンプルな自作コールバックを急いで作るためのコールバックです.
このコールバックは,適切なタイミングで呼び出される無名関数で構築されます.
以下のような位置引数が必要であることに注意してください:
- `on_epoch_begin`と`on_epoch_end`は2つの位置引数が必要です: `epoch`,`logs`
- `on_batch_begin`と`on_batch_end`は2つの位置引数が必要です: `batch`,`logs`
- `on_train_begin`と`on_train_end`は1つの位置引数が必要です: `logs`
__引数__
- __on_epoch_begin__: すべてのエポックの開始時に呼ばれます.
- __on_epoch_end__: すべてのエポックの終了時に呼ばれます.
- __on_batch_begin__: すべてのバッチの開始時に呼ばれます.
- __on_batch_end__: すべてのバッチの終了時に呼ばれます.
- __on_train_begin__: 訓練の開始時に呼ばれます.
- __on_train_end__: 訓練の終了時に呼ばれます.
__例__
```python
# すべてのバッチの開始時にバッチ番号を表示
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# 訓練終了時にいくつかのプロセスを終了
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
---
# コールバックを作成
基底クラスの`keras.callbacks.Callback`を拡張することで,カスタムコールバックを作成できます.
コールバックは,`self.model`プロパティによって,関連したモデルにアクセスできます.
訓練中の各バッチの損失のリストを保存する簡単な例は,以下のようになります.
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
```
---
### 例: 損失の履歴を記録する
```python
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
history = LossHistory()
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, callbacks=[history])
print(history.losses)
# 出力
'''
[0.66047596406559383, 0.3547245744908703, ..., 0.25953155204159617, 0.25901699725311789]
'''
```
---
### 例: モデルのチェックポイント
```python
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(Dense(10, input_dim=784, kernel_initializer='uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
'''
バリデーションロスが減少した場合に,各エポック終了後,モデルの重みを保存します
'''
checkpointer = ModelCheckpoint(filepath='/tmp/weights.hdf5', verbose=1, save_best_only=True)
model.fit(x_train, y_train, batch_size=128, epochs=20, verbose=0, validation_data=(X_test, Y_test), callbacks=[checkpointer])
```
| keras-docs-ja/sources/callbacks.md/0 | {
"file_path": "keras-docs-ja/sources/callbacks.md",
"repo_id": "keras-docs-ja",
"token_count": 7990
} | 80 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L11)</span>
### GaussianNoise
```python
keras.layers.noise.GaussianNoise(stddev)
```
平均値0の加法的ガウシアンノイズを加えます.
これはオーバーフィッティングの軽減に有効です(random data augmentationの一種).
ガウシアンノイズ (GS) は実数値の入力におけるノイズ付与として一般的です.
regularization layerは訓練時のみ有効です.
__引数__
- __stddev__: 浮動小数点数,ノイズ分布の標準偏差値.
__入力のshape__
任意.
モデルの最初のレイヤーで使う場合は,`input_shape`キーワードで指定してください.
(整数のタプル(サンプルのaxisは含まない))
__出力のshape__
入力と同じ.
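__例__
以下は`GaussianNoise`を入力に適用する簡単な例です(層の構成や次元は説明のための仮定です).
```python
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.noise import GaussianNoise
model = Sequential()
model.add(GaussianNoise(0.1, input_shape=(64,)))  # 訓練時のみノイズを加えます
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```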
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L96)</span>
### AlphaDropout
```python
keras.layers.noise.AlphaDropout(rate, noise_shape=None, seed=None)
```
入力にAlpha Dropoutを適用します.
Alpha Dropoutは,dropoutの適用後でもself-normalizingの性質を担保するために入力のもともとの値の平均と分散を保持しつつ、`Dropout`を行います.
Alpha Dropoutは,活性化値にランダムに負の飽和値をセットするために、Scaled Exponential Linear Unitsと相性が良いです.
__引数__
- __rate__: 浮動小数点数,drop probability (`Dropout`同様).平均1,標準偏差値`sqrt(rate / (1 - rate))`のノイズを乗じます.
- __seed__: 整数.乱数のシードに使います.
__入力のshape__
任意.
モデルの最初のレイヤーで使う場合は,`input_shape`キーワードで指定してください.
(整数のタプル(サンプルのaxisは含まない))
__出力のshape__
入力と同じ.
__参考文献__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/noise.py#L52)</span>
### GaussianDropout
```python
keras.layers.noise.GaussianDropout(rate)
```
平均値1のガウシアンノイズを乗じます.
regularization layerは訓練時のみ有効です.
__引数__
- __rate__: 浮動小数点数,drop probability(`Dropout`同様).平均1,標準偏差値`sqrt(rate / (1 - rate))`のノイズを乗じます.
__入力のshape__
任意.
モデルの最初のレイヤーで使う場合は,`input_shape`キーワードで指定してください.
(整数のタプル(サンプルのaxisは含まない))
__出力のshape__
入力と同じ.
__参考文献__
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting Srivastava, Hinton, et al. 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)
| keras-docs-ja/sources/layers/noise.md/0 | {
"file_path": "keras-docs-ja/sources/layers/noise.md",
"repo_id": "keras-docs-ja",
"token_count": 1423
} | 81 |
# Scikit-Learn APIのためのラッパー
`keras.wrappers.scikit_learn.py`にあるラッパーを通して,Kerasの`Sequential`モデル(1つの入力のみ)をScikit-Learnワークフローの一部として利用できます.
2つのラッパーが利用可能です:
`keras.wrappers.scikit_learn.KerasClassifier(build_fn=None, **sk_params)`, これはScikit-Learnのclassifierインターフェースを実装します.
`keras.wrappers.scikit_learn.KerasRegressor(build_fn=None, **sk_params)`, これはScikit-Learnのregressorインターフェースを実装します.
### 引数
- __build_fn__: 呼び出し可能な関数,または,クラスインスタンス
- __sk_params__: モデルパラメータとfittingパラメータ
`build_fn`は,Kerasモデルを構成し,コンパイルし,返します.
このモデルは,fit/predictのために利用されます.以下の3つの値のうち
1つをbuild_fnに渡すことができます:
1. 関数
2. `__call__` メソッドを実装したクラスのインスタンス
3. None.これは`KerasClassifier`または`KerasRegressor`を継承したクラスを意味します.この `__call__` メソッドはbuild_fnのデフォルトとして扱われます.
`sk_params`はモデルパラメータとfittingパラメータの両方を取ります.
モデルパラメータは`build_fn`の引数です.
scikit-learnの他の予測器と同様,`sk_params`に何も与えなくとも予測器が作れるように,`build_fn`の引数にはデフォルト値を与える必要があります.
また,`sk_params`は`fit`,`predict`,`predict_proba`,および,`score`メソッドを
呼ぶためのパラメータも取ります(例えば,`epochs`, `batch_size`).
fitting (predicting) パラメータは以下の順番で選択されます:
1. `fit`,`predict`,`predict_proba`,および,`score`メソッドの辞書引数に与えられた値
2. `sk_params`に与えられた値
3. `keras.models.Sequential`,`fit`,`predict`,`predict_proba`,および,`score`メソッドのデフォルト値
scikit-learnの`grid_search`APIを利用するとき,チューニングパラメータは`sk_params`に渡したものになります.
これには,fittingパラメータも含まれます.つまり,最適なモデルパラメータだけでなく,最適な`batch_size`や
`epochs`の探索に,`grid_search`を利用できます.
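### 例
以下は`KerasClassifier`と`GridSearchCV`を組み合わせる最小限の例です(入力の次元や`units`などのパラメータは説明のための仮定です).
```python
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
def create_model(units=32):
    model = Sequential()
    model.add(Dense(units, activation='relu', input_dim=20))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
clf = KerasClassifier(build_fn=create_model, units=32, epochs=10, batch_size=32, verbose=0)
param_grid = {'units': [16, 32], 'epochs': [5, 10], 'batch_size': [16, 32]}
grid = GridSearchCV(estimator=clf, param_grid=param_grid, cv=3)
# grid.fit(x_train, y_train)  # x_trainの形状は(サンプル数, 20),y_trainは2値ラベルを仮定
```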
| keras-docs-ja/sources/scikit-learn-api.md/0 | {
"file_path": "keras-docs-ja/sources/scikit-learn-api.md",
"repo_id": "keras-docs-ja",
"token_count": 1150
} | 82 |
site_name: Keras Documentation
theme: readthedocs
docs_dir: sources
repo_url: https://github.com/keras-team/keras-docs-ko
site_url: http://keras.io/ko/
site_description: 'Korean documentation for Keras, the Python Deep Learning library.'
dev_addr: '0.0.0.0:8000'
google_analytics: ['UA-61785484-1', 'keras.io']
nav:
- Home: index.md
- Why use Keras: why-use-keras.md
- Getting started:
- Guide to the Sequential model: getting-started/sequential-model-guide.md
- Guide to the Functional API: getting-started/functional-api-guide.md
- FAQ: getting-started/faq.md
- Models:
- About Keras models: models/about-keras-models.md
- Sequential: models/sequential.md
- Model (functional API): models/model.md
- Layers:
- About Keras layers: layers/about-keras-layers.md
- Core Layers: layers/core.md
- Convolutional Layers: layers/convolutional.md
- Pooling Layers: layers/pooling.md
- Locally-connected Layers: layers/local.md
- Recurrent Layers: layers/recurrent.md
- Embedding Layers: layers/embeddings.md
- Merge Layers: layers/merge.md
- Advanced Activations Layers: layers/advanced-activations.md
- Normalization Layers: layers/normalization.md
- Noise layers: layers/noise.md
- Layer wrappers: layers/wrappers.md
- Writing your own Keras layers: layers/writing-your-own-keras-layers.md
- Preprocessing:
- Sequence Preprocessing: preprocessing/sequence.md
- Text Preprocessing: preprocessing/text.md
- Image Preprocessing: preprocessing/image.md
- Losses: losses.md
- Metrics: metrics.md
- Optimizers: optimizers.md
- Activations: activations.md
- Callbacks: callbacks.md
- Datasets: datasets.md
- Applications: applications.md
- Backend: backend.md
- Initializers: initializers.md
- Regularizers: regularizers.md
- Constraints: constraints.md
- Visualization: visualization.md
- Scikit-learn API: scikit-learn-api.md
- Utils: utils.md
- Contributing: contributing.md
| keras-docs-ko/mkdocs.yml/0 | {
"file_path": "keras-docs-ko/mkdocs.yml",
"repo_id": "keras-docs-ko",
"token_count": 635
} | 83 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L796)</span>
### Dense
```python
keras.layers.Dense(units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
일반적인 완전 연결 신경망 층<sub>densely connected layer</sub>.
`Dense`는 `output = activation(dot(input, kernel) + bias)`을 실행합니다.
여기서 `activation`은 `activation` 인자로 전달되는 원소별<sub>element-wise</sub> 활성화 함수이고,
`kernel`은 층에서 만들어진 가중치 행렬<sub>weight matrix</sub>입니다.
`bias`는 층에서 만들어진 편향<sub>bias</sub> 벡터이며 `use_bias=True`인 경우에만 적용 가능합니다.
참고: 층의 입력 텐서의 랭크가 2보다 클 경우, `kernel`과의 내적<sub>dot product</sub>을 하기 전에 1D 벡터로 형태를 변환해야 합니다.
__예시__
```python
# Sequential 모델의 첫 번째 층.
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# 모델은 (*, 16) 형태의 배열을 입력으로 받고
# (*, 32) 형태의 배열을 출력합니다.
# 첫 번째 층 이후에는,
# 입력의 크기를 지정하지 않아도 됩니다.
model.add(Dense(32))
```
__인자__
- __units__: 양의 `int`. 출력값의 차원 크기를 결정합니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`이며, 이 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 이 가중치는 입력값에 곱해져서 선형변환하는 연산에 사용됩니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수<sub>regularizer</sub>를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약<sub>constraints</sub>을 결정합니다. 참고: [제약](../constraints.md))
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md))
__입력 형태__
`(batch_size, ..., input_dim)` 형태의 nD 텐서.
가장 일반적인 경우는`(batch_size, input_dim)` 형태의 2D 입력입니다.
__출력 형태__
`(batch_size, ..., units)` 형태의 nD 텐서.
예를 들어, `(batch_size, input_dim)` 형태의 2D 입력에 대해서
출력은 `(batch_size, units)`의 형태를 가집니다.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L277)</span>
### Activation
```python
keras.layers.Activation(activation)
```
출력값에 활성화 함수를 적용합니다.
__인자__
- __activation__: Theano나 TensorFlow 또는 다른 곳에서 사용하는 활성화 함수의 이름 문자열. 참고: [활성화 함수](../activations.md)
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `Activation`층을
사용하려면 키워드 인자 `input_shape`로 형태를 지정해야 합니다.
`input_shape`는 `int`로 이루어진 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일합니다.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L81)</span>
### Dropout
```python
keras.layers.Dropout(rate, noise_shape=None, seed=None)
```
입력에 드롭아웃을 적용합니다.
드롭아웃은 학습 과정 중 업데이트를 할 때 `rate`의 비율에 따라 입력 유닛을 무작위로 0으로 설정합니다. 이는 과적합을 방지하는데 도움이 됩니다.
__인자__
- __rate__: 0과 1사이의 `float`. 0으로 설정할 입력 유닛의 비율입니다.
- __noise_shape__: 입력과 곱하게 되는 이진 드롭아웃 마스크의
형태를 나타내는 1D 정수 텐서입니다.
예를 들어, 입력이 `(batch_size, timesteps, features)`의
형태를 가지는 경우, 드롭아웃 마스크를
모든 시간 단계에 대해서 동일하게 적용하고 싶다면
`noise_shape=(batch_size, 1, features)`를 사용하면 됩니다.
- __seed__: `int`. 난수 생성에 사용할 시드를 정합니다.
__참고__
- [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](
http://www.jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf)
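__예시__
아래는 `noise_shape`를 사용해 모든 시간 단계에 동일한 드롭아웃 마스크를 적용하는 최소한의 예시입니다. 층 구성과 차원은 설명을 위한 가정이며, `noise_shape`의 `None` 항목은 최근 케라스 버전에서 입력의 동적 형태로 대체된다고 가정합니다.
```python
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense
timesteps, features = 10, 8
model = Sequential()
model.add(LSTM(16, return_sequences=True, input_shape=(timesteps, features)))
# (batch_size, 1, features) 형태의 마스크가 시간 축으로 브로드캐스트됩니다.
model.add(Dropout(0.3, noise_shape=(None, 1, 16)))
model.add(LSTM(16))
model.add(Dense(1, activation='sigmoid'))
```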
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L462)</span>
### Flatten
```python
keras.layers.Flatten(data_format=None)
```
입력을 1차원으로 바꿉니다. 배치 크기에는 영향을 미치지 않습니다.
__인자__
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, time, ..., channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, time, channels, ...)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__예시__
```python
model = Sequential()
model.add(Conv2D(64, (3, 3),
input_shape=(3, 32, 32), padding='same',))
# 현재: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# 현재: model.output_shape == (None, 65536)
```
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/engine/input_layer.py#L114)</span>
### Input
```python
keras.engine.input_layer.Input()
```
케라스 텐서를 생성합니다.
케라스 텐서는 백엔드(Theano, TensorFlow 혹은 CNTK)에서 사용되는 텐서에
몇가지 속성을 추가한 것으로, 이를 통해 모델의 입력과 출력을 아는 것만으로도
케라스 모델을 만들 수 있습니다.
예를 들어 a, b와 c가 케라스 텐서라고 하면
`model = Model(input=[a, b], output=c)`만으로도 모델을 생성할 수 있습니다.
케라스 텐서에 추가된 속성은 다음과 같습니다.
`_keras_shape`: 케라스의 형태 유추를 통해 전파되는 정수 튜플.
`_keras_history`: 텐서에 적용되는 마지막 층. 해당 층에서 전체 모델 전체의 그래프를 추출할 수 있습니다.
__인자__
- __shape__: `int`로 이루어진 튜플. 배치 축을 포함하지 않습니다.
예를 들어 `shape=(32,)`는 입력이 32차원 벡터의 배치라는 것을 나타냅니다.
- __batch_shape__: `int`로 이루어진 튜플. 배치 축을 포함합니다.
예를 들어 `batch_shape=(10, 32)`는 입력이 10개의 32차원 벡터로 이루어진 배치라는 것을 나타냅니다.
`batch_shape=(None, 32)`는 임의의 수의 32차원 벡터로 이루어진 배치를 뜻합니다.
- __name__: `str`, 층의 문자열 이름.
모델 내에서 이름은 고유해야 하며 이미 사용한 이름은 다시 사용할 수 없습니다.
따로 지정하지 않을 경우, 자동으로 생성됩니다.
- __dtype__: `str`, 입력 데이터의 자료형(`float32`, `float64`, `int32`...) 입니다.
- __sparse__: `bool`, 생성할 플레이스홀더가 희소<sub>sparse</sub>한지
여부를 나타냅니다.
- __tensor__: 해당 인자가 주어진 경우 `Input` 층은 해당 텐서의 래퍼로 사용되며, 새로운 플레이스홀더 텐서를 만들지 않습니다.
__반환값__
텐서.
__예시__
```python
# 다음은 케라스의 로지스틱 회귀입니다.
x = Input(shape=(32,))
y = Dense(16, activation='softmax')(x)
model = Model(x, y)
```
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L311)</span>
### Reshape
```python
keras.layers.Reshape(target_shape)
```
출력을 특정 형태로 변형시킵니다.
__인자__
- __target_shape__: `int`로 이루어진 튜플. 목푯값 형태.
배치 축은 포함하지 않습니다.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `Reshape`층을
사용하려면 키워드 인자 `input_shape`로 형태를 지정해야 합니다.
`input_shape`는 `int`로 이루어진 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
`(batch_size,) + target_shape`
__예시__
```python
# 시퀀스 모델의 첫 번째 층입니다.
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# 현재: model.output_shape == (None, 3, 4)
# 참고: `None`은 배치 차원입니다.
# 시퀀스 모델의 중간 층입니다.
model.add(Reshape((6, 2)))
# 현재: model.output_shape == (None, 6, 2)
# `-1`을 차원으로 사용해서 형태 유추를 지원합니다.
model.add(Reshape((-1, 2, 2)))
# 현재: model.output_shape == (None, 3, 2, 2)
```
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L408)</span>
### Permute
```python
keras.layers.Permute(dims)
```
주어진 패턴에 따라서 입력의 차원을 치환합니다.
순환 신경망<sub>Recurrnent Neural Network</sub>과
합성곱 신경망<sub>Convolutional Neural Network</sub>을 함께 연결하는 경우에 유용합니다.
__예시__
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# 현재: model.output_shape == (None, 64, 10)
# 참고: `None`은 배치 차원입니다.
```
__인자__
- __dims__: `int`로 이루어진 튜플. 치환 패턴, 배치 차원을 포함하지 않습니다.
인덱스는 1에서 시작합니다.
예를 들어, `(2, 1)`은 입력의 첫 번째와 두 번째 차원을
치환합니다.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `Permute`층을
사용하려면 키워드 인자 `input_shape`로 형태를 지정해야 합니다.
`input_shape`는 `int`로 이루어진 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일하나, 특정된 패턴에 따라 차원의 순서가 재조정됩니다.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L524)</span>
### RepeatVector
```python
keras.layers.RepeatVector(n)
```
입력을 `n`회 반복합니다.
__예시__
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# 현재: model.output_shape == (None, 32)
# 참고: `None`은 배치 차원입니다.
model.add(RepeatVector(3))
# 현재: model.output_shape == (None, 3, 32).
```
__인자__
- __n__: `int`, 반복 인자.
__입력 형태__
`(num_samples, features)` 형태의 2D 텐서.
__출력 형태__
`(num_samples, n, features)` 형태의 3D 텐서.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L566)</span>
### Lambda
```python
keras.layers.Lambda(function, output_shape=None, mask=None, arguments=None)
```
임의의 표현식을 `Layer` 객체로 래핑하는 함수입니다.
__예시__
```python
# x -> x^2 층을 추가합니다.
model.add(Lambda(lambda x: x ** 2))
```
```python
# 입력의 음성 부분과 양성 부분의
# 연결을 반환하는
# 층을 추가합니다.
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
def antirectifier_output_shape(input_shape):
shape = list(input_shape)
assert len(shape) == 2 # 2D 텐서만 유효합니다.
shape[-1] *= 2
return tuple(shape)
model.add(Lambda(antirectifier,
output_shape=antirectifier_output_shape))
```
```python
# 두 입력 텐서의 아다마르 곱과
# 합을 반환하는 층을 추가합니다.
def hadamard_product_sum(tensors):
out1 = tensors[0] * tensors[1]
out2 = K.sum(out1, axis=-1)
return [out1, out2]
def hadamard_product_sum_output_shape(input_shapes):
shape1 = list(input_shapes[0])
shape2 = list(input_shapes[1])
assert shape1 == shape2 # 형태가 다르면 아다마르 곱이 성립하지 않습니다.
return [tuple(shape1), tuple(shape2[:-1])]
x1 = Dense(32)(input_1)
x2 = Dense(32)(input_2)
layer = Lambda(hadamard_product_sum, hadamard_product_sum_output_shape)
x_hadamard, x_sum = layer([x1, x2])
```
__인자__
- __function__: 임의의 표현식 또는 함수. 함수는
첫 번째 인자로 텐서 혹은 텐서의 리스트를 입력 받아야 합니다.
- __output_shape__: 함수의 출력 형태.
Theano를 사용하는 경우에만 유효합니다. 튜플 혹은 함수가 될 수 있습니다.
튜플인 경우, 첫 번째 차원만 차원을 지정합니다.
샘플 차원은 입력 차원과 동일하다고 가정하거나(`output_shape = (input_shape[0], ) + output_shape`),
입력이 `None`인 경우 샘플 차원 또한 `None`이라고 가정합니다(`output_shape = (None, ) + output_shape`).
함수인 경우, 전체 형태를 입력 함수의 형태로 지정 합니다(`output_shape = f(input_shape)`).
- __mask__: `None`(마스킹을 하지 않는 경우) 또는 임베딩에 사용할 마스크 텐서.
- __arguments__: 함수에 전달하는 키워드 인자의 딕셔너리.
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `Lambda`층을
사용하려면 키워드 인자 `input_shape`로 형태를 지정합니다.
`input_shape`는 `int`로 이루어진 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
`output_shape` 인자의 형태를 따릅니다.
혹은 TensorFlow나 CNTK를 사용하는 경우 자동으로 형태가 지정됩니다.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L940)</span>
### ActivityRegularization
```python
keras.layers.ActivityRegularization(l1=0.0, l2=0.0)
```
손실 함수에 항을 추가하여 입력값에 규제화 함수를 적용합니다.
__인자__
- __l1__: L1 규제화 인수 (양의 `float`).
- __l2__: L2 규제화 인수 (양의 `float`).
__입력 형태__
임의의 형태입니다. 모델의 첫 번째 층으로 `ActivityRegularization`층을
사용하려면 키워드 인자 `input_shape`로 형태를 지정합니다.
`input_shape`는 `int`로 이루어진 튜플로 배치 축을 포함하지 않습니다.
__출력 형태__
입력 형태와 동일합니다.
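__예시__
아래는 이전 층의 활성값에 L1 규제를 적용하는 간단한 예시입니다(층 구성과 차원은 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import Dense, ActivityRegularization
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=100))
model.add(ActivityRegularization(l1=1e-5))  # 이전 층의 큰 활성값에 패널티를 부과합니다
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
```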
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L28)</span>
### Masking
```python
keras.layers.Masking(mask_value=0.0)
```
시간 단계를 건너 뛰기 위해 마스크 값을 이용해 시퀀스를 마스킹합니다.
주어진 샘플 시간 단계<sub>time step</sub>의 모든 특징<sub>feature</sub>이 `mask_value`와 동일하고 마스킹을 지원한다면 모든 하위 층에서 해당되는 샘플 시간 단계를 마스킹합니다.
아직 하위 층이 마스킹을 지원하지 않는데 입력 마스킹을
받아들이는 경우 예외가 발생합니다.
__예시__
LSTM 층에 전달할 `(samples, timesteps, features)`의
형태를 가진 NumPy 데이터 배열 `x`를 생각해 봅시다.
샘플 시간 단계에 대한 특성이 없는 경우, 시간 단계 #3의 샘플 #0과 시간 단계 #5의 샘플 #2를 마스킹하고 싶다면, 다음을 실행하면 됩니다.
- `x[0, 3, :] = 0.`, 그리고 `x[2, 5, :] = 0.`으로 설정합니다.
- LSTM층 전에 `mask_value=0.`의 `Masking` 층을 삽입합니다.
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
__인자__
- __mask_value__: `None` 또는 건너뛸 마스크 값.
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L141)</span>
### SpatialDropout1D
```python
keras.layers.SpatialDropout1D(rate)
```
드롭아웃의 공간적 1D 버전.
이 버전은 드롭아웃과 같은 함수를 수행하지만, 개별적 원소 대신
1D 특징 맵 전체를 드롭시킵니다. 초기 합성곱 층에서는 특징 맵 내 인접한 프레임들이 강한 상관관계를 보이는 경우가 많습니다. 이 경우 일반적인 드롭아웃으로는 활성값들을 정규화 시키지 못하고 그저 학습 속도를 감소시키는 것과 같은 결과를 낳습니다.
이때 `SpatialDropout1D`을 사용하면 특징 맵 사이의 독립성을 유지하는데 도움을 줍니다.
__인자__
- __rate__: `0`과 `1`사이의 `float`. 드롭시킬 입력 유닛의 비율.
__입력 형태__
`(samples, timesteps, channels)`형태의 3D 텐서.
__출력 형태__
입력 형태와 동일.
__참고__
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
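__예시__
아래는 임베딩 출력에 `SpatialDropout1D`를 적용하는 최소한의 예시입니다(어휘 크기 등의 값은 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
model = Sequential()
model.add(Embedding(10000, 128, input_length=100))
model.add(SpatialDropout1D(0.2))  # 개별 원소 대신 특징 맵(채널) 전체를 드롭합니다
model.add(LSTM(64))
model.add(Dense(1, activation='sigmoid'))
```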
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L178)</span>
### SpatialDropout2D
```python
keras.layers.SpatialDropout2D(rate, data_format=None)
```
드롭아웃의 공간적 2D 버전.
이 버전은 드롭아웃과 같은 함수를 수행하지만, 개별적 원소 대신
2D 특징 맵 전체를 드롭시킵니다. 초기 합성곱 층에서는 특징 맵 내 인접한 픽셀들이 강한 상관관계를 보이는 경우가 많습니다.
이 경우 일반적인 드롭아웃으로는 활성값들을 정규화 시키지 못하고 그저 학습 속도를 감소시키는 것과 같은 결과를 낳습니다.
이때 `SpatialDropout2D`을 사용하면 특징 맵 사이의 독립성을 유지하는데 도움을 줍니다.
__인수__
- __rate__: `0`과 `1`사이의 `float`. 드롭시킬 입력 유닛의 비율.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, time, ..., channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, time, channels, ...)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
- data_format=`'channels_first'`인 경우:
`(samples, channels, rows, cols)` 형태의 4D 텐서.
- data_format=`'channels_last'`인 경우:
`(samples, rows, cols, channels)` 형태의 4D 텐서.
__출력 형태__
입력 형태와 동일합니다.
__참고__
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
------
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L228)</span>
### SpatialDropout3D
```python
keras.layers.SpatialDropout3D(rate, data_format=None)
```
드롭아웃의 공간적 3D 버전.
이 버전은 드롭아웃과 같은 함수를 수행하지만, 개별적 원소 대신
3D 특징 맵 전체를 드롭시킵니다. 초기 합성곱 층에서는 특징 맵 내 인접한 복셀들이 강한 상관관계를 보이는 경우가 많습니다.
이 경우 일반적인 드롭아웃으로는 활성값들을 정규화 시키지 못하고 그저 학습 속도를 감소시키는 것과 같은 결과를 낳습니다.
이때 `SpatialDropout3D`을 사용하면 특징 맵 사이의 독립성을 유지하는데 도움을 줍니다.
__인자__
- __rate__: `0`과 `1`사이의 `float`. 드롭시킬 입력 유닛의 비율.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, time, ..., channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, time, channels, ...)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
- data_format=`'channels_first'`인 경우:
`(samples, channels, dim1, dim2, dim3)` 형태의 5D 텐서.
- data_format=`'channels_last'`인 경우:
`(samples, dim1, dim2, dim3, channels)` 형태의 5D 텐서.
__출력 형태__
입력 형태와 동일합니다.
__참고__
- [Efficient Object Localization Using Convolutional Networks](
https://arxiv.org/abs/1411.4280)
| keras-docs-ko/sources/layers/core.md/0 | {
"file_path": "keras-docs-ko/sources/layers/core.md",
"repo_id": "keras-docs-ko",
"token_count": 14525
} | 84 |
# 이미지 전처리
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/image.py#L238)</span>
## ImageDataGenerator 클래스
```python
keras.preprocessing.image.ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0, width_shift_range=0.0, height_shift_range=0.0, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format='channels_last', validation_split=0.0, interpolation_order=1, dtype='float32')
```
실시간 데이터 증강을 사용해서 텐서 이미지 데이터 배치를 생성합니다.
데이터에 대해 (배치 단위로) 루프가 순환됩니다.
__인수__
- __featurewise_center__: 불리언.
데이터셋에 대해 특성별로 인풋의 평균이 0이 되도록 합니다.
- __samplewise_center__: 불리언. 각 샘플의 평균이 0이 되도록 합니다.
- __featurewise_std_normalization__: 불리언.
인풋을 각 특성 내에서 데이터셋의 표준편차로 나눕니다.
- __samplewise_std_normalization__: 불리언. 각 인풋을 표준편차로 나눕니다.
- __zca_whitening__: 불리언. 영위상 성분분석 백색화를 적용할지 여부입니다.
- __zca_epsilon__: 영위상 성분분석 백색화의 엡실론 값. 디폴트 값은 1e-6입니다.
- __rotation_range__: 정수. 무작위 회전의 각도 범위입니다.
- __width_shift_range__: 부동소수점, 1D 형태의 유사배열 혹은 정수
- 부동소수점: < 1인 경우 전체 가로넓이에서의 비율, >= 1인 경우 픽셀의 개수입니다.
- 1D 형태의 유사배열: 배열에서 가져온 무작위 요소입니다.
- 정수: `(-width_shift_range, +width_shift_range)`
사이 구간의 픽셀 개수입니다.
- `width_shift_range=2`인 경우 유효값은
정수인 `[-1, 0, +1]`로,
`width_shift_range=[-1, 0, +1]`와 동일한 반면,
`width_shift_range=1.0`인 경우 유효값은
`[-1.0, +1.0)`의 구간 사이 부동소수점입니다.
- __height_shift_range__: 부동소수점, 1D 형태의 유사배열 혹은 정수
- 부동소수점: < 1인 경우 전체 세로높이에서의 비율, >= 1인 경우 픽셀의 개수입니다.
- 1D 형태의 유사배열: 배열에서 가져온 무작위 요소입니다.
- 정수: `(-height_shift_range, +height_shift_range)`
사이 구간의 픽셀 개수입니다.
- `height_shift_range=2`인 경우
유효한 값은 정수인 `[-1, 0, +1]`으로
`height_shift_range=[-1, 0, +1]`와 동일한 반면,
`height_shift_range=1.0`인 경우 유효한 값은
`[-1.0, +1.0)`의 구간 사이 부동소수점입니다.
- __brightness_range__: 두 부동소수점 값으로 이루어진 리스트 혹은 튜플.
밝기 정도를 조절할 값의 범위입니다.
- __shear_range__: 부동소수점. 층밀리기의 강도입니다.
(도 단위의 반시계 방향 층밀리기 각도)
- __zoom_range__: 부동소수점 혹은 [하한, 상산]. 무작위 줌의 범위입니다.
부동소수점인 경우, `[하한, 상한] = [1-zoom_range, 1+zoom_range]`입니다.
- __channel_shift_range__: 부동소수점. 무작위 채널 이동의 범위입니다.
- __fill_mode__: {"constant", "nearest", "reflect" 혹은 "wrap"} 중 하나.
디폴트 값은 'nearest'입니다.
인풋 경계의 바깥 공간은 다음의 모드에 따라
다르게 채워집니다:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
- __cval__: 부동소수점 혹은 정수.
`fill_mode = "constant"`인 경우
경계 밖 공간에 사용하는 값입니다.
- __horizontal_flip__: 불리언. 인풋을 무작위로 가로로 뒤집습니다.
- __vertical_flip__: 불리언. 인풋을 무작위로 세로로 뒤집습니다.
- __rescale__: 크기 재조절 인수. 디폴트 값은 None입니다.
None 혹은 0인 경우 크기 재조절이 적용되지 않고,
그 외의 경우 (다른 변형을 전부 적용한 후에)
데이터를 주어진 값으로 곱합니다.
- __preprocessing_function__: 각 인풋에 적용되는 함수.
이미지가 크기 재조절되고 증강된 후에 함수가 작동합니다.
이 함수는 다음과 같은 하나의 인수를 갖습니다:
단일 이미지 (계수가 3인 Numpy 텐서),
그리고 동일한 형태의 Numpy 텐서를 출력해야 합니다.
- __data_format__: 이미지 데이터 형식,
"channels_first" 혹은 "channels_last"가 사용가능합니다.
"channels_last" 모드는 이미지의 형태가
`(샘플, 높이, 넓이, 채널)`이어야 함을,
"channels_first" 모드는 이미지의 형태가
`(샘플, 채널, 높이, 넓이)`이어야 함을 의미합니다.
디폴트 값은 `~/.keras/keras.json`에 위치한 value found in your
케라스 구성 파일의 `image_data_format` 값으로 설정됩니다.
따로 설정을 바꾸지 않았다면, "channels_last"가 초기값입니다.
- __validation_split__: 부동소수점. (엄격히 0과 1사이의 값으로) 검증의 용도로 남겨둘 이미지의 비율입니다.
- __interpolation_order__: int, order to use for the spline interpolation. Higher is slower
- __dtype__: 생성된 배열에 사용할 자료형.
__예시__
`.flow(x, y)`사용한 예시:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# 특성별 정규화에 필요한 수치를 계산합니다
# (영위상 성분분석 백색화를 적용하는 경우, 표준편차, 평균, 그리고 주성분이 이에 해당합니다)
datagen.fit(x_train)
# 실시간 데이터 증강을 사용해 배치에 대해서 모델을 학습합니다:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# 다음은 보다 "수동"인 예시입니다
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
`.flow_from_directory(directory)`사용한 예시:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
이미지와 마스크를 함께 변형하는 예시.
```python
# 동일한 인수로 두 인스턴스를 생성합니다
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# fit과 flow 메서드에 동일한 시드와 키워드 인수를 제공합니다
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# 생성기를 하나로 합쳐 이미지와 마스크를 만들어 냅니다
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Example of using ```.flow_from_dataframe(dataframe, directory,
x_col, y_col)```:
```python
train_df = pandas.read_csv("./train.csv")
valid_df = pandas.read_csv("./valid.csv")
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory='data/train',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_dataframe(
dataframe=valid_df,
directory='data/validation',
x_col="filename",
y_col="class",
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
---
## ImageDataGenerator 메서드
### apply_transform
```python
apply_transform(x, transform_parameters)
```
주어진 매개변수에 따라 이미지에 변형을 가합니다.
__인수__
- __x__: 3D 텐서, 단일 이미지.
- __transform_parameters__: 문자열을 가진 딕셔너리 - 변형을 묘사하는
매개변수 쌍.
현재는 딕셔너리에서
다음과 같은 매개변수가 사용됩니다:
- `'theta'`: 부동소수점. 도 단위의 회전 각도.
- `'tx'`: 부동소수점. x 방향으로의 이동.
- `'ty'`: 부동소수점. y 방향으로의 이동.
- `'shear'`: 부동소수점. 도 단위의 층밀리기 각도.
- `'zx'`: 부동소수점. x 방향으로의 줌.
- `'zy'`: 부동소수점. y 방향으로의 줌.
- `'flip_horizontal'`: 불리언. 가로 뒤집기.
- `'flip_vertical'`: 불리언. 세로 뒤집기.
- `'channel_shift_intensity'`: 부동소수점. 채널 이동 강도.
- `'brightness'`: 부동소수점. 밝기 이동 강도.
__반환값__
인풋이 변형된 버전 (인풋과 동일한 형태).
---
### fit
```python
fit(x, augment=False, rounds=1, seed=None)
```
샘플 데이터에 데이터 생성기를 학습시킵니다.
이는 샘플 데이터 배열을 기반으로
데이터 의존적인 변형에 관련된 내부 데이터 통계를 계산합니다.
`featurewise_center`, `featurewise_std_normalization`,
혹은 `zca_whitening`이 참으로 설정되어 있을 때만 필요합니다.
__인수__
- __x__: 샘플 데이터. 계수가 4이어야 합니다.
흑백 데이터의 경우
채널 축이 1의 값을 가져야 하며,
RGB 데이터의 경우 3,
RGBA 데이터의 경우, 그 값이 4가 되어야 합니다.
- __augment__: 불리언 (디폴트 값: 거짓).
무작위로 증강된 샘플에 대해서 학습할지 여부.
- __rounds__: 정수 (디폴트 값: 1).
데이터 증강을 사용하는 경우(`augment=True`),
사용할 데이터에 몇 번이나 증강이 적용되는지에 대한 값입니다.
- __seed__: 정수 (디폴트 값: None). 난수 시드.
---
### flow
```python
flow(x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None)
```
데이터와 라벨 배열을 받아 증강된 데이터의 배치를 생성합니다.
__인수__
- __x__: 인풋 데이터. 계수 4의 numpy 배열 혹은 튜플.
튜플인 경우 첫 번째 성분이
이미지를 담고
두 번째 성분이 어떤 수정도 없이
아웃풋에 전달되는 또 다른 numpy 배열
혹은 numpy 배열의 리스트를 담습니다.
이미지와 함께 다양한 종류의 데이터를 모델에
전달할 때 사용할 수 있습니다.
흑백 데이터의 경우 이미지 배열의 채널 축의 값이
1이어야 하며,
RGB 데이터의 경우 3,
RGBA 데이터의 경우 4의 값을 가져야 합니다.
- __y__: 라벨.
- __batch_size__: 정수 (디폴트 값: 32).
- __shuffle__: 불리언 (디폴트 값: 참).
- __sample_weight__: 샘플 가중치.
- __seed__: 정수 (디폴트 값: None).
- __save_to_dir__: None 혹은 문자열 (디폴트 값: None).
이는 디렉토리를 선택적으로 지정해서
생성된 증강 사진을 저장할 수 있도록 합니다.
(현재 작업을 시각화하는데 유용합니다).
- __save_prefix__: 문자열 (기본값: `''`).
저장된 사진의 파일이름에 사용할 접두부호
(`save_to_dir`이 지정된 경우에만 유의미합니다).
- __save_format__: "png"나 "jpeg" 중 하나
(`save_to_dir`이 지정된 경우에만 유의미합니다). 디폴트 값: "png".
- __subset__: `ImageDataGenerator`에 `validation_split`이 설정된 경우
데이터의 부분세트 (`"training"` or `"validation"`).
__반환값__
`(x, y)` 튜플을 만들어내는 `Iterator`
여기서 `x`는 이미지 데이터로 구성된
(단일 이미지 인풋의 경우) numpy 배열 혹은
(추가적 인풋이 존재하는 경우) numpy 배열의 리스트이고
`y`는 그에 대응하는 라벨로 이루어진
numpy 배열입니다. 'sample_weight'이 None이 아니라면,
생성된 튜플은 `(x, y, sample_weight)`의 형태를 갖습니다.
`y`가 None인 경우, numpy 배열 `x`만 반환됩니다.
---
### flow_from_dataframe
```python
flow_from_dataframe(dataframe, directory=None, x_col='filename', y_col='class', weight_col=None, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', validate_filenames=True)
```
dataframe과 디렉토리의 위치를 전달받아 증강/정규화된 데이터의 배치를 생성합니다.
**간단한 튜토리얼은** [여기](http://bit.ly/keras_flow_from_dataframe)**에서 확인하실 수 있습니다.**
__인수__
- __dataframe__: Pandas dataframe containing the filepaths relative to 'directory' (or absolute paths if `directory` is None) of the images in a string column. It should include other column/s depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must include the `y_col` column with the
class/es of each image. Values in column can be string/list/tuple if a single class or list/tuple
if multiple classes.
- if 'class_mode' is '"binary"' or '"sparse"' it must include the given 'y_col' column with class
values as strings.
- if 'class_mode' is is `"raw"` or `"multi_output"` it should contain the columns specified in
'y_col'.
- if 'class_mode' is '"input"' or 'None' no extra column is needed.
- __directory__: string, path to the directory to read images from. If 'None', data in 'x_col' column should be absolute paths.
- __x_col__: string, column in 'dataframe' that contains the filenames (or absolute paths if 'directory' is 'None').
- __y_col__: string or list, column/s in dataframe that has the target data.
- __weight_col__: string, column in `dataframe` that contains the sample weights. Default: `None`.
- __target_size__: 정수의 튜플 `(높이, 넓이)`, 디폴트 값: `(256, 256)`. 모든 이미지의 크기를 재조정할 치수.
- __color_mode__: "grayscale", "rgb", "rgba" 중 하나. 디폴트 값: "rgb". 이미지가 1개 혹은 3개의 색깔 채널을 갖도록 변환할지 여부.
- __classes__: 클래스로 이루어진 선택적 리스트 (예. `['dogs', 'cats']`). 디폴트 값: None. 특별히 값을 지정하지 않으면, 클래스로 이루어진 리스트가 `y_col`에서 자동으로 유추됩니다. (이는 영숫자순으로 라벨 색인에 대응됩니다). `class_indices` 속성을 통해서 클래스 이름과 클래스 색인 간 매핑을 담은 딕셔너리를 얻을 수 있습니다.
- __class_mode__: "binary", "categorical", "input", "multi_output", "raw", "sparse" 혹은 None 중 하나. 디폴트 값: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels. Supports multi-label output.
- `"input"`: images identical to input images (mainly used to work with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels,
- `None`, no targets are returned (the generator will only yield batches of image data, which is useful to use in `model.predict_generator()`).
- __batch_size__: 데이터 배치의 크기 (디폴트 값: 32).
- __shuffle__: 데이터를 뒤섞을지 여부 (디폴트 값: 참)
- __seed__: 데이터 셔플링과 변형에 사용할 선택적 난수 시드.
- __save_to_dir__: None 혹은 문자열 (디폴트 값: None).이는 디렉토리를 선택적으로 지정해서
생성된 증강 사진을 저장할 수 있도록 해줍니다. (현재 작업을 시각화하는데 유용합니다).
- __save_prefix__: 문자열. 저장된 사진의 파일 이름에 사용할 접두부호 (`save_to_dir`이 설정된 경우에만 유의미합니다).
- __save_format__: "png"와 "jpeg" 중 하나 (`save_to_dir`이 설정된 경우에만 유의미합니다). 디폴트 값: "png".
- __follow_links__: 클래스 하위 디렉토리 내 심볼릭 링크를 따라갈지 여부 (디폴트 값: 거짓).
- __subset__: `ImageDataGenerator`에 `validation_split`이 설정된 경우 데이터의 부분집합 (`"training"` or `"validation"`).
- __interpolation__: Interpolation method used to resample the image if the target size is different from that of the loaded image. 지원되는 메서드로는 `"nearest"`, `"bilinear"`, 그리고 `"bicubic"`이 있습니다.
PIL 버전 1.1.3 이상이 설치된 경우, `"lanczos"`도 지원됩니다. PIL 버전 3.4.0 이상이 설치된 경우, `"box"`와 `"hamming"` 또한 지원됩니다. 디폴트 값으로 `"nearest"`가 사용됩니다.
- __validate_filenames__: Boolean, whether to validate image filenames in `x_col`. If `True`, invalid images will be ignored. Disabling this option can lead to speed-up in the execution of this function. Default: `True`.
__반환값__
`(x, y)` 튜플을 만들어내는 A `DataFrameIterator` 여기서 `x`는 `(배치 크기, *표적 크기, 채널)` 형태의
이미지 배치로 구성된 numpy 배열이고 `y`는 그에 상응하는 라벨로 이루어진 numpy 배열입니다.
---
### flow_from_directory
```python
flow_from_directory(directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest')
```
디렉토리에의 경로를 전달받아 증강된 데이터의 배치를 생성합니다.
__인수__
- __directory__: string, 표적 디렉토리에의 경로.
반드시 한 클래스 당 하나의 하위 디렉토리가 있어야 합니다.
각 하위 디렉토리 내에 위치한
어떤 PNG, JPG, BMP, PPM 혹은 TIF 이미지도
생성자에 포함됩니다.
세부사항은 [이 스크립트](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
를 참조하십시오.
- __target_size__: 정수 튜플 `(높이, 넓이)`,
디폴트 값: `(256, 256)`.
모든 이미지의 크기를 재조정할 치수.
- __color_mode__: "grayscale", "rgb", "rgba" 중 하나. 디폴트 값: "rgb".
변환될 이미지가
1개, 3개, 혹은 4개의 채널을 가질지 여부.
- __classes__: 클래스 하위 디렉토리의 선택적 리스트
(예. `['dogs', 'cats']`). 디폴트 값: None.
특별히 값을 지정하지 않으면, 각 하위 디렉토리를 각기 다른 클래스로
대하는 방식으로 클래스의 리스트가
`directory` 내 하위 디렉토리의 이름/구조에서
자동으로 유추됩니다
(그리고 라벨 색인에 대응되는 클래스의 순서는
영숫자 순서를 따릅니다).
`class_indices` 속성을 통해서 클래스 이름과 클래스 색인 간
매핑을 담은 딕셔너리를 얻을 수 있습니다.
- __class_mode__: "categorical", "binary", "sparse",
"input", 혹은 None 중 하나. 디폴트 값: "categorical".
반환될 라벨 배열의 종류를 결정합니다:
- "categorical"은 2D형태의 원-핫 인코딩된 라벨입니다,
- "binary"는 1D 형태의 이진 라벨입니다,
"sparse"는 1D 형태의 정수 라벨입니다,
- "input"은 인풋 이미지와
동일한 이미지입니다 (주로 자동 인코더와 함께 사용합니다).
- None의 경우, 어떤 라벨도 반환되지 않습니다
(생성자가 이미지 데이터의 배치만 만들기 때문에,
`model.predict_generator()`을 사용하는 것이 유용합니다).
class_mode가 None일 경우,
제대로 작동하려면 데이터가 `directory` 내 하위 디렉토리에
위치해야 한다는 점을 유의하십시오.
- __batch_size__: 데이터 배치의 크기 (디폴트 값: 32).
- __shuffle__: 데이터를 뒤섞을지 여부 (디폴트 값: 참)
If set to False, sorts the data in alphanumeric order.
- __seed__: 데이터 셔플링과 변형에 사용할 선택적 난수 시드.
- __save_to_dir__: None 혹은 문자열 (디폴트 값: None).
이는 디렉토리를 선택적으로 지정해서
생성된 증강 사진을
저장할 수 있도록 해줍니다
(현재 작업을 시각화하는데 유용합니다).
- __save_prefix__: 문자열. 저장된 사진의 파일 이름에 사용할 접두부호
(`save_to_dir`이 설정된 경우에만 유의미합니다).
- __save_format__: "png"와 "jpeg" 중 하나
(`save_to_dir`이 설정된 경우에만 유의미합니다). 디폴트 값: "png".
- __follow_links__: 클래스 하위 디렉토리 내 심볼릭 링크를
따라갈지 여부 (디폴트 값: 거짓).
- __subset__: `ImageDataGenerator`에 `validation_split`이 설정된 경우
데이터의 부분집합 (`"training"` or `"validation"`).
- __interpolation__: 로드된 이미지의 크기와
표적 크기가 다른 경우
이미지를 다시 샘플링하는 보간법 메서드.
지원되는 메서드로는 `"nearest"`, `"bilinear"`,
그리고 `"bicubic"`이 있습니다.
PIL 버전 1.1.3 이상이 설치된 경우 `"lanczos"`도
지원됩니다. PIL 버전 3.4.0 이상이 설치된 경우는,
`"box"`와 `"hamming"` 또한 지원됩니다.
디폴트 값으로 `"nearest"`가 사용됩니다.
__반환값__
`(x, y)` 튜플을 만들어내는 `DirectoryIterator`
여기서 `x`는 `(배치 크기, *표적 크기, 채널)`의 형태의
이미지 배치로 구성된 numpy 배열이고
`y`는 그에 대응하는 라벨로 이루어진 numpy 배열입니다.
---
### get_random_transform
```python
get_random_transform(img_shape, seed=None)
```
변형에 대한 무작위 매개변수를 생성합니다.
__인수__
- __seed__: 난수 시드.
- __img_shape__: 정수 튜플.
변형할 이미지의 형태입니다.
__반환값__
변형을 묘사하는 무작위 선정된 매개변수를 포함한
딕셔너리.
---
### random_transform
```python
random_transform(x, seed=None)
```
이미지에 무작위 변형을 적용합니다.
__인수__
- __x__: 3D 텐서, 단일 이미지.
- __seed__: 난수 시드.
__반환값__
인풋이 무작위로 변형된 버전 (인풋과 동일한 형태 유지).
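__예시__
아래는 단일 이미지에 무작위 변형을 적용하는 최소한의 예시입니다(변형 매개변수 값은 설명을 위한 가정입니다).
```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(rotation_range=30, horizontal_flip=True)
x = np.random.rand(64, 64, 3)  # 단일 이미지를 나타내는 3D 텐서
augmented = datagen.random_transform(x, seed=42)
# 같은 작업을 두 단계로 명시적으로 수행할 수도 있습니다:
params = datagen.get_random_transform(x.shape, seed=42)
augmented_2 = datagen.apply_transform(x, params)
```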
---
### standardize
```python
standardize(x)
```
인풋의 배치에 정규화 구성을 적용합니다.
`x` is changed in-place since the function is mainly used internally to standardize images and feed them to your network. If a copy of `x` would be created instead it would have a significant performance cost. If you want to apply this method without changing the input in-place you can call the method creating a copy before:
standardize(np.copy(x))
__인수__
- __x__: 정규화할 인풋의 배치.
__반환값__
정규화된 인풋.
| keras-docs-ko/sources/preprocessing/image.md/0 | {
"file_path": "keras-docs-ko/sources/preprocessing/image.md",
"repo_id": "keras-docs-ko",
"token_count": 16418
} | 85 |
# 在 CIFAR10 小型图像数据集上训练一个深度卷积神经网络。
在 25 轮迭代后,验证集准确率达到 75%,在 50 轮后达到 79%。
(尽管目前仍然欠拟合)。
```python
from __future__ import print_function
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
import os
batch_size = 32
num_classes = 10
epochs = 100
data_augmentation = True
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
# 数据,切分为训练和测试集。
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# 将类向量转换为二进制类矩阵。
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# 初始化 RMSprop 优化器。
opt = keras.optimizers.RMSprop(learning_rate=0.0001, decay=1e-6)
# 利用 RMSprop 来训练模型。
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
    # 这一步将进行预处理和实时数据增强 (data augmentation):
datagen = ImageDataGenerator(
featurewise_center=False, # 将整个数据集的均值设为0
samplewise_center=False, # 将每个样本的均值设为0
featurewise_std_normalization=False, # 将输入除以整个数据集的标准差
samplewise_std_normalization=False, # 将输入除以其标准差
zca_whitening=False, # 运用 ZCA 白化
zca_epsilon=1e-06, # ZCA 白化的 epsilon值
rotation_range=0, # 随机旋转图像范围 (角度, 0 to 180)
# 随机水平移动图像 (总宽度的百分比)
width_shift_range=0.1,
# 随机垂直移动图像 (总高度的百分比)
height_shift_range=0.1,
shear_range=0., # 设置随机裁剪范围
zoom_range=0., # 设置随机放大范围
channel_shift_range=0., # 设置随机通道切换的范围
# 设置填充输入边界之外的点的模式
fill_mode='nearest',
cval=0., # 在 fill_mode = "constant" 时使用的值
horizontal_flip=True, # 随机水平翻转图像
vertical_flip=False, # 随机垂直翻转图像
# 设置缩放因子 (在其他转换之前使用)
rescale=None,
# 设置将应用于每一个输入的函数
preprocessing_function=None,
# 图像数据格式,"channels_first" 或 "channels_last" 之一
data_format=None,
# 保留用于验证的图像比例(严格在0和1之间)
validation_split=0.0)
    # 计算特征标准化所需的统计量
# (如果应用 ZCA 白化,则为 std,mean和主成分).
datagen.fit(x_train)
# 利用由 datagen.flow() 生成的批来训练模型
model.fit_generator(datagen.flow(x_train, y_train,
batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
workers=4)
# 保存模型和权重
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
# 评估训练模型
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
```
| keras-docs-zh/sources/examples/cifar10_cnn.md/0 | {
"file_path": "keras-docs-zh/sources/examples/cifar10_cnn.md",
"repo_id": "keras-docs-zh",
"token_count": 2567
} | 86 |
# 从尼采作品生成文本的示例脚本。
生成的文本开始听起来连贯之前,至少需要 20 个轮次。
建议在 GPU 上运行此脚本,因为循环网络的计算量很大。
如果在新数据上尝试使用此脚本,请确保您的语料库至少包含约 10 万个字符。〜1M 更好。
```python
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import io
path = get_file(
'nietzsche.txt',
origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
with io.open(path, encoding='utf-8') as f:
text = f.read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# 以 maxlen 字符的半冗余序列剪切文本
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# 建立模型:单个 LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars), activation='softmax'))
optimizer = RMSprop(learning_rate=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# 辅助函数从概率数组中采样索引
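    # temperature 用于重新缩放对数概率:temperature < 1 使分布更尖锐(采样更保守),
    # temperature > 1 使分布更平坦(采样更随机、更多样)。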
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def on_epoch_end(epoch, _):
# 在每个轮次结束时调用的函数。 打印生成的文本。
print()
print('----- Generating text after Epoch: %d' % epoch)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
preds = model.predict(x_pred, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
model.fit(x, y,
batch_size=128,
epochs=60,
callbacks=[print_callback])
``` | keras-docs-zh/sources/examples/lstm_text_generation.md/0 | {
"file_path": "keras-docs-zh/sources/examples/lstm_text_generation.md",
"repo_id": "keras-docs-zh",
"token_count": 1594
} | 87 |
# 将自规范化 MLP 与常规 MLP 进行比较。
在路透社新闻主题分类任务上,使用两种不同的激活函数(RELU 与 SELU)比较简单 MLP 的性能(SELU 的定义见下方示意)。
# 参考文献
- Klambauer, G., Unterthiner, T., Mayr, A., & Hochreiter, S. (2017).
[Self-Normalizing Neural Networks. arXiv preprint arXiv:1706.02515.](https://arxiv.org/abs/1706.02515)
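下面给出 SELU 激活函数的一个简要示意(仅作参考,按上述论文中的标准定义,常数为近似值;在 Keras 中可直接使用 `activation='selu'`):
```python
import numpy as np

# SELU(x) = scale * x                        当 x > 0
#         = scale * alpha * (exp(x) - 1)     当 x <= 0
_ALPHA = 1.6732632423543772
_SCALE = 1.0507009873554805

def selu(x):
    return _SCALE * np.where(x > 0, x, _ALPHA * (np.exp(x) - 1))

print(selu(np.array([-2.0, 0.0, 2.0])))
```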
```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers.noise import AlphaDropout
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 16
epochs = 40
plot = True
def create_network(n_dense=6,
dense_units=16,
activation='selu',
dropout=AlphaDropout,
dropout_rate=0.1,
kernel_initializer='lecun_normal',
optimizer='adam',
num_classes=1,
max_words=max_words):
"""泛型函数可创建完全连接的神经网络。
# 参数
n_dense: int > 0. 全连接层数。
dense_units: int > 0. 每层的全连接单元数。
dropout: keras.layers.Layer. 要应用的 dropout 层。
dropout_rate: 0 <= float <= 1. dropout 率。
kernel_initializer: str. 权重的初始化程序。
optimizer: str/keras.optimizers.Optimizer. 要使用的优化程序。
num_classes: int > 0. 要预测的类数。
max_words: int > 0. 每个数据点的最大字数。
# 返回
Keras 模型实例(已编译)。
"""
model = Sequential()
model.add(Dense(dense_units, input_shape=(max_words,),
kernel_initializer=kernel_initializer))
model.add(Activation(activation))
model.add(dropout(dropout_rate))
for i in range(n_dense - 1):
model.add(Dense(dense_units, kernel_initializer=kernel_initializer))
model.add(Activation(activation))
model.add(dropout(dropout_rate))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
network1 = {
'n_dense': 6,
'dense_units': 16,
'activation': 'relu',
'dropout': Dropout,
'dropout_rate': 0.5,
'kernel_initializer': 'glorot_uniform',
'optimizer': 'sgd'
}
network2 = {
'n_dense': 6,
'dense_units': 16,
'activation': 'selu',
'dropout': AlphaDropout,
'dropout_rate': 0.1,
'kernel_initializer': 'lecun_normal',
'optimizer': 'sgd'
}
print('Loading data...')
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words,
test_split=0.2)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Convert class vector to binary class matrix '
'(for use with categorical_crossentropy)')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
print('\nBuilding network 1...')
model1 = create_network(num_classes=num_classes, **network1)
history_model1 = model1.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
score_model1 = model1.evaluate(x_test,
y_test,
batch_size=batch_size,
verbose=1)
print('\nBuilding network 2...')
model2 = create_network(num_classes=num_classes, **network2)
history_model2 = model2.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
score_model2 = model2.evaluate(x_test,
y_test,
batch_size=batch_size,
verbose=1)
print('\nNetwork 1 results')
print('Hyperparameters:', network1)
print('Test score:', score_model1[0])
print('Test accuracy:', score_model1[1])
print('Network 2 results')
print('Hyperparameters:', network2)
print('Test score:', score_model2[0])
print('Test accuracy:', score_model2[1])
plt.plot(range(epochs),
history_model1.history['val_loss'],
'g-',
label='Network 1 Val Loss')
plt.plot(range(epochs),
history_model2.history['val_loss'],
'r-',
label='Network 2 Val Loss')
plt.plot(range(epochs),
history_model1.history['loss'],
'g--',
label='Network 1 Loss')
plt.plot(range(epochs),
history_model2.history['loss'],
'r--',
label='Network 2 Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('comparison_of_networks.png')
``` | keras-docs-zh/sources/examples/reuters_mlp_relu_vs_selu.md/0 | {
"file_path": "keras-docs-zh/sources/examples/reuters_mlp_relu_vs_selu.md",
"repo_id": "keras-docs-zh",
"token_count": 2857
} | 88 |
# 关于 Keras 网络层
所有 Keras 网络层都有很多共同的函数:
- `layer.get_weights()`: 以含有Numpy矩阵的列表形式返回层的权重。
- `layer.set_weights(weights)`: 从含有Numpy矩阵的列表中设置层的权重(与`get_weights`的输出形状相同)。
- `layer.get_config()`: 返回包含层配置的字典。该层可以通过其配置按以下方式重新实例化:
```python
layer = Dense(32)
config = layer.get_config()
reconstructed_layer = Dense.from_config(config)
```
或:
```python
from keras import layers
config = layer.get_config()
layer = layers.deserialize({'class_name': layer.__class__.__name__,
'config': config})
```
如果一个层具有单个节点 (i.e. 如果它不是共享层), 你可以得到它的输入张量、输出张量、输入尺寸和输出尺寸:
- `layer.input`
- `layer.output`
- `layer.input_shape`
- `layer.output_shape`
如果层有多个节点 (参见: [层节点和共享层的概念](/getting-started/functional-api-guide/#the-concept-of-layer-node)), 您可以使用以下函数:
- `layer.get_input_at(node_index)`
- `layer.get_output_at(node_index)`
- `layer.get_input_shape_at(node_index)`
- `layer.get_output_shape_at(node_index)`
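下面是一个简要示意(使用函数式 API 构建的共享层,层与变量名均为假设):
```python
from keras.layers import Input, Dense
from keras.models import Model

shared_dense = Dense(8)   # 该层将被两个输入共享
a = Input(shape=(4,))
b = Input(shape=(4,))
ya = shared_dense(a)      # 节点 0
yb = shared_dense(b)      # 节点 1
model = Model(inputs=[a, b], outputs=[ya, yb])

# 共享层有多个节点,单节点属性(如 layer.output)不可用,
# 需要按节点索引访问:
print(shared_dense.get_output_at(0))
print(shared_dense.get_output_at(1))
print(shared_dense.get_output_shape_at(0))
```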
| keras-docs-zh/sources/layers/about-keras-layers.md/0 | {
"file_path": "keras-docs-zh/sources/layers/about-keras-layers.md",
"repo_id": "keras-docs-zh",
"token_count": 677
} | 89 |
# Model 类(函数式 API)
在函数式 API 中,给定一些输入张量和输出张量,可以通过以下方式实例化一个 `Model`:
```python
from keras.models import Model
from keras.layers import Input, Dense
a = Input(shape=(32,))
b = Dense(32)(a)
model = Model(inputs=a, outputs=b)
```
这个模型将包含从 `a` 到 `b` 的计算的所有网络层。
在多输入或多输出模型的情况下,你也可以使用列表:
```python
model = Model(inputs=[a1, a2], outputs=[b1, b2, b3])
```
有关 `Model` 的详细介绍,请阅读 [Keras 函数式 API 指引](/getting-started/functional-api-guide)。
## Model 类模型方法
### compile
```python
compile(optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None)
```
用于配置训练模型。
__参数__
- __optimizer__: 字符串(优化器名)或者优化器实例。
详见 [optimizers](/optimizers)。
- __loss__: 字符串(目标函数名)或目标函数或 `Loss` 实例。
详见 [losses](/losses)。
如果模型具有多个输出,则可以通过传递损失函数的字典或列表,在每个输出上使用不同的损失。
模型将最小化的损失值将是所有单个损失的总和。
- __metrics__: 在训练和测试期间的模型评估标准。
通常你会使用 `metrics = ['accuracy']`。要为多输出模型的不同输出指定不同的评估标准,
还可以传递一个字典,如 `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`。
你也可以传递一个评估指标序列的序列 (len = len(outputs)) 例如 `metrics=[['accuracy'], ['accuracy', 'mse']]`
或 `metrics=['accuracy', ['accuracy', 'mse']]`。
- __loss_weights__: 可选的指定标量系数(Python 浮点数)的列表或字典,
用以衡量损失函数对不同的模型输出的贡献。
模型将最小化的误差值是由 `loss_weights` 系数加权的*加权总和*误差。
如果是列表,那么它应该是与模型输出相对应的 1:1 映射。
如果是字典,那么应该把输出的名称(字符串)映到标量系数。
- __sample_weight_mode__: 如果你需要执行按时间步采样权重(2D 权重),请将其设置为 `temporal`。
默认为 `None`,为采样权重(1D)。
如果模型有多个输出,则可以通过传递 mode 的字典或列表,以在每个输出上使用不同的 `sample_weight_mode`。
- __weighted_metrics__: 在训练和测试期间,由 sample_weight 或 class_weight 评估和加权的度量标准列表。
- __target_tensors__: 默认情况下,Keras 将为模型的目标创建一个占位符,在训练过程中将使用目标数据。
相反,如果你想使用自己的目标张量(反过来说,Keras 在训练期间不会载入这些目标张量的外部 Numpy 数据),
您可以通过 `target_tensors` 参数指定它们。
它可以是单个张量(单输出模型),张量列表,或一个映射输出名称到目标张量的字典。
- __**kwargs__: 当使用 Theano/CNTK 后端时,这些参数被传入 `K.function`。
当使用 TensorFlow 后端时,这些参数被传递到 `tf.Session.run`。
__异常__
- __ValueError__: 如果 `optimizer`, `loss`, `metrics` 或 `sample_weight_mode` 这些参数不合法。
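针对上面描述的多输出损失、损失权重与评估标准的字典用法,下面给出一个简要示意(层名与形状均为假设):
```python
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(32,))
x = Dense(64, activation='relu')(inputs)
out_a = Dense(10, activation='softmax', name='out_a')(x)
out_b = Dense(1, name='out_b')(x)
model = Model(inputs=inputs, outputs=[out_a, out_b])

model.compile(optimizer='rmsprop',
              loss={'out_a': 'categorical_crossentropy', 'out_b': 'mse'},
              loss_weights={'out_a': 1.0, 'out_b': 0.5},
              metrics={'out_a': 'accuracy', 'out_b': ['mae']})
```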
----
### fit
```python
fit(x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1, use_multiprocessing=False)
```
以固定数量的轮次(数据集上的迭代)训练模型。
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __y__: 目标数据。与输入数据 `x` 类似,它可以是 Numpy 数组(序列)、
本地框架张量(序列)、Numpy数组序列(如果模型有多个输出)
或 None(默认)如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
如果模型输出层已命名,你也可以传递一个名称匹配 Numpy 数组的字典。
如果 `x` 是一个生成器,或 `keras.utils.Sequence` 实例,则不应该
指定 `y`(因为目标可以从 `x` 获得)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `Sequence` 实例形式,不要指定 `batch_size`,
因为它们会生成批次。
- __epochs__: 整数。训练模型迭代轮次。一个轮次是在整个 `x` 和 `y` 上的一轮迭代。
请注意,与 `initial_epoch` 一起,`epochs` 被理解为 「最终轮次」。
模型并不是训练了 `epochs` 轮,而是到第 `epochs` 轮停止训练。
- __verbose__: 整数,0, 1 或 2。日志显示模式。
0 = 安静模式, 1 = 进度条, 2 = 每轮一行。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在训练和验证(如果有)时使用的回调函数。
详见 [callbacks](/callbacks)。
- __validation_split__: 0 和 1 之间的浮点数。用作验证集的训练数据的比例。
模型将分出一部分不会被训练的验证数据,并将在每一轮结束时评估这些验证数据的误差和任何其他模型指标。
验证数据选自混洗之前的 `x` 和 `y` 数据的最后一部分样本。
这个参数在 `x` 是生成器或 `Sequence` 实例时不支持。
- __validation_data__: 用于在每个轮次结束后评估损失和任意指标的数据。
模型不会在这个数据上训练。`validation_data` 会覆盖 `validation_split`。
`validation_data` 可以是:
- 元组 `(x_val, y_val)` 或 Numpy 数组或张量
- 元组 `(x_val, y_val, val_sample_weights)` 或 Numpy 数组。
- 数据集或数据集迭代器。
对于前两种情况,必须提供 `batch_size`。
对于最后一种情况,必须提供 `validation_steps`。
- __shuffle__: 布尔值(是否在每轮迭代之前混洗数据)或者 字符串 (`batch`)。
`batch` 是处理 HDF5 数据限制的特殊选项,它对一个 batch 内部的数据进行混洗。
当 `steps_per_epoch` 非 `None` 时,这个参数无效。
- __class_weight__: 可选的字典,用来映射类索引(整数)到权重(浮点)值,用于加权损失函数(仅在训练期间)。
这可能有助于告诉模型「更多关注」来自代表性不足的类的样本。
- __sample_weight__: 训练样本的可选 Numpy 权重数组,用于对损失函数进行加权(仅在训练期间)。
你可以传递与输入样本长度相同的平坦(1D)Numpy 数组(权重和样本之间的 1:1 映射),
或者在时序数据的情况下,可以传递尺寸为 `(samples, sequence_length)` 的 2D 数组,以对每个样本的每个时间步施加不同的权重。
在这种情况下,你应该确保在 `compile()` 中指定 `sample_weight_mode="temporal"`。
这个参数在 `x` 是生成器或 `Sequence` 实例时不支持,应该提供 sample_weights 作为 `x` 的第 3 元素。
- __initial_epoch__: 整数。开始训练的轮次(有助于恢复之前的训练)。
- __steps_per_epoch__: 整数或 `None`。
在声明一个轮次完成并开始下一个轮次之前的总步数(样品批次)。
使用 TensorFlow 数据张量等输入张量进行训练时,默认值 `None` 等于数据集中样本的数量除以 batch 的大小,如果无法确定,则为 1。
- __validation_steps__: 只有在提供了 `validation_data` 并且它是一个生成器时才有用。
表示在每个轮次结束时执行验证时,在停止之前要执行的步骤总数(样本批次)。
- __validation_freq__: 只有在提供了验证数据时才有用。整数或列表/元组/集合。
如果是整数,指定在新的验证执行之前要执行多少次训练,例如,`validation_freq=2` 在每 2 轮训练后执行验证。
如果是列表、元组或集合,指定执行验证的轮次,例如,`validation_freq=[1, 2, 10]` 表示在第 1、2、10 轮训练后执行验证。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该向生成器传递不可序列化(non-picklable)的参数,因为它们不能轻松地传递给子进程。
- __**kwargs__: 用于向后兼容。
__返回__
一个 `History` 对象。其 `History.history` 属性是连续 epoch 训练损失和评估值,以及验证集损失和评估值的记录(如果适用)。
__异常__
- __RuntimeError__: 如果模型从未编译。
- __ValueError__: 在提供的输入数据与模型期望的不匹配的情况下。
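下面是一个简要示意(使用随机数据,模型结构为假设),演示 `validation_split` 以及返回的 `History` 对象:
```python
import numpy as np
import keras
from keras.layers import Input, Dense
from keras.models import Model

inputs = Input(shape=(32,))
outputs = Dense(10, activation='softmax')(inputs)
model = Model(inputs, outputs)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

x = np.random.random((1000, 32))
y = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)

history = model.fit(x, y,
                    batch_size=32,
                    epochs=5,
                    validation_split=0.2,
                    shuffle=True)
print(history.history['val_loss'])
```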
----
### evaluate
```python
evaluate(x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
```
在测试模式下返回模型的误差值和评估标准值。
计算是分批进行的。
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __y__: 目标数据。与输入数据 `x` 类似,它可以是 Numpy 数组(序列)、
本地框架张量(序列)、Numpy数组序列(如果模型有多个输出)
或 None(默认)如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
如果模型输出层已命名,你也可以传递一个名称匹配 Numpy 数组的字典。
如果 `x` 是一个生成器,或 `keras.utils.Sequence` 实例,则不应该
指定 `y`(因为目标可以从 `x` 获得)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `keras.utils.Sequence` 实例形式,
不要指定 `batch_size`,因为它们会生成批次。
- __verbose__: 0, 1。日志显示模式。0 = 安静模式, 1 = 进度条。
- __sample_weight__: 训练样本的可选 Numpy 权重数组,用于对损失函数进行加权。
你可以传递与输入样本长度相同的平坦(1D)Numpy 数组(权重和样本之间的 1:1 映射),
或者在时序数据的情况下,可以传递尺寸为 `(samples, sequence_length)` 的 2D 数组,以对每个样本的每个时间步施加不同的权重。
在这种情况下,你应该确保在 `compile()` 中指定 `sample_weight_mode="temporal"`。
- __steps__: 整数或 `None`。
声明评估结束之前的总步数(批次样本)。默认值 `None` 时被忽略。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在评估时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该向生成器传递不可序列化(non-picklable)的参数,因为它们不能轻松地传递给子进程。
__异常__
- __ValueError__: 如果参数错误。
__返回__
标量测试误差(如果模型只有一个输出且没有评估标准)
或标量列表(如果模型具有多个输出 和/或 评估指标)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
----
### predict
```python
predict(x, batch_size=None, verbose=0, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False)
```
为输入样本生成输出预测。
计算是分批进行的
__参数__
- __x__: 输入数据。可以是:
- 一个 Numpy 数组(或类数组),或者数组的序列(如果模型有多个输入)。
- 一个将名称匹配到对应数组/张量的字典,如果模型具有命名输入。
- 一个返回 `(inputs, targets)` 或 `(inputs, targets, sample weights)`
的生成器或 `keras.utils.Sequence`。
- None(默认),如果从本地框架张量馈送(例如 TensorFlow 数据张量)。
- __batch_size__: 整数或 `None`。每次梯度更新的样本数。如果未指定,默认为 32。
如果你的数据是符号张量、生成器或 `keras.utils.Sequence` 实例形式,
不要指定 `batch_size`,因为它们会生成批次。
- __verbose__: 日志显示模式,0 或 1。
- __steps__: 整数或 `None`。
声明评估结束之前的总步数(批次样本)。默认值 `None` 时被忽略。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在预测时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该向生成器传递不可序列化(non-picklable)的参数,因为它们不能轻松地传递给子进程。
__返回__
预测的 Numpy 数组(或数组列表)。
__异常__
- __ValueError__: 在提供的输入数据与模型期望的不匹配的情况下,
或者在有状态的模型接收到的样本不是 batch size 的倍数的情况下。
----
### train_on_batch
```python
train_on_batch(x, y, sample_weight=None, class_weight=None, reset_metrics=True)
```
运行一批样品的单次梯度更新。
__参数__
- __x__: 训练数据的 Numpy 数组(如果模型只有一个输入),
或者是 Numpy 数组的列表(如果模型有多个输入)。
如果模型中的输入层被命名,你也可以传递一个字典,将输入层名称映射到 Numpy 数组。
- __y__: 目标(标签)数据的 Numpy 数组,或 Numpy 数组的列表(如果模型具有多个输出)。
如果模型中的输出层被命名,你也可以传递一个字典,将输出层名称映射到 Numpy 数组。
- __sample_weight__: 可选数组,与 x 长度相同,包含应用到模型损失函数的每个样本的权重。
如果是时域数据,你可以传递一个尺寸为 (samples, sequence_length) 的 2D 数组,
为每一个样本的每一个时间步应用不同的权重。
在这种情况下,你应该在 `compile()` 中指定 `sample_weight_mode="temporal"`。
- __class_weight__: 可选的字典,用来映射类索引(整数)到权重(浮点)值,以在训练时对模型的损失函数加权。
这可能有助于告诉模型 「更多关注」来自代表性不足的类的样本。
- __reset_metrics__: 如果为 `True`,返回的指标仅适用于该批次。
如果为 `False`,则指标将在批次之间有状态地累积。
__返回__
标量训练误差(如果模型只有一个输出且没有评估标准),
或者标量的列表(如果模型有多个输出 和/或 评估标准)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
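下面是一个简要示意(假设 `model` 为已编译且指定了 `metrics` 的模型,`train_batches` 为可迭代的 `(x_batch, y_batch)` 批次序列),演示手动训练循环:
```python
for epoch in range(3):
    for x_batch, y_batch in train_batches:
        metrics_values = model.train_on_batch(x_batch, y_batch)
    # metrics_values 中的各项与 model.metrics_names 一一对应
    print(epoch, dict(zip(model.metrics_names, metrics_values)))
```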
----
### test_on_batch
```python
test_on_batch(x, y, sample_weight=None, reset_metrics=True)
```
在一批样本上测试模型。
__参数__
- __x__: 测试数据的 Numpy 数组(如果模型只有一个输入),
或者是 Numpy 数组的列表(如果模型有多个输入)。
如果模型中的输入层被命名,你也可以传递一个字典,将输入层名称映射到 Numpy 数组。
- __y__: 目标(标签)数据的 Numpy 数组,或 Numpy 数组的列表(如果模型具有多个输出)。
如果模型中的输出层被命名,你也可以传递一个字典,将输出层名称映射到 Numpy 数组。
- __sample_weight__: 可选数组,与 x 长度相同,包含应用到模型损失函数的每个样本的权重。
如果是时域数据,你可以传递一个尺寸为 (samples, sequence_length) 的 2D 数组,
为每一个样本的每一个时间步应用不同的权重。
- __reset_metrics__: 如果为 `True`,返回的指标仅适用于该批次。
如果为 `False`,则指标将在批次之间有状态地累积。
__返回__
标量测试误差(如果模型只有一个输出且没有评估标准),
或者标量的列表(如果模型有多个输出 和/或 评估标准)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
----
### predict_on_batch
```python
predict_on_batch(x)
```
返回一批样本的模型预测值。
__参数__
- __x__: 输入数据,Numpy 数组。
__返回__
预测值的 Numpy 数组(或数组列表)。
----
### fit_generator
```python
fit_generator(generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, validation_freq=1, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
```
使用 Python 生成器(或 `Sequence` 实例)逐批生成的数据,按批次训练模型。
生成器与模型并行运行,以提高效率。
例如,这可以让你在 CPU 上对图像进行实时数据增强,以在 GPU 上训练模型。
`keras.utils.Sequence` 的使用可以保证数据的顺序,
以及当 `use_multiprocessing=True` 时 ,保证每个输入在每个 epoch 只使用一次。
__参数__
- __generator__: 一个生成器,或者一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,
以在使用多进程时避免数据的重复。
生成器的输出应该为以下之一:
- 一个 `(inputs, targets)` 元组
- 一个 `(inputs, targets, sample_weights)` 元组。
这个元组(生成器的单个输出)组成了单个的 batch。
因此,这个元组中的所有数组长度必须相同(与这一个 batch 的大小相等)。
不同的 batch 可能大小不同。
例如,一个 epoch 的最后一个 batch 往往比其他 batch 要小,
如果数据集的尺寸不能被 batch size 整除。
生成器将无限地在数据集上循环。当运行完 `steps_per_epoch` 步时,记一个 epoch 结束。
- __steps_per_epoch__: 在声明一个 epoch 完成并开始下一个 epoch
之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于 `ceil(num_samples / batch_size)`。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __epochs__: 整数。训练模型的迭代总轮数。一个 epoch 是对所提供的整个数据的一轮迭代,
如 `steps_per_epoch` 所定义。注意,与 `initial_epoch` 一起使用,
epoch 应被理解为「最后一轮」。模型没有经历由 `epochs` 给出的多次迭代的训练,
而仅仅是直到达到索引 `epoch` 的轮次。
- __verbose__: 整数,0, 1 或 2。日志显示模式。
0 = 安静模式, 1 = 进度条, 2 = 每轮一行。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在训练时使用的回调函数。
详见 [callbacks](/callbacks)。
- __validation_data__: 它可以是以下之一:
- 验证数据的生成器或 `Sequence` 实例
- `(inputs, targets)` 元组
- `(inputs, targets, sample_weights)` 元组。
在每个 epoch 结束时评估损失和任何模型指标。该模型不会对此数据进行训练。
- __validation_steps__: 仅当 `validation_data` 是一个生成器时才可用。
表示在每一轮迭代末尾停止前从 `validation_data` 生成器生成的总步数(样本批次)。
它应该等于由 batch size 分割的验证数据集的样本数。
对于 `Sequence` 它是可选的:若未指定,将会使用 `len(validation_data)` 作为步数。
- __validation_freq__: 只有在提供了验证数据时才有用。整数或 `collections.Container` 实例(例如列表、元组等)。
如果是整数,指定在新的验证执行之前要执行多少次训练,例如,`validation_freq=2` 在每 2 轮训练后执行验证。
如果是 Container,指定执行验证的轮次,例如,`validation_freq=[1, 2, 10]` 表示在第 1、2、10 轮训练后执行验证。
- __class_weight__: 可选的将类索引(整数)映射到权重(浮点)值的字典,用于加权损失函数(仅在训练期间)。
这可以用来告诉模型「更多地关注」来自代表性不足的类的样本。
- __max_queue_size__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
生成器队列的最大尺寸。若未指定,`max_queue_size` 将默认为 10。
- __workers__: 整数。仅用于生成器或 `keras.utils.Sequence` 输入。
当使用基于进程的多线程时的最大进程数。若未指定,`workers` 将默认为 1。若为 0,将在主线程执行生成器。
- __use_multiprocessing__: 布尔值。仅用于生成器或 `keras.utils.Sequence` 输入。
如果是 `True`,使用基于进程的多线程。若未指定,`use_multiprocessing` 将默认为 `False`。
注意由于这个实现依赖于 multiprocessing,你不应该向生成器传递不可序列化(non-picklable)的参数,因为它们不能轻松地传递给子进程。
- __shuffle__: 布尔值。是否在每轮迭代之前打乱 batch 的顺序。
只能与 `Sequence` (`keras.utils.Sequence`) 实例同用。
当 `steps_per_epoch` 非 `None` 时,这个参数无效。
- __initial_epoch__: 整数。开始训练的轮次(有助于恢复之前的训练)。
__返回__
一个 `History` 对象。其 `History.history` 属性是连续 epoch 训练损失和评估值,以及验证集损失和评估值的记录(如果适用)。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
__示例__
```python
def generate_arrays_from_file(path):
while True:
with open(path) as f:
for line in f:
# 从文件中的每一行生成输入数据和标签的 numpy 数组,
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
steps_per_epoch=10000, epochs=10)
```
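也可以使用 `keras.utils.Sequence` 代替生成器(简要示意,其中 `x_train`、`y_train`、`model` 为假设的已有对象):
```python
import numpy as np
from keras.utils import Sequence

class MySequence(Sequence):
    def __init__(self, x_set, y_set, batch_size):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size

    def __len__(self):
        return int(np.ceil(len(self.x) / float(self.batch_size)))

    def __getitem__(self, idx):
        i = idx * self.batch_size
        return self.x[i:i + self.batch_size], self.y[i:i + self.batch_size]

model.fit_generator(MySequence(x_train, y_train, batch_size=32),
                    epochs=10, workers=4, use_multiprocessing=True)
```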
----
### evaluate_generator
```python
evaluate_generator(generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
```
在数据生成器上评估模型。
这个生成器应该返回与 `test_on_batch` 所接收的同样的数据。
__参数__
- __generator__: 一个生成 `(inputs, targets)` 或 `(inputs, targets, sample_weights)` 的生成器,
或一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,以避免在使用多进程时数据的重复。
- __steps__: 在声明一个 epoch 完成并开始下一个 epoch 之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于你的数据集的样本数量除以批量大小。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在评估时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 生成器队列的最大尺寸。
- __workers__: 整数。使用的最大进程数量,如果使用基于进程的多线程。
如未指定,`workers` 将默认为 1。如果为 0,将在主线程上执行生成器。
- __use_multiprocessing__: 布尔值。如果 True,则使用基于进程的多线程。
请注意,由于此实现依赖于多进程,所以不应将不可序列化(non-picklable)的参数传递给生成器,因为它们不能被轻易地传递给子进程。
- __verbose__: 日志显示模式,0 或 1。
__返回__
标量测试误差(如果模型只有一个输出且没有评估标准),
或者标量的列表(如果模型有多个输出 和/或 评估标准)。
属性 `model.metrics_names` 将提供标量输出的显示标签。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
----
### predict_generator
```python
predict_generator(generator, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0)
```
为来自数据生成器的输入样本生成预测。
这个生成器应该返回与 `predict_on_batch` 所接收的同样的数据。
__参数__
- __generator__: 生成器,返回批量输入样本,
或一个 `Sequence` (`keras.utils.Sequence`) 对象的实例,以避免在使用多进程时数据的重复。
- __steps__: 在声明一个 epoch 完成并开始下一个 epoch 之前从 `generator` 产生的总步数(批次样本)。
它通常应该等于你的数据集的样本数量除以批量大小。
对于 `Sequence`,它是可选的:如果未指定,将使用`len(generator)` 作为步数。
- __callbacks__: 一系列的 `keras.callbacks.Callback` 实例。一系列可以在预测时使用的回调函数。
详见 [callbacks](/callbacks)。
- __max_queue_size__: 生成器队列的最大尺寸。
- __workers__: 整数。使用的最大进程数量,如果使用基于进程的多线程。
如未指定,`workers` 将默认为 1。如果为 0,将在主线程上执行生成器。
- __use_multiprocessing__: 如果 True,则使用基于进程的多线程。
请注意,由于此实现依赖于多进程,所以不应将不可序列化(non-picklable)的参数传递给生成器,因为它们不能被轻易地传递给子进程。
- __verbose__: 日志显示模式,0 或 1。
__返回__
预测值的 Numpy 数组(或数组列表)。
__异常__
- __ValueError__: 如果生成器生成的数据格式不正确。
----
### get_layer
```python
get_layer(self, name=None, index=None)
```
根据名称(唯一)或索引值查找网络层。
如果同时提供了 `name` 和 `index`,则 `index` 将优先。
索引值来自于水平图遍历的顺序(自下而上)。
__参数__
- __name__: 字符串,层的名字。
- __index__: 整数,层的索引。
__返回__
一个层实例。
__异常__
- __ValueError__: 如果层的名称或索引不正确。
| keras-docs-zh/sources/models/model.md/0 | {
"file_path": "keras-docs-zh/sources/models/model.md",
"repo_id": "keras-docs-zh",
"token_count": 17805
} | 90 |
"""
Title: Conditional GAN
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/07/13
Last modified: 2024/01/02
Description: Training a GAN conditioned on class labels to generate handwritten digits.
Accelerator: GPU
"""
"""
Generative Adversarial Networks (GANs) let us generate novel image data, video data,
or audio data from a random input. Typically, the random input is sampled
from a normal distribution, before going through a series of transformations that turn
it into something plausible (image, video, audio, etc.).
However, a simple [DCGAN](https://arxiv.org/abs/1511.06434) doesn't let us control
the appearance (e.g. class) of the samples we're generating. For instance,
with a GAN that generates MNIST handwritten digits, a simple DCGAN wouldn't let us
choose the class of digits we're generating.
To be able to control what we generate, we need to _condition_ the GAN output
on a semantic input, such as the class of an image.
In this example, we'll build a **Conditional GAN** that can generate MNIST handwritten
digits conditioned on a given class. Such a model can have various useful applications:
* let's say you are dealing with an
[imbalanced image dataset](https://developers.google.com/machine-learning/data-prep/construct/sampling-splitting/imbalanced-data),
and you'd like to gather more examples for the skewed class to balance the dataset.
Data collection can be a costly process on its own. You could instead train a Conditional GAN and use
it to generate novel images for the class that needs balancing.
* Since the generator learns to associate the generated samples with the class labels,
its representations can also be used for [other downstream tasks](https://arxiv.org/abs/1809.11096).
Following are the references used for developing this example:
* [Conditional Generative Adversarial Nets](https://arxiv.org/abs/1411.1784)
* [Lecture on Conditional Generation from Coursera](https://www.coursera.org/lecture/build-basic-generative-adversarial-networks-gans/conditional-generation-inputs-2OPrG)
If you need a refresher on GANs, you can refer to the "Generative adversarial networks"
section of
[this resource](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/r-3/232).
This example requires TensorFlow 2.5 or higher, as well as TensorFlow Docs, which can be
installed using the following command:
"""
"""shell
pip install -q git+https://github.com/tensorflow/docs
"""
"""
## Imports
"""
import keras
from keras import layers
from keras import ops
from tensorflow_docs.vis import embed
import tensorflow as tf
import numpy as np
import imageio
"""
## Constants and hyperparameters
"""
batch_size = 64
num_channels = 1
num_classes = 10
image_size = 28
latent_dim = 128
"""
## Loading the MNIST dataset and preprocessing it
"""
# We'll use all the available examples from both the training and test
# sets.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
all_digits = np.concatenate([x_train, x_test])
all_labels = np.concatenate([y_train, y_test])
# Scale the pixel values to [0, 1] range, add a channel dimension to
# the images, and one-hot encode the labels.
all_digits = all_digits.astype("float32") / 255.0
all_digits = np.reshape(all_digits, (-1, 28, 28, 1))
all_labels = keras.utils.to_categorical(all_labels, 10)
# Create tf.data.Dataset.
dataset = tf.data.Dataset.from_tensor_slices((all_digits, all_labels))
dataset = dataset.shuffle(buffer_size=1024).batch(batch_size)
print(f"Shape of training images: {all_digits.shape}")
print(f"Shape of training labels: {all_labels.shape}")
"""
## Calculating the number of input channels for the generator and discriminator
In a regular (unconditional) GAN, we start by sampling noise (of some fixed
dimension) from a normal distribution. In our case, we also need to account
for the class labels. We will have to add the number of classes to
the input channels of the generator (noise input) as well as the discriminator
(generated image input).
"""
generator_in_channels = latent_dim + num_classes
discriminator_in_channels = num_channels + num_classes
print(generator_in_channels, discriminator_in_channels)
"""
## Creating the discriminator and generator
The model definitions (`discriminator`, `generator`, and `ConditionalGAN`) have been
adapted from [this example](https://keras.io/guides/customizing_what_happens_in_fit/).
"""
# Create the discriminator.
discriminator = keras.Sequential(
[
keras.layers.InputLayer((28, 28, discriminator_in_channels)),
layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.GlobalMaxPooling2D(),
layers.Dense(1),
],
name="discriminator",
)
# Create the generator.
generator = keras.Sequential(
[
keras.layers.InputLayer((generator_in_channels,)),
# We want to generate 128 + num_classes coefficients to reshape into a
# 7x7x(128 + num_classes) map.
layers.Dense(7 * 7 * generator_in_channels),
layers.LeakyReLU(negative_slope=0.2),
layers.Reshape((7, 7, generator_in_channels)),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"),
],
name="generator",
)
"""
## Creating a `ConditionalGAN` model
"""
class ConditionalGAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
self.gen_loss_tracker = keras.metrics.Mean(name="generator_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="discriminator_loss")
@property
def metrics(self):
return [self.gen_loss_tracker, self.disc_loss_tracker]
def compile(self, d_optimizer, g_optimizer, loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
def train_step(self, data):
# Unpack the data.
real_images, one_hot_labels = data
# Add dummy dimensions to the labels so that they can be concatenated with
# the images. This is for the discriminator.
image_one_hot_labels = one_hot_labels[:, :, None, None]
image_one_hot_labels = ops.repeat(
image_one_hot_labels, repeats=[image_size * image_size]
)
image_one_hot_labels = ops.reshape(
image_one_hot_labels, (-1, image_size, image_size, num_classes)
)
# Sample random points in the latent space and concatenate the labels.
# This is for the generator.
batch_size = ops.shape(real_images)[0]
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
random_vector_labels = ops.concatenate(
[random_latent_vectors, one_hot_labels], axis=1
)
# Decode the noise (guided by labels) to fake images.
generated_images = self.generator(random_vector_labels)
# Combine them with real images. Note that we are concatenating the labels
# with these images here.
fake_image_and_labels = ops.concatenate(
[generated_images, image_one_hot_labels], -1
)
real_image_and_labels = ops.concatenate([real_images, image_one_hot_labels], -1)
combined_images = ops.concatenate(
[fake_image_and_labels, real_image_and_labels], axis=0
)
# Assemble labels discriminating real from fake images.
labels = ops.concatenate(
[ops.ones((batch_size, 1)), ops.zeros((batch_size, 1))], axis=0
)
# Train the discriminator.
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space.
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
random_vector_labels = ops.concatenate(
[random_latent_vectors, one_hot_labels], axis=1
)
# Assemble labels that say "all real images".
misleading_labels = ops.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
fake_images = self.generator(random_vector_labels)
fake_image_and_labels = ops.concatenate(
[fake_images, image_one_hot_labels], -1
)
predictions = self.discriminator(fake_image_and_labels)
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Monitor loss.
self.gen_loss_tracker.update_state(g_loss)
self.disc_loss_tracker.update_state(d_loss)
return {
"g_loss": self.gen_loss_tracker.result(),
"d_loss": self.disc_loss_tracker.result(),
}
"""
## Training the Conditional GAN
"""
cond_gan = ConditionalGAN(
discriminator=discriminator, generator=generator, latent_dim=latent_dim
)
cond_gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0003),
loss_fn=keras.losses.BinaryCrossentropy(from_logits=True),
)
cond_gan.fit(dataset, epochs=20)
"""
## Interpolating between classes with the trained generator
"""
# We first extract the trained generator from our Conditional GAN.
trained_gen = cond_gan.generator
# Choose the number of intermediate images that would be generated in
# between the interpolation + 2 (start and last images).
num_interpolation = 9 # @param {type:"integer"}
# Sample noise for the interpolation.
interpolation_noise = keras.random.normal(shape=(1, latent_dim))
interpolation_noise = ops.repeat(interpolation_noise, repeats=num_interpolation)
interpolation_noise = ops.reshape(interpolation_noise, (num_interpolation, latent_dim))
def interpolate_class(first_number, second_number):
# Convert the start and end labels to one-hot encoded vectors.
first_label = keras.utils.to_categorical([first_number], num_classes)
second_label = keras.utils.to_categorical([second_number], num_classes)
first_label = ops.cast(first_label, "float32")
second_label = ops.cast(second_label, "float32")
# Calculate the interpolation vector between the two labels.
percent_second_label = ops.linspace(0, 1, num_interpolation)[:, None]
percent_second_label = ops.cast(percent_second_label, "float32")
interpolation_labels = (
first_label * (1 - percent_second_label) + second_label * percent_second_label
)
# Combine the noise and the labels and run inference with the generator.
noise_and_labels = ops.concatenate([interpolation_noise, interpolation_labels], 1)
fake = trained_gen.predict(noise_and_labels)
return fake
start_class = 2 # @param {type:"slider", min:0, max:9, step:1}
end_class = 6 # @param {type:"slider", min:0, max:9, step:1}
fake_images = interpolate_class(start_class, end_class)
"""
Here, we first sample noise from a normal distribution and then we repeat that for
`num_interpolation` times and reshape the result accordingly.
We then interpolate the one-hot labels linearly from the start class to the end
class, so that each of the `num_interpolation` generated rows mixes the two label
identities in a different proportion.
"""
fake_images *= 255.0
converted_images = fake_images.astype(np.uint8)
converted_images = ops.image.resize(converted_images, (96, 96)).numpy().astype(np.uint8)
imageio.mimsave("animation.gif", converted_images[:, :, :, 0], fps=1)
embed.embed_file("animation.gif")
"""
We can further improve the performance of this model with recipes like
[WGAN-GP](https://keras.io/examples/generative/wgan_gp).
Conditional generation is also widely used in many modern image generation architectures like
[VQ-GANs](https://arxiv.org/abs/2012.09841), [DALL-E](https://openai.com/blog/dall-e/),
etc.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/conditional-gan) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/conditional-GAN).
"""
| keras-io/examples/generative/conditional_gan.py/0 | {
"file_path": "keras-io/examples/generative/conditional_gan.py",
"repo_id": "keras-io",
"token_count": 4805
} | 91 |
<jupyter_start><jupyter_text>Neural Style Transfer with AdaIN**Author:** [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha)**Date created:** 2021/11/08**Last modified:** 2021/11/08**Description:** Neural Style Transfer with Adaptive Instance Normalization. Introduction[Neural Style Transfer](https://www.tensorflow.org/tutorials/generative/style_transfer)is the process of transferring the style of one image onto the contentof another. This was first introduced in the seminal paper["A Neural Algorithm of Artistic Style"](https://arxiv.org/abs/1508.06576)by Gatys et al. A major limitation of the technique proposed in thiswork is in its runtime, as the algorithm uses a slow iterativeoptimization process.Follow-up papers that introduced[Batch Normalization](https://arxiv.org/abs/1502.03167),[Instance Normalization](https://arxiv.org/abs/1701.02096) and[Conditional Instance Normalization](https://arxiv.org/abs/1610.07629)allowed Style Transfer to be performed in new ways, no longerrequiring a slow iterative process.Following these papers, the authors Xun Huang and SergeBelongie propose[Adaptive Instance Normalization](https://arxiv.org/abs/1703.06868) (AdaIN),which allows arbitrary style transfer in real time.In this example we implement Adaptive Instance Normalizationfor Neural Style Transfer. We show in the below figure the outputof our AdaIN model trained foronly **30 epochs**.You can also try out the model with your own images with this[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst). SetupWe begin with importing the necessary packages. We also set theseed for reproducibility. The global variables are hyperparameterswhich we can change as we like.<jupyter_code>import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
from tensorflow.keras import layers
# Defining the global variables.
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 64
# Training for a single epoch due to time constraints.
# Please use at least 30 epochs to see good results.
EPOCHS = 1
AUTOTUNE = tf.data.AUTOTUNE<jupyter_output><empty_output><jupyter_text>Style transfer sample galleryFor Neural Style Transfer we need style images and content images. Inthis example we will use the[Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)as our style dataset and[Pascal VOC](https://www.tensorflow.org/datasets/catalog/voc)as our content dataset.This is a deviation from the original paper implementation by theauthors, where they use[WIKI-Art](https://paperswithcode.com/dataset/wikiart) as style and[MSCOCO](https://cocodataset.org/home) as content datasetsrespectively. We do this to create a minimal yet reproducible example. Downloading the dataset from KaggleThe [Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time)dataset is hosted on Kaggle and one can easily download it in Colab byfollowing these steps:- Follow the instructions [here](https://github.com/Kaggle/kaggle-api)in order to obtain your Kaggle API keys in case you don't have them.- Use the following command to upload the Kaggle API keys.```pythonfrom google.colab import filesfiles.upload()```- Use the following commands to move the API keys to the properdirectory and download the dataset.```shell$ mkdir ~/.kaggle$ cp kaggle.json ~/.kaggle/$ chmod 600 ~/.kaggle/kaggle.json$ kaggle datasets download ikarus777/best-artworks-of-all-time$ unzip -qq best-artworks-of-all-time.zip$ rm -rf images$ mv resized artwork$ rm best-artworks-of-all-time.zip artists.csv``` `tf.data` pipelineIn this section, we will build the `tf.data` pipeline for the project.For the style dataset, we decode, convert and resize the images fromthe folder. For the content images we are already presented with a`tf.data` dataset as we use the `tfds` module.After we have our style and content data pipeline ready, we zip thetwo together to obtain the data pipeline that our model will consume.<jupyter_code>def decode_and_resize(image_path):
"""Decodes and resizes an image from the image file path.
Args:
image_path: The image file path.
Returns:
A resized image.
"""
image = tf.io.read_file(image_path)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.convert_image_dtype(image, dtype="float32")
image = tf.image.resize(image, IMAGE_SIZE)
return image
def extract_image_from_voc(element):
"""Extracts image from the PascalVOC dataset.
Args:
element: A dictionary of data.
Returns:
A resized image.
"""
image = element["image"]
image = tf.image.convert_image_dtype(image, dtype="float32")
image = tf.image.resize(image, IMAGE_SIZE)
return image
# Get the image file paths for the style images.
style_images = os.listdir("/content/artwork/resized")
style_images = [os.path.join("/content/artwork/resized", path) for path in style_images]
# split the style images in train, val and test
total_style_images = len(style_images)
train_style = style_images[: int(0.8 * total_style_images)]
val_style = style_images[int(0.8 * total_style_images) : int(0.9 * total_style_images)]
test_style = style_images[int(0.9 * total_style_images) :]
# Build the style and content tf.data datasets.
train_style_ds = (
tf.data.Dataset.from_tensor_slices(train_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
train_content_ds = tfds.load("voc", split="train").map(extract_image_from_voc).repeat()
val_style_ds = (
tf.data.Dataset.from_tensor_slices(val_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
val_content_ds = (
tfds.load("voc", split="validation").map(extract_image_from_voc).repeat()
)
test_style_ds = (
tf.data.Dataset.from_tensor_slices(test_style)
.map(decode_and_resize, num_parallel_calls=AUTOTUNE)
.repeat()
)
test_content_ds = (
tfds.load("voc", split="test")
.map(extract_image_from_voc, num_parallel_calls=AUTOTUNE)
.repeat()
)
# Zipping the style and content datasets.
train_ds = (
tf.data.Dataset.zip((train_style_ds, train_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
val_ds = (
tf.data.Dataset.zip((val_style_ds, val_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)
test_ds = (
tf.data.Dataset.zip((test_style_ds, test_content_ds))
.shuffle(BATCH_SIZE * 2)
.batch(BATCH_SIZE)
.prefetch(AUTOTUNE)
)<jupyter_output><empty_output><jupyter_text>Visualizing the dataIt is always better to visualize the data before training. To ensurethe correctness of our preprocessing pipeline, we visualize 10 samplesfrom our dataset.<jupyter_code>style, content = next(iter(train_ds))
fig, axes = plt.subplots(nrows=10, ncols=2, figsize=(5, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for (axis, style_image, content_image) in zip(axes, style[0:10], content[0:10]):
(ax_style, ax_content) = axis
ax_style.imshow(style_image)
ax_style.set_title("Style Image")
ax_content.imshow(content_image)
ax_content.set_title("Content Image")<jupyter_output><empty_output><jupyter_text>ArchitectureThe style transfer network takes a content image and a style image asinputs and outputs the style transferred image. The authors of AdaINpropose a simple encoder-decoder structure for achieving this.The content image (`C`) and the style image (`S`) are both fed to theencoder networks. The output from these encoder networks (feature maps)are then fed to the AdaIN layer. The AdaIN layer computes a combinedfeature map. This feature map is then fed into a randomly initializeddecoder network that serves as the generator for the neural styletransferred image.The style feature map (`fs`) and the content feature map (`fc`) arefed to the AdaIN layer. This layer produced the combined feature map`t`. The function `g` represents the decoder (generator) network. EncoderThe encoder is a part of the pretrained (pretrained on[imagenet](https://www.image-net.org/)) VGG19 model. We slice themodel from the `block4-conv1` layer. The output layer is as suggestedby the authors in their paper.<jupyter_code>def get_encoder():
vgg19 = keras.applications.VGG19(
include_top=False,
weights="imagenet",
input_shape=(*IMAGE_SIZE, 3),
)
vgg19.trainable = False
mini_vgg19 = keras.Model(vgg19.input, vgg19.get_layer("block4_conv1").output)
inputs = layers.Input([*IMAGE_SIZE, 3])
mini_vgg19_out = mini_vgg19(inputs)
return keras.Model(inputs, mini_vgg19_out, name="mini_vgg19")<jupyter_output><empty_output><jupyter_text>Adaptive Instance NormalizationThe AdaIN layer takes in the featuresof the content and style image. The layer can be defined via thefollowing equation:where `sigma` is the standard deviation and `mu` is the mean for theconcerned variable. In the above equation the mean and variance of thecontent feature map `fc` is aligned with the mean and variance of thestyle feature maps `fs`.It is important to note that the AdaIN layer proposed by the authorsuses no other parameters apart from mean and variance. The layer alsodoes not have any trainable parameters. This is why we use a*Python function* instead of using a *Keras layer*. The function takesstyle and content feature maps, computes the mean and standard deviationof the images and returns the adaptive instance normalized feature map.<jupyter_code>def get_mean_std(x, epsilon=1e-5):
axes = [1, 2]
# Compute the mean and standard deviation of a tensor.
mean, variance = tf.nn.moments(x, axes=axes, keepdims=True)
standard_deviation = tf.sqrt(variance + epsilon)
return mean, standard_deviation
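# A sketch of the AdaIN equation referenced in the text above (assuming the
# standard formulation from the AdaIN paper):
#   AdaIN(c, s) = sigma(s) * (c - mu(c)) / sigma(c) + mu(s)
# where mu and sigma are per-channel statistics over the spatial dimensions.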
def ada_in(style, content):
"""Computes the AdaIn feature map.
Args:
style: The style feature map.
content: The content feature map.
Returns:
The AdaIN feature map.
"""
content_mean, content_std = get_mean_std(content)
style_mean, style_std = get_mean_std(style)
t = style_std * (content - content_mean) / content_std + style_mean
return t<jupyter_output><empty_output><jupyter_text>DecoderThe authors specify that the decoder network must mirror the encodernetwork. We have symmetrically inverted the encoder to build ourdecoder. We have used `UpSampling2D` layers to increase the spatialresolution of the feature maps.Note that the authors warn against using any normalization layerin the decoder network, and do indeed go on to show that includingbatch normalization or instance normalization hurts the performanceof the overall network.This is the only portion of the entire architecture that is trainable.<jupyter_code>def get_decoder():
config = {"kernel_size": 3, "strides": 1, "padding": "same", "activation": "relu"}
decoder = keras.Sequential(
[
layers.InputLayer((None, None, 512)),
layers.Conv2D(filters=512, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.Conv2D(filters=256, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=128, **config),
layers.Conv2D(filters=128, **config),
layers.UpSampling2D(),
layers.Conv2D(filters=64, **config),
layers.Conv2D(
filters=3,
kernel_size=3,
strides=1,
padding="same",
activation="sigmoid",
),
]
)
return decoder<jupyter_output><empty_output><jupyter_text>Loss functionsHere we build the loss functions for the neural style transfer model.The authors propose to use a pretrained VGG-19 to compute the lossfunction of the network. It is important to keep in mind that thiswill be used for training only the decoder network. The totalloss (`Lt`) is a weighted combination of content loss (`Lc`) and styleloss (`Ls`). The `lambda` term is used to vary the amount of styletransferred. Content LossThis is the Euclidean distance between the content image featuresand the features of the neural style transferred image.Here the authors propose to use the output from the AdaIn layer `t` asthe content target rather than using features of the original image astarget. This is done to speed up convergence. Style LossRather than using the more commonly used[Gram Matrix](https://mathworld.wolfram.com/GramMatrix.html),the authors propose to compute the difference between the statistical features(mean and variance) which makes it conceptually cleaner. This can beeasily visualized via the following equation:where `theta` denotes the layers in VGG-19 used to compute the loss.In this case this corresponds to:- `block1_conv1`- `block1_conv2`- `block1_conv3`- `block1_conv4`<jupyter_code>def get_loss_net():
vgg19 = keras.applications.VGG19(
include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3)
)
vgg19.trainable = False
layer_names = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1"]
outputs = [vgg19.get_layer(name).output for name in layer_names]
mini_vgg19 = keras.Model(vgg19.input, outputs)
inputs = layers.Input([*IMAGE_SIZE, 3])
mini_vgg19_out = mini_vgg19(inputs)
return keras.Model(inputs, mini_vgg19_out, name="loss_net")<jupyter_output><empty_output><jupyter_text>Neural Style TransferThis is the trainer module. We wrap the encoder and decoder insidea `tf.keras.Model` subclass. This allows us to customize what happensin the `model.fit()` loop.<jupyter_code>class NeuralStyleTransfer(tf.keras.Model):
def __init__(self, encoder, decoder, loss_net, style_weight, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.loss_net = loss_net
self.style_weight = style_weight
def compile(self, optimizer, loss_fn):
super().compile()
self.optimizer = optimizer
self.loss_fn = loss_fn
self.style_loss_tracker = keras.metrics.Mean(name="style_loss")
self.content_loss_tracker = keras.metrics.Mean(name="content_loss")
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
def train_step(self, inputs):
style, content = inputs
# Initialize the content and style loss.
loss_content = 0.0
loss_style = 0.0
with tf.GradientTape() as tape:
# Encode the style and content image.
style_encoded = self.encoder(style)
content_encoded = self.encoder(content)
# Compute the AdaIN target feature maps.
t = ada_in(style=style_encoded, content=content_encoded)
# Generate the neural style transferred image.
reconstructed_image = self.decoder(t)
# Compute the losses.
reconstructed_vgg_features = self.loss_net(reconstructed_image)
style_vgg_features = self.loss_net(style)
loss_content = self.loss_fn(t, reconstructed_vgg_features[-1])
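            # Style loss (a sketch of the paper's formulation): for each VGG layer
            # selected in `get_loss_net` (block1_conv1, block2_conv1, block3_conv1,
            # block4_conv1), match the channel-wise mean and standard deviation of
            # the stylized output's features to those of the style image's features.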
for inp, out in zip(style_vgg_features, reconstructed_vgg_features):
mean_inp, std_inp = get_mean_std(inp)
mean_out, std_out = get_mean_std(out)
loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn(
std_inp, std_out
)
loss_style = self.style_weight * loss_style
total_loss = loss_content + loss_style
# Compute gradients and optimize the decoder.
trainable_vars = self.decoder.trainable_variables
gradients = tape.gradient(total_loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the trackers.
self.style_loss_tracker.update_state(loss_style)
self.content_loss_tracker.update_state(loss_content)
self.total_loss_tracker.update_state(total_loss)
return {
"style_loss": self.style_loss_tracker.result(),
"content_loss": self.content_loss_tracker.result(),
"total_loss": self.total_loss_tracker.result(),
}
def test_step(self, inputs):
style, content = inputs
# Initialize the content and style loss.
loss_content = 0.0
loss_style = 0.0
# Encode the style and content image.
style_encoded = self.encoder(style)
content_encoded = self.encoder(content)
# Compute the AdaIN target feature maps.
t = ada_in(style=style_encoded, content=content_encoded)
# Generate the neural style transferred image.
reconstructed_image = self.decoder(t)
# Compute the losses.
recons_vgg_features = self.loss_net(reconstructed_image)
style_vgg_features = self.loss_net(style)
loss_content = self.loss_fn(t, recons_vgg_features[-1])
for inp, out in zip(style_vgg_features, recons_vgg_features):
mean_inp, std_inp = get_mean_std(inp)
mean_out, std_out = get_mean_std(out)
loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn(
std_inp, std_out
)
loss_style = self.style_weight * loss_style
total_loss = loss_content + loss_style
# Update the trackers.
self.style_loss_tracker.update_state(loss_style)
self.content_loss_tracker.update_state(loss_content)
self.total_loss_tracker.update_state(total_loss)
return {
"style_loss": self.style_loss_tracker.result(),
"content_loss": self.content_loss_tracker.result(),
"total_loss": self.total_loss_tracker.result(),
}
@property
def metrics(self):
return [
self.style_loss_tracker,
self.content_loss_tracker,
self.total_loss_tracker,
]<jupyter_output><empty_output><jupyter_text>Train Monitor callbackThis callback is used to visualize the style transfer output ofthe model at the end of each epoch. The objective of style transfer cannot bequantified properly, and is to be subjectively evaluated by an audience.For this reason, visualization is a key aspect of evaluating the model.<jupyter_code>test_style, test_content = next(iter(test_ds))
class TrainMonitor(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
# Encode the style and content image.
test_style_encoded = self.model.encoder(test_style)
test_content_encoded = self.model.encoder(test_content)
# Compute the AdaIN features.
test_t = ada_in(style=test_style_encoded, content=test_content_encoded)
test_reconstructed_image = self.model.decoder(test_t)
# Plot the Style, Content and the NST image.
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5))
ax[0].imshow(tf.keras.utils.array_to_img(test_style[0]))
ax[0].set_title(f"Style: {epoch:03d}")
ax[1].imshow(tf.keras.utils.array_to_img(test_content[0]))
ax[1].set_title(f"Content: {epoch:03d}")
ax[2].imshow(
tf.keras.utils.array_to_img(test_reconstructed_image[0])
)
ax[2].set_title(f"NST: {epoch:03d}")
plt.show()
        plt.close()<jupyter_output><empty_output><jupyter_text>Train the modelIn this section, we define the optimizer, the loss function, and the trainer module. We compile the trainer module with the optimizer and the loss function and then train it.*Note*: We train the model for a single epoch for time constraints, but we will need to train it for at least 30 epochs to see good results.<jupyter_code>optimizer = keras.optimizers.Adam(learning_rate=1e-5)
loss_fn = keras.losses.MeanSquaredError()
encoder = get_encoder()
loss_net = get_loss_net()
decoder = get_decoder()
model = NeuralStyleTransfer(
encoder=encoder, decoder=decoder, loss_net=loss_net, style_weight=4.0
)
model.compile(optimizer=optimizer, loss_fn=loss_fn)
history = model.fit(
train_ds,
epochs=EPOCHS,
steps_per_epoch=50,
validation_data=val_ds,
validation_steps=50,
callbacks=[TrainMonitor()],
)<jupyter_output><empty_output><jupyter_text>InferenceAfter we train the model, we now need to run inference with it. We willpass arbitrary content and style images from the test dataset and take a look atthe output images.*NOTE*: To try out the model on your own images, you can use this[Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst).<jupyter_code>for style, content in test_ds.take(1):
style_encoded = model.encoder(style)
content_encoded = model.encoder(content)
t = ada_in(style=style_encoded, content=content_encoded)
reconstructed_image = model.decoder(t)
fig, axes = plt.subplots(nrows=10, ncols=3, figsize=(10, 30))
[ax.axis("off") for ax in np.ravel(axes)]
for axis, style_image, content_image, reconstructed_image in zip(
axes, style[0:10], content[0:10], reconstructed_image[0:10]
):
(ax_style, ax_content, ax_reconstructed) = axis
ax_style.imshow(style_image)
ax_style.set_title("Style Image")
ax_content.imshow(content_image)
ax_content.set_title("Content Image")
ax_reconstructed.imshow(reconstructed_image)
ax_reconstructed.set_title("NST Image")<jupyter_output><empty_output> | keras-io/examples/generative/ipynb/adain.ipynb/0 | {
"file_path": "keras-io/examples/generative/ipynb/adain.ipynb",
"repo_id": "keras-io",
"token_count": 7962
} | 92 |
"""
Title: Variational AutoEncoder
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/03
Last modified: 2023/11/22
Description: Convolutional Variational AutoEncoder (VAE) trained on MNIST digits.
Accelerator: GPU
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import tensorflow as tf
import keras
from keras import layers
"""
## Create a sampling layer
"""
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
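        # Reparameterization trick: z = z_mean + exp(0.5 * z_log_var) * epsilon,
        # with epsilon ~ N(0, I), so gradients can flow through the sampling step.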
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.random.normal(shape=(batch, dim))
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
"""
## Build the encoder
"""
latent_dim = 2
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
"""
## Build the decoder
"""
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
"""
## Define the VAE as a `Model` with a custom `train_step`
"""
class VAE(keras.Model):
def __init__(self, encoder, decoder, **kwargs):
super().__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
self.reconstruction_loss_tracker = keras.metrics.Mean(
name="reconstruction_loss"
)
self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss")
@property
def metrics(self):
return [
self.total_loss_tracker,
self.reconstruction_loss_tracker,
self.kl_loss_tracker,
]
def train_step(self, data):
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
tf.reduce_sum(
keras.losses.binary_crossentropy(data, reconstruction),
axis=(1, 2),
)
)
kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var))
kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1))
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
self.total_loss_tracker.update_state(total_loss)
self.reconstruction_loss_tracker.update_state(reconstruction_loss)
self.kl_loss_tracker.update_state(kl_loss)
return {
"loss": self.total_loss_tracker.result(),
"reconstruction_loss": self.reconstruction_loss_tracker.result(),
"kl_loss": self.kl_loss_tracker.result(),
}
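"""
For reference, the `kl_loss` computed above is the closed-form KL divergence between the
approximate posterior `N(z_mean, exp(z_log_var))` and a standard normal prior, summed over
the latent dimensions and averaged over the batch:

`kl = -0.5 * sum(1 + z_log_var - z_mean**2 - exp(z_log_var))`

This is a standard Gaussian identity, so no sampling is needed to evaluate it.
"""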
"""
## Train the VAE
"""
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())
vae.fit(mnist_digits, epochs=30, batch_size=128)
"""
## Display a grid of sampled digits
"""
import matplotlib.pyplot as plt
def plot_latent_space(vae, n=30, figsize=15):
    # display an n*n 2D manifold of digits
digit_size = 28
scale = 1.0
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = np.linspace(-scale, scale, n)
grid_y = np.linspace(-scale, scale, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = vae.decoder.predict(z_sample, verbose=0)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[
i * digit_size : (i + 1) * digit_size,
j * digit_size : (j + 1) * digit_size,
] = digit
plt.figure(figsize=(figsize, figsize))
start_range = digit_size // 2
end_range = n * digit_size + start_range
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap="Greys_r")
plt.show()
plot_latent_space(vae)
"""
## Display how the latent space clusters different digit classes
"""
def plot_label_clusters(vae, data, labels):
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = vae.encoder.predict(data, verbose=0)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype("float32") / 255
plot_label_clusters(vae, x_train, y_train)
| keras-io/examples/generative/vae.py/0 | {
"file_path": "keras-io/examples/generative/vae.py",
"repo_id": "keras-io",
"token_count": 2672
} | 93 |
<jupyter_start><jupyter_text>Graph attention network (GAT) for node classification**Author:** [akensert](https://github.com/akensert)**Date created:** 2021/09/13**Last modified:** 2021/12/26**Description:** An implementation of a Graph Attention Network (GAT) for node classification. Introduction[Graph neural networks](https://en.wikipedia.org/wiki/Graph_neural_network) are the preferred neural network architecture for processing data structured as graphs (for example, social networks or molecule structures), yielding better results than fully-connected networks or convolutional networks. In this tutorial, we will implement a specific graph neural network known as a [Graph Attention Network](https://arxiv.org/abs/1710.10903) (GAT) to predict labels of scientific papers based on what type of papers cite them (using the [Cora](https://linqs.soe.ucsc.edu/data) dataset). ReferencesFor more information on GAT, see the original paper [Graph Attention Networks](https://arxiv.org/abs/1710.10903) as well as [DGL's Graph Attention Networks](https://docs.dgl.ai/en/0.4.x/tutorials/models/1_gnn/9_gat.html) documentation. Import packages<jupyter_code>import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import os
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 6)
pd.set_option("display.max_rows", 6)
np.random.seed(2)<jupyter_output><empty_output><jupyter_text>Obtain the datasetThe preparation of the [Cora dataset](https://linqs.soe.ucsc.edu/data) follows that of the[Node classification with Graph Neural Networks](https://keras.io/examples/graph/gnn_citations/)tutorial. Refer to this tutorial for more details on the dataset and exploratory data analysis.In brief, the Cora dataset consists of two files: `cora.cites` which contains *directed links* (citations) betweenpapers; and `cora.content` which contains *features* of the corresponding papers and oneof seven labels (the *subject* of the paper).<jupyter_code>zip_file = keras.utils.get_file(
fname="cora.tgz",
origin="https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz",
extract=True,
)
data_dir = os.path.join(os.path.dirname(zip_file), "cora")
citations = pd.read_csv(
os.path.join(data_dir, "cora.cites"),
sep="\t",
header=None,
names=["target", "source"],
)
papers = pd.read_csv(
os.path.join(data_dir, "cora.content"),
sep="\t",
header=None,
names=["paper_id"] + [f"term_{idx}" for idx in range(1433)] + ["subject"],
)
class_values = sorted(papers["subject"].unique())
class_idx = {name: id for id, name in enumerate(class_values)}
paper_idx = {name: idx for idx, name in enumerate(sorted(papers["paper_id"].unique()))}
papers["paper_id"] = papers["paper_id"].apply(lambda name: paper_idx[name])
citations["source"] = citations["source"].apply(lambda name: paper_idx[name])
citations["target"] = citations["target"].apply(lambda name: paper_idx[name])
papers["subject"] = papers["subject"].apply(lambda value: class_idx[value])
print(citations)
print(papers)<jupyter_output><empty_output><jupyter_text>Split the dataset<jupyter_code># Obtain random indices
random_indices = np.random.permutation(range(papers.shape[0]))
# 50/50 split
train_data = papers.iloc[random_indices[: len(random_indices) // 2]]
test_data = papers.iloc[random_indices[len(random_indices) // 2 :]]<jupyter_output><empty_output><jupyter_text>Prepare the graph data<jupyter_code># Obtain paper indices which will be used to gather node states
# from the graph later on when training the model
train_indices = train_data["paper_id"].to_numpy()
test_indices = test_data["paper_id"].to_numpy()
# Obtain ground truth labels corresponding to each paper_id
train_labels = train_data["subject"].to_numpy()
test_labels = test_data["subject"].to_numpy()
# Define graph, namely an edge tensor and a node feature tensor
edges = tf.convert_to_tensor(citations[["target", "source"]])
node_states = tf.convert_to_tensor(papers.sort_values("paper_id").iloc[:, 1:-1])
# Print shapes of the graph
print("Edges shape:\t\t", edges.shape)
print("Node features shape:", node_states.shape)<jupyter_output><empty_output><jupyter_text>Build the modelGAT takes as input a graph (namely an edge tensor and a node feature tensor) and outputs \[updated\] node states. The node states are, for each target node, neighborhood-aggregated information of *N*-hops (where *N* is decided by the number of layers of the GAT). Importantly, in contrast to the [graph convolutional network](https://arxiv.org/abs/1609.02907) (GCN), the GAT makes use of attention mechanisms to aggregate information from neighboring nodes (or *source nodes*). In other words, instead of simply averaging/summing node states from source nodes (*source papers*) to the target node (*target papers*), GAT first applies normalized attention scores to each source node state and then sums. (Multi-head) graph attention layerThe GAT model implements multi-head graph attention layers. The `MultiHeadGraphAttention` layer is simply a concatenation (or averaging) of multiple graph attention layers (`GraphAttention`), each with separate learnable weights `W`. The `GraphAttention` layer does the following:Consider input node states `h^{l}`, which are linearly transformed by `W^{l}`, resulting in `z^{l}`.For each target node:1. Computes pair-wise attention scores `a^{l}^{T}(z^{l}_{i}||z^{l}_{j})` for all `j`, resulting in `e_{ij}` (for all `j`). `||` denotes a concatenation, `_{i}` corresponds to the target node, and `_{j}` corresponds to a given 1-hop neighbor/source node.2. Normalizes `e_{ij}` via softmax, so that the sum of incoming edges' attention scores to the target node (`sum_{k}{e_{norm}_{ik}}`) adds up to 1.3. Applies attention scores `e_{norm}_{ij}` to `z_{j}` and adds it to the new target node state `h^{l+1}_{i}`, for all `j`.<jupyter_code>class GraphAttention(layers.Layer):
def __init__(
self,
units,
kernel_initializer="glorot_uniform",
kernel_regularizer=None,
**kwargs,
):
super().__init__(**kwargs)
self.units = units
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[0][-1], self.units),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel",
)
self.kernel_attention = self.add_weight(
shape=(self.units * 2, 1),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
name="kernel_attention",
)
self.built = True
def call(self, inputs):
node_states, edges = inputs
# Linearly transform node states
node_states_transformed = tf.matmul(node_states, self.kernel)
# (1) Compute pair-wise attention scores
node_states_expanded = tf.gather(node_states_transformed, edges)
node_states_expanded = tf.reshape(
node_states_expanded, (tf.shape(edges)[0], -1)
)
attention_scores = tf.nn.leaky_relu(
tf.matmul(node_states_expanded, self.kernel_attention)
)
attention_scores = tf.squeeze(attention_scores, -1)
# (2) Normalize attention scores
attention_scores = tf.math.exp(tf.clip_by_value(attention_scores, -2, 2))
attention_scores_sum = tf.math.unsorted_segment_sum(
data=attention_scores,
segment_ids=edges[:, 0],
num_segments=tf.reduce_max(edges[:, 0]) + 1,
)
attention_scores_sum = tf.repeat(
attention_scores_sum, tf.math.bincount(tf.cast(edges[:, 0], "int32"))
)
attention_scores_norm = attention_scores / attention_scores_sum
# (3) Gather node states of neighbors, apply attention scores and aggregate
node_states_neighbors = tf.gather(node_states_transformed, edges[:, 1])
out = tf.math.unsorted_segment_sum(
data=node_states_neighbors * attention_scores_norm[:, tf.newaxis],
segment_ids=edges[:, 0],
num_segments=tf.shape(node_states)[0],
)
return out
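# Illustrative aside (the `_demo_*` tensors below are not part of the original example):
# the segment-sum based normalization above is a per-target-node softmax over incoming
# edges. With target ids [0, 0, 1] and exponentiated scores [1., 2., 3.], each score is
# divided by the sum of the scores that point at the same target node:
_demo_scores = tf.constant([1.0, 2.0, 3.0])
_demo_targets = tf.constant([0, 0, 1])
_demo_sums = tf.math.unsorted_segment_sum(_demo_scores, _demo_targets, num_segments=2)
_demo_norm = _demo_scores / tf.repeat(_demo_sums, tf.math.bincount(_demo_targets))
print("Per-node normalized demo scores:", _demo_norm.numpy())  # -> [0.33, 0.67, 1.0]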
class MultiHeadGraphAttention(layers.Layer):
def __init__(self, units, num_heads=8, merge_type="concat", **kwargs):
super().__init__(**kwargs)
self.num_heads = num_heads
self.merge_type = merge_type
self.attention_layers = [GraphAttention(units) for _ in range(num_heads)]
def call(self, inputs):
atom_features, pair_indices = inputs
# Obtain outputs from each attention head
outputs = [
attention_layer([atom_features, pair_indices])
for attention_layer in self.attention_layers
]
# Concatenate or average the node states from each head
if self.merge_type == "concat":
outputs = tf.concat(outputs, axis=-1)
else:
outputs = tf.reduce_mean(tf.stack(outputs, axis=-1), axis=-1)
# Activate and return node states
return tf.nn.relu(outputs)<jupyter_output><empty_output><jupyter_text>Implement training logic with custom `train_step`, `test_step`, and `predict_step` methodsNotice, the GAT model operates on the entire graph (namely, `node_states` and`edges`) in all phases (training, validation and testing). Hence, `node_states` and`edges` are passed to the constructor of the `keras.Model` and used as attributes.The difference between the phases are the indices (and labels), which gatherscertain outputs (`tf.gather(outputs, indices)`).<jupyter_code>class GraphAttentionNetwork(keras.Model):
def __init__(
self,
node_states,
edges,
hidden_units,
num_heads,
num_layers,
output_dim,
**kwargs,
):
super().__init__(**kwargs)
self.node_states = node_states
self.edges = edges
self.preprocess = layers.Dense(hidden_units * num_heads, activation="relu")
self.attention_layers = [
MultiHeadGraphAttention(hidden_units, num_heads) for _ in range(num_layers)
]
self.output_layer = layers.Dense(output_dim)
def call(self, inputs):
node_states, edges = inputs
x = self.preprocess(node_states)
for attention_layer in self.attention_layers:
x = attention_layer([x, edges]) + x
outputs = self.output_layer(x)
return outputs
def train_step(self, data):
indices, labels = data
with tf.GradientTape() as tape:
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Compute gradients
grads = tape.gradient(loss, self.trainable_weights)
# Apply gradients (update weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}
def predict_step(self, data):
indices = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute probabilities
return tf.nn.softmax(tf.gather(outputs, indices))
def test_step(self, data):
indices, labels = data
# Forward pass
outputs = self([self.node_states, self.edges])
# Compute loss
loss = self.compiled_loss(labels, tf.gather(outputs, indices))
# Update metric(s)
self.compiled_metrics.update_state(labels, tf.gather(outputs, indices))
return {m.name: m.result() for m in self.metrics}<jupyter_output><empty_output><jupyter_text>Train and evaluate<jupyter_code># Define hyper-parameters
HIDDEN_UNITS = 100
NUM_HEADS = 8
NUM_LAYERS = 3
OUTPUT_DIM = len(class_values)
NUM_EPOCHS = 100
BATCH_SIZE = 256
VALIDATION_SPLIT = 0.1
LEARNING_RATE = 3e-1
MOMENTUM = 0.9
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.SGD(LEARNING_RATE, momentum=MOMENTUM)
accuracy_fn = keras.metrics.SparseCategoricalAccuracy(name="acc")
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_acc", min_delta=1e-5, patience=5, restore_best_weights=True
)
# Build model
gat_model = GraphAttentionNetwork(
node_states, edges, HIDDEN_UNITS, NUM_HEADS, NUM_LAYERS, OUTPUT_DIM
)
# Compile model
gat_model.compile(loss=loss_fn, optimizer=optimizer, metrics=[accuracy_fn])
gat_model.fit(
x=train_indices,
y=train_labels,
validation_split=VALIDATION_SPLIT,
batch_size=BATCH_SIZE,
epochs=NUM_EPOCHS,
callbacks=[early_stopping],
verbose=2,
)
_, test_accuracy = gat_model.evaluate(x=test_indices, y=test_labels, verbose=0)
print("--" * 38 + f"\nTest Accuracy {test_accuracy*100:.1f}%")<jupyter_output><empty_output><jupyter_text>Predict (probabilities)<jupyter_code>test_probs = gat_model.predict(x=test_indices)
mapping = {v: k for (k, v) in class_idx.items()}
for i, (probs, label) in enumerate(zip(test_probs[:10], test_labels[:10])):
print(f"Example {i+1}: {mapping[label]}")
for j, c in zip(probs, class_idx.keys()):
print(f"\tProbability of {c: <24} = {j*100:7.3f}%")
print("---" * 20)<jupyter_output><empty_output> | keras-io/examples/graph/ipynb/gat_node_classification.ipynb/0 | {
"file_path": "keras-io/examples/graph/ipynb/gat_node_classification.ipynb",
"repo_id": "keras-io",
"token_count": 5264
} | 94 |
"""
Title: Endpoint layer pattern
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2019/05/10
Last modified: 2023/11/22
Description: Demonstration of the "endpoint layer" pattern (layer that handles loss management).
Accelerator: GPU
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
import numpy as np
"""
## Usage of endpoint layers in the Functional API
An "endpoint layer" has access to the model's targets, and creates arbitrary losses
in `call()` using `self.add_loss()` and `Metric.update_state()`.
This enables you to define losses and
metrics that don't match the usual signature `fn(y_true, y_pred, sample_weight=None)`.
Note that you could have separate metrics for training and eval with this pattern.
"""
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_metric = keras.metrics.BinaryAccuracy(name="accuracy")
def call(self, logits, targets=None, sample_weight=None):
if targets is not None:
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weight)
self.add_loss(loss)
# Log the accuracy as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.)
self.accuracy_metric.update_state(targets, logits, sample_weight)
# Return the inference-time prediction tensor (for `.predict()`).
        # Use a sigmoid here: the endpoint produces a single binary-classification logit.
        return tf.math.sigmoid(logits)
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
targets = keras.Input((1,), name="targets")
sample_weight = keras.Input((1,), name="sample_weight")
preds = LogisticEndpoint()(logits, targets, sample_weight)
model = keras.Model([inputs, targets, sample_weight], preds)
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
"""
## Exporting an inference-only model
Simply don't include `targets` in the model. The weights stay the same.
"""
inputs = keras.Input((764,), name="inputs")
logits = keras.layers.Dense(1)(inputs)
preds = LogisticEndpoint()(logits, targets=None, sample_weight=None)
inference_model = keras.Model(inputs, preds)
inference_model.set_weights(model.get_weights())
preds = inference_model.predict(np.random.random((1000, 764)))
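"""
As a quick, illustrative check (an optional addition to the pattern), the exported
inference-only model should now hold exactly the same parameter values as the training
model:
"""

for w_trained, w_exported in zip(model.get_weights(), inference_model.get_weights()):
    np.testing.assert_allclose(w_trained, w_exported)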
"""
## Usage of loss endpoint layers in subclassed models
"""
class LogReg(keras.Model):
def __init__(self):
super().__init__()
self.dense = keras.layers.Dense(1)
self.logistic_endpoint = LogisticEndpoint()
def call(self, inputs):
# Note that all inputs should be in the first argument
# since we want to be able to call `model.fit(inputs)`.
logits = self.dense(inputs["inputs"])
preds = self.logistic_endpoint(
logits=logits,
targets=inputs["targets"],
sample_weight=inputs["sample_weight"],
)
return preds
model = LogReg()
data = {
"inputs": np.random.random((1000, 764)),
"targets": np.random.random((1000, 1)),
"sample_weight": np.random.random((1000, 1)),
}
model.compile(keras.optimizers.Adam(1e-3))
model.fit(data, epochs=2)
| keras-io/examples/keras_recipes/endpoint_layer_pattern.py/0 | {
"file_path": "keras-io/examples/keras_recipes/endpoint_layer_pattern.py",
"repo_id": "keras-io",
"token_count": 1356
} | 95 |
<jupyter_start><jupyter_text>Customizing the convolution operation of a Conv2D layer**Author:** [lukewood](https://lukewood.xyz)**Date created:** 11/03/2021**Last modified:** 11/03/2021**Description:** This example shows how to implement custom convolution layers using the `Conv.convolution_op()` API. IntroductionYou may sometimes need to implement custom versions of convolution layers like `Conv1D` and `Conv2D`.Keras enables you do this without implementing the entire layer from scratch: you can reusemost of the base convolution layer and just customize the convolution op itself via the`convolution_op()` method.This method was introduced in Keras 2.7. So before using the`convolution_op()` API, ensure that you are running Keras version 2.7.0 or greater. A Simple `StandardizedConv2D` implementationThere are two ways to use the `Conv.convolution_op()` API. The first wayis to override the `convolution_op()` method on a convolution layer subclass.Using this approach, we can quickly implement a[StandardizedConv2D](https://arxiv.org/abs/1903.10520) as shown below.<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
import numpy as np
class StandardizedConv2DWithOverride(layers.Conv2D):
def convolution_op(self, inputs, kernel):
mean, var = tf.nn.moments(kernel, axes=[0, 1, 2], keepdims=True)
return tf.nn.conv2d(
inputs,
(kernel - mean) / tf.sqrt(var + 1e-10),
padding="VALID",
strides=list(self.strides),
name=self.__class__.__name__,
)<jupyter_output><empty_output><jupyter_text>The other way to use the `Conv.convolution_op()` API is to directly call the`convolution_op()` method from the `call()` method of a convolution layer subclass.A comparable class implemented using this approach is shown below.<jupyter_code>class StandardizedConv2DWithCall(layers.Conv2D):
def call(self, inputs):
mean, var = tf.nn.moments(self.kernel, axes=[0, 1, 2], keepdims=True)
result = self.convolution_op(
inputs, (self.kernel - mean) / tf.sqrt(var + 1e-10)
)
if self.use_bias:
result = result + self.bias
return result<jupyter_output><empty_output><jupyter_text>Example UsageBoth of these layers work as drop-in replacements for `Conv2D`. The followingdemonstration performs classification on the MNIST dataset.<jupyter_code># Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential(
[
keras.layers.Input(shape=input_shape),
StandardizedConv2DWithCall(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
StandardizedConv2DWithOverride(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
batch_size = 128
epochs = 5
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)<jupyter_output><empty_output>
"file_path": "keras-io/examples/keras_recipes/ipynb/subclassing_conv_layers.ipynb",
"repo_id": "keras-io",
"token_count": 1439
} | 96 |
# Evaluating and exporting scikit-learn metrics in a Keras callback
**Author:** [lukewood](https://lukewood.xyz)<br>
**Date created:** 10/07/2021<br>
**Last modified:** 11/17/2023<br>
**Description:** This example shows how to use Keras callbacks to evaluate and export non-TensorFlow based metrics.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/sklearn_metric_callbacks.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/sklearn_metric_callbacks.py)
---
## Introduction
[Keras callbacks](https://keras.io/api/callbacks/) allow for the execution of arbitrary
code at various stages of the Keras training process. While Keras offers first-class
support for metric evaluation, [Keras metrics](https://keras.io/api/metrics/) may only
rely on TensorFlow code internally.
While there are TensorFlow implementations of many metrics online, some metrics are
implemented using [NumPy](https://numpy.org/) or another Python-based numerical computation library.
By performing metric evaluation inside of a Keras callback, we can leverage any existing
metric, and ultimately export the result to TensorBoard.
---
## Jaccard score metric
This example makes use of a sklearn metric, `sklearn.metrics.jaccard_score()`, and
writes the result to TensorBoard using the `tf.summary` API.
This template can be modified slightly to make it work with any existing sklearn metric.
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
from keras import layers
from sklearn.metrics import jaccard_score
import numpy as np
class JaccardScoreCallback(keras.callbacks.Callback):
"""Computes the Jaccard score and logs the results to TensorBoard."""
def __init__(self, name, x_test, y_test, log_dir):
self.x_test = x_test
self.y_test = y_test
self.keras_metric = keras.metrics.Mean("jaccard_score")
self.epoch = 0
self.summary_writer = tf.summary.create_file_writer(os.path.join(log_dir, name))
    def on_epoch_end(self, epoch, logs=None):
self.epoch += 1
self.keras_metric.reset_state()
predictions = self.model.predict(self.x_test)
jaccard_value = jaccard_score(
np.argmax(predictions, axis=-1), self.y_test, average=None
)
self.keras_metric.update_state(jaccard_value)
self._write_metric(
self.keras_metric.name, self.keras_metric.result().numpy().astype(float)
)
def _write_metric(self, name, value):
with self.summary_writer.as_default():
tf.summary.scalar(
name,
value,
step=self.epoch,
)
self.summary_writer.flush()
```
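For instance, a hypothetical `F1ScoreCallback` (an illustrative sketch, not part of the
original example) could reuse the same TensorBoard plumbing and only swap out the metric
computation:

```python
from sklearn.metrics import f1_score


class F1ScoreCallback(JaccardScoreCallback):
    """Same structure as above, but logs a macro-averaged F1 score instead."""

    def __init__(self, name, x_test, y_test, log_dir):
        super().__init__(name, x_test, y_test, log_dir)
        self.keras_metric = keras.metrics.Mean("f1_score")

    def on_epoch_end(self, epoch, logs=None):
        self.epoch += 1
        self.keras_metric.reset_state()
        predictions = self.model.predict(self.x_test)
        f1_value = f1_score(
            self.y_test, np.argmax(predictions, axis=-1), average="macro"
        )
        self.keras_metric.update_state(f1_value)
        self._write_metric(
            self.keras_metric.name, self.keras_metric.result().numpy().astype(float)
        )
```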
---
## Sample usage
Let's test our `JaccardScoreCallback` class with a Keras model.
```python
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
callbacks = [
JaccardScoreCallback(model.name, x_test, np.argmax(y_test, axis=-1), "logs")
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.1,
callbacks=callbacks,
)
```
<div class="k-default-codeblock">
```
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,496</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,010</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Epoch 1/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 16ms/step - accuracy: 0.7706 - loss: 0.7534 - val_accuracy: 0.9768 - val_loss: 0.0842
Epoch 2/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 16ms/step - accuracy: 0.9627 - loss: 0.1228 - val_accuracy: 0.9862 - val_loss: 0.0533
Epoch 3/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 16ms/step - accuracy: 0.9739 - loss: 0.0854 - val_accuracy: 0.9870 - val_loss: 0.0466
Epoch 4/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9787 - loss: 0.0676 - val_accuracy: 0.9892 - val_loss: 0.0416
Epoch 5/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9818 - loss: 0.0590 - val_accuracy: 0.9892 - val_loss: 0.0396
Epoch 6/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9834 - loss: 0.0534 - val_accuracy: 0.9920 - val_loss: 0.0341
Epoch 7/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9837 - loss: 0.0528 - val_accuracy: 0.9907 - val_loss: 0.0358
Epoch 8/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 8s 18ms/step - accuracy: 0.9847 - loss: 0.0466 - val_accuracy: 0.9908 - val_loss: 0.0327
Epoch 9/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 18ms/step - accuracy: 0.9873 - loss: 0.0397 - val_accuracy: 0.9912 - val_loss: 0.0346
Epoch 10/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 8s 18ms/step - accuracy: 0.9862 - loss: 0.0419 - val_accuracy: 0.9913 - val_loss: 0.0315
Epoch 11/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9880 - loss: 0.0370 - val_accuracy: 0.9915 - val_loss: 0.0309
Epoch 12/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9880 - loss: 0.0377 - val_accuracy: 0.9912 - val_loss: 0.0318
Epoch 13/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 17ms/step - accuracy: 0.9889 - loss: 0.0347 - val_accuracy: 0.9930 - val_loss: 0.0293
Epoch 14/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 16ms/step - accuracy: 0.9896 - loss: 0.0333 - val_accuracy: 0.9913 - val_loss: 0.0326
Epoch 15/15
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step
422/422 ━━━━━━━━━━━━━━━━━━━━ 8s 18ms/step - accuracy: 0.9908 - loss: 0.0282 - val_accuracy: 0.9925 - val_loss: 0.0303
<keras.src.callbacks.history.History at 0x17f0655a0>
```
</div>
If you now launch a TensorBoard instance using `tensorboard --logdir=logs`, you will
see the `jaccard_score` metric alongside any other exported metrics!

---
## Conclusion
Many ML practitioners and researchers rely on metrics that may not yet have a TensorFlow
implementation. Keras users can still leverage the wide variety of existing metric
implementations in other frameworks by using a Keras callback. These metrics can be
exported, viewed and analyzed in the TensorBoard like any other metric.
| keras-io/examples/keras_recipes/md/sklearn_metric_callbacks.md/0 | {
"file_path": "keras-io/examples/keras_recipes/md/sklearn_metric_callbacks.md",
"repo_id": "keras-io",
"token_count": 5696
} | 97 |
"""
Title: Abstractive Text Summarization with BART
Author: [Abheesht Sharma](https://github.com/abheesht17/)
Date created: 2023/07/08
Last modified: 2023/07/08
Description: Use KerasNLP to fine-tune BART on the abstractive summarization task.
Accelerator: GPU
"""
"""
## Introduction
In the era of information overload, it has become crucial to extract the crux
of a long document or a conversation and express it in a few sentences. Because
summarization has widespread applications in different domains, it has become a key,
well-studied NLP task in recent years.
[Bidirectional Autoregressive Transformer (BART)](https://arxiv.org/abs/1910.13461)
is a Transformer-based encoder-decoder model, often used for
sequence-to-sequence tasks like summarization and neural machine translation.
BART is pre-trained in a self-supervised fashion on a large text corpus. During
pre-training, the text is corrupted and BART is trained to reconstruct the
original text (hence called a "denoising autoencoder"). Some pre-training tasks
include token masking, token deletion, sentence permutation (shuffle sentences
and train BART to fix the order), etc.
In this example, we will demonstrate how to fine-tune BART on the abstractive
summarization task (on conversations!) using KerasNLP, and generate summaries
using the fine-tuned model.
"""
"""
## Setup
Before we start implementing the pipeline, let's install and import all the
libraries we need. We'll be using the KerasNLP library. We will also need a
couple of utility libraries.
"""
"""shell
pip install git+https://github.com/keras-team/keras-nlp.git py7zr -q
"""
"""
This example uses [Keras Core](https://keras.io/keras_core/) to work in any of
`"tensorflow"`, `"jax"` or `"torch"`. Support for Keras Core is baked into
KerasNLP, simply change the `"KERAS_BACKEND"` environment variable to select
the backend of your choice. We select the JAX backend below.
"""
import os
os.environ["KERAS_BACKEND"] = "jax"
"""
Import all necessary libraries.
"""
import py7zr
import time
import keras_nlp
import tensorflow as tf
import tensorflow_datasets as tfds
import keras_core as keras
"""
Let's also define our hyperparameters.
"""
BATCH_SIZE = 8
NUM_BATCHES = 600
EPOCHS = 1 # Can be set to a higher value for better results
MAX_ENCODER_SEQUENCE_LENGTH = 512
MAX_DECODER_SEQUENCE_LENGTH = 128
MAX_GENERATION_LENGTH = 40
"""
## Dataset
Let's load the [SAMSum dataset](https://arxiv.org/abs/1911.12237). This dataset
contains around 15,000 pairs of conversations/dialogues and summaries.
"""
# Download the dataset.
filename = keras.utils.get_file(
"corpus.7z",
origin="https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z",
)
# Extract the `.7z` file.
with py7zr.SevenZipFile(filename, mode="r") as z:
z.extractall(path="/root/tensorflow_datasets/downloads/manual")
# Load data using TFDS.
samsum_ds = tfds.load("samsum", split="train", as_supervised=True)
"""
The dataset has two fields: `dialogue` and `summary`. Let's see a sample.
"""
for dialogue, summary in samsum_ds:
print(dialogue.numpy())
print(summary.numpy())
break
"""
We'll now batch the dataset and retain only a subset of the dataset for the
purpose of this example. The dialogue is fed to the encoder, and the
corresponding summary serves as input to the decoder. We will, therefore, change
the format of the dataset to a dictionary having two keys: `"encoder_text"` and
`"decoder_text"`. This is how `keras_nlp.models.BartSeq2SeqLMPreprocessor`
expects the input format to be.
"""
train_ds = (
samsum_ds.map(
lambda dialogue, summary: {"encoder_text": dialogue, "decoder_text": summary}
)
.batch(BATCH_SIZE)
.cache()
)
train_ds = train_ds.take(NUM_BATCHES)
"""
## Fine-tune BART
Let's load the model and preprocessor first. We use sequence lengths of 512
and 128 for the encoder and decoder, respectively, instead of 1024 (which is the
default sequence length). This will allow us to run this example quickly
on Colab.
If you observe carefully, the preprocessor is attached to the model. What this
means is that we don't have to worry about preprocessing the text inputs;
everything will be done internally. The preprocessor tokenizes the encoder text
and the decoder text, adds special tokens and pads them. To generate labels
for auto-regressive training, the preprocessor shifts the decoder text one
position to the right. This is done because at every timestep, the model is
trained to predict the next token.
"""
preprocessor = keras_nlp.models.BartSeq2SeqLMPreprocessor.from_preset(
"bart_base_en",
encoder_sequence_length=MAX_ENCODER_SEQUENCE_LENGTH,
decoder_sequence_length=MAX_DECODER_SEQUENCE_LENGTH,
)
bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset(
"bart_base_en", preprocessor=preprocessor
)
bart_lm.summary()
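"""
As an optional, illustrative sanity check, we can run a single example through the
preprocessor and inspect what it returns. The dictionary keys and the
`(x, y, sample_weight)` packing below reflect the current KerasNLP BART preprocessor
and may differ slightly in other versions.
"""

sample_x, sample_y, sample_weight = preprocessor(
    {
        "encoder_text": ["A short dialogue to summarize."],
        "decoder_text": ["A short summary."],
    }
)
print("Encoder token ids:", sample_x["encoder_token_ids"].shape)
print("Decoder token ids:", sample_x["decoder_token_ids"].shape)
print("Labels (decoder ids shifted by one position):", sample_y.shape)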
"""
Define the optimizer and loss. We use the Adam optimizer with a linearly
decaying learning rate. Compile the model.
"""
optimizer = keras.optimizers.AdamW(
learning_rate=5e-5,
weight_decay=0.01,
epsilon=1e-6,
global_clipnorm=1.0, # Gradient clipping.
)
# Exclude layernorm and bias terms from weight decay.
optimizer.exclude_from_weight_decay(var_names=["bias"])
optimizer.exclude_from_weight_decay(var_names=["gamma"])
optimizer.exclude_from_weight_decay(var_names=["beta"])
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
bart_lm.compile(
optimizer=optimizer,
loss=loss,
weighted_metrics=["accuracy"],
)
"""
Let's train the model!
"""
bart_lm.fit(train_ds, epochs=EPOCHS)
"""
## Generate summaries and evaluate them!
Now that the model has been trained, let's get to the fun part - actually
generating summaries! Let's pick the first 100 samples from the validation set
and generate summaries for them. We will use the default decoding strategy, i.e.,
greedy search.
Generation in KerasNLP is highly optimized. First, it is backed by the power of XLA.
Second, key/value tensors in the self-attention layer and cross-attention layer
in the decoder are cached to avoid recomputation at every timestep.
"""
def generate_text(model, input_text, max_length=200, print_time_taken=False):
start = time.time()
output = model.generate(input_text, max_length=max_length)
end = time.time()
    if print_time_taken:
        print(f"Total Time Elapsed: {end - start:.2f}s")
return output
# Load the dataset.
val_ds = tfds.load("samsum", split="validation", as_supervised=True)
val_ds = val_ds.take(100)
dialogues = []
ground_truth_summaries = []
for dialogue, summary in val_ds:
dialogues.append(dialogue.numpy())
ground_truth_summaries.append(summary.numpy())
# Let's make a dummy call - the first call to XLA generally takes a bit longer.
_ = generate_text(bart_lm, "sample text", max_length=MAX_GENERATION_LENGTH)
# Generate summaries.
generated_summaries = generate_text(
bart_lm,
val_ds.map(lambda dialogue, _: dialogue).batch(8),
max_length=MAX_GENERATION_LENGTH,
print_time_taken=True,
)
"""
Let's see some of the summaries.
"""
for dialogue, generated_summary, ground_truth_summary in zip(
dialogues[:5], generated_summaries[:5], ground_truth_summaries[:5]
):
print("Dialogue:", dialogue)
print("Generated Summary:", generated_summary)
print("Ground Truth Summary:", ground_truth_summary)
print("=============================")
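"""
Optionally, we can attach a quantitative score to the generated summaries. The snippet
below is an illustrative sketch using KerasNLP's ROUGE-L metric; it assumes the
`rouge_score` package is installed, and the exact metric API may vary across KerasNLP
versions.
"""

rouge_l = keras_nlp.metrics.RougeL()
rouge_l.update_state(
    [summary.decode("utf-8") for summary in ground_truth_summaries],
    list(generated_summaries),
)
print("ROUGE-L:", rouge_l.result())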
"""
The generated summaries look awesome! Not bad for a model trained only for 1
epoch and on 5000 examples :)
"""
| keras-io/examples/nlp/abstractive_summarization_with_bart.py/0 | {
"file_path": "keras-io/examples/nlp/abstractive_summarization_with_bart.py",
"repo_id": "keras-io",
"token_count": 2465
} | 98 |
<jupyter_start><jupyter_text>MultipleChoice Task with Transfer Learning**Author:** Md Awsafur Rahman**Date created:** 2023/09/14**Last modified:** 2023/09/14**Description:** Use pre-trained NLP models for the multiple-choice task. IntroductionIn this example, we will demonstrate how to perform the **MultipleChoice** task by finetuning a pre-trained DebertaV3 model. In this task, several candidate answers are provided along with a context, and the model is trained to select the correct answer, unlike question answering. We will use the SWAG dataset to demonstrate this example. Setup<jupyter_code>import keras_nlp
import keras
import tensorflow as tf # For tf.data only.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>DatasetIn this example we'll use the **SWAG** dataset for the multiple-choice task.<jupyter_code>!wget "https://github.com/rowanz/swagaf/archive/refs/heads/master.zip" -O swag.zip
!unzip -q swag.zip
!ls swagaf-master/data<jupyter_output><empty_output><jupyter_text>Configuration<jupyter_code>class CFG:
preset = "deberta_v3_extra_small_en" # Name of pretrained models
sequence_length = 200 # Input sequence length
seed = 42 # Random seed
epochs = 5 # Training epochs
batch_size = 8 # Batch size
augment = True # Augmentation (Shuffle Options)<jupyter_output><empty_output><jupyter_text>ReproducibilitySets value for random seed to produce similar result in each run.<jupyter_code>keras.utils.set_random_seed(CFG.seed)<jupyter_output><empty_output><jupyter_text>Meta Data* **train.csv** - will be used for training.* `sent1` and `sent2`: these fields show how a sentence starts, and if you put the twotogether, you get the `startphrase` field.* `ending_`: suggests a possible ending for how a sentence can end, but only one ofthem is correct. * `label`: identifies the correct sentence ending.* **val.csv** - similar to `train.csv` but will be used for validation.<jupyter_code># Train data
train_df = pd.read_csv(
"swagaf-master/data/train.csv", index_col=0
) # Read CSV file into a DataFrame
train_df = train_df.sample(frac=0.02)
print("# Train Data: {:,}".format(len(train_df)))
# Valid data
valid_df = pd.read_csv(
"swagaf-master/data/val.csv", index_col=0
) # Read CSV file into a DataFrame
valid_df = valid_df.sample(frac=0.02)
print("# Valid Data: {:,}".format(len(valid_df)))<jupyter_output><empty_output><jupyter_text>Contextualize OptionsOur approach entails furnishing the model with question and answer pairs, as opposed toemploying a single question for all five options. In practice, this signifies that forthe five options, we will supply the model with the same set of five questions combinedwith each respective answer choice (e.g., `(Q + A)`, `(Q + B)`, and so on). This analogydraws parallels to the practice of revisiting a question multiple times during an exam topromote a deeper understanding of the problem at hand.> Notably, in the context of SWAG dataset, question is the start of a sentence andoptions are possible ending of that sentence.<jupyter_code># Define a function to create options based on the prompt and choices
def make_options(row):
row["options"] = [
f"{row.startphrase}\n{row.ending0}", # Option 0
f"{row.startphrase}\n{row.ending1}", # Option 1
f"{row.startphrase}\n{row.ending2}", # Option 2
f"{row.startphrase}\n{row.ending3}",
] # Option 3
return row<jupyter_output><empty_output><jupyter_text>Apply the `make_options` function to each row of the dataframe<jupyter_code>train_df = train_df.apply(make_options, axis=1)
valid_df = valid_df.apply(make_options, axis=1)<jupyter_output><empty_output><jupyter_text>Preprocessing**What it does:** The preprocessor takes input strings and transforms them into adictionary (`token_ids`, `padding_mask`) containing preprocessed tensors. This processstarts with tokenization, where input strings are converted into sequences of token IDs.**Why it's important:** Initially, raw text data is complex and challenging for modelingdue to its high dimensionality. By converting text into a compact set of tokens, such astransforming `"The quick brown fox"` into `["the", "qu", "ick", "br", "own", "fox"]`,we simplify the data. Many models rely on special tokens and additional tensors tounderstand input. These tokens help divide input and identify padding, among other tasks.Making all sequences the same length through padding boosts computational efficiency,making subsequent steps smoother.Explore the following pages to access the available preprocessing and tokenizer layers in**KerasNLP**:- [Preprocessing](https://keras.io/api/keras_nlp/preprocessing_layers/)- [Tokenizers](https://keras.io/api/keras_nlp/tokenizers/)<jupyter_code>preprocessor = keras_nlp.models.DebertaV3Preprocessor.from_preset(
preset=CFG.preset, # Name of the model
sequence_length=CFG.sequence_length, # Max sequence length, will be padded if shorter
)<jupyter_output><empty_output><jupyter_text>Now, let's examine what the output shape of the preprocessing layer looks like. Theoutput shape of the layer can be represented as $(num\_choices, sequence\_length)$.<jupyter_code>outs = preprocessor(train_df.options.iloc[0]) # Process options for the first row
# Display the shape of each processed output
for k, v in outs.items():
print(k, ":", v.shape)<jupyter_output><empty_output><jupyter_text>We'll use the `preprocessing_fn` function to transform each text option using the`dataset.map(preprocessing_fn)` method.<jupyter_code>def preprocess_fn(text, label=None):
text = preprocessor(text) # Preprocess text
return (
(text, label) if label is not None else text
) # Return processed text and label if available<jupyter_output><empty_output><jupyter_text>AugmentationIn this notebook, we'll experiment with an interesting augmentation technique,`option_shuffle`. Since we're providing the model with one option at a time, we canintroduce a shuffle to the order of options. For instance, options `[A, C, E, D, B]`would be rearranged as `[D, B, A, E, C]`. This practice will help the model focus on thecontent of the options themselves, rather than being influenced by their positions.**Note:** Even though `option_shuffle` function is written in puretensorflow, it can be used with any backend (e.g. JAX, PyTorch) as it is only usedin `tf.data.Dataset` pipeline which is compatible with Keras 3 routines.<jupyter_code>def option_shuffle(options, labels, prob=0.50, seed=None):
if tf.random.uniform([]) > prob: # Shuffle probability check
return options, labels
# Shuffle indices of options and labels in the same order
indices = tf.random.shuffle(tf.range(tf.shape(options)[0]), seed=seed)
# Shuffle options and labels
options = tf.gather(options, indices)
labels = tf.gather(labels, indices)
return options, labels<jupyter_output><empty_output><jupyter_text>In the following function, we'll merge all augmentation functions to apply to the text.These augmentations will be applied to the data using the `dataset.map(augment_fn)`approach.<jupyter_code>def augment_fn(text, label=None):
text, label = option_shuffle(text, label, prob=0.5) # Shuffle the options
return (text, label) if label is not None else text<jupyter_output><empty_output><jupyter_text>DataLoaderThe code below sets up a robust data flow pipeline using `tf.data.Dataset` for dataprocessing. Notable aspects of `tf.data` include its ability to simplify pipelineconstruction and represent components in sequences.To learn more about `tf.data`, refer to this[documentation](https://www.tensorflow.org/guide/data).<jupyter_code>def build_dataset(
texts,
labels=None,
batch_size=32,
cache=False,
augment=False,
repeat=False,
shuffle=1024,
):
AUTO = tf.data.AUTOTUNE # AUTOTUNE option
slices = (
(texts,)
if labels is None
else (texts, keras.utils.to_categorical(labels, num_classes=4))
) # Create slices
ds = tf.data.Dataset.from_tensor_slices(slices) # Create dataset from slices
ds = ds.cache() if cache else ds # Cache dataset if enabled
if augment: # Apply augmentation if enabled
ds = ds.map(augment_fn, num_parallel_calls=AUTO)
ds = ds.map(preprocess_fn, num_parallel_calls=AUTO) # Map preprocessing function
ds = ds.repeat() if repeat else ds # Repeat dataset if enabled
opt = tf.data.Options() # Create dataset options
if shuffle:
ds = ds.shuffle(shuffle, seed=CFG.seed) # Shuffle dataset if enabled
opt.experimental_deterministic = False
ds = ds.with_options(opt) # Set dataset options
ds = ds.batch(batch_size, drop_remainder=True) # Batch dataset
ds = ds.prefetch(AUTO) # Prefetch next batch
    return ds # Return the built dataset<jupyter_output><empty_output><jupyter_text>Now let's create the train and valid dataloaders using the above function.<jupyter_code># Build train dataloader
train_texts = train_df.options.tolist() # Extract training texts
train_labels = train_df.label.tolist() # Extract training labels
train_ds = build_dataset(
train_texts,
train_labels,
batch_size=CFG.batch_size,
cache=True,
shuffle=True,
repeat=True,
augment=CFG.augment,
)
# Build valid dataloader
valid_texts = valid_df.options.tolist() # Extract validation texts
valid_labels = valid_df.label.tolist() # Extract validation labels
valid_ds = build_dataset(
valid_texts,
valid_labels,
batch_size=CFG.batch_size,
cache=True,
shuffle=False,
repeat=False,
augment=False,
)<jupyter_output><empty_output><jupyter_text>LR ScheduleImplementing a learning rate scheduler is crucial for transfer learning. The learningrate initiates at `lr_start` and gradually tapers down to `lr_min` using **cosine**curve.**Importance:** A well-structured learning rate schedule is essential for efficient modeltraining, ensuring optimal convergence and avoiding issues such as overshooting orstagnation.<jupyter_code>import math
def get_lr_callback(batch_size=8, mode="cos", epochs=10, plot=False):
lr_start, lr_max, lr_min = 1.0e-6, 0.6e-6 * batch_size, 1e-6
lr_ramp_ep, lr_sus_ep = 2, 0
def lrfn(epoch): # Learning rate update function
if epoch < lr_ramp_ep:
lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
decay_total_epochs, decay_epoch_index = (
epochs - lr_ramp_ep - lr_sus_ep + 3,
epoch - lr_ramp_ep - lr_sus_ep,
)
phase = math.pi * decay_epoch_index / decay_total_epochs
lr = (lr_max - lr_min) * 0.5 * (1 + math.cos(phase)) + lr_min
return lr
if plot: # Plot lr curve if plot is True
plt.figure(figsize=(10, 5))
plt.plot(
np.arange(epochs),
[lrfn(epoch) for epoch in np.arange(epochs)],
marker="o",
)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("LR Scheduler")
plt.show()
return keras.callbacks.LearningRateScheduler(
lrfn, verbose=False
) # Create lr callback
_ = get_lr_callback(CFG.batch_size, plot=True)<jupyter_output><empty_output><jupyter_text>CallbacksThe function below will gather all the training callbacks, such as `lr_scheduler`,`model_checkpoint`.<jupyter_code>def get_callbacks():
callbacks = []
lr_cb = get_lr_callback(CFG.batch_size) # Get lr callback
ckpt_cb = keras.callbacks.ModelCheckpoint(
f"best.keras",
monitor="val_accuracy",
save_best_only=True,
save_weights_only=False,
mode="max",
) # Get Model checkpoint callback
callbacks.extend([lr_cb, ckpt_cb]) # Add lr and checkpoint callbacks
return callbacks # Return the list of callbacks
callbacks = get_callbacks()<jupyter_output><empty_output><jupyter_text>MultipleChoice Model Pre-trained ModelsThe `KerasNLP` library provides comprehensive, ready-to-use implementations of popular NLP model architectures. It features a variety of pre-trained models including `Bert`, `Roberta`, `DebertaV3`, and more. In this notebook, we'll showcase the usage of `DebertaV3`. However, feel free to explore all available models in the [KerasNLP documentation](https://keras.io/api/keras_nlp/models/). Also, for a deeper understanding of `KerasNLP`, refer to the informative [getting started guide](https://keras.io/guides/keras_nlp/getting_started/).Our approach involves using `keras_nlp.models.XXClassifier` to process each question and option pair (e.g. (Q+A), (Q+B), etc.), generating logits. These logits are then combined and passed through a softmax function to produce the final output. Classifier for Multiple-Choice TasksWhen dealing with multiple-choice questions, instead of giving the model the question and all options together `(Q + A + B + C ...)`, we provide the model with one option at a time along with the question. For instance, `(Q + A)`, `(Q + B)`, and so on. Once we have the prediction scores (logits) for all options, we combine them using the `Softmax` function to get the ultimate result. If we had given all options at once to the model, the text's length would increase, making it harder for the model to handle. The picture below illustrates this idea: Picture Credit: <a href="https://twitter.com/johnowhitaker"> @johnowhitaker From a coding perspective, remember that we use the same model for all options, with shared weights. Despite the figure suggesting separate models, they are, in fact, one model with shared weights. Another point to consider is the input shapes of Classifier and MultipleChoice.* Input shape for **Multiple Choice**: $(batch\_size, num\_choices, seq\_length)$* Input shape for **Classifier**: $(batch\_size, seq\_length)$Certainly, it's clear that we can't directly give the data for the multiple-choice task to the model because the input shapes don't match. To handle this, we'll use **slicing**. This means we'll separate the features of each option, like $feature_{(Q + A)}$ and $feature_{(Q + B)}$, and give them one by one to the NLP classifier. After we get the prediction scores $logits_{(Q + A)}$ and $logits_{(Q + B)}$ for all the options, we'll use the Softmax function, like $\operatorname{Softmax}([logits_{(Q + A)}, logits_{(Q + B)}])$, to combine them. This final step helps us make the ultimate decision or choice.> Note that in the classifier, we set `num_classes=1` instead of `4`. This is because the classifier produces a single output for each option. When dealing with four options, these individual outputs are joined together and then processed through a softmax function to generate the final result, which has a dimension of `4`.<jupyter_code># Selects one option from four
class SelectOption(keras.layers.Layer):
def __init__(self, index, **kwargs):
super().__init__(**kwargs)
self.index = index
def call(self, inputs):
# Selects a specific slice from the inputs tensor
return inputs[:, self.index, :]
def get_config(self):
        # For serializing the model
base_config = super().get_config()
config = {
"index": self.index,
}
return {**base_config, **config}
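# Quick illustrative check of `SelectOption` (assumes `np` from the imports above):
# slicing a dummy batch of shape (batch, num_options, seq_len) returns the features
# of a single option, with shape (batch, seq_len).
demo_batch = np.ones((2, 4, 8))
print(SelectOption(index=0)(demo_batch).shape)  # -> (2, 8)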
def build_model():
# Define input layers
inputs = {
"token_ids": keras.Input(shape=(4, None), dtype="int32", name="token_ids"),
"padding_mask": keras.Input(
shape=(4, None), dtype="int32", name="padding_mask"
),
}
# Create a DebertaV3Classifier model
classifier = keras_nlp.models.DebertaV3Classifier.from_preset(
CFG.preset,
preprocessor=None,
        num_classes=1,  # one output per option; logits from all options are combined later
)
logits = []
    # Loop through each option (Q+A), (Q+B), etc. and compute the associated logits
for option_idx in range(4):
option = {
k: SelectOption(option_idx, name=f"{k}_{option_idx}")(v)
for k, v in inputs.items()
}
logit = classifier(option)
logits.append(logit)
# Compute final output
logits = keras.layers.Concatenate(axis=-1)(logits)
outputs = keras.layers.Softmax(axis=-1)(logits)
model = keras.Model(inputs, outputs)
# Compile the model with optimizer, loss, and metrics
model.compile(
optimizer=keras.optimizers.AdamW(5e-6),
loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.02),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
],
jit_compile=True,
)
return model
# Build the model
model = build_model()<jupyter_output><empty_output><jupyter_text>Let's checkout the model summary to have a better insight on the model.<jupyter_code>model.summary()<jupyter_output><empty_output><jupyter_text>Finally, let's check the model structure visually if everything is in place.<jupyter_code>keras.utils.plot_model(model, show_shapes=True)<jupyter_output><empty_output><jupyter_text>Training<jupyter_code># Start training the model
history = model.fit(
train_ds,
epochs=CFG.epochs,
validation_data=valid_ds,
callbacks=callbacks,
steps_per_epoch=int(len(train_df) / CFG.batch_size),
verbose=1,
)<jupyter_output><empty_output><jupyter_text>Inference<jupyter_code># Make predictions using the trained model on last validation data
predictions = model.predict(
valid_ds,
batch_size=CFG.batch_size, # max batch size = valid size
verbose=1,
)
# Format predictions and true answers
pred_answers = np.arange(4)[np.argsort(-predictions)][:, 0]
true_answers = valid_df.label.values
# Check 5 Predictions
print("# Predictions\n")
for i in range(0, 50, 10):
row = valid_df.iloc[i]
question = row.startphrase
pred_answer = f"ending{pred_answers[i]}"
true_answer = f"ending{true_answers[i]}"
print(f"❓ Sentence {i+1}:\n{question}\n")
print(f"✅ True Ending: {true_answer}\n >> {row[true_answer]}\n")
print(f"🤖 Predicted Ending: {pred_answer}\n >> {row[pred_answer]}\n")
print("-" * 90, "\n")<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/multiple_choice_task_with_transfer_learning.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/multiple_choice_task_with_transfer_learning.ipynb",
"repo_id": "keras-io",
"token_count": 6386
} | 99 |
<jupyter_start><jupyter_text>Text classification using Decision Forests and pretrained embeddings**Author:** Gitesh Chawda**Date created:** 09/05/2022**Last modified:** 09/05/2022**Description:** Using TensorFlow Decision Forests for text classification. Introduction [TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests) (TF-DF) is a collection of state-of-the-art algorithms for Decision Forest models that are compatible with Keras APIs. The module includes Random Forests, Gradient Boosted Trees, and CART, and can be used for regression, classification, and ranking tasks. In this example we will use Gradient Boosted Trees with pretrained embeddings to classify disaster-related tweets. See also:- [TF-DF beginner tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab)- [TF-DF intermediate tutorial](https://www.tensorflow.org/decision_forests/tutorials/intermediate_colab). Install TensorFlow Decision Forests using the following command: `pip install tensorflow_decision_forests` Imports<jupyter_code>import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
from tensorflow.keras import layers
import tensorflow_decision_forests as tfdf
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Get the data The dataset is available on [Kaggle](https://www.kaggle.com/c/nlp-getting-started). Dataset description:**Files:**- train.csv: the training set**Columns:**- id: a unique identifier for each tweet- text: the text of the tweet- location: the location the tweet was sent from (may be blank)- keyword: a particular keyword from the tweet (may be blank)- target: in train.csv only, this denotes whether a tweet is about a real disaster (1) or not (0)<jupyter_code># Turn the .csv file into a pandas DataFrame
df = pd.read_csv(
"https://raw.githubusercontent.com/IMvision12/Tweets-Classification-NLP/main/train.csv"
)
print(df.head())<jupyter_output><empty_output><jupyter_text>The dataset includes 7613 samples with 5 columns:<jupyter_code>print(f"Training dataset shape: {df.shape}")<jupyter_output><empty_output><jupyter_text>Shuffling and dropping unnecessary columns:<jupyter_code>df_shuffled = df.sample(frac=1, random_state=42)
# Dropping id, keyword and location columns as these columns consist of mostly nan values;
# we will be using only text and target columns
df_shuffled.drop(["id", "keyword", "location"], axis=1, inplace=True)
df_shuffled.reset_index(inplace=True, drop=True)
print(df_shuffled.head())<jupyter_output><empty_output><jupyter_text>Printing information about the shuffled dataframe:<jupyter_code>print(df_shuffled.info())<jupyter_output><empty_output><jupyter_text>Total number of "disaster" and "non-disaster" tweets:<jupyter_code>print(
"Total Number of disaster and non-disaster tweets: "
f"{df_shuffled.target.value_counts()}"
)<jupyter_output><empty_output><jupyter_text>Let's preview a few samples:<jupyter_code>for index, example in df_shuffled[:5].iterrows():
print(f"Example #{index}")
print(f"\tTarget : {example['target']}")
print(f"\tText : {example['text']}")<jupyter_output><empty_output><jupyter_text>Splitting dataset into training and test sets:<jupyter_code>test_df = df_shuffled.sample(frac=0.1, random_state=42)
train_df = df_shuffled.drop(test_df.index)
print(f"Using {len(train_df)} samples for training and {len(test_df)} for validation")<jupyter_output><empty_output><jupyter_text>Total number of "disaster" and "non-disaster" tweets in the training data:<jupyter_code>print(train_df["target"].value_counts())<jupyter_output><empty_output><jupyter_text>Total number of "disaster" and "non-disaster" tweets in the test data:<jupyter_code>print(test_df["target"].value_counts())<jupyter_output><empty_output><jupyter_text>Convert data to a `tf.data.Dataset`<jupyter_code>def create_dataset(dataframe):
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["text"].to_numpy(), dataframe["target"].to_numpy())
)
dataset = dataset.batch(100)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
train_ds = create_dataset(train_df)
test_ds = create_dataset(test_df)<jupyter_output><empty_output><jupyter_text>Downloading pretrained embeddingsThe Universal Sentence Encoder embeddings encode text into high-dimensional vectors that can beused for text classification, semantic similarity, clustering and other natural languagetasks. They're trained on a variety of data sources and a variety of tasks. Their input isvariable-length English text and their output is a 512 dimensional vector.To learn more about these pretrained embeddings, see[Universal Sentence Encoder](https://tfhub.dev/google/universal-sentence-encoder/4).<jupyter_code>sentence_encoder_layer = hub.KerasLayer(
"https://tfhub.dev/google/universal-sentence-encoder/4"
)<jupyter_output><empty_output><jupyter_text>Creating our modelsWe create two models. In the first model (model_1) raw text will be first encoded viapretrained embeddings and then passed to a Gradient Boosted Tree model forclassification. In the second model (model_2) raw text will be directly passed tothe Gradient Boosted Trees model. Building model_1<jupyter_code>inputs = layers.Input(shape=(), dtype=tf.string)
outputs = sentence_encoder_layer(inputs)
preprocessor = keras.Model(inputs=inputs, outputs=outputs)
model_1 = tfdf.keras.GradientBoostedTreesModel(preprocessing=preprocessor)<jupyter_output><empty_output><jupyter_text>Building model_2<jupyter_code>model_2 = tfdf.keras.GradientBoostedTreesModel()<jupyter_output><empty_output><jupyter_text>Train the modelsWe compile our model by passing the metrics `Accuracy`, `Recall`, `Precision` and`AUC`. When it comes to the loss, TF-DF automatically detects the best loss for the task(Classification or regression). It is printed in the model summary.Also, because they're batch-training models rather than mini-batch gradient descent models,TF-DF models do not need a validation dataset to monitor overfitting, or to stoptraining early. Some algorithms do not use a validation dataset (e.g. Random Forest)while some others do (e.g. Gradient Boosted Trees). If a validation dataset isneeded, it will be extracted automatically from the training dataset.<jupyter_code># Compiling model_1
model_1.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs as, TF-DF trains exactly one epoch of the dataset
model_1.fit(train_ds)
# Compiling model_2
model_2.compile(metrics=["Accuracy", "Recall", "Precision", "AUC"])
# Here we do not specify epochs as, TF-DF trains exactly one epoch of the dataset
model_2.fit(train_ds)<jupyter_output><empty_output><jupyter_text>Prints training logs of model_1<jupyter_code>logs_1 = model_1.make_inspector().training_logs()
print(logs_1)<jupyter_output><empty_output><jupyter_text>Prints training logs of model_2<jupyter_code>logs_2 = model_2.make_inspector().training_logs()
print(logs_2)<jupyter_output><empty_output><jupyter_text>The model.summary() method prints a variety of information about your decision tree model, including model type, task, input features, and feature importance.<jupyter_code>print("model_1 summary: ")
print(model_1.summary())
print()
print("model_2 summary: ")
print(model_2.summary())<jupyter_output><empty_output><jupyter_text>Plotting training metrics<jupyter_code>def plot_curve(logs):
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot([log.num_trees for log in logs], [log.evaluation.accuracy for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.plot([log.num_trees for log in logs], [log.evaluation.loss for log in logs])
plt.xlabel("Number of trees")
plt.ylabel("Loss")
plt.show()
plot_curve(logs_1)
plot_curve(logs_2)<jupyter_output><empty_output><jupyter_text>Evaluating on test data<jupyter_code>results = model_1.evaluate(test_ds, return_dict=True, verbose=0)
print("model_1 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")
results = model_2.evaluate(test_ds, return_dict=True, verbose=0)
print("model_2 Evaluation: \n")
for name, value in results.items():
print(f"{name}: {value:.4f}")<jupyter_output><empty_output><jupyter_text>Predicting on validation data<jupyter_code>test_df.reset_index(inplace=True, drop=True)
for index, row in test_df.iterrows():
text = tf.expand_dims(row["text"], axis=0)
preds = model_1.predict_step(text)
preds = tf.squeeze(tf.round(preds))
print(f"Text: {row['text']}")
print(f"Prediction: {int(preds)}")
print(f"Ground Truth : {row['target']}")
if index == 10:
break<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/tweet-classification-using-tfdf.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/tweet-classification-using-tfdf.ipynb",
"repo_id": "keras-io",
"token_count": 2956
} | 100 |
# English-to-Spanish translation with KerasNLP
**Author:** [Abheesht Sharma](https://github.com/abheesht17/)<br>
**Date created:** 2022/05/26<br>
**Last modified:** 2022/12/21<br>
**Description:** Use KerasNLP to train a sequence-to-sequence Transformer model on the machine translation task.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/neural_machine_translation_with_keras_nlp.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/neural_machine_translation_with_keras_nlp.py)
---
## Introduction
KerasNLP provides building blocks for NLP (model layers, tokenizers, metrics, etc.) and
makes it convenient to construct NLP pipelines.
In this example, we'll use KerasNLP layers to build an encoder-decoder Transformer
model, and train it on the English-to-Spanish machine translation task.
This example is based on the
[English-to-Spanish NMT
example](https://keras.io/examples/nlp/neural_machine_translation_with_transformer/)
by [fchollet](https://twitter.com/fchollet). The original example is more low-level
and implements layers from scratch, whereas this example uses KerasNLP to show
some more advanced approaches, such as subword tokenization and using metrics
to compute the quality of generated translations.
You'll learn how to:
- Tokenize text using `keras_nlp.tokenizers.WordPieceTokenizer`.
- Implement a sequence-to-sequence Transformer model using KerasNLP's
`keras_nlp.layers.TransformerEncoder`, `keras_nlp.layers.TransformerDecoder` and
`keras_nlp.layers.TokenAndPositionEmbedding` layers, and train it.
- Use `keras_nlp.samplers` to generate translations of unseen input sentences
  using greedy decoding!
Don't worry if you aren't familiar with KerasNLP. This tutorial will start with
the basics. Let's dive right in!
---
## Setup
Before we start implementing the pipeline, let's import all the libraries we need.
```python
!pip install -q --upgrade rouge-score
!pip install -q --upgrade keras-nlp
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
```python
import keras_nlp
import pathlib
import random
import keras
from keras import ops
import tensorflow.data as tf_data
from tensorflow_text.tools.wordpiece_vocab import (
bert_vocab_from_dataset as bert_vocab,
)
```
<div class="k-default-codeblock">
```
['\x1b[33mWARNING: There was an error checking the latest version of pip.\x1b[0m\x1b[33m',
'\x1b[0m']
```
</div>
Let's also define our parameters/hyperparameters.
```python
BATCH_SIZE = 64
EPOCHS = 1 # This should be at least 10 for convergence
MAX_SEQUENCE_LENGTH = 40
ENG_VOCAB_SIZE = 15000
SPA_VOCAB_SIZE = 15000
EMBED_DIM = 256
INTERMEDIATE_DIM = 2048
NUM_HEADS = 8
```
---
## Downloading the data
We'll be working with an English-to-Spanish translation dataset
provided by [Anki](https://www.manythings.org/anki/). Let's download it:
```python
text_file = keras.utils.get_file(
fname="spa-eng.zip",
origin="http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip",
extract=True,
)
text_file = pathlib.Path(text_file).parent / "spa-eng" / "spa.txt"
```
---
## Parsing the data
Each line contains an English sentence and its corresponding Spanish sentence.
The English sentence is the *source sequence* and Spanish one is the *target sequence*.
Before adding the text to a list, we convert it to lowercase.
```python
with open(text_file) as f:
lines = f.read().split("\n")[:-1]
text_pairs = []
for line in lines:
eng, spa = line.split("\t")
eng = eng.lower()
spa = spa.lower()
text_pairs.append((eng, spa))
```
Here's what our sentence pairs look like:
```python
for _ in range(5):
print(random.choice(text_pairs))
```
<div class="k-default-codeblock">
```
('will the coffee stain ruin the carpet?', '¿la mancha de café va a arruinar la alfombra?')
('is it only about money?', '¿sólo se trata de dinero?')
('most students come to school on foot.', 'la mayoría de los estudiantes vienen a la escuela de a pie.')
("tom doesn't want to make mary angry.", 'tom no quiere hacer enojar a mary.')
('i can fly.', 'puedo volar.')
```
</div>
Now, let's split the sentence pairs into a training set, a validation set,
and a test set.
```python
random.shuffle(text_pairs)
num_val_samples = int(0.15 * len(text_pairs))
num_train_samples = len(text_pairs) - 2 * num_val_samples
train_pairs = text_pairs[:num_train_samples]
val_pairs = text_pairs[num_train_samples : num_train_samples + num_val_samples]
test_pairs = text_pairs[num_train_samples + num_val_samples :]
print(f"{len(text_pairs)} total pairs")
print(f"{len(train_pairs)} training pairs")
print(f"{len(val_pairs)} validation pairs")
print(f"{len(test_pairs)} test pairs")
```
<div class="k-default-codeblock">
```
118964 total pairs
83276 training pairs
17844 validation pairs
17844 test pairs
```
</div>
---
## Tokenizing the data
We'll define two tokenizers - one for the source language (English), and the other
for the target language (Spanish). We'll be using
`keras_nlp.tokenizers.WordPieceTokenizer` to tokenize the text.
`keras_nlp.tokenizers.WordPieceTokenizer` takes a WordPiece vocabulary
and has functions for tokenizing the text, and detokenizing sequences of tokens.
Before we define the two tokenizers, we first need to train them on the dataset
we have. The WordPiece tokenization algorithm is a subword tokenization algorithm;
training it on a corpus gives us a vocabulary of subwords. A subword tokenizer
is a compromise between word tokenizers (word tokenizers need very large
vocabularies for good coverage of input words), and character tokenizers
(characters don't really encode meaning like words do). Luckily, KerasNLP
makes it very simple to train WordPiece on a corpus with the
`keras_nlp.tokenizers.compute_word_piece_vocabulary` utility.
```python
def train_word_piece(text_samples, vocab_size, reserved_tokens):
word_piece_ds = tf_data.Dataset.from_tensor_slices(text_samples)
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
word_piece_ds.batch(1000).prefetch(2),
vocabulary_size=vocab_size,
reserved_tokens=reserved_tokens,
)
return vocab
```
Every vocabulary has a few special, reserved tokens. We have four such tokens:
- `"[PAD]"` - Padding token. Padding tokens are appended to the input sequence
length when the input sequence length is shorter than the maximum sequence length.
- `"[UNK]"` - Unknown token.
- `"[START]"` - Token that marks the start of the input sequence.
- `"[END]"` - Token that marks the end of the input sequence.
```python
reserved_tokens = ["[PAD]", "[UNK]", "[START]", "[END]"]
eng_samples = [text_pair[0] for text_pair in train_pairs]
eng_vocab = train_word_piece(eng_samples, ENG_VOCAB_SIZE, reserved_tokens)
spa_samples = [text_pair[1] for text_pair in train_pairs]
spa_vocab = train_word_piece(spa_samples, SPA_VOCAB_SIZE, reserved_tokens)
```
Let's see some tokens!
```python
print("English Tokens: ", eng_vocab[100:110])
print("Spanish Tokens: ", spa_vocab[100:110])
```
<div class="k-default-codeblock">
```
English Tokens: ['at', 'know', 'him', 'there', 'they', 'go', 'her', 'has', 'will', 're']
Spanish Tokens: ['qué', 'le', 'ella', 'para', 'te', 'mary', 'las', 'más', 'al', 'yo']
```
</div>
Now, let's define the tokenizers. We will configure the tokenizers with the
vocabularies trained above.
```python
eng_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=eng_vocab, lowercase=False
)
spa_tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=spa_vocab, lowercase=False
)
```
Let's try and tokenize a sample from our dataset! To verify whether the text has
been tokenized correctly, we can also detokenize the list of tokens back to the
original text.
```python
eng_input_ex = text_pairs[0][0]
eng_tokens_ex = eng_tokenizer.tokenize(eng_input_ex)
print("English sentence: ", eng_input_ex)
print("Tokens: ", eng_tokens_ex)
print(
"Recovered text after detokenizing: ",
eng_tokenizer.detokenize(eng_tokens_ex),
)
print()
spa_input_ex = text_pairs[0][1]
spa_tokens_ex = spa_tokenizer.tokenize(spa_input_ex)
print("Spanish sentence: ", spa_input_ex)
print("Tokens: ", spa_tokens_ex)
print(
"Recovered text after detokenizing: ",
spa_tokenizer.detokenize(spa_tokens_ex),
)
```
<div class="k-default-codeblock">
```
English sentence: tom thinks mary should apologize to john for not doing what she said she'd do.
Tokens: tf.Tensor(
[ 69 640 86 151 1274 67 309 82 97 288 85 84 181 84
8 29 77 11], shape=(18,), dtype=int32)
Recovered text after detokenizing: tf.Tensor(b"tom thinks mary should apologize to john for not doing what she said she ' d do .", shape=(), dtype=string)
```
</div>
<div class="k-default-codeblock">
```
Spanish sentence: tom piensa que mary debería pedirle perdón a john por no hacer lo que había dicho que haría.
Tokens: tf.Tensor(
[ 82 704 80 105 262 1666 1894 29 314 91 81 125 92 80
179 464 80 915 14], shape=(19,), dtype=int32)
Recovered text after detokenizing: tf.Tensor(b'tom piensa que mary deber\xc3\xada pedirle perd\xc3\xb3n a john por no hacer lo que hab\xc3\xada dicho que har\xc3\xada .', shape=(), dtype=string)
```
</div>
---
## Format datasets
Next, we'll format our datasets.
At each training step, the model will seek to predict target words N+1 (and beyond)
using the source sentence and the target words 0 to N.
As such, the training dataset will yield a tuple `(inputs, targets)`, where:
- `inputs` is a dictionary with the keys `encoder_inputs` and `decoder_inputs`.
`encoder_inputs` is the tokenized source sentence and `decoder_inputs` is the target
sentence "so far",
that is to say, the words 0 to N used to predict word N+1 (and beyond) in the target
sentence.
- `target` is the target sentence offset by one step:
it provides the next words in the target sentence -- what the model will try to predict.
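To make this offset concrete, here is a tiny illustrative example with made-up token ids
(the real pipeline below builds these tensors with the tokenizers and `StartEndPacker`):

```python
# Illustrative only: ids 1 and 2 stand in for "[START]" and "[END]".
packed_spa = [1, 7, 9, 4, 2]
decoder_inputs = packed_spa[:-1]  # [1, 7, 9, 4] -- everything except the last token
targets = packed_spa[1:]  # [7, 9, 4, 2] -- everything except the first token
```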
We will add special tokens, `"[START]"` and `"[END]"`, to the input Spanish
sentence after tokenizing the text. We will also pad the input to a fixed length.
This can be easily done using `keras_nlp.layers.StartEndPacker`.
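As a quick, standalone illustration of what `StartEndPacker` does (the token ids below are
made up; 1, 2 and 0 play the roles of the start, end and padding ids):

```python
demo_packer = keras_nlp.layers.StartEndPacker(
    sequence_length=8, start_value=1, end_value=2, pad_value=0
)
print(demo_packer([[5, 6, 7]]))  # -> [[1, 5, 6, 7, 2, 0, 0, 0]]
```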
```python
def preprocess_batch(eng, spa):
batch_size = ops.shape(spa)[0]
eng = eng_tokenizer(eng)
spa = spa_tokenizer(spa)
# Pad `eng` to `MAX_SEQUENCE_LENGTH`.
eng_start_end_packer = keras_nlp.layers.StartEndPacker(
sequence_length=MAX_SEQUENCE_LENGTH,
pad_value=eng_tokenizer.token_to_id("[PAD]"),
)
eng = eng_start_end_packer(eng)
# Add special tokens (`"[START]"` and `"[END]"`) to `spa` and pad it as well.
spa_start_end_packer = keras_nlp.layers.StartEndPacker(
sequence_length=MAX_SEQUENCE_LENGTH + 1,
start_value=spa_tokenizer.token_to_id("[START]"),
end_value=spa_tokenizer.token_to_id("[END]"),
pad_value=spa_tokenizer.token_to_id("[PAD]"),
)
spa = spa_start_end_packer(spa)
return (
{
"encoder_inputs": eng,
"decoder_inputs": spa[:, :-1],
},
spa[:, 1:],
)
def make_dataset(pairs):
eng_texts, spa_texts = zip(*pairs)
eng_texts = list(eng_texts)
spa_texts = list(spa_texts)
dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts))
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(preprocess_batch, num_parallel_calls=tf_data.AUTOTUNE)
return dataset.shuffle(2048).prefetch(16).cache()
train_ds = make_dataset(train_pairs)
val_ds = make_dataset(val_pairs)
```
Let's take a quick look at the sequence shapes
(we have batches of 64 pairs, and all sequences are 40 steps long):
```python
for inputs, targets in train_ds.take(1):
print(f'inputs["encoder_inputs"].shape: {inputs["encoder_inputs"].shape}')
print(f'inputs["decoder_inputs"].shape: {inputs["decoder_inputs"].shape}')
print(f"targets.shape: {targets.shape}")
```
<div class="k-default-codeblock">
```
inputs["encoder_inputs"].shape: (64, 40)
inputs["decoder_inputs"].shape: (64, 40)
targets.shape: (64, 40)
```
</div>
---
## Building the model
Now, let's move on to the exciting part - defining our model!
We first need an embedding layer, i.e., a vector for every token in our input sequence.
This embedding layer can be initialised randomly. We also need a positional
embedding layer which encodes the word order in the sequence. The convention is
to add these two embeddings. KerasNLP has a `keras_nlp.layers.TokenAndPositionEmbedding`
layer which does all of the above steps for us.
Our sequence-to-sequence Transformer consists of a `keras_nlp.layers.TransformerEncoder`
layer and a `keras_nlp.layers.TransformerDecoder` layer chained together.
The source sequence will be passed to `keras_nlp.layers.TransformerEncoder`, which
will produce a new representation of it. This new representation will then be passed
to the `keras_nlp.layers.TransformerDecoder`, together with the target sequence
so far (target words 0 to N). The `keras_nlp.layers.TransformerDecoder` will
then seek to predict the next words in the target sequence (N+1 and beyond).
A key detail that makes this possible is causal masking.
The `keras_nlp.layers.TransformerDecoder` sees the entire sequence at once, and
thus we must make sure that it only uses information from target tokens 0 to N
when predicting token N+1 (otherwise, it could use information from the future,
which would result in a model that cannot be used at inference time). Causal masking
is enabled by default in `keras_nlp.layers.TransformerDecoder`.
We also need to mask the padding tokens (`"[PAD]"`). For this, we can set the
`mask_zero` argument of the `keras_nlp.layers.TokenAndPositionEmbedding` layer
to True. This will then be propagated to all subsequent layers.
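To visualize what a causal mask looks like, here is a small illustrative sketch (not part
of the pipeline itself):

```python
# A lower-triangular mask for a length-4 sequence: row i has ones only at
# positions <= i, so position i cannot attend to future positions.
print(ops.tril(ops.ones((4, 4))))
```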
```python
# Encoder
encoder_inputs = keras.Input(shape=(None,), name="encoder_inputs")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=ENG_VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
)(encoder_inputs)
encoder_outputs = keras_nlp.layers.TransformerEncoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(inputs=x)
encoder = keras.Model(encoder_inputs, encoder_outputs)
# Decoder
decoder_inputs = keras.Input(shape=(None,), name="decoder_inputs")
encoded_seq_inputs = keras.Input(shape=(None, EMBED_DIM), name="decoder_state_inputs")
x = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=SPA_VOCAB_SIZE,
sequence_length=MAX_SEQUENCE_LENGTH,
embedding_dim=EMBED_DIM,
)(decoder_inputs)
x = keras_nlp.layers.TransformerDecoder(
intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS
)(decoder_sequence=x, encoder_sequence=encoded_seq_inputs)
x = keras.layers.Dropout(0.5)(x)
decoder_outputs = keras.layers.Dense(SPA_VOCAB_SIZE, activation="softmax")(x)
decoder = keras.Model(
[
decoder_inputs,
encoded_seq_inputs,
],
decoder_outputs,
)
decoder_outputs = decoder([decoder_inputs, encoder_outputs])
transformer = keras.Model(
[encoder_inputs, decoder_inputs],
decoder_outputs,
name="transformer",
)
```
---
## Training our model
We'll use accuracy as a quick way to monitor training progress on the validation data.
Note that machine translation typically uses BLEU scores as well as other metrics,
rather than accuracy. However, in order to use metrics like ROUGE, BLEU, etc. we
will have to decode the probabilities and generate the text. Text generation is
computationally expensive, and performing this during training is not recommended.
Here we only train for 1 epoch, but to get the model to actually converge
you should train for at least 10 epochs.
```python
transformer.summary()
transformer.compile(
"rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
transformer.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "transformer"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ encoder_inputs │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ token_and_position… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,850,…</span> │ encoder_inputs[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TokenAndPositionE…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ decoder_inputs │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ transformer_encoder │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,315,…</span> │ token_and_position_… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">TransformerEncode…</span> │ │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ functional_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">9,283,…</span> │ decoder_inputs[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">15000</span>) │ │ transformer_encoder… │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">14,449,304</span> (55.12 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">14,449,304</span> (55.12 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
1302/1302 ━━━━━━━━━━━━━━━━━━━━ 22s 15ms/step - accuracy: 0.8164 - loss: 1.4953 - val_accuracy: 0.8683 - val_loss: 0.7952
<keras.src.callbacks.history.History at 0x7f6563fd2140>
```
</div>
---
## Decoding test sentences (qualitative analysis)
Finally, let's demonstrate how to translate brand new English sentences.
We simply feed into the model the tokenized English sentence
as well as the target token `"[START]"`. The model outputs probabilities of the
next token. We then repeatedly generate the next token conditioned on the
tokens generated so far, until we hit the token `"[END]"`.
For decoding, we will use the `keras_nlp.samplers` module from
KerasNLP. Greedy Decoding is a text decoding method which outputs the most
likely next token at each time step, i.e., the token with the highest probability.
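Conceptually, a single greedy step just picks the arg-max of the logits. The snippet below
is only an illustration; the actual decoding loop is handled by
`keras_nlp.samplers.GreedySampler` in the next code block:

```python
# Illustrative only: a toy logits row for a vocabulary of 3 tokens.
toy_logits = ops.convert_to_tensor([[0.1, 2.3, 0.7]])
print(ops.argmax(toy_logits, axis=-1))  # -> index 1, the most likely token
```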
```python
def decode_sequences(input_sentences):
batch_size = 1
# Tokenize the encoder input.
encoder_input_tokens = ops.convert_to_tensor(eng_tokenizer(input_sentences))
if len(encoder_input_tokens[0]) < MAX_SEQUENCE_LENGTH:
pads = ops.full((1, MAX_SEQUENCE_LENGTH - len(encoder_input_tokens[0])), 0)
encoder_input_tokens = ops.concatenate([encoder_input_tokens, pads], 1)
# Define a function that outputs the next token's probability given the
# input sequence.
def next(prompt, cache, index):
logits = transformer([encoder_input_tokens, prompt])[:, index - 1, :]
# Ignore hidden states for now; only needed for contrastive search.
hidden_states = None
return logits, hidden_states, cache
# Build a prompt of length 40 with a start token and padding tokens.
length = 40
start = ops.full((batch_size, 1), spa_tokenizer.token_to_id("[START]"))
pad = ops.full((batch_size, length - 1), spa_tokenizer.token_to_id("[PAD]"))
prompt = ops.concatenate((start, pad), axis=-1)
generated_tokens = keras_nlp.samplers.GreedySampler()(
next,
prompt,
end_token_id=spa_tokenizer.token_to_id("[END]"),
index=1, # Start sampling after start token.
)
generated_sentences = spa_tokenizer.detokenize(generated_tokens)
return generated_sentences
test_eng_texts = [pair[0] for pair in test_pairs]
for i in range(2):
input_sentence = random.choice(test_eng_texts)
translated = decode_sequences([input_sentence])
translated = translated.numpy()[0].decode("utf-8")
translated = (
translated.replace("[PAD]", "")
.replace("[START]", "")
.replace("[END]", "")
.strip()
)
print(f"** Example {i} **")
print(input_sentence)
print(translated)
print()
```
<div class="k-default-codeblock">
```
** Example 0 **
he is always complaining.
él siempre está en la escuela .
```
</div>
<div class="k-default-codeblock">
```
** Example 1 **
i think you're all wrong.
creo que te representan todos los días .
```
</div>
---
## Evaluating our model (quantitative analysis)
There are many metrics which are used for text generation tasks. Here, to
evaluate translations generated by our model, let's compute the ROUGE-1 and
ROUGE-2 scores. Essentially, ROUGE-N is a score based on the number of common
n-grams between the reference text and the generated text. ROUGE-1 and ROUGE-2
use the number of common unigrams and bigrams, respectively.
We will calculate the score over 30 test samples (since decoding is an
expensive process).
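To build some intuition for the metric first, here is a small, hand-checkable illustration
(separate from the evaluation loop below). All 4 generated unigrams appear in the 5-word
reference, so ROUGE-1 precision should come out as 1.0 and recall as 0.8:

```python
toy_rouge_1 = keras_nlp.metrics.RougeN(order=1)
toy_rouge_1("the black cat sleeps here", "the black cat sleeps")
print(toy_rouge_1.result())
```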
```python
rouge_1 = keras_nlp.metrics.RougeN(order=1)
rouge_2 = keras_nlp.metrics.RougeN(order=2)
for test_pair in test_pairs[:30]:
input_sentence = test_pair[0]
reference_sentence = test_pair[1]
translated_sentence = decode_sequences([input_sentence])
translated_sentence = translated_sentence.numpy()[0].decode("utf-8")
translated_sentence = (
translated_sentence.replace("[PAD]", "")
.replace("[START]", "")
.replace("[END]", "")
.strip()
)
rouge_1(reference_sentence, translated_sentence)
rouge_2(reference_sentence, translated_sentence)
print("ROUGE-1 Score: ", rouge_1.result())
print("ROUGE-2 Score: ", rouge_2.result())
```
<div class="k-default-codeblock">
```
ROUGE-1 Score: {'precision': Array(0.33075738, dtype=float32), 'recall': Array(0.33867723, dtype=float32), 'f1_score': Array(0.3302676, dtype=float32)}
ROUGE-2 Score: {'precision': Array(0.13534392, dtype=float32), 'recall': Array(0.13344036, dtype=float32), 'f1_score': Array(0.13272808, dtype=float32)}
```
</div>
After 10 epochs, the scores are as follows:
| | **ROUGE-1** | **ROUGE-2** |
|:-------------:|:-----------:|:-----------:|
| **Precision** | 0.568 | 0.374 |
| **Recall** | 0.615 | 0.394 |
| **F1 Score** | 0.579 | 0.381 |
| keras-io/examples/nlp/md/neural_machine_translation_with_keras_nlp.md/0 | {
"file_path": "keras-io/examples/nlp/md/neural_machine_translation_with_keras_nlp.md",
"repo_id": "keras-io",
"token_count": 9648
} | 101 |
"""
Title: Large-scale multi-label text classification
Author: [Sayak Paul](https://twitter.com/RisingSayak), [Soumik Rakshit](https://github.com/soumik12345)
Date created: 2020/09/25
Last modified: 2020/12/23
Description: Implementing a large-scale multi-label text classification model.
Accelerator: GPU
"""
"""
## Introduction
In this example, we will build a multi-label text classifier to predict the subject areas
of arXiv papers from their abstract bodies. This type of classifier can be useful for
conference submission portals like [OpenReview](https://openreview.net/). Given a paper
abstract, the portal could provide suggestions for which areas the paper would
best belong to.
The dataset was collected using the
[`arXiv` Python library](https://github.com/lukasschwab/arxiv.py)
that provides a wrapper around the
[original arXiv API](http://arxiv.org/help/api/index).
To learn more about the data collection process, please refer to
[this notebook](https://github.com/soumik12345/multi-label-text-classification/blob/master/arxiv_scrape.ipynb).
Additionally, you can also find the dataset on
[Kaggle](https://www.kaggle.com/spsayakpaul/arxiv-paper-abstracts).
"""
"""
## Imports
"""
from tensorflow.keras import layers
from tensorflow import keras
import tensorflow as tf
from sklearn.model_selection import train_test_split
from ast import literal_eval
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
"""
## Perform exploratory data analysis
In this section, we first load the dataset into a `pandas` dataframe and then perform
some basic exploratory data analysis (EDA).
"""
arxiv_data = pd.read_csv(
"https://github.com/soumik12345/multi-label-text-classification/releases/download/v0.2/arxiv_data.csv"
)
arxiv_data.head()
"""
Our text features are present in the `summaries` column and their corresponding labels
are in `terms`. As you can notice, there are multiple categories associated with a
particular entry.
"""
print(f"There are {len(arxiv_data)} rows in the dataset.")
"""
Real-world data is noisy. One of the most commonly observed sources of noise is data
duplication. Here we notice that our initial dataset has about 13k duplicate entries.
"""
total_duplicate_titles = sum(arxiv_data["titles"].duplicated())
print(f"There are {total_duplicate_titles} duplicate titles.")
"""
Before proceeding further, we drop these entries.
"""
arxiv_data = arxiv_data[~arxiv_data["titles"].duplicated()]
print(f"There are {len(arxiv_data)} rows in the deduplicated dataset.")
# There are some terms with occurrence as low as 1.
print(sum(arxiv_data["terms"].value_counts() == 1))
# How many unique terms?
print(arxiv_data["terms"].nunique())
"""
As observed above, out of 3,157 unique combinations of `terms`, 2,321 entries have the
lowest occurrence. To prepare our train, validation, and test sets with
[stratification](https://en.wikipedia.org/wiki/Stratified_sampling), we need to drop
these terms.
"""
# Filtering the rare terms.
arxiv_data_filtered = arxiv_data.groupby("terms").filter(lambda x: len(x) > 1)
arxiv_data_filtered.shape
"""
## Convert the string labels to lists of strings
The initial labels are represented as raw strings. Here we make them `List[str]` for a
more compact representation.
"""
arxiv_data_filtered["terms"] = arxiv_data_filtered["terms"].apply(
lambda x: literal_eval(x)
)
arxiv_data_filtered["terms"].values[:5]
"""
## Use stratified splits because of class imbalance
The dataset has a
[class imbalance problem](https://developers.google.com/machine-learning/glossary/#class-imbalanced-dataset).
So, to have a fair evaluation result, we need to ensure the datasets are sampled with
stratification. To know more about different strategies to deal with the class imbalance
problem, you can follow
[this tutorial](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data).
For an end-to-end demonstration of classification with imbalanced data, refer to
[Imbalanced classification: credit card fraud detection](https://keras.io/examples/structured_data/imbalanced_classification/).
"""
test_split = 0.1
# Initial train and test split.
train_df, test_df = train_test_split(
arxiv_data_filtered,
test_size=test_split,
stratify=arxiv_data_filtered["terms"].values,
)
# Splitting the test set further into validation
# and new test sets.
val_df = test_df.sample(frac=0.5)
test_df.drop(val_df.index, inplace=True)
print(f"Number of rows in training set: {len(train_df)}")
print(f"Number of rows in validation set: {len(val_df)}")
print(f"Number of rows in test set: {len(test_df)}")
"""
## Multi-label binarization
Now we preprocess our labels using the
[`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup)
layer.
"""
terms = tf.ragged.constant(train_df["terms"].values)
lookup = tf.keras.layers.StringLookup(output_mode="multi_hot")
lookup.adapt(terms)
vocab = lookup.get_vocabulary()
def invert_multi_hot(encoded_labels):
"""Reverse a single multi-hot encoded label to a tuple of vocab terms."""
hot_indices = np.argwhere(encoded_labels == 1.0)[..., 0]
return np.take(vocab, hot_indices)
print("Vocabulary:\n")
print(vocab)
"""
Here we are separating the individual unique classes available from the label
pool and then using this information to represent a given label set with 0's and 1's.
Below is an example.
"""
sample_label = train_df["terms"].iloc[0]
print(f"Original label: {sample_label}")
label_binarized = lookup([sample_label])
print(f"Label-binarized representation: {label_binarized}")
"""
## Data preprocessing and `tf.data.Dataset` objects
We first get percentile estimates of the sequence lengths. The purpose will be clear in a
moment.
"""
train_df["summaries"].apply(lambda x: len(x.split(" "))).describe()
"""
Notice that 50% of the abstracts have a length of 154 (you may get a different number
based on the split). So, any number close to that value is a good enough approximation for the
maximum sequence length.
Now, we implement utilities to prepare our datasets.
"""
max_seqlen = 150
batch_size = 128
padding_token = "<pad>"
auto = tf.data.AUTOTUNE
def make_dataset(dataframe, is_train=True):
labels = tf.ragged.constant(dataframe["terms"].values)
label_binarized = lookup(labels).numpy()
dataset = tf.data.Dataset.from_tensor_slices(
(dataframe["summaries"].values, label_binarized)
)
dataset = dataset.shuffle(batch_size * 10) if is_train else dataset
return dataset.batch(batch_size)
"""
Now we can prepare the `tf.data.Dataset` objects.
"""
train_dataset = make_dataset(train_df, is_train=True)
validation_dataset = make_dataset(val_df, is_train=False)
test_dataset = make_dataset(test_df, is_train=False)
"""
## Dataset preview
"""
text_batch, label_batch = next(iter(train_dataset))
for i, text in enumerate(text_batch[:5]):
label = label_batch[i].numpy()[None, ...]
print(f"Abstract: {text}")
print(f"Label(s): {invert_multi_hot(label[0])}")
print(" ")
"""
## Vectorization
Before we feed the data to our model, we need to vectorize it (represent it in a numerical form).
For that purpose, we will use the
[`TextVectorization` layer](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization).
It can operate as a part of your main model, so that preprocessing is bundled with the model
rather than done separately. This greatly reduces the chances of training / serving skew during inference.
We first calculate the number of unique words present in the abstracts.
"""
# Source: https://stackoverflow.com/a/18937309/7636462
vocabulary = set()
train_df["summaries"].str.lower().str.split().apply(vocabulary.update)
vocabulary_size = len(vocabulary)
print(vocabulary_size)
"""
We now create our vectorization layer and `map()` it to the `tf.data.Dataset`s created
earlier.
"""
text_vectorizer = layers.TextVectorization(
max_tokens=vocabulary_size, ngrams=2, output_mode="tf_idf"
)
# `TextVectorization` layer needs to be adapted as per the vocabulary from our
# training set.
with tf.device("/CPU:0"):
text_vectorizer.adapt(train_dataset.map(lambda text, label: text))
train_dataset = train_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)
validation_dataset = validation_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)
test_dataset = test_dataset.map(
lambda text, label: (text_vectorizer(text), label), num_parallel_calls=auto
).prefetch(auto)
"""
A batch of raw text will first go through the `TextVectorization` layer and it will
generate their integer representations. Internally, the `TextVectorization` layer will
first create bi-grams out of the sequences and then represent them using
[TF-IDF](https://wikipedia.org/wiki/Tf%E2%80%93idf). The output representations will then
be passed to the shallow model responsible for text classification.
To learn more about other possible configurations with `TextVectorization`, please consult
the
[official documentation](https://keras.io/api/layers/preprocessing_layers/text/text_vectorization).
**Note**: Setting the `max_tokens` argument to a pre-calculated vocabulary size is
not a requirement.
"""
"""
## Create a text classification model
We will keep our model simple -- it will be a small stack of fully-connected layers with
ReLU as the non-linearity.
"""
def make_model():
shallow_mlp_model = keras.Sequential(
[
layers.Dense(512, activation="relu"),
layers.Dense(256, activation="relu"),
layers.Dense(lookup.vocabulary_size(), activation="sigmoid"),
] # More on why "sigmoid" has been used here in a moment.
)
return shallow_mlp_model
"""
## Train the model
We will train our model using the binary crossentropy loss. This is because the labels
are not disjoint. For a given abstract, we may have multiple categories. So, we will
divide the prediction task into a series of multiple binary classification problems. This
is also why we kept the activation function of the classification layer in our model to
sigmoid. Researchers have used other combinations of loss function and activation
function as well. For example, in [Exploring the Limits of Weakly Supervised Pretraining](https://arxiv.org/abs/1805.00932),
Mahajan et al. used the softmax activation function and cross-entropy loss to train
their models.
There are several options of metrics that can be used in multi-label classification.
To keep this code example narrow we decided to use the
[binary accuracy metric](https://keras.io/api/metrics/accuracy_metrics/#binaryaccuracy-class).
To see the explanation why this metric is used we refer to this
[pull-request](https://github.com/keras-team/keras-io/pull/1133#issuecomment-1322736860).
There are also other suitable metrics for multi-label classification, like
[F1 Score](https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/F1Score) or
[Hamming loss](https://www.tensorflow.org/addons/api_docs/python/tfa/metrics/HammingLoss).
"""
epochs = 20
shallow_mlp_model = make_model()
shallow_mlp_model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"]
)
history = shallow_mlp_model.fit(
train_dataset, validation_data=validation_dataset, epochs=epochs
)
def plot_result(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_result("loss")
plot_result("binary_accuracy")
"""
While training, we notice an initial sharp fall in the loss followed by a gradual decay.
"""
"""
### Evaluate the model
"""
_, binary_acc = shallow_mlp_model.evaluate(test_dataset)
print(f"Categorical accuracy on the test set: {round(binary_acc * 100, 2)}%.")
"""
The trained model gives us an evaluation accuracy of ~99%.
"""
"""
## Inference
An important feature of the
[preprocessing layers provided by Keras](https://keras.io/guides/preprocessing_layers/)
is that they can be included inside a `tf.keras.Model`. We will export an inference model
by including the `text_vectorization` layer on top of `shallow_mlp_model`. This will
allow our inference model to directly operate on raw strings.
**Note** that during training it is always preferable to use these preprocessing
layers as a part of the data input pipeline rather than the model to avoid
surfacing bottlenecks for the hardware accelerators. This also allows for
asynchronous data processing.
"""
# Create a model for inference.
model_for_inference = keras.Sequential([text_vectorizer, shallow_mlp_model])
# Create a small dataset just for demoing inference.
inference_dataset = make_dataset(test_df.sample(100), is_train=False)
text_batch, label_batch = next(iter(inference_dataset))
predicted_probabilities = model_for_inference.predict(text_batch)
# Perform inference.
for i, text in enumerate(text_batch[:5]):
label = label_batch[i].numpy()[None, ...]
print(f"Abstract: {text}")
print(f"Label(s): {invert_multi_hot(label[0])}")
predicted_proba = [proba for proba in predicted_probabilities[i]]
top_3_labels = [
x
for _, x in sorted(
zip(predicted_probabilities[i], lookup.get_vocabulary()),
key=lambda pair: pair[0],
reverse=True,
)
][:3]
print(f"Predicted Label(s): ({', '.join([label for label in top_3_labels])})")
print(" ")
"""
The prediction results are not that great but not below par for a simple model like
ours. We can improve this performance with models that consider word order like LSTM or
even those that use Transformers ([Vaswani et al.](https://arxiv.org/abs/1706.03762)).
"""
"""
## Acknowledgements
We would like to thank [Matt Watson](https://github.com/mattdangerw) for helping us
tackle the multi-label binarization part and inverse-transforming the processed labels
to the original form.
Thanks to [Cingis Kratochvil](https://github.com/cumbalik) for suggesting and extending this code example by introducing binary accuracy as the evaluation metric.
"""
| keras-io/examples/nlp/multi_label_classification.py/0 | {
"file_path": "keras-io/examples/nlp/multi_label_classification.py",
"repo_id": "keras-io",
"token_count": 4617
} | 102 |
"""
Title: Text classification with Transformer
Author: [Apoorv Nandan](https://twitter.com/NandanApoorv)
Date created: 2020/05/10
Last modified: 2024/01/18
Description: Implement a Transformer block as a Keras layer and use it for text classification.
Accelerator: GPU
Converted to Keras 3 by: [Sitam Meur](https://github.com/sitamgithub-MSIT)
"""
"""
## Setup
"""
import keras
from keras import ops
from keras import layers
"""
## Implement a Transformer block as a layer
"""
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.ffn = keras.Sequential(
[
layers.Dense(ff_dim, activation="relu"),
layers.Dense(embed_dim),
]
)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, inputs):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output)
return self.layernorm2(out1 + ffn_output)
"""
## Implement embedding layer
Two separate embedding layers, one for tokens, one for token index (positions).
"""
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = ops.shape(x)[-1]
positions = ops.arange(start=0, stop=maxlen, step=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
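"""
As a quick, illustrative sanity check, embedding a toy batch of token ids of shape
`(batch, maxlen)` yields a tensor of shape `(batch, maxlen, embed_dim)`:
"""

demo_embedding = TokenAndPositionEmbedding(maxlen=8, vocab_size=100, embed_dim=16)
demo_tokens = ops.ones((2, 8), dtype="int32")
print(demo_embedding(demo_tokens).shape)  # -> (2, 8, 16)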
"""
## Download and prepare dataset
"""
vocab_size = 20000 # Only consider the top 20k words
maxlen = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen)
x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen)
"""
## Create classifier model using transformer layer
Transformer layer outputs one vector for each time step of our input sequence.
Here, we take the mean across all time steps and
use a feed forward network on top of it to classify text.
"""
embed_dim = 32 # Embedding size for each token
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feed forward network inside transformer
inputs = layers.Input(shape=(maxlen,))
embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
x = embedding_layer(inputs)
transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(0.1)(x)
x = layers.Dense(20, activation="relu")(x)
x = layers.Dropout(0.1)(x)
outputs = layers.Dense(2, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
"""
## Train and Evaluate
"""
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
history = model.fit(
x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val)
)
| keras-io/examples/nlp/text_classification_with_transformer.py/0 | {
"file_path": "keras-io/examples/nlp/text_classification_with_transformer.py",
"repo_id": "keras-io",
"token_count": 1406
} | 103 |
"""
Title: Classification with Gated Residual and Variable Selection Networks
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/02/10
Last modified: 2021/02/10
Description: Using Gated Residual and Variable Selection Networks for income level prediction.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates the use of Gated
Residual Networks (GRN) and Variable Selection Networks (VSN), proposed by
Bryan Lim et al. in
[Temporal Fusion Transformers (TFT) for Interpretable Multi-horizon Time Series Forecasting](https://arxiv.org/abs/1912.09363),
for structured data classification. GRNs give the flexibility to the model to apply
non-linear processing only where needed. VSNs allow the model to softly remove any
unnecessary noisy inputs which could negatively impact performance.
Together, these techniques help improve the learning capacity of deep neural
network models.
Note that this example implements only the GRN and VSN components described
in the paper, rather than the whole TFT model, as GRN and VSN can be useful on
their own for structured data learning tasks.
To run the code you need to use TensorFlow 2.3 or higher.
"""
"""
## The dataset
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)
provided by the
[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification to determine whether a person makes over 50K a year.
The dataset includes ~300K instances with 41 input features: 7 numerical features
and 34 categorical features.
"""
"""
## Setup
"""
import os
# Only the TensorFlow backend supports string inputs.
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import pandas as pd
import tensorflow as tf
import keras
from keras import layers
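"""
Before preparing the data, here is a rough, purely illustrative sketch of the gating
idea mentioned in the introduction (this is *not* the GRN implementation used later in
this example): a gated unit multiplies a transformed input by a sigmoid "gate", so the
network can scale down -- or effectively skip -- the non-linear contribution of a unit.
"""

# Illustrative sketch only: output = Dense(x) * sigmoid(Gate(x)).
demo_inputs = keras.Input(shape=(4,))
demo_transformed = layers.Dense(4)(demo_inputs)
demo_gate = layers.Dense(4, activation="sigmoid")(demo_inputs)
demo_outputs = layers.Multiply()([demo_transformed, demo_gate])
demo_gated_unit = keras.Model(demo_inputs, demo_outputs)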
"""
## Prepare the data
First we load the data from the UCI Machine Learning Repository into a Pandas DataFrame.
"""
# Column names.
CSV_HEADER = [
"age",
"class_of_worker",
"detailed_industry_recode",
"detailed_occupation_recode",
"education",
"wage_per_hour",
"enroll_in_edu_inst_last_wk",
"marital_stat",
"major_industry_code",
"major_occupation_code",
"race",
"hispanic_origin",
"sex",
"member_of_a_labor_union",
"reason_for_unemployment",
"full_or_part_time_employment_stat",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"tax_filer_stat",
"region_of_previous_residence",
"state_of_previous_residence",
"detailed_household_and_family_stat",
"detailed_household_summary_in_household",
"instance_weight",
"migration_code-change_in_msa",
"migration_code-change_in_reg",
"migration_code-move_within_reg",
"live_in_this_house_1_year_ago",
"migration_prev_res_in_sunbelt",
"num_persons_worked_for_employer",
"family_members_under_18",
"country_of_birth_father",
"country_of_birth_mother",
"country_of_birth_self",
"citizenship",
"own_business_or_self_employed",
"fill_inc_questionnaire_for_veterans_admin",
"veterans_benefits",
"weeks_worked_in_year",
"year",
"income_level",
]
data_url = "https://archive.ics.uci.edu/static/public/20/census+income.zip"
keras.utils.get_file(origin=data_url, extract=True)
train_data_path = os.path.join(
os.path.expanduser("~"), ".keras", "datasets", "adult.data"
)
test_data_path = os.path.join(
os.path.expanduser("~"), ".keras", "datasets", "adult.test"
)
data = pd.read_csv(train_data_path, header=None, names=CSV_HEADER)
test_data = pd.read_csv(test_data_path, header=None, names=CSV_HEADER)
print(f"Data shape: {data.shape}")
print(f"Test data shape: {test_data.shape}")
"""
We convert the target column from string to integer.
"""
data["income_level"] = data["income_level"].apply(
lambda x: 0 if x == " - 50000." else 1
)
test_data["income_level"] = test_data["income_level"].apply(
lambda x: 0 if x == " - 50000." else 1
)
"""
Then, we split the dataset into train and validation sets.
"""
random_selection = np.random.rand(len(data.index)) <= 0.85
train_data = data[random_selection]
valid_data = data[~random_selection]
"""
Finally, we store the train, validation, and test data splits locally as CSV files.
"""
train_data_file = "train_data.csv"
valid_data_file = "valid_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
valid_data.to_csv(valid_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)
"""
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for reading and
parsing the data into input features, and encoding the input features with respect
to their types.
"""
# Target feature name.
TARGET_FEATURE_NAME = "income_level"
# Weight column name.
WEIGHT_COLUMN_NAME = "instance_weight"
# Numeric feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"wage_per_hour",
"capital_gains",
"capital_losses",
"dividends_from_stocks",
"num_persons_worked_for_employer",
"weeks_worked_in_year",
]
# Categorical features and their vocabulary lists.
# Note that we convert all categorical feature values to strings to make
# sure that they are treated consistently by the lookup and encoding layers.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
feature_name: sorted([str(value) for value in list(data[feature_name].unique())])
for feature_name in CSV_HEADER
if feature_name
not in list(NUMERIC_FEATURE_NAMES + [WEIGHT_COLUMN_NAME, TARGET_FEATURE_NAME])
}
# All features names.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + list(
CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()
)
# Feature default values.
COLUMN_DEFAULTS = [
(
[0.0]
if feature_name
in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME, WEIGHT_COLUMN_NAME]
else ["NA"]
)
for feature_name in CSV_HEADER
]
"""
## Create a `tf.data.Dataset` for training and evaluation
We create an input function to read and parse the file, and convert features and
labels into a [`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets) for
training and evaluation.
"""
def process(features, target):
for feature_name in features:
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
# Cast categorical feature values to string.
features[feature_name] = keras.ops.cast(features[feature_name], "string")
# Get the instance weight.
weight = features.pop(WEIGHT_COLUMN_NAME)
return features, target, weight
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
dataset = tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
shuffle=shuffle,
).map(process)
return dataset
"""
## Create model inputs
"""
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="string"
)
return inputs
"""
## Encode input features
For categorical features, we encode them using `layers.Embedding`, with
`encoding_size` as the embedding dimension. For the numerical features,
we apply a linear transformation using `layers.Dense` to project each feature into
an `encoding_size`-dimensional vector. Thus, all the encoded features will have the
same dimensionality.
"""
def encode_inputs(inputs, encoding_size):
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # Create a lookup to convert string values to integer indices.
# Since we are not using a mask token nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
index = layers.StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_index = index(inputs[feature_name])
            # Create an embedding layer with the specified dimensions.
            embedding_encoder = layers.Embedding(
                input_dim=len(vocabulary), output_dim=encoding_size
            )
            # Convert the index values to embedding representations.
            encoded_feature = embedding_encoder(value_index)
else:
# Project the numeric feature to encoding_size using linear transformation.
encoded_feature = keras.ops.expand_dims(inputs[feature_name], -1)
encoded_feature = layers.Dense(units=encoding_size)(encoded_feature)
encoded_features.append(encoded_feature)
return encoded_features
"""
## Implement the Gated Linear Unit
[Gated Linear Units (GLUs)](https://arxiv.org/abs/1612.08083) provide the
flexibility to suppress inputs that are not relevant for a given task.
"""
class GatedLinearUnit(layers.Layer):
def __init__(self, units):
super().__init__()
self.linear = layers.Dense(units)
self.sigmoid = layers.Dense(units, activation="sigmoid")
def call(self, inputs):
return self.linear(inputs) * self.sigmoid(inputs)
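"""
As a quick illustrative check (an addition, not part of the original example), the GLU
maps an arbitrary input to the requested number of units:
"""
# Hypothetical sanity check of the GLU output shape.
_glu = GatedLinearUnit(units=16)
print(_glu(keras.ops.ones((4, 8))).shape)  # Expected: (4, 16)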
"""
## Implement the Gated Residual Network
The Gated Residual Network (GRN) works as follows:
1. Applies the nonlinear ELU transformation to the inputs.
2. Applies linear transformation followed by dropout.
3. Applies GLU and adds the original inputs to the output of the GLU to perform skip
(residual) connection.
4. Applies layer normalization and produces the output.
"""
class GatedResidualNetwork(layers.Layer):
def __init__(self, units, dropout_rate):
super().__init__()
self.units = units
self.elu_dense = layers.Dense(units, activation="elu")
self.linear_dense = layers.Dense(units)
self.dropout = layers.Dropout(dropout_rate)
self.gated_linear_unit = GatedLinearUnit(units)
self.layer_norm = layers.LayerNormalization()
self.project = layers.Dense(units)
def call(self, inputs):
x = self.elu_dense(inputs)
x = self.linear_dense(x)
x = self.dropout(x)
if inputs.shape[-1] != self.units:
inputs = self.project(inputs)
x = inputs + self.gated_linear_unit(x)
x = self.layer_norm(x)
return x
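"""
A minimal sketch (added for illustration, not part of the original example) showing that
the GRN projects an input of a different width onto `units` dimensions via the residual
projection branch:
"""
# Hypothetical shape check: an 8-dimensional input mapped to 16 units.
_grn = GatedResidualNetwork(units=16, dropout_rate=0.1)
print(_grn(keras.ops.ones((4, 8))).shape)  # Expected: (4, 16)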
"""
## Implement the Variable Selection Network
The Variable Selection Network (VSN) works as follows:
1. Applies a GRN to each feature individually.
2. Applies a GRN on the concatenation of all the features, followed by a softmax to
produce feature weights.
3. Produces a weighted sum of the output of the individual GRN.
Note that the output of the VSN is [batch_size, encoding_size], regardless of the
number of input features.
"""
class VariableSelection(layers.Layer):
def __init__(self, num_features, units, dropout_rate):
super().__init__()
self.grns = list()
# Create a GRN for each feature independently
for idx in range(num_features):
grn = GatedResidualNetwork(units, dropout_rate)
self.grns.append(grn)
# Create a GRN for the concatenation of all the features
self.grn_concat = GatedResidualNetwork(units, dropout_rate)
self.softmax = layers.Dense(units=num_features, activation="softmax")
def call(self, inputs):
v = layers.concatenate(inputs)
v = self.grn_concat(v)
v = keras.ops.expand_dims(self.softmax(v), axis=-1)
x = []
for idx, input in enumerate(inputs):
x.append(self.grns[idx](input))
x = keras.ops.stack(x, axis=1)
outputs = keras.ops.squeeze(tf.matmul(v, x, transpose_a=True), axis=1)
return outputs
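"""
As an illustrative check (not part of the original example), the VSN collapses a list of
feature encodings into a single `[batch_size, units]` tensor, whatever the number of
features:
"""
# Hypothetical check with 3 dummy features of width 8 each.
_vsn = VariableSelection(num_features=3, units=16, dropout_rate=0.1)
print(_vsn([keras.ops.ones((4, 8)) for _ in range(3)]).shape)  # Expected: (4, 16)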
"""
## Create Gated Residual and Variable Selection Networks model
"""
def create_model(encoding_size):
inputs = create_model_inputs()
feature_list = encode_inputs(inputs, encoding_size)
num_features = len(feature_list)
features = VariableSelection(num_features, encoding_size, dropout_rate)(
feature_list
)
outputs = layers.Dense(units=1, activation="sigmoid")(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
## Compile, train, and evaluate the model
"""
learning_rate = 0.001
dropout_rate = 0.15
batch_size = 265
num_epochs = 20
encoding_size = 16
model = create_model(encoding_size)
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.BinaryAccuracy(name="accuracy")],
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=5, restore_best_weights=True
)
print("Start training the model...")
train_dataset = get_dataset_from_csv(
train_data_file, shuffle=True, batch_size=batch_size
)
valid_dataset = get_dataset_from_csv(valid_data_file, batch_size=batch_size)
model.fit(
train_dataset,
epochs=num_epochs,
validation_data=valid_dataset,
callbacks=[early_stopping],
)
print("Model training finished.")
print("Evaluating model performance...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
"""
You should achieve more than 95% accuracy on the test set.
To increase the learning capacity of the model, you can try increasing the
`encoding_size` value, or stacking multiple GRN layers on top of the VSN layer.
This may also require increasing the `dropout_rate` value to avoid overfitting.
"""
"""
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/structured-data-classification-grn-vsn) | [](https://huggingface.co/spaces/keras-io/structured-data-classification-grn-vsn) |
"""
| keras-io/examples/structured_data/classification_with_grn_and_vsn.py/0 | {
"file_path": "keras-io/examples/structured_data/classification_with_grn_and_vsn.py",
"repo_id": "keras-io",
"token_count": 5493
} | 104 |
# Classification with Neural Decision Forests
**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br>
**Date created:** 2021/01/15<br>
**Last modified:** 2021/01/15<br>
**Description:** How to train differentiable decision trees for end-to-end learning in deep neural networks.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/deep_neural_decision_forests.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/deep_neural_decision_forests.py)
---
## Introduction
This example provides an implementation of the
[Deep Neural Decision Forest](https://ieeexplore.ieee.org/document/7410529)
model introduced by P. Kontschieder et al. for structured data classification.
It demonstrates how to build a stochastic and differentiable decision tree model,
train it end-to-end, and unify decision trees with deep representation learning.
---
## The dataset
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/census+income)
provided by the
[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification
to predict whether a person is likely to be making over USD 50,000 a year.
The dataset includes 48,842 instances with 14 input features (such as age, work class, education, occupation, and so on): 5 numerical features
and 9 categorical features.
---
## Setup
```python
import keras
from keras import layers
from keras.layers import StringLookup
from keras import ops
from tensorflow import data as tf_data
import numpy as np
import pandas as pd
import math
```
---
## Prepare the data
```python
CSV_HEADER = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
train_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
)
train_data = pd.read_csv(train_data_url, header=None, names=CSV_HEADER)
test_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
)
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f"Train dataset shape: {train_data.shape}")
print(f"Test dataset shape: {test_data.shape}")
```
<div class="k-default-codeblock">
```
Train dataset shape: (32561, 15)
Test dataset shape: (16282, 15)
```
</div>
Remove the first record (because it is not a valid data example) and a trailing
'dot' in the class labels.
```python
test_data = test_data[1:]
test_data.income_bracket = test_data.income_bracket.apply(
lambda value: value.replace(".", "")
)
```
We store the training and test data splits locally as CSV files.
```python
train_data_file = "train_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)
```
---
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for reading, parsing,
and encoding input features.
```python
# A list of the numerical feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"education_num",
"capital_gain",
"capital_loss",
"hours_per_week",
]
# A dictionary of the categorical features and their vocabulary.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"workclass": sorted(list(train_data["workclass"].unique())),
"education": sorted(list(train_data["education"].unique())),
"marital_status": sorted(list(train_data["marital_status"].unique())),
"occupation": sorted(list(train_data["occupation"].unique())),
"relationship": sorted(list(train_data["relationship"].unique())),
"race": sorted(list(train_data["race"].unique())),
"gender": sorted(list(train_data["gender"].unique())),
"native_country": sorted(list(train_data["native_country"].unique())),
}
# A list of the columns to ignore from the dataset.
IGNORE_COLUMN_NAMES = ["fnlwgt"]
# A list of the categorical feature names.
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())
# A list of all the input features.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
# A list of column default values for each feature.
COLUMN_DEFAULTS = [
[0.0] if feature_name in NUMERIC_FEATURE_NAMES + IGNORE_COLUMN_NAMES else ["NA"]
for feature_name in CSV_HEADER
]
# The name of the target feature.
TARGET_FEATURE_NAME = "income_bracket"
# A list of the labels of the target features.
TARGET_LABELS = [" <=50K", " >50K"]
```
---
## Create `tf_data.Dataset` objects for training and validation
We create an input function to read and parse the file, and convert features and labels
into a [`tf_data.Dataset`](https://www.tensorflow.org/guide/datasets)
for training and validation. We also preprocess the input by mapping the target label
to an index.
```python
target_label_lookup = StringLookup(
vocabulary=TARGET_LABELS, mask_token=None, num_oov_indices=0
)
lookup_dict = {}
for feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
    # Create a lookup to convert string values to integer indices.
# Since we are not using a mask token, nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = StringLookup(vocabulary=vocabulary, mask_token=None, num_oov_indices=0)
lookup_dict[feature_name] = lookup
def encode_categorical(batch_x, batch_y):
for feature_name in CATEGORICAL_FEATURE_NAMES:
batch_x[feature_name] = lookup_dict[feature_name](batch_x[feature_name])
return batch_x, batch_y
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
dataset = (
tf_data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
na_value="?",
shuffle=shuffle,
)
.map(lambda features, target: (features, target_label_lookup(target)))
.map(encode_categorical)
)
return dataset.cache()
```
---
## Create model inputs
```python
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="float32"
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype="int32"
)
return inputs
```
---
## Encode input features
```python
def encode_inputs(inputs):
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
            # The string values were already converted to integer indices in the
            # tf.data pipeline, so here we only need the corresponding lookup layer
            # to determine the vocabulary size for the embedding.
            lookup = lookup_dict[feature_name]
            value_index = inputs[feature_name]
            embedding_dims = int(math.sqrt(lookup.vocabulary_size()))
# Create an embedding layer with the specified dimensions.
embedding = layers.Embedding(
input_dim=lookup.vocabulary_size(), output_dim=embedding_dims
)
# Convert the index values to embedding representations.
encoded_feature = embedding(value_index)
else:
# Use the numerical features as-is.
encoded_feature = inputs[feature_name]
if inputs[feature_name].shape[-1] is None:
encoded_feature = keras.ops.expand_dims(encoded_feature, -1)
encoded_features.append(encoded_feature)
encoded_features = layers.concatenate(encoded_features)
return encoded_features
```
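For instance (an illustrative calculation, not output from the example), a categorical
feature with 9 distinct values gets an embedding dimension of `int(sqrt(9)) = 3`, while
one with 42 distinct values gets `int(sqrt(42)) = 6`.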
---
## Deep Neural Decision Tree
A neural decision tree model has two sets of weights to learn. The first set is `pi`,
which represents the probability distribution of the classes in the tree leaves.
The second set is the weights of the routing layer `decision_fn`, which represents the probability
of going to each leaf. The forward pass of the model works as follows:
1. The model expects input `features` as a single vector encoding all the features of an instance
in the batch. This vector can be generated by a Convolutional Neural Network (CNN) applied to images
or dense transformations applied to structured data features.
2. The model first applies a `used_features_mask` to randomly select a subset of input features to use.
3. Then, the model computes the probabilities (`mu`) for the input instances to reach the tree leaves
by iteratively performing a *stochastic* routing throughout the tree levels.
4. Finally, the probabilities of reaching the leaves are combined with the class probabilities at the
leaves to produce the final `outputs`.
```python
class NeuralDecisionTree(keras.Model):
def __init__(self, depth, num_features, used_features_rate, num_classes):
super().__init__()
self.depth = depth
self.num_leaves = 2**depth
self.num_classes = num_classes
# Create a mask for the randomly selected features.
num_used_features = int(num_features * used_features_rate)
one_hot = np.eye(num_features)
sampled_feature_indices = np.random.choice(
np.arange(num_features), num_used_features, replace=False
)
self.used_features_mask = ops.convert_to_tensor(
one_hot[sampled_feature_indices], dtype="float32"
)
# Initialize the weights of the classes in leaves.
self.pi = self.add_weight(
initializer="random_normal",
shape=[self.num_leaves, self.num_classes],
dtype="float32",
trainable=True,
)
# Initialize the stochastic routing layer.
self.decision_fn = layers.Dense(
units=self.num_leaves, activation="sigmoid", name="decision"
)
def call(self, features):
batch_size = ops.shape(features)[0]
# Apply the feature mask to the input features.
features = ops.matmul(
features, ops.transpose(self.used_features_mask)
) # [batch_size, num_used_features]
# Compute the routing probabilities.
decisions = ops.expand_dims(
self.decision_fn(features), axis=2
) # [batch_size, num_leaves, 1]
# Concatenate the routing probabilities with their complements.
decisions = layers.concatenate(
[decisions, 1 - decisions], axis=2
) # [batch_size, num_leaves, 2]
mu = ops.ones([batch_size, 1, 1])
begin_idx = 1
end_idx = 2
# Traverse the tree in breadth-first order.
for level in range(self.depth):
mu = ops.reshape(mu, [batch_size, -1, 1]) # [batch_size, 2 ** level, 1]
mu = ops.tile(mu, (1, 1, 2)) # [batch_size, 2 ** level, 2]
level_decisions = decisions[
:, begin_idx:end_idx, :
] # [batch_size, 2 ** level, 2]
mu = mu * level_decisions # [batch_size, 2**level, 2]
begin_idx = end_idx
end_idx = begin_idx + 2 ** (level + 1)
mu = ops.reshape(mu, [batch_size, self.num_leaves]) # [batch_size, num_leaves]
probabilities = keras.activations.softmax(self.pi) # [num_leaves, num_classes]
outputs = ops.matmul(mu, probabilities) # [batch_size, num_classes]
return outputs
```
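As a quick sanity check (an illustrative snippet, not part of the original example), we
can route a batch of random feature vectors through a small tree and confirm that each
output row is a probability distribution over the classes:

```python
# Hypothetical smoke test of the routing logic on random features.
tree = NeuralDecisionTree(depth=3, num_features=8, used_features_rate=1.0, num_classes=2)
dummy_features = np.random.rand(4, 8).astype("float32")
probs = tree(dummy_features)
print(probs.shape)             # Expected: (4, 2)
print(ops.sum(probs, axis=1))  # Each row should sum to ~1.
```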
---
## Deep Neural Decision Forest
The neural decision forest model consists of a set of neural decision trees that are
trained simultaneously. The output of the forest model is the average of the outputs of its trees.
```python
class NeuralDecisionForest(keras.Model):
def __init__(self, num_trees, depth, num_features, used_features_rate, num_classes):
super().__init__()
self.ensemble = []
# Initialize the ensemble by adding NeuralDecisionTree instances.
# Each tree will have its own randomly selected input features to use.
for _ in range(num_trees):
self.ensemble.append(
NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
)
def call(self, inputs):
# Initialize the outputs: a [batch_size, num_classes] matrix of zeros.
batch_size = ops.shape(inputs)[0]
outputs = ops.zeros([batch_size, num_classes])
# Aggregate the outputs of trees in the ensemble.
for tree in self.ensemble:
outputs += tree(inputs)
# Divide the outputs by the ensemble size to get the average.
outputs /= len(self.ensemble)
return outputs
```
Finally, let's set up the code that will train and evaluate the model.
```python
learning_rate = 0.01
batch_size = 265
num_epochs = 10
def run_experiment(model):
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
print("Start training the model...")
train_dataset = get_dataset_from_csv(
train_data_file, shuffle=True, batch_size=batch_size
)
model.fit(train_dataset, epochs=num_epochs)
print("Model training finished")
print("Evaluating the model on the test data...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
```
---
## Experiment 1: train a decision tree model
In this experiment, we train a single neural decision tree model
where we use all input features.
```python
num_trees = 10
depth = 10
used_features_rate = 1.0
num_classes = len(TARGET_LABELS)
def create_tree_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
tree = NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
outputs = tree(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
tree_model = create_tree_model()
run_experiment(tree_model)
```
<div class="k-default-codeblock">
```
Start training the model...
Epoch 1/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 5s 26ms/step - loss: 0.5308 - sparse_categorical_accuracy: 0.8150
Epoch 2/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3476 - sparse_categorical_accuracy: 0.8429
Epoch 3/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3312 - sparse_categorical_accuracy: 0.8478
Epoch 4/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3247 - sparse_categorical_accuracy: 0.8495
Epoch 5/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3202 - sparse_categorical_accuracy: 0.8512
Epoch 6/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3158 - sparse_categorical_accuracy: 0.8536
Epoch 7/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3116 - sparse_categorical_accuracy: 0.8572
Epoch 8/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3071 - sparse_categorical_accuracy: 0.8608
Epoch 9/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 11ms/step - loss: 0.3026 - sparse_categorical_accuracy: 0.8630
Epoch 10/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.2975 - sparse_categorical_accuracy: 0.8653
Model training finished
Evaluating the model on the test data...
62/62 ━━━━━━━━━━━━━━━━━━━━ 1s 13ms/step - loss: 0.3279 - sparse_categorical_accuracy: 0.8463
Test accuracy: 85.08%
```
</div>
---
## Experiment 2: train a forest model
In this experiment, we train a neural decision forest with `num_trees` trees
where each tree uses a randomly selected 50% of the input features. You can control the number
of features to be used in each tree by setting the `used_features_rate` variable.
In addition, we set the depth to 5 instead of 10 as in the previous experiment.
```python
num_trees = 25
depth = 5
used_features_rate = 0.5
def create_forest_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
forest_model = NeuralDecisionForest(
num_trees, depth, num_features, used_features_rate, num_classes
)
outputs = forest_model(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
forest_model = create_forest_model()
run_experiment(forest_model)
```
<div class="k-default-codeblock">
```
Start training the model...
Epoch 1/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 47s 202ms/step - loss: 0.5469 - sparse_categorical_accuracy: 0.7915
Epoch 2/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3459 - sparse_categorical_accuracy: 0.8494
Epoch 3/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3268 - sparse_categorical_accuracy: 0.8523
Epoch 4/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3195 - sparse_categorical_accuracy: 0.8524
Epoch 5/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3149 - sparse_categorical_accuracy: 0.8539
Epoch 6/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3112 - sparse_categorical_accuracy: 0.8556
Epoch 7/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 10ms/step - loss: 0.3079 - sparse_categorical_accuracy: 0.8566
Epoch 8/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 9ms/step - loss: 0.3050 - sparse_categorical_accuracy: 0.8582
Epoch 9/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 9ms/step - loss: 0.3021 - sparse_categorical_accuracy: 0.8595
Epoch 10/10
123/123 ━━━━━━━━━━━━━━━━━━━━ 1s 9ms/step - loss: 0.2992 - sparse_categorical_accuracy: 0.8617
Model training finished
Evaluating the model on the test data...
62/62 ━━━━━━━━━━━━━━━━━━━━ 5s 39ms/step - loss: 0.3145 - sparse_categorical_accuracy: 0.8503
Test accuracy: 85.55%
```
</div> | keras-io/examples/structured_data/md/deep_neural_decision_forests.md/0 | {
"file_path": "keras-io/examples/structured_data/md/deep_neural_decision_forests.md",
"repo_id": "keras-io",
"token_count": 7337
} | 105 |
# Timeseries anomaly detection using an Autoencoder
**Author:** [pavithrasv](https://github.com/pavithrasv)<br>
**Date created:** 2020/05/31<br>
**Last modified:** 2020/05/31<br>
**Description:** Detect anomalies in a timeseries using an Autoencoder.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/timeseries_anomaly_detection.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/timeseries_anomaly_detection.py)
---
## Introduction
This script demonstrates how you can use a reconstruction convolutional
autoencoder model to detect anomalies in timeseries data.
---
## Setup
```python
import numpy as np
import pandas as pd
import keras
from keras import layers
from matplotlib import pyplot as plt
```
---
## Load the data
We will use the [Numenta Anomaly Benchmark (NAB)](
https://www.kaggle.com/boltzmannbrain/nab) dataset. It provides artificial
timeseries data containing labeled anomalous periods of behavior. Data are
ordered, timestamped, single-valued metrics.
We will use the `art_daily_small_noise.csv` file for training and the
`art_daily_jumpsup.csv` file for testing. The simplicity of this dataset
allows us to demonstrate anomaly detection effectively.
```python
master_url_root = "https://raw.githubusercontent.com/numenta/NAB/master/data/"
df_small_noise_url_suffix = "artificialNoAnomaly/art_daily_small_noise.csv"
df_small_noise_url = master_url_root + df_small_noise_url_suffix
df_small_noise = pd.read_csv(
df_small_noise_url, parse_dates=True, index_col="timestamp"
)
df_daily_jumpsup_url_suffix = "artificialWithAnomaly/art_daily_jumpsup.csv"
df_daily_jumpsup_url = master_url_root + df_daily_jumpsup_url_suffix
df_daily_jumpsup = pd.read_csv(
df_daily_jumpsup_url, parse_dates=True, index_col="timestamp"
)
```
---
## Quick look at the data
```python
print(df_small_noise.head())
print(df_daily_jumpsup.head())
```
<div class="k-default-codeblock">
```
value
timestamp
2014-04-01 00:00:00 18.324919
2014-04-01 00:05:00 21.970327
2014-04-01 00:10:00 18.624806
2014-04-01 00:15:00 21.953684
2014-04-01 00:20:00 21.909120
value
timestamp
2014-04-01 00:00:00 19.761252
2014-04-01 00:05:00 20.500833
2014-04-01 00:10:00 19.961641
2014-04-01 00:15:00 21.490266
2014-04-01 00:20:00 20.187739
```
</div>
---
## Visualize the data
### Timeseries data without anomalies
We will use the following data for training.
```python
fig, ax = plt.subplots()
df_small_noise.plot(legend=False, ax=ax)
plt.show()
```

### Timeseries data with anomalies
We will use the following data for testing and see if the sudden jump up in the
data is detected as an anomaly.
```python
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
plt.show()
```

---
## Prepare training data
Get data values from the training timeseries data file and normalize the
`value` data. We have a `value` for every 5 mins for 14 days.
- 24 * 60 / 5 = **288 timesteps per day**
- 288 * 14 = **4032 data points** in total
```python
# Normalize and save the mean and std we get,
# for normalizing test data.
training_mean = df_small_noise.mean()
training_std = df_small_noise.std()
df_training_value = (df_small_noise - training_mean) / training_std
print("Number of training samples:", len(df_training_value))
```
<div class="k-default-codeblock">
```
Number of training samples: 4032
```
</div>
### Create sequences
Create sequences combining `TIME_STEPS` contiguous data values from the
training data.
```python
TIME_STEPS = 288
# Generated training sequences for use in the model.
def create_sequences(values, time_steps=TIME_STEPS):
output = []
for i in range(len(values) - time_steps + 1):
output.append(values[i : (i + time_steps)])
return np.stack(output)
x_train = create_sequences(df_training_value.values)
print("Training input shape: ", x_train.shape)
```
<div class="k-default-codeblock">
```
Training input shape: (3745, 288, 1)
```
</div>
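As a quick check (an illustrative computation, not from the original notebook), the
number of sliding windows is `len(values) - time_steps + 1`:

```python
# 4032 data points with windows of 288 timesteps -> 3745 training sequences.
print(4032 - TIME_STEPS + 1)  # Expected: 3745, matching the shape printed above.
```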
---
## Build a model
We will build a convolutional reconstruction autoencoder model. The model will
take input of shape `(batch_size, sequence_length, num_features)` and return
output of the same shape. In this case, `sequence_length` is 288 and
`num_features` is 1.
```python
model = keras.Sequential(
[
layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
layers.Conv1D(
filters=32,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Dropout(rate=0.2),
layers.Conv1D(
filters=16,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Conv1DTranspose(
filters=16,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Dropout(rate=0.2),
layers.Conv1DTranspose(
filters=32,
kernel_size=7,
padding="same",
strides=2,
activation="relu",
),
layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"),
]
)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ conv1d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">144</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">144</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">72</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,600</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_transpose │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">144</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,808</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">144</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">288</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,616</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv1d_transpose_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">288</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">225</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1DTranspose</span>) │ │ │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">9,505</span> (37.13 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">9,505</span> (37.13 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Train the model
Please note that we are using `x_train` as both the input and the target
since this is a reconstruction model.
```python
history = model.fit(
x_train,
x_train,
epochs=50,
batch_size=128,
validation_split=0.1,
callbacks=[
keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")
],
)
```
<div class="k-default-codeblock">
```
Epoch 1/50
26/27 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 4ms/step - loss: 0.8419
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1700346169.474466 1961179 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
27/27 ━━━━━━━━━━━━━━━━━━━━ 10s 187ms/step - loss: 0.8262 - val_loss: 0.2280
Epoch 2/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.1485 - val_loss: 0.0513
Epoch 3/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0659 - val_loss: 0.0389
Epoch 4/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0563 - val_loss: 0.0341
Epoch 5/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0489 - val_loss: 0.0298
Epoch 6/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0434 - val_loss: 0.0272
Epoch 7/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0386 - val_loss: 0.0258
Epoch 8/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0349 - val_loss: 0.0241
Epoch 9/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0319 - val_loss: 0.0230
Epoch 10/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0297 - val_loss: 0.0236
Epoch 11/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0279 - val_loss: 0.0233
Epoch 12/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0264 - val_loss: 0.0225
Epoch 13/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0255 - val_loss: 0.0228
Epoch 14/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0245 - val_loss: 0.0223
Epoch 15/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0236 - val_loss: 0.0234
Epoch 16/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0227 - val_loss: 0.0256
Epoch 17/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0219 - val_loss: 0.0240
Epoch 18/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0214 - val_loss: 0.0245
Epoch 19/50
27/27 ━━━━━━━━━━━━━━━━━━━━ 0s 5ms/step - loss: 0.0207 - val_loss: 0.0250
```
</div>
Let's plot training and validation loss to see how the training went.
```python
plt.plot(history.history["loss"], label="Training Loss")
plt.plot(history.history["val_loss"], label="Validation Loss")
plt.legend()
plt.show()
```

---
## Detecting anomalies
We will detect anomalies by determining how well our model can reconstruct
the input data.
1. Find MAE loss on training samples.
2. Find max MAE loss value. This is the worst our model has performed trying
to reconstruct a sample. We will make this the `threshold` for anomaly
detection.
3. If the reconstruction loss for a sample is greater than this `threshold`
value then we can infer that the model is seeing a pattern that it isn't
familiar with. We will label this sample as an `anomaly`.
```python
# Get train MAE loss.
x_train_pred = model.predict(x_train)
train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel("Train MAE loss")
plt.ylabel("No of samples")
plt.show()
# Get reconstruction loss threshold.
threshold = np.max(train_mae_loss)
print("Reconstruction error threshold: ", threshold)
```
<div class="k-default-codeblock">
```
118/118 ━━━━━━━━━━━━━━━━━━━━ 1s 6ms/step
```
</div>

<div class="k-default-codeblock">
```
Reconstruction error threshold: 0.1232659916089631
```
</div>
### Compare reconstruction
Just for fun, let's see how our model has reconstructed the first sample.
This is the 288 timesteps from day 1 of our training dataset.
```python
# Checking how the first sequence is learnt
plt.plot(x_train[0])
plt.plot(x_train_pred[0])
plt.show()
```

### Prepare test data
```python
df_test_value = (df_daily_jumpsup - training_mean) / training_std
fig, ax = plt.subplots()
df_test_value.plot(legend=False, ax=ax)
plt.show()
# Create sequences from test values.
x_test = create_sequences(df_test_value.values)
print("Test input shape: ", x_test.shape)
# Get test MAE loss.
x_test_pred = model.predict(x_test)
test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
test_mae_loss = test_mae_loss.reshape((-1))
plt.hist(test_mae_loss, bins=50)
plt.xlabel("test MAE loss")
plt.ylabel("No of samples")
plt.show()
# Detect all the samples which are anomalies.
anomalies = test_mae_loss > threshold
print("Number of anomaly samples: ", np.sum(anomalies))
print("Indices of anomaly samples: ", np.where(anomalies))
```

<div class="k-default-codeblock">
```
Test input shape: (3745, 288, 1)
118/118 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step
```
</div>

<div class="k-default-codeblock">
```
Number of anomaly samples: 394
Indices of anomaly samples: (array([1654, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2710, 2711,
2712, 2713, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722,
2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 2733,
2734, 2735, 2736, 2737, 2738, 2739, 2740, 2741, 2742, 2743, 2744,
2745, 2746, 2747, 2748, 2749, 2750, 2751, 2752, 2753, 2754, 2755,
2756, 2757, 2758, 2759, 2760, 2761, 2762, 2763, 2764, 2765, 2766,
2767, 2768, 2769, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777,
2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788,
2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2798, 2799,
2800, 2801, 2802, 2803, 2804, 2805, 2806, 2807, 2808, 2809, 2810,
2811, 2812, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821,
2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2831, 2832,
2833, 2834, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843,
2844, 2845, 2846, 2847, 2848, 2849, 2850, 2851, 2852, 2853, 2854,
2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865,
2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876,
2877, 2878, 2879, 2880, 2881, 2882, 2883, 2884, 2885, 2886, 2887,
2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898,
2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2907, 2908, 2909,
2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920,
2921, 2922, 2923, 2924, 2925, 2926, 2927, 2928, 2929, 2930, 2931,
2932, 2933, 2934, 2935, 2936, 2937, 2938, 2939, 2940, 2941, 2942,
2943, 2944, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953,
2954, 2955, 2956, 2957, 2958, 2959, 2960, 2961, 2962, 2963, 2964,
2965, 2966, 2967, 2968, 2969, 2970, 2971, 2972, 2973, 2974, 2975,
2976, 2977, 2978, 2979, 2980, 2981, 2982, 2983, 2984, 2985, 2986,
2987, 2988, 2989, 2990, 2991, 2992, 2993, 2994, 2995, 2996, 2997,
2998, 2999, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008,
3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3017, 3018, 3019,
3020, 3021, 3022, 3023, 3024, 3025, 3026, 3027, 3028, 3029, 3030,
3031, 3032, 3033, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041,
3042, 3043, 3044, 3045, 3046, 3047, 3048, 3049, 3050, 3051, 3052,
3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063,
3064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074,
3075, 3076, 3077, 3078, 3079, 3080, 3081, 3082, 3083, 3084, 3085,
3086, 3087, 3088, 3089, 3090, 3091, 3092, 3093, 3094]),)
```
</div>
---
## Plot anomalies
We now know the samples of the data which are anomalies. With this, we will
find the corresponding `timestamps` from the original test data. We will be
using the following method to do that:
Let's say time_steps = 3 and we have 10 training values. Our `x_train` will
look like this:
- 0, 1, 2
- 1, 2, 3
- 2, 3, 4
- 3, 4, 5
- 4, 5, 6
- 5, 6, 7
- 6, 7, 8
- 7, 8, 9
All except the initial and the final time_steps-1 data values will appear in
`time_steps` number of samples. So, if we know that the samples
[(3, 4, 5), (4, 5, 6), (5, 6, 7)] are anomalies, we can say that the data point
5 is an anomaly.
```python
# data i is an anomaly if samples [(i - timesteps + 1) to (i)] are anomalies
anomalous_data_indices = []
for data_idx in range(TIME_STEPS - 1, len(df_test_value) - TIME_STEPS + 1):
if np.all(anomalies[data_idx - TIME_STEPS + 1 : data_idx]):
anomalous_data_indices.append(data_idx)
```
Let's overlay the anomalies on the original test data plot.
```python
df_subset = df_daily_jumpsup.iloc[anomalous_data_indices]
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
plt.show()
```

| keras-io/examples/timeseries/md/timeseries_anomaly_detection.md/0 | {
"file_path": "keras-io/examples/timeseries/md/timeseries_anomaly_detection.md",
"repo_id": "keras-io",
"token_count": 9094
} | 106 |
"""
Title: Image Classification using BigTransfer (BiT)
Author: [Sayan Nath](https://twitter.com/sayannath2350)
Date created: 2021/09/24
Last modified: 2024/01/03
Description: BigTransfer (BiT) State-of-the-art transfer learning for image classification.
Accelerator: GPU
Converted to Keras 3 by: [Sitam Meur](https://github.com/sitamgithub-MSIT)
"""
"""
## Introduction
BigTransfer (also known as BiT) is a state-of-the-art transfer learning method for image
classification. Transfer of pre-trained representations improves sample efficiency and
simplifies hyperparameter tuning when training deep neural networks for vision. BiT
revisits the paradigm of pre-training on large supervised datasets and fine-tuning the
model on a target task. It highlights the importance of appropriately choosing normalization
layers and of scaling the architecture capacity as the amount of pre-training data increases.
BigTransfer (BiT) is trained on public datasets, and code is released in
[TF2, Jax and Pytorch](https://github.com/google-research/big_transfer). This helps anyone reach
state-of-the-art performance on their task of interest, even with just a handful of
labeled images per class.
You can find BiT models pre-trained on
[ImageNet](https://image-net.org/challenges/LSVRC/2012/index) and ImageNet-21k in
[TFHub](https://tfhub.dev/google/collections/bit/1) as TensorFlow2 SavedModels that you
can use easily as Keras Layers. There are a variety of sizes ranging from a standard
ResNet50 to a ResNet152x4 (152 layers deep, 4x wider than a typical ResNet50) for users
with larger computational and memory budgets but higher accuracy requirements.

Figure: The x-axis shows the number of images used per class, ranging from 1 to the full
dataset. On the plots on the left, the curve in blue above is our BiT-L model, whereas
the curve below is a ResNet-50 pre-trained on ImageNet (ILSVRC-2012).
"""
"""
## Setup
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras import ops
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
SEEDS = 42
keras.utils.set_random_seed(SEEDS)
"""
## Gather Flower Dataset
"""
train_ds, validation_ds = tfds.load(
"tf_flowers",
split=["train[:85%]", "train[85%:]"],
as_supervised=True,
)
"""
## Visualise the dataset
"""
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
"""
## Define hyperparameters
"""
RESIZE_TO = 384
CROP_TO = 224
BATCH_SIZE = 64
STEPS_PER_EPOCH = 10
AUTO = tf.data.AUTOTUNE # optimise the pipeline performance
NUM_CLASSES = 5 # number of classes
SCHEDULE_LENGTH = (
500 # we will train on lower resolution images and will still attain good results
)
SCHEDULE_BOUNDARIES = [
200,
300,
400,
]  # the larger the dataset, the longer the schedule
"""
Hyperparameters like `SCHEDULE_LENGTH` and `SCHEDULE_BOUNDARIES` are determined based
on empirical results. The method has been explained in the [original
paper](https://arxiv.org/abs/1912.11370) and in their [Google AI Blog
Post](https://ai.googleblog.com/2020/05/open-sourcing-bit-exploring-large-scale.html).
The `SCHEDULE_LENGTH` also determines whether to use [MixUp
Augmentation](https://arxiv.org/abs/1710.09412) or not. You can also find an easy MixUp
implementation in [Keras Coding Examples](https://keras.io/examples/vision/mixup/).

"""
"""
## Define preprocessing helper functions
"""
SCHEDULE_LENGTH = SCHEDULE_LENGTH * 512 / BATCH_SIZE
random_flip = keras.layers.RandomFlip("horizontal")
random_crop = keras.layers.RandomCrop(CROP_TO, CROP_TO)
def preprocess_train(image, label):
image = random_flip(image)
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = random_crop(image)
image = image / 255.0
return (image, label)
def preprocess_test(image, label):
image = ops.image.resize(image, (RESIZE_TO, RESIZE_TO))
image = ops.cast(image, dtype="float32")
image = image / 255.0
return (image, label)
DATASET_NUM_TRAIN_EXAMPLES = train_ds.cardinality().numpy()
repeat_count = int(
SCHEDULE_LENGTH * BATCH_SIZE / DATASET_NUM_TRAIN_EXAMPLES * STEPS_PER_EPOCH
)
repeat_count += 50 + 1 # To ensure at least there are 50 epochs of training
"""
## Define the data pipeline
"""
# Training pipeline
pipeline_train = (
train_ds.shuffle(10000)
.repeat(repeat_count) # Repeat dataset_size / num_steps
.map(preprocess_train, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Validation pipeline
pipeline_validation = (
validation_ds.map(preprocess_test, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
"""
## Visualise the training samples
"""
image_batch, label_batch = next(iter(pipeline_train))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n])
plt.title(label_batch[n].numpy())
plt.axis("off")
"""
## Load pretrained TF-Hub model into a `KerasLayer`
"""
bit_model_url = "https://tfhub.dev/google/bit/m-r50x1/1"
bit_module = hub.load(bit_model_url)
"""
## Create BigTransfer (BiT) model
To create the new model, we:
1. Cut off the BiT model’s original head. This leaves us with the “pre-logits” output.
We do not have to do this if we use the ‘feature extractor’ models (i.e. all those in
subdirectories titled `feature_vectors`), since for those models the head has already
been cut off.
2. Add a new head with the number of outputs equal to the number of classes of our new
task. Note that it is important that we initialise the head to all zeroes.
"""
class MyBiTModel(keras.Model):
def __init__(self, num_classes, module, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.head = keras.layers.Dense(num_classes, kernel_initializer="zeros")
self.bit_model = module
def call(self, images):
bit_embedding = self.bit_model(images)
return self.head(bit_embedding)
model = MyBiTModel(num_classes=NUM_CLASSES, module=bit_module)
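"""
As an illustrative aside (an assumption, not part of the original example): because the
head kernel and bias are initialised to zeros, the model's logits are all zero before any
fine-tuning step, so early updates are driven entirely by the pre-trained BiT embedding.
"""
# Hypothetical check of the zero-initialised head on a dummy image.
_dummy_logits = model(tf.zeros((1, CROP_TO, CROP_TO, 3)))
print(_dummy_logits.numpy())  # Expected: a (1, 5) array of zeros.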
"""
## Define optimizer and loss
"""
learning_rate = 0.003 * BATCH_SIZE / 512
# Decay learning rate by a factor of 10 at SCHEDULE_BOUNDARIES.
lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=SCHEDULE_BOUNDARIES,
values=[
learning_rate,
learning_rate * 0.1,
learning_rate * 0.01,
learning_rate * 0.001,
],
)
optimizer = keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
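"""
A small illustrative check (added, not in the original example): the piecewise-constant
schedule drops the learning rate by a factor of 10 after each boundary step.
"""
for _step in [0, 250, 350, 450]:
    print(_step, float(lr_schedule(_step)))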
"""
## Compile the model
"""
model.compile(optimizer=optimizer, loss=loss_fn, metrics=["accuracy"])
"""
## Set up callbacks
"""
train_callbacks = [
keras.callbacks.EarlyStopping(
monitor="val_accuracy", patience=2, restore_best_weights=True
)
]
"""
## Train the model
"""
history = model.fit(
pipeline_train,
batch_size=BATCH_SIZE,
epochs=int(SCHEDULE_LENGTH / STEPS_PER_EPOCH),
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=pipeline_validation,
callbacks=train_callbacks,
)
"""
## Plot the training and validation metrics
"""
def plot_hist(hist):
plt.plot(hist.history["accuracy"])
plt.plot(hist.history["val_accuracy"])
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.title("Training Progress")
plt.ylabel("Accuracy/Loss")
plt.xlabel("Epochs")
plt.legend(["train_acc", "val_acc", "train_loss", "val_loss"], loc="upper left")
plt.show()
plot_hist(history)
"""
## Evaluate the model
"""
accuracy = model.evaluate(pipeline_validation)[1] * 100
print("Accuracy: {:.2f}%".format(accuracy))
"""
## Conclusion
BiT performs well across a surprisingly wide range of data regimes
-- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on
ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark
(VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class,
and 97.0% on CIFAR-10 with 10 examples per class.

You can experiment further with the BigTransfer Method by following the
[original paper](https://arxiv.org/abs/1912.11370).
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/bit) | [](https://huggingface.co/spaces/keras-io/siamese-contrastive) |
"""
| keras-io/examples/vision/bit.py/0 | {
"file_path": "keras-io/examples/vision/bit.py",
"repo_id": "keras-io",
"token_count": 3252
} | 107 |
"""
Title: Image Segmentation using Composable Fully-Convolutional Networks
Author: [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)
Date created: 2023/06/16
Last modified: 2023/12/25
Description: Using the Fully-Convolutional Network for Image Segmentation.
Accelerator: GPU
"""
"""
## Introduction
The following example walks through the steps to implement Fully-Convolutional Networks
for Image Segmentation on the Oxford-IIIT Pets dataset.
The model was proposed in the paper,
[Fully Convolutional Networks for Semantic Segmentation by Long et. al.(2014)](https://arxiv.org/abs/1411.4038).
Image segmentation is one of the most common and introductory tasks when it comes to
Computer Vision, where we extend the problem of Image Classification from
one-label-per-image to a pixel-wise classification problem.
In this example, we will assemble the aforementioned Fully-Convolutional Segmentation architecture,
capable of performing Image Segmentation.
The network extends the pooling layer outputs from the VGG in order to perform upsampling
and get a final result. The intermediate outputs coming from the 3rd, 4th and 5th Max-Pooling layers of VGG19 are
extracted out and upsampled at different levels and factors to get a final output with the same spatial shape as
the input, but with the class of each pixel present at each location instead of pixel intensity values.
Different intermediate pool layers are extracted and processed upon for different versions of the network.
The FCN architecture has 3 versions of differing quality.
- FCN-32S
- FCN-16S
- FCN-8S
All versions of the model derive their outputs through an iterative processing of
successive intermediate pool layers of the main backbone used.
A better idea can be gained from the figure below.
|  |
| :--: |
| **Diagram 1**: Combined Architecture Versions (Source: Paper) |
To get a better idea on Image Segmentation or find more pre-trained models, feel free to
navigate to the [Hugging Face Image Segmentation Models](https://huggingface.co/models?pipeline_tag=image-segmentation) page,
or a [PyImageSearch Blog on Semantic Segmentation](https://pyimagesearch.com/2018/09/03/semantic-segmentation-with-opencv-and-deep-learning/)
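As a rough illustration (assuming a 224x224 input to VGG19), the 3rd, 4th and 5th pooling
layers produce feature maps of spatial size 28x28, 14x14 and 7x7 respectively, so FCN-32S
needs a single 32x upsampling of the last pooling output, while FCN-16S and FCN-8S fuse in
the 4th and 3rd pooling outputs before upsampling by 16x and 8x.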
"""
"""
## Setup Imports
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import ops
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import numpy as np
AUTOTUNE = tf.data.AUTOTUNE
"""
## Set configurations for notebook variables
We set the required parameters for the experiment.
The segmentation masks in the chosen dataset contain a total of 4 classes per image.
We also set our hyperparameters in this cell.
Mixed Precision is also available as an option on systems which support it, to reduce
computational load.
This would make most tensors use `16-bit float` values instead of `32-bit float`
values, in places where it will not adversely affect computation.
This means, during computation, TensorFlow will use `16-bit float` Tensors to increase speed at the cost of precision,
while storing the values in their original default `32-bit float` form.
"""
NUM_CLASSES = 4
INPUT_HEIGHT = 224
INPUT_WIDTH = 224
LEARNING_RATE = 1e-3
WEIGHT_DECAY = 1e-4
EPOCHS = 20
BATCH_SIZE = 32
MIXED_PRECISION = True
SHUFFLE = True
# Mixed-precision setting
if MIXED_PRECISION:
policy = keras.mixed_precision.Policy("mixed_float16")
keras.mixed_precision.set_global_policy(policy)
"""
## Load dataset
We make use of the [Oxford-IIIT Pets dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/)
which contains a total of 7,349 samples and their segmentation masks.
We have 37 classes, with roughly 200 samples per class.
Our training and validation dataset has 3,128 and 552 samples respectively.
Aside from this, our test split has a total of 3,669 samples.
We set a `batch_size` parameter that will batch our samples together, and use a `shuffle`
parameter to shuffle our samples.
"""
(train_ds, valid_ds, test_ds) = tfds.load(
"oxford_iiit_pet",
split=["train[:85%]", "train[85%:]", "test"],
batch_size=BATCH_SIZE,
shuffle_files=SHUFFLE,
)
"""
## Unpack and preprocess dataset
We define a simple function that performs resizing over our
training, validation and test datasets.
We apply the same process to the masks as well, to make sure both are aligned in terms of shape and size.
"""
# Image and Mask Pre-processing
def unpack_resize_data(section):
image = section["image"]
segmentation_mask = section["segmentation_mask"]
resize_layer = keras.layers.Resizing(INPUT_HEIGHT, INPUT_WIDTH)
image = resize_layer(image)
segmentation_mask = resize_layer(segmentation_mask)
return image, segmentation_mask
train_ds = train_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
valid_ds = valid_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(unpack_resize_data, num_parallel_calls=AUTOTUNE)
"""
## Visualize one random sample from the pre-processed dataset
We visualize what a random sample in our test split of the dataset looks like, and plot
the segmentation mask on top to see the effective mask areas.
Note that we have performed pre-processing on this dataset too,
which makes the image and the mask the same size.
"""
# Select random image and mask. Cast to NumPy array
# for Matplotlib visualization.
images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
# Overlay segmentation mask on top of image.
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
ax[0].set_title("Image")
ax[0].imshow(test_image / 255.0)
ax[1].set_title("Image with segmentation mask overlay")
ax[1].imshow(test_image / 255.0)
ax[1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
plt.show()
"""
## Perform VGG-specific pre-processing
`keras.applications.VGG19` requires the use of a `preprocess_input` function that
performs ImageNet-style preprocessing on the inputs (for VGG19 this is Caffe-style
channel re-ordering and per-channel mean subtraction).
"""
def preprocess_data(image, segmentation_mask):
image = keras.applications.vgg19.preprocess_input(image)
return image, segmentation_mask
train_ds = (
train_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
valid_ds = (
valid_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
test_ds = (
test_ds.map(preprocess_data, num_parallel_calls=AUTOTUNE)
.shuffle(buffer_size=1024)
.prefetch(buffer_size=1024)
)
"""
## Model Definition
The Fully-Convolutional Network boasts a simple architecture composed of only
`keras.layers.Conv2D` Layers, `keras.layers.Dense` layers and `keras.layers.Dropout`
layers.
|  |
| :--: |
| **Diagram 2**: Generic FCN Forward Pass (Source: Paper)|
Pixel-wise prediction is performed by a softmax convolutional layer with the same
spatial size as the image, such that we can perform a direct comparison against the ground-truth mask.
We track several important metrics on the network, such as accuracy and Mean-Intersection-over-Union (mIoU).
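As a quick illustration of what pixel-wise prediction means here (using random numbers in place of
real network output; the arrays below are purely hypothetical):

```python
import numpy as np

rng = np.random.default_rng(0)
scores = rng.random((224, 224, 4))                    # fake per-pixel class scores
probs = scores / scores.sum(axis=-1, keepdims=True)   # stand-in for the softmax output
predicted_mask = probs.argmax(axis=-1)                # (224, 224) integer class map
print(predicted_mask.shape, predicted_mask.min(), predicted_mask.max())
```

Metrics such as accuracy and mIoU compare this per-pixel class map against the ground-truth mask.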
"""
"""
### Backbone (VGG-19)
We use the [VGG-19 network](https://keras.io/api/applications/vgg/) as the backbone, as
the paper suggests it to be one of the most effective backbones for this network.
We extract different outputs from the network by making use of `keras.models.Model`.
Following this, we add layers on top to make a network perfectly simulating that of
Diagram 1.
The backbone's `keras.layers.Dense` layers will be converted to `keras.layers.Conv2D`
layers based on the [original Caffe code present here.](https://github.com/linxi159/FCN-caffe/blob/master/pascalcontext-fcn16s/net.py)
All 3 networks will share the same backbone weights, but will have differing results
based on their extensions.
We make the backbone non-trainable to reduce training time.
It is also noted in the paper that making the backbone trainable does not yield major benefits.
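As a quick sanity check of the upsampling factors used later: for a `224x224` input, the extracted
pool outputs shrink by strides of 8, 16 and 32, which is where the factors of 8, 16 and 32 come from.
A small worked example, using the `INPUT_HEIGHT`/`INPUT_WIDTH` constants defined above:

```python
# Spatial sizes of the extracted VGG19 pool outputs for a 224x224 input.
for name, stride in [("block3_pool", 8), ("block4_pool", 16), ("block5_pool", 32)]:
    print(name, INPUT_HEIGHT // stride, "x", INPUT_WIDTH // stride)
# block3_pool 28 x 28
# block4_pool 14 x 14
# block5_pool 7 x 7
```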
"""
input_layer = keras.Input(shape=(INPUT_HEIGHT, INPUT_WIDTH, 3))
# VGG Model backbone with pre-trained ImageNet weights.
vgg_model = keras.applications.vgg19.VGG19(include_top=True, weights="imagenet")
# Extracting different outputs from same model
fcn_backbone = keras.models.Model(
inputs=vgg_model.layers[1].input,
outputs=[
vgg_model.get_layer(block_name).output
for block_name in ["block3_pool", "block4_pool", "block5_pool"]
],
)
# Setting backbone to be non-trainable
fcn_backbone.trainable = False
x = fcn_backbone(input_layer)
# Converting Dense layers to Conv2D layers
units = [4096, 4096]
dense_convs = []
for filter_idx in range(len(units)):
dense_conv = keras.layers.Conv2D(
filters=units[filter_idx],
kernel_size=(7, 7) if filter_idx == 0 else (1, 1),
strides=(1, 1),
activation="relu",
padding="same",
use_bias=False,
kernel_initializer=keras.initializers.Constant(1.0),
)
dense_convs.append(dense_conv)
dropout_layer = keras.layers.Dropout(0.5)
dense_convs.append(dropout_layer)
dense_convs = keras.Sequential(dense_convs)
dense_convs.trainable = False
x[-1] = dense_convs(x[-1])
pool3_output, pool4_output, pool5_output = x
"""
### FCN-32S
We extend the last output, perform a `1x1 Convolution` and perform 2D Bilinear Upsampling
by a factor of 32 to get an image of the same size as that of our input.
We use a simple `keras.layers.UpSampling2D` layer over a `keras.layers.Conv2DTranspose`,
since it yields performance benefits from being a deterministic mathematical operation
rather than a convolutional operation.
It is also noted in the paper that making the up-sampling parameters trainable does not yield benefits.
The original experiments of the paper used upsampling as well.
"""
# 1x1 convolution to set channels = number of classes
pool5 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="relu",
)
# Get Softmax outputs for all classes
fcn32s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn32s_upsampling = keras.layers.UpSampling2D(
size=(32, 32),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
final_fcn32s_pool = pool5(pool5_output)
final_fcn32s_output = fcn32s_conv_layer(final_fcn32s_pool)
final_fcn32s_output = fcn32s_upsampling(final_fcn32s_output)
fcn32s_model = keras.Model(inputs=input_layer, outputs=final_fcn32s_output)
"""
### FCN-16S
The pooling output from the FCN-32S is extended and added to the 4th-level Pooling output
of our backbone.
Following this, we upsample by a factor of 16 to get an image of the same
size as that of our input.
"""
# 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool4 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool4_output)
# Intermediate up-sample
pool5 = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn32s_pool)
# Get Softmax outputs for all classes
fcn16s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn16s_upsample_layer = keras.layers.UpSampling2D(
size=(16, 16),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn16s_pool = keras.layers.Add()([pool4, pool5])
final_fcn16s_output = fcn16s_conv_layer(final_fcn16s_pool)
final_fcn16s_output = fcn16s_upsample_layer(final_fcn16s_output)
fcn16s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn16s_output)
"""
### FCN-8S
The pooling output from the FCN-16S is extended once more, and added to the 3rd-level
pooling output of our backbone.
This result is upsampled by a factor of 8 to get an image of the same size as that of our input.
"""
# 1x1 convolution to set channels = number of classes
# Followed from the original Caffe implementation
pool3 = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
padding="same",
strides=(1, 1),
activation="linear",
kernel_initializer=keras.initializers.Zeros(),
)(pool3_output)
# Intermediate up-sample
intermediate_pool_output = keras.layers.UpSampling2D(
size=(2, 2),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)(final_fcn16s_pool)
# Get Softmax outputs for all classes
fcn8s_conv_layer = keras.layers.Conv2D(
filters=NUM_CLASSES,
kernel_size=(1, 1),
activation="softmax",
padding="same",
strides=(1, 1),
)
# Up-sample to original image size
fcn8s_upsample_layer = keras.layers.UpSampling2D(
size=(8, 8),
data_format=keras.backend.image_data_format(),
interpolation="bilinear",
)
# Add intermediate outputs
final_fcn8s_pool = keras.layers.Add()([pool3, intermediate_pool_output])
final_fcn8s_output = fcn8s_conv_layer(final_fcn8s_pool)
final_fcn8s_output = fcn8s_upsample_layer(final_fcn8s_output)
fcn8s_model = keras.models.Model(inputs=input_layer, outputs=final_fcn8s_output)
"""
### Load weights into backbone
It was noted in the paper, as well as through experimentation, that extracting the weights
of the last 2 fully-connected Dense layers of the backbone, reshaping them to
fit the `keras.layers.Conv2D` layers that we had previously created in place of the
`keras.layers.Dense` layers, and setting them on those layers yields far better results and a significant
increase in mIoU performance.
"""
# VGG's last 2 layers
weights1 = vgg_model.get_layer("fc1").get_weights()[0]
weights2 = vgg_model.get_layer("fc2").get_weights()[0]
weights1 = weights1.reshape(7, 7, 512, 4096)
weights2 = weights2.reshape(1, 1, 4096, 4096)
dense_convs.layers[0].set_weights([weights1])
dense_convs.layers[2].set_weights([weights2])
"""
## Training
The original paper talks about making use of [SGD with Momentum](https://keras.io/api/optimizers/sgd/) as the optimizer of choice.
But it was noticed during experimentation that
[AdamW](https://keras.io/api/optimizers/adamw/)
yielded better results in terms of mIOU and Pixel-wise Accuracy.
"""
"""
### FCN-32S
"""
fcn32s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn32s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn32s_model.compile(
optimizer=fcn32s_optimizer,
loss=fcn32s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn32s_history = fcn32s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
"""
### FCN-16S
"""
fcn16s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn16s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn16s_model.compile(
optimizer=fcn16s_optimizer,
loss=fcn16s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn16s_history = fcn16s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
"""
### FCN-8S
"""
fcn8s_optimizer = keras.optimizers.AdamW(
learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY
)
fcn8s_loss = keras.losses.SparseCategoricalCrossentropy()
# Maintain mIOU and Pixel-wise Accuracy as metrics
fcn8s_model.compile(
optimizer=fcn8s_optimizer,
loss=fcn8s_loss,
metrics=[
keras.metrics.MeanIoU(num_classes=NUM_CLASSES, sparse_y_pred=False),
keras.metrics.SparseCategoricalAccuracy(),
],
)
fcn8s_history = fcn8s_model.fit(train_ds, epochs=EPOCHS, validation_data=valid_ds)
"""
## Visualizations
"""
"""
### Plotting metrics for training run
We perform a comparative study between all 3 versions of the model by tracking training
and validation metrics of Accuracy, Loss and Mean IoU.
"""
total_plots = len(fcn32s_history.history)
cols = total_plots // 2
rows = total_plots // cols
if total_plots % cols != 0:
rows += 1
# Set all history dictionary objects
fcn32s_dict = fcn32s_history.history
fcn16s_dict = fcn16s_history.history
fcn8s_dict = fcn8s_history.history
pos = range(1, total_plots + 1)
plt.figure(figsize=(15, 10))
for i, ((key_32s, value_32s), (key_16s, value_16s), (key_8s, value_8s)) in enumerate(
zip(fcn32s_dict.items(), fcn16s_dict.items(), fcn8s_dict.items())
):
plt.subplot(rows, cols, pos[i])
plt.plot(range(len(value_32s)), value_32s)
plt.plot(range(len(value_16s)), value_16s)
plt.plot(range(len(value_8s)), value_8s)
plt.title(str(key_32s) + " (combined)")
plt.legend(["FCN-32S", "FCN-16S", "FCN-8S"])
plt.show()
"""
### Visualizing predicted segmentation masks
To understand the results and see them better, we pick a random image from the test
dataset and perform inference on it to see the masks generated by each model.
Note: For better results, the model must be trained for a higher number of epochs.
"""
images, masks = next(iter(test_ds))
random_idx = keras.random.uniform([], minval=0, maxval=BATCH_SIZE, seed=10)
# Get random test image and mask
test_image = images[int(random_idx)].numpy().astype("float")
test_mask = masks[int(random_idx)].numpy().astype("float")
pred_image = ops.expand_dims(test_image, axis=0)
pred_image = keras.applications.vgg19.preprocess_input(pred_image)
# Perform inference on FCN-32S
pred_mask_32s = fcn32s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_32s = np.argmax(pred_mask_32s, axis=-1)
pred_mask_32s = pred_mask_32s[0, ...]
# Perform inference on FCN-16S
pred_mask_16s = fcn16s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_16s = np.argmax(pred_mask_16s, axis=-1)
pred_mask_16s = pred_mask_16s[0, ...]
# Perform inference on FCN-8S
pred_mask_8s = fcn8s_model.predict(pred_image, verbose=0).astype("float")
pred_mask_8s = np.argmax(pred_mask_8s, axis=-1)
pred_mask_8s = pred_mask_8s[0, ...]
# Plot all results
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15, 8))
fig.delaxes(ax[0, 2])
ax[0, 0].set_title("Image")
ax[0, 0].imshow(test_image / 255.0)
ax[0, 1].set_title("Image with ground truth overlay")
ax[0, 1].imshow(test_image / 255.0)
ax[0, 1].imshow(
test_mask,
cmap="inferno",
alpha=0.6,
)
ax[1, 0].set_title("Image with FCN-32S mask overlay")
ax[1, 0].imshow(test_image / 255.0)
ax[1, 0].imshow(pred_mask_32s, cmap="inferno", alpha=0.6)
ax[1, 1].set_title("Image with FCN-16S mask overlay")
ax[1, 1].imshow(test_image / 255.0)
ax[1, 1].imshow(pred_mask_16s, cmap="inferno", alpha=0.6)
ax[1, 2].set_title("Image with FCN-8S mask overlay")
ax[1, 2].imshow(test_image / 255.0)
ax[1, 2].imshow(pred_mask_8s, cmap="inferno", alpha=0.6)
plt.show()
"""
## Conclusion
The Fully-Convolutional Network is an exceptionally simple network that has yielded
strong results in Image Segmentation tasks across different benchmarks.
With the advent of better mechanisms like [Attention](https://arxiv.org/abs/1706.03762), as used in
[SegFormer](https://arxiv.org/abs/2105.15203) and
[DETR](https://arxiv.org/abs/2005.12872), this model serves as a quick way to iterate and
find baselines for this task on unknown data.
"""
"""
## Acknowledgements
I thank [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ayush
Thakur](https://twitter.com/ayushthakur0) and [Ritwik
Raha](https://twitter.com/ritwik_raha) for giving a preliminary review of the example.
I also thank the [Google Developer
Experts](https://developers.google.com/community/experts) program.
"""
| keras-io/examples/vision/fully_convolutional_network.py/0 | {
"file_path": "keras-io/examples/vision/fully_convolutional_network.py",
"repo_id": "keras-io",
"token_count": 7260
} | 108 |
<jupyter_start><jupyter_text>Image classification with EANet (External Attention Transformer)**Author:** [ZhiYong Chang](https://github.com/czy00000)**Date created:** 2021/10/19**Last modified:** 2023/07/18**Description:** Image classification with a Transformer that leverages external attention. IntroductionThis example implements the [EANet](https://arxiv.org/abs/2105.02358)model for image classification, and demonstrates it on the CIFAR-100 dataset.EANet introduces a novel attention mechanismnamed ***external attention***, based on two external, small, learnable, andshared memories, which can be implemented easily by simply using two cascadedlinear layers and two normalization layers. It conveniently replaces self-attentionas used in existing architectures. External attention has linear complexity, as it onlyimplicitly considers the correlations between all samples. Setup<jupyter_code>import keras
from keras import layers
from keras import ops
import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Prepare the data<jupyter_code>num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")<jupyter_output><empty_output><jupyter_text>Configure the hyperparameters<jupyter_code>weight_decay = 0.0001
learning_rate = 0.001
label_smoothing = 0.1
validation_split = 0.2
batch_size = 128
num_epochs = 50
patch_size = 2 # Size of the patches to be extracted from the input images.
num_patches = (input_shape[0] // patch_size) ** 2 # Number of patches
embedding_dim = 64 # Number of hidden units.
mlp_dim = 64
dim_coefficient = 4
num_heads = 4
attention_dropout = 0.2
projection_dropout = 0.2
num_transformer_blocks = 8 # Number of repetitions of the transformer layer
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")<jupyter_output><empty_output><jupyter_text>Use data augmentation<jupyter_code>data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.RandomFlip("horizontal"),
layers.RandomRotation(factor=0.1),
layers.RandomContrast(factor=0.1),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)<jupyter_output><empty_output><jupyter_text>Implement the patch extraction and encoding layer<jupyter_code>class PatchExtract(layers.Layer):
def __init__(self, patch_size, **kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
def call(self, x):
B, C = ops.shape(x)[0], ops.shape(x)[-1]
x = ops.image.extract_patches(x, self.patch_size)
x = ops.reshape(x, (B, -1, self.patch_size * self.patch_size * C))
return x
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super().__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim)
def call(self, patch):
pos = ops.arange(start=0, stop=self.num_patch, step=1)
return self.proj(patch) + self.pos_embed(pos)<jupyter_output><empty_output><jupyter_text>Implement the external attention block<jupyter_code>def external_attention(
x,
dim,
num_heads,
dim_coefficient=4,
attention_dropout=0,
projection_dropout=0,
):
_, num_patch, channel = x.shape
assert dim % num_heads == 0
num_heads = num_heads * dim_coefficient
x = layers.Dense(dim * dim_coefficient)(x)
# create tensor [batch_size, num_patches, num_heads, dim*dim_coefficient//num_heads]
x = ops.reshape(x, (-1, num_patch, num_heads, dim * dim_coefficient // num_heads))
x = ops.transpose(x, axes=[0, 2, 1, 3])
# a linear layer M_k
attn = layers.Dense(dim // dim_coefficient)(x)
# normalize attention map
attn = layers.Softmax(axis=2)(attn)
    # double-normalization
attn = layers.Lambda(
lambda attn: ops.divide(
attn,
ops.convert_to_tensor(1e-9) + ops.sum(attn, axis=-1, keepdims=True),
)
)(attn)
attn = layers.Dropout(attention_dropout)(attn)
# a linear layer M_v
x = layers.Dense(dim * dim_coefficient // num_heads)(attn)
x = ops.transpose(x, axes=[0, 2, 1, 3])
x = ops.reshape(x, [-1, num_patch, dim * dim_coefficient])
# a linear layer to project original dim
x = layers.Dense(dim)(x)
x = layers.Dropout(projection_dropout)(x)
return x<jupyter_output><empty_output><jupyter_text>Implement the MLP block<jupyter_code>def mlp(x, embedding_dim, mlp_dim, drop_rate=0.2):
x = layers.Dense(mlp_dim, activation=ops.gelu)(x)
x = layers.Dropout(drop_rate)(x)
x = layers.Dense(embedding_dim)(x)
x = layers.Dropout(drop_rate)(x)
return x<jupyter_output><empty_output><jupyter_text>Implement the Transformer block<jupyter_code>def transformer_encoder(
x,
embedding_dim,
mlp_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
attention_type="external_attention",
):
residual_1 = x
x = layers.LayerNormalization(epsilon=1e-5)(x)
if attention_type == "external_attention":
x = external_attention(
x,
embedding_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
)
elif attention_type == "self_attention":
x = layers.MultiHeadAttention(
num_heads=num_heads,
key_dim=embedding_dim,
dropout=attention_dropout,
)(x, x)
x = layers.add([x, residual_1])
residual_2 = x
x = layers.LayerNormalization(epsilon=1e-5)(x)
x = mlp(x, embedding_dim, mlp_dim)
x = layers.add([x, residual_2])
    return x<jupyter_output><empty_output><jupyter_text>Implement the EANet model The EANet model leverages external attention. The computational complexity of traditional self-attention is `O(d * N ** 2)`, where `d` is the embedding size, and `N` is the number of patches. The authors find that most pixels are closely related to just a few other pixels, and an `N`-to-`N` attention matrix may be redundant. So, they propose as an alternative an external attention module whose computational complexity is `O(d * S * N)`. As `d` and `S` are hyper-parameters, the proposed algorithm is linear in the number of pixels. In fact, this is equivalent to a drop-patch operation, because a lot of the information contained in a patch of an image is redundant and unimportant.<jupyter_code>def get_model(attention_type="external_attention"):
inputs = layers.Input(shape=input_shape)
# Image augment
x = data_augmentation(inputs)
# Extract patches.
x = PatchExtract(patch_size)(x)
# Create patch embedding.
x = PatchEmbedding(num_patches, embedding_dim)(x)
# Create Transformer block.
for _ in range(num_transformer_blocks):
x = transformer_encoder(
x,
embedding_dim,
mlp_dim,
num_heads,
dim_coefficient,
attention_dropout,
projection_dropout,
attention_type,
)
x = layers.GlobalAveragePooling1D()(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model<jupyter_output><empty_output><jupyter_text>Train on CIFAR-100<jupyter_code>model = get_model(attention_type="external_attention")
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing),
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=validation_split,
)<jupyter_output><empty_output><jupyter_text>Let's visualize the training progress of the model.<jupyter_code>plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()<jupyter_output><empty_output><jupyter_text>Let's display the final results of the test on CIFAR-100.<jupyter_code>loss, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test loss: {round(loss, 2)}")
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/eanet.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/eanet.ipynb",
"repo_id": "keras-io",
"token_count": 3610
} | 109 |
<jupyter_start><jupyter_text>Keypoint Detection with Transfer Learning**Author:** [Sayak Paul](https://twitter.com/RisingSayak), converted to Keras 3 by [Muhammad Anas Raza](https://anasrz.com)**Date created:** 2021/05/02**Last modified:** 2023/07/19**Description:** Training a keypoint detector with data augmentation and transfer learning. Keypoint detection consists of locating key object parts. For example, the key partsof our faces include nose tips, eyebrows, eye corners, and so on. These parts help torepresent the underlying object in a feature-rich manner. Keypoint detection hasapplications that include pose estimation, face detection, etc.In this example, we will build a keypoint detector using the[StanfordExtra dataset](https://github.com/benjiebob/StanfordExtra),using transfer learning. This example requires TensorFlow 2.4 or higher,as well as [`imgaug`](https://imgaug.readthedocs.io/) library,which can be installed using the following command:<jupyter_code>!pip install -q -U imgaug<jupyter_output><empty_output><jupyter_text>Data collection The StanfordExtra dataset contains 12,000 images of dogs together with keypoints andsegmentation maps. It is developed from the [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/).It can be downloaded with the command below:<jupyter_code>!wget -q http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar<jupyter_output><empty_output><jupyter_text>Annotations are provided as a single JSON file in the StanfordExtra dataset and one needsto fill [this form](https://forms.gle/sRtbicgxsWvRtRmUA) to get access to it. Theauthors explicitly instruct users not to share the JSON file, and this example respects this wish:you should obtain the JSON file yourself.The JSON file is expected to be locally available as `stanfordextra_v12.zip`.After the files are downloaded, we can extract the archives.<jupyter_code>!tar xf images.tar
!unzip -qq ~/stanfordextra_v12.zip<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>from keras import layers
import keras
from imgaug.augmentables.kps import KeypointsOnImage
from imgaug.augmentables.kps import Keypoint
import imgaug.augmenters as iaa
from PIL import Image
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import json
import os<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>IMG_SIZE = 224
BATCH_SIZE = 64
EPOCHS = 5
NUM_KEYPOINTS = 24 * 2 # 24 pairs each having x and y coordinates<jupyter_output><empty_output><jupyter_text>Load dataThe authors also provide a metadata file that specifies additional information about thekeypoints, like color information, animal pose name, etc. We will load this file in a `pandas`dataframe to extract information for visualization purposes.<jupyter_code>IMG_DIR = "Images"
JSON = "StanfordExtra_V12/StanfordExtra_v12.json"
KEYPOINT_DEF = (
"https://github.com/benjiebob/StanfordExtra/raw/master/keypoint_definitions.csv"
)
# Load the ground-truth annotations.
with open(JSON) as infile:
json_data = json.load(infile)
# Set up a dictionary, mapping all the ground-truth information
# with respect to the path of the image.
json_dict = {i["img_path"]: i for i in json_data}<jupyter_output><empty_output><jupyter_text>A single entry of `json_dict` looks like the following:```'n02085782-Japanese_spaniel/n02085782_2886.jpg':{'img_bbox': [205, 20, 116, 201], 'img_height': 272, 'img_path': 'n02085782-Japanese_spaniel/n02085782_2886.jpg', 'img_width': 350, 'is_multiple_dogs': False, 'joints': [[108.66666666666667, 252.0, 1], [147.66666666666666, 229.0, 1], [163.5, 208.5, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [54.0, 244.0, 1], [77.33333333333333, 225.33333333333334, 1], [79.0, 196.5, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [150.66666666666666, 86.66666666666667, 1], [88.66666666666667, 73.0, 1], [116.0, 106.33333333333333, 1], [109.0, 123.33333333333333, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], 'seg': ...}``` In this example, the keys we are interested in are:* `img_path`* `joints`There are a total of 24 entries present inside `joints`. Each entry has 3 values:* x-coordinate* y-coordinate* visibility flag of the keypoints (1 indicates visibility and 0 indicates non-visibility)As we can see `joints` contain multiple `[0, 0, 0]` entries which denote that thosekeypoints were not labeled. In this example, we will consider both non-visible as well asunlabeled keypoints in order to allow mini-batch learning.<jupyter_code># Load the metdata definition file and preview it.
keypoint_def = pd.read_csv(KEYPOINT_DEF)
keypoint_def.head()
# Extract the colours and labels.
colours = keypoint_def["Hex colour"].values.tolist()
colours = ["#" + colour for colour in colours]
labels = keypoint_def["Name"].values.tolist()
# Utility for reading an image and for getting its annotations.
def get_dog(name):
data = json_dict[name]
img_data = plt.imread(os.path.join(IMG_DIR, data["img_path"]))
# If the image is RGBA convert it to RGB.
if img_data.shape[-1] == 4:
img_data = img_data.astype(np.uint8)
img_data = Image.fromarray(img_data)
img_data = np.array(img_data.convert("RGB"))
data["img_data"] = img_data
return data<jupyter_output><empty_output><jupyter_text>Visualize dataNow, we write a utility function to visualize the images and their keypoints.<jupyter_code># Parts of this code come from here:
# https://github.com/benjiebob/StanfordExtra/blob/master/demo.ipynb
def visualize_keypoints(images, keypoints):
fig, axes = plt.subplots(nrows=len(images), ncols=2, figsize=(16, 12))
[ax.axis("off") for ax in np.ravel(axes)]
for (ax_orig, ax_all), image, current_keypoint in zip(axes, images, keypoints):
ax_orig.imshow(image)
ax_all.imshow(image)
# If the keypoints were formed by `imgaug` then the coordinates need
# to be iterated differently.
if isinstance(current_keypoint, KeypointsOnImage):
for idx, kp in enumerate(current_keypoint.keypoints):
ax_all.scatter(
[kp.x],
[kp.y],
c=colours[idx],
marker="x",
s=50,
linewidths=5,
)
else:
current_keypoint = np.array(current_keypoint)
# Since the last entry is the visibility flag, we discard it.
current_keypoint = current_keypoint[:, :2]
for idx, (x, y) in enumerate(current_keypoint):
ax_all.scatter([x], [y], c=colours[idx], marker="x", s=50, linewidths=5)
plt.tight_layout(pad=2.0)
plt.show()
# Select four samples randomly for visualization.
samples = list(json_dict.keys())
num_samples = 4
selected_samples = np.random.choice(samples, num_samples, replace=False)
images, keypoints = [], []
for sample in selected_samples:
data = get_dog(sample)
image = data["img_data"]
keypoint = data["joints"]
images.append(image)
keypoints.append(keypoint)
visualize_keypoints(images, keypoints)<jupyter_output><empty_output><jupyter_text>The plots show that we have images of non-uniform sizes, which is expected in mostreal-world scenarios. However, if we resize these images to have a uniform shape (forinstance (224 x 224)) their ground-truth annotations will also be affected. The sameapplies if we apply any geometric transformation (horizontal flip, for e.g.) to an image.Fortunately, `imgaug` provides utilities that can handle this issue.In the next section, we will write a data generator inheriting the[`keras.utils.Sequence`](https://keras.io/api/utils/python_utils/sequence-class) classthat applies data augmentation on batches of data using `imgaug`. Prepare data generator<jupyter_code>class KeyPointsDataset(keras.utils.PyDataset):
def __init__(self, image_keys, aug, batch_size=BATCH_SIZE, train=True, **kwargs):
super().__init__(**kwargs)
self.image_keys = image_keys
self.aug = aug
self.batch_size = batch_size
self.train = train
self.on_epoch_end()
def __len__(self):
return len(self.image_keys) // self.batch_size
def on_epoch_end(self):
self.indexes = np.arange(len(self.image_keys))
if self.train:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
image_keys_temp = [self.image_keys[k] for k in indexes]
(images, keypoints) = self.__data_generation(image_keys_temp)
return (images, keypoints)
def __data_generation(self, image_keys_temp):
batch_images = np.empty((self.batch_size, IMG_SIZE, IMG_SIZE, 3), dtype="int")
batch_keypoints = np.empty(
(self.batch_size, 1, 1, NUM_KEYPOINTS), dtype="float32"
)
for i, key in enumerate(image_keys_temp):
data = get_dog(key)
current_keypoint = np.array(data["joints"])[:, :2]
kps = []
# To apply our data augmentation pipeline, we first need to
# form Keypoint objects with the original coordinates.
for j in range(0, len(current_keypoint)):
kps.append(Keypoint(x=current_keypoint[j][0], y=current_keypoint[j][1]))
# We then project the original image and its keypoint coordinates.
current_image = data["img_data"]
kps_obj = KeypointsOnImage(kps, shape=current_image.shape)
# Apply the augmentation pipeline.
(new_image, new_kps_obj) = self.aug(image=current_image, keypoints=kps_obj)
batch_images[i,] = new_image
# Parse the coordinates from the new keypoint object.
kp_temp = []
for keypoint in new_kps_obj:
kp_temp.append(np.nan_to_num(keypoint.x))
kp_temp.append(np.nan_to_num(keypoint.y))
# More on why this reshaping later.
batch_keypoints[i,] = np.array(kp_temp).reshape(1, 1, 24 * 2)
# Scale the coordinates to [0, 1] range.
batch_keypoints = batch_keypoints / IMG_SIZE
return (batch_images, batch_keypoints)<jupyter_output><empty_output><jupyter_text>To know more about how to operate with keypoints in `imgaug` check out[this document](https://imgaug.readthedocs.io/en/latest/source/examples_keypoints.html). Define augmentation transforms<jupyter_code>train_aug = iaa.Sequential(
[
iaa.Resize(IMG_SIZE, interpolation="linear"),
iaa.Fliplr(0.3),
# `Sometimes()` applies a function randomly to the inputs with
# a given probability (0.3, in this case).
iaa.Sometimes(0.3, iaa.Affine(rotate=10, scale=(0.5, 0.7))),
]
)
test_aug = iaa.Sequential([iaa.Resize(IMG_SIZE, interpolation="linear")])<jupyter_output><empty_output><jupyter_text>Create training and validation splits<jupyter_code>np.random.shuffle(samples)
train_keys, validation_keys = (
samples[int(len(samples) * 0.15) :],
samples[: int(len(samples) * 0.15)],
)<jupyter_output><empty_output><jupyter_text>Data generator investigation<jupyter_code>train_dataset = KeyPointsDataset(
train_keys, train_aug, workers=2, use_multiprocessing=True
)
validation_dataset = KeyPointsDataset(
validation_keys, test_aug, train=False, workers=2, use_multiprocessing=True
)
print(f"Total batches in training set: {len(train_dataset)}")
print(f"Total batches in validation set: {len(validation_dataset)}")
sample_images, sample_keypoints = next(iter(train_dataset))
assert sample_keypoints.max() == 1.0
assert sample_keypoints.min() == 0.0
sample_keypoints = sample_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE
visualize_keypoints(sample_images[:4], sample_keypoints)<jupyter_output><empty_output><jupyter_text>Model buildingThe [Stanford dogs dataset](http://vision.stanford.edu/aditya86/ImageNetDogs/) (on whichthe StanfordExtra dataset is based) was built using the [ImageNet-1k dataset](http://image-net.org/).So, it is likely that the models pretrained on the ImageNet-1k dataset would be usefulfor this task. We will use a MobileNetV2 pre-trained on this dataset as a backbone toextract meaningful features from the images and then pass those to a custom regressionhead for predicting coordinates.<jupyter_code>def get_model():
# Load the pre-trained weights of MobileNetV2 and freeze the weights
backbone = keras.applications.MobileNetV2(
weights="imagenet",
include_top=False,
input_shape=(IMG_SIZE, IMG_SIZE, 3),
)
backbone.trainable = False
inputs = layers.Input((IMG_SIZE, IMG_SIZE, 3))
x = keras.applications.mobilenet_v2.preprocess_input(inputs)
x = backbone(x)
x = layers.Dropout(0.3)(x)
x = layers.SeparableConv2D(
NUM_KEYPOINTS, kernel_size=5, strides=1, activation="relu"
)(x)
outputs = layers.SeparableConv2D(
NUM_KEYPOINTS, kernel_size=3, strides=1, activation="sigmoid"
)(x)
return keras.Model(inputs, outputs, name="keypoint_detector")<jupyter_output><empty_output><jupyter_text>Our custom network is fully-convolutional which makes it more parameter-friendly than thesame version of the network having fully-connected dense layers.<jupyter_code>get_model().summary()<jupyter_output><empty_output><jupyter_text>Notice the output shape of the network: `(None, 1, 1, 48)`. This is why we have reshapedthe coordinates as: `batch_keypoints[i, :] = np.array(kp_temp).reshape(1, 1, 24 * 2)`. Model compilation and trainingFor this example, we will train the network only for five epochs.<jupyter_code>model = get_model()
model.compile(loss="mse", optimizer=keras.optimizers.Adam(1e-4))
model.fit(train_dataset, validation_data=validation_dataset, epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>Make predictions and visualize them<jupyter_code>sample_val_images, sample_val_keypoints = next(iter(validation_dataset))
sample_val_images = sample_val_images[:4]
sample_val_keypoints = sample_val_keypoints[:4].reshape(-1, 24, 2) * IMG_SIZE
predictions = model.predict(sample_val_images).reshape(-1, 24, 2) * IMG_SIZE
# Ground-truth
visualize_keypoints(sample_val_images, sample_val_keypoints)
# Predictions
visualize_keypoints(sample_val_images, predictions)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/keypoint_detection.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/keypoint_detection.ipynb",
"repo_id": "keras-io",
"token_count": 5686
} | 110 |
<jupyter_start><jupyter_text>Self-supervised contrastive learning with SimSiam**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/03/19**Last modified:** 2023/12/29**Description:** Implementation of a self-supervised learning method for computer vision. Self-supervised learning (SSL) is an interesting branch of study in the field of representation learning. SSL systems try to formulate a supervised signal from a corpus of unlabeled data points. An example is training a deep neural network to predict the next word from a given set of words. In the literature, these tasks are known as *pretext tasks* or *auxiliary tasks*. If we [train such a network](https://arxiv.org/abs/1801.06146) on a huge dataset (such as the [Wikipedia text corpus](https://www.corpusdata.org/wikipedia.asp)) it learns very effective representations that transfer well to downstream tasks. Language models like [BERT](https://arxiv.org/abs/1810.04805), [GPT-3](https://arxiv.org/abs/2005.14165), [ELMo](https://allennlp.org/elmo) all benefit from this. Much like the language models, we can train computer vision models using similar approaches. To make things work in computer vision, we need to formulate the learning tasks such that the underlying model (a deep neural network) is able to make sense of the semantic information present in vision data. One such task is for a model to _contrast_ between two different versions of the same image. The hope is that in this way the model will learn representations where similar images are grouped as close together as possible while dissimilar images are pushed further away. In this example, we will be implementing one such system called **SimSiam**, proposed in [Exploring Simple Siamese Representation Learning](https://arxiv.org/abs/2011.10566). It is implemented as follows:1. We create two different versions of the same dataset with a stochastic data augmentation pipeline. Note that the random initialization seed needs to be the same while creating these versions.2. We take a ResNet without any classification head (**backbone**) and we add a shallow fully-connected network (**projection head**) on top of it. Collectively, this is known as the **encoder**.3. We pass the output of the encoder through a **predictor** which is again a shallow fully-connected network having an [AutoEncoder](https://en.wikipedia.org/wiki/Autoencoder)-like structure.4. We then train our encoder to maximize the cosine similarity between the two different versions of our dataset. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers
from keras import regularizers
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 128
EPOCHS = 5
CROP_TO = 32
SEED = 26
PROJECT_DIM = 2048
LATENT_DIM = 512
WEIGHT_DECAY = 0.0005<jupyter_output><empty_output><jupyter_text>Load the CIFAR-10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(f"Total training examples: {len(x_train)}")
print(f"Total test examples: {len(x_test)}")<jupyter_output><empty_output><jupyter_text>Defining our data augmentation pipelineAs studied in [SimCLR](https://arxiv.org/abs/2002.05709) having the right dataaugmentation pipeline is critical for SSL systems to work effectively in computer vision.Two particular augmentation transforms that seem to matter the most are: 1.) Randomresized crops and 2.) Color distortions. Most of the other SSL systems for computervision (such as [BYOL](https://arxiv.org/abs/2006.07733),[MoCoV2](https://arxiv.org/abs/2003.04297), [SwAV](https://arxiv.org/abs/2006.09882),etc.) include these in their training pipelines.<jupyter_code>strength = [0.4, 0.4, 0.3, 0.1]
random_flip = layers.RandomFlip(mode="horizontal_and_vertical")
random_crop = layers.RandomCrop(CROP_TO, CROP_TO)
random_brightness = layers.RandomBrightness(0.8 * strength[0])
random_contrast = layers.RandomContrast((1 - 0.8 * strength[1], 1 + 0.8 * strength[1]))
random_saturation = keras_cv.layers.RandomSaturation(
(0.5 - 0.8 * strength[2], 0.5 + 0.8 * strength[2])
)
random_hue = keras_cv.layers.RandomHue(0.2 * strength[3], [0, 255])
grayscale = keras_cv.layers.Grayscale()
def flip_random_crop(image):
# With random crops we also apply horizontal flipping.
image = random_flip(image)
image = random_crop(image)
return image
def color_jitter(x):
x = random_brightness(x)
x = random_contrast(x)
x = random_saturation(x)
x = random_hue(x)
# Affine transformations can disturb the natural range of
# RGB images, hence this is needed.
x = ops.clip(x, 0, 255)
return x
def color_drop(x):
x = grayscale(x)
x = ops.tile(x, [1, 1, 3])
return x
def random_apply(func, x, p):
if keras.random.uniform([], minval=0, maxval=1) < p:
return func(x)
else:
return x
def custom_augment(image):
# As discussed in the SimCLR paper, the series of augmentation
# transformations (except for random crops) need to be applied
# randomly to impose translational invariance.
image = flip_random_crop(image)
image = random_apply(color_jitter, image, p=0.8)
image = random_apply(color_drop, image, p=0.2)
return image<jupyter_output><empty_output><jupyter_text>It should be noted that an augmentation pipeline is generally dependent on variousproperties of the dataset we are dealing with. For example, if images in the dataset areheavily object-centric then taking random crops with a very high probability may hurt thetraining performance.Let's now apply our augmentation pipeline to our dataset and visualize a few outputs. Convert the data into TensorFlow `Dataset` objectsHere we create two different versions of our dataset *without* any ground-truth labels.<jupyter_code>ssl_ds_one = tf.data.Dataset.from_tensor_slices(x_train)
ssl_ds_one = (
ssl_ds_one.shuffle(1024, seed=SEED)
.map(custom_augment, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
ssl_ds_two = tf.data.Dataset.from_tensor_slices(x_train)
ssl_ds_two = (
ssl_ds_two.shuffle(1024, seed=SEED)
.map(custom_augment, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# We then zip both of these datasets.
ssl_ds = tf.data.Dataset.zip((ssl_ds_one, ssl_ds_two))
# Visualize a few augmented images.
sample_images_one = next(iter(ssl_ds_one))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(sample_images_one[n].numpy().astype("int"))
plt.axis("off")
plt.show()
# Ensure that the different versions of the dataset actually contain
# identical images.
sample_images_two = next(iter(ssl_ds_two))
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(sample_images_two[n].numpy().astype("int"))
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Notice that the images in `samples_images_one` and `sample_images_two` are essentiallythe same but are augmented differently. Defining the encoder and the predictorWe use an implementation of ResNet20 that is specifically configured for the CIFAR10dataset. The code is taken from the[keras-idiomatic-programmer](https://github.com/GoogleCloudPlatform/keras-idiomatic-programmer/blob/master/zoo/resnet/resnet_cifar10_v2.py) repository. The hyperparameters ofthese architectures have been referred from Section 3 and Appendix A of [the originalpaper](https://arxiv.org/abs/2011.10566).<jupyter_code>!wget -q https://git.io/JYx2x -O resnet_cifar10_v2.py
import resnet_cifar10_v2
N = 2
DEPTH = N * 9 + 2
NUM_BLOCKS = ((DEPTH - 2) // 9) - 1
def get_encoder():
# Input and backbone.
inputs = layers.Input((CROP_TO, CROP_TO, 3))
x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(
inputs
)
x = resnet_cifar10_v2.stem(x)
x = resnet_cifar10_v2.learner(x, NUM_BLOCKS)
x = layers.GlobalAveragePooling2D(name="backbone_pool")(x)
# Projection head.
x = layers.Dense(
PROJECT_DIM, use_bias=False, kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
)(x)
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
x = layers.Dense(
PROJECT_DIM, use_bias=False, kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
)(x)
outputs = layers.BatchNormalization()(x)
return keras.Model(inputs, outputs, name="encoder")
def get_predictor():
model = keras.Sequential(
[
# Note the AutoEncoder-like structure.
layers.Input((PROJECT_DIM,)),
layers.Dense(
LATENT_DIM,
use_bias=False,
kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
),
layers.ReLU(),
layers.BatchNormalization(),
layers.Dense(PROJECT_DIM),
],
name="predictor",
)
return model<jupyter_output><empty_output><jupyter_text>Defining the (pre-)training loopOne of the main reasons behind training networks with these kinds of approaches is toutilize the learned representations for downstream tasks like classification. This is whythis particular training phase is also referred to as _pre-training_.We start by defining the loss function.<jupyter_code>def compute_loss(p, z):
# The authors of SimSiam emphasize the impact of
# the `stop_gradient` operator in the paper as it
# has an important role in the overall optimization.
z = ops.stop_gradient(z)
p = keras.utils.normalize(p, axis=1, order=2)
z = keras.utils.normalize(z, axis=1, order=2)
# Negative cosine similarity (minimizing this is
# equivalent to maximizing the similarity).
return -ops.mean(ops.sum((p * z), axis=1))<jupyter_output><empty_output><jupyter_text>We then define our training loop by overriding the `train_step()` function of the`keras.Model` class.<jupyter_code>class SimSiam(keras.Model):
def __init__(self, encoder, predictor):
super().__init__()
self.encoder = encoder
self.predictor = predictor
self.loss_tracker = keras.metrics.Mean(name="loss")
@property
def metrics(self):
return [self.loss_tracker]
def train_step(self, data):
# Unpack the data.
ds_one, ds_two = data
# Forward pass through the encoder and predictor.
with tf.GradientTape() as tape:
z1, z2 = self.encoder(ds_one), self.encoder(ds_two)
p1, p2 = self.predictor(z1), self.predictor(z2)
# Note that here we are enforcing the network to match
# the representations of two differently augmented batches
# of data.
loss = compute_loss(p1, z2) / 2 + compute_loss(p2, z1) / 2
# Compute gradients and update the parameters.
learnable_params = (
self.encoder.trainable_variables + self.predictor.trainable_variables
)
gradients = tape.gradient(loss, learnable_params)
self.optimizer.apply_gradients(zip(gradients, learnable_params))
# Monitor loss.
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}<jupyter_output><empty_output><jupyter_text>Pre-training our networksIn the interest of this example, we will train the model for only 5 epochs. In reality,this should at least be 100 epochs.<jupyter_code># Create a cosine decay learning scheduler.
num_training_samples = len(x_train)
steps = EPOCHS * (num_training_samples // BATCH_SIZE)
lr_decayed_fn = keras.optimizers.schedules.CosineDecay(
initial_learning_rate=0.03, decay_steps=steps
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="loss", patience=5, restore_best_weights=True
)
# Compile model and start training.
simsiam = SimSiam(get_encoder(), get_predictor())
simsiam.compile(optimizer=keras.optimizers.SGD(lr_decayed_fn, momentum=0.6))
history = simsiam.fit(ssl_ds, epochs=EPOCHS, callbacks=[early_stopping])
# Visualize the training progress of the model.
plt.plot(history.history["loss"])
plt.grid()
plt.title("Negative Cosine Similairty")
plt.show()<jupyter_output><empty_output><jupyter_text>If your solution gets very close to -1 (minimum value of our loss) very quickly with adifferent dataset and a different backbone architecture that is likely because of*representation collapse*. It is a phenomenon where the encoder yields similar output forall the images. In that case additional hyperparameter tuning is required especially inthe following areas:* Strength of the color distortions and their probabilities.* Learning rate and its schedule.* Architecture of both the backbone and their projection head. Evaluating our SSL methodThe most popularly used method to evaluate a SSL method in computer vision (or any otherpre-training method as such) is to learn a linear classifier on the frozen features ofthe trained backbone model (in this case it is ResNet20) and evaluate the classifier onunseen images. Other methods include[fine-tuning](https://keras.io/guides/transfer_learning/) on the source dataset or even atarget dataset with 5% or 10% labels present. Practically, we can use the backbone modelfor any downstream task such as semantic segmentation, object detection, and so on wherethe backbone models are usually pre-trained with *pure supervised learning*.<jupyter_code># We first create labeled `Dataset` objects.
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# Then we shuffle, batch, and prefetch this dataset for performance. We
# also apply random resized crops as an augmentation but only to the
# training set.
train_ds = (
train_ds.shuffle(1024)
.map(lambda x, y: (flip_random_crop(x), y), num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
# Extract the backbone ResNet20.
backbone = keras.Model(
simsiam.encoder.input, simsiam.encoder.get_layer("backbone_pool").output
)
# We then create our linear classifier and train it.
backbone.trainable = False
inputs = layers.Input((CROP_TO, CROP_TO, 3))
x = backbone(inputs, training=False)
outputs = layers.Dense(10, activation="softmax")(x)
linear_model = keras.Model(inputs, outputs, name="linear_model")
# Compile model and start training.
linear_model.compile(
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
optimizer=keras.optimizers.SGD(lr_decayed_fn, momentum=0.9),
)
history = linear_model.fit(
train_ds, validation_data=test_ds, epochs=EPOCHS, callbacks=[early_stopping]
)
_, test_acc = linear_model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_acc * 100))<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/simsiam.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/simsiam.ipynb",
"repo_id": "keras-io",
"token_count": 5196
} | 111 |
# A Vision Transformer without Attention
**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ritwik Raha](https://twitter.com/ritwik_raha), [Shivalika Singh](https://www.linkedin.com/in/shivalika-singh/)<br>
**Date created:** 2022/02/24<br>
**Last modified:** 2022/10/15<br>
**Description:** A minimal implementation of ShiftViT.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/shiftvit.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/shiftvit.py)
---
## Introduction
[Vision Transformers](https://arxiv.org/abs/2010.11929) (ViTs) have sparked a wave of
research at the intersection of Transformers and Computer Vision (CV).
ViTs can simultaneously model long- and short-range dependencies, thanks to
the Multi-Head Self-Attention mechanism in the Transformer block. Many researchers believe
that the success of ViTs is purely due to the attention layer, and they seldom
think about other parts of the ViT model.
In the academic paper
[When Shift Operation Meets Vision Transformer: An Extremely Simple Alternative to Attention Mechanism](https://arxiv.org/abs/2201.10801)
the authors propose to demystify the success of ViTs with the introduction of a **NO
PARAMETER** operation in place of the attention operation. They swap the attention
operation with a shifting operation.
In this example, we minimally implement the paper with close alignment to the authors'
[official implementation](https://github.com/microsoft/SPACH/blob/main/models/shiftvit.py).
This example requires TensorFlow 2.9 or higher, as well as TensorFlow Addons, which can
be installed using the following command:
```python
!pip install -qq -U tensorflow-addons
```
---
## Setup and imports
```python
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_addons as tfa
import pathlib
import glob
# Setting seed for reproducibility
SEED = 42
keras.utils.set_random_seed(SEED)
```
---
## Hyperparameters
These are the hyperparameters that we have chosen for the experiment.
Please feel free to tune them.
```python
class Config(object):
# DATA
batch_size = 256
buffer_size = batch_size * 2
input_shape = (32, 32, 3)
num_classes = 10
# AUGMENTATION
image_size = 48
# ARCHITECTURE
patch_size = 4
projected_dim = 96
num_shift_blocks_per_stages = [2, 4, 8, 2]
epsilon = 1e-5
stochastic_depth_rate = 0.2
mlp_dropout_rate = 0.2
num_div = 12
shift_pixel = 1
mlp_expand_ratio = 2
# OPTIMIZER
lr_start = 1e-5
lr_max = 1e-3
weight_decay = 1e-4
# TRAINING
epochs = 100
# INFERENCE
label_map = {
0: "airplane",
1: "automobile",
2: "bird",
3: "cat",
4: "deer",
5: "dog",
6: "frog",
7: "horse",
8: "ship",
9: "truck",
}
tf_ds_batch_size = 20
config = Config()
```
---
## Load the CIFAR-10 dataset
We use the CIFAR-10 dataset for our experiments.
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
(x_train, y_train), (x_val, y_val) = (
(x_train[:40000], y_train[:40000]),
(x_train[40000:], y_train[40000:]),
)
print(f"Training samples: {len(x_train)}")
print(f"Validation samples: {len(x_val)}")
print(f"Testing samples: {len(x_test)}")
AUTO = tf.data.AUTOTUNE
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_ds = train_ds.shuffle(config.buffer_size).batch(config.batch_size).prefetch(AUTO)
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_ds = val_ds.batch(config.batch_size).prefetch(AUTO)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(config.batch_size).prefetch(AUTO)
```
<div class="k-default-codeblock">
```
Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170498071/170498071 [==============================] - 3s 0us/step
Training samples: 40000
Validation samples: 10000
Testing samples: 10000
```
</div>
---
## Data Augmentation
The augmentation pipeline consists of:
- Rescaling
- Resizing
- Random cropping
- Random horizontal flipping
_Note_: The image data augmentation layers do not apply
data transformations at inference time. This means that
when these layers are called with `training=False` they
behave differently. Refer to the
[documentation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/)
for more details.
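To make this concrete, here is a tiny standalone sketch (not part of the pipeline below) showing that a random augmentation layer acts as the identity when called with `training=False`:
```python
import tensorflow as tf
from tensorflow.keras import layers
flip = layers.RandomFlip("horizontal")
images = tf.random.uniform((4, 8, 8, 3))
# At inference time (`training=False`) the layer passes its input through unchanged.
passthrough = flip(images, training=False)
print(bool(tf.reduce_all(passthrough == images)))  # True
```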
```python
def get_augmentation_model():
"""Build the data augmentation model."""
data_augmentation = keras.Sequential(
[
layers.Resizing(config.input_shape[0] + 20, config.input_shape[0] + 20),
layers.RandomCrop(config.image_size, config.image_size),
layers.RandomFlip("horizontal"),
layers.Rescaling(1 / 255.0),
]
)
return data_augmentation
```
---
## The ShiftViT architecture
In this section, we build the architecture proposed in
[the ShiftViT paper](https://arxiv.org/abs/2201.10801).
|  |
| :--: |
| Figure 1: The entire architecture of ShiftViT.
[Source](https://arxiv.org/abs/2201.10801) |
The architecture as shown in Fig. 1, is inspired by
[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030).
Here the authors propose a modular architecture with 4 stages. Each stage works on its
own spatial size, creating a hierarchical architecture.
An input image of size `HxWx3` is split into non-overlapping patches of size `4x4`.
This is done via the patchify layer which results in individual tokens of feature size `48`
(`4x4x3`). Each stage comprises two parts:
1. Embedding Generation
2. Stacked Shift Blocks
We discuss the stages and the modules in detail in what follows.
_Note_: Compared to the [official implementation](https://github.com/microsoft/SPACH/blob/main/models/shiftvit.py)
we restructure some key components to better fit the Keras API.
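Before moving on to the blocks, here is a small standalone sketch of the patchify projection described above (for illustration only; the model later builds this projection internally with `config.patch_size` and `config.projected_dim`): a strided `Conv2D` whose kernel size and stride both equal the patch size maps every non-overlapping `4x4` patch (i.e. `4x4x3 = 48` values) to a single projected token.
```python
import tensorflow as tf
from tensorflow.keras import layers
images = tf.random.uniform((1, 48, 48, 3))  # one augmented image of size 48 x 48 x 3
# kernel_size == strides == patch_size yields one token per non-overlapping patch.
patchify = layers.Conv2D(filters=96, kernel_size=4, strides=4, padding="same")
tokens = patchify(images)
print(tokens.shape)  # (1, 12, 12, 96): a 12 x 12 grid of tokens, each projected to 96 dims
```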
### The ShiftViT Block
|  |
| :--: |
| Figure 2: From the Model to a Shift Block. |
Each stage in the ShiftViT architecture comprises a Shift Block, as shown in Fig. 2.
|  |
| :--: |
| Figure 3: The Shift ViT Block. [Source](https://arxiv.org/abs/2201.10801) |
The Shift Block, as shown in Fig. 3, comprises the following:
1. Shift Operation
2. Linear Normalization
3. MLP Layer
#### The MLP block
The MLP block is intended to be a stack of densely-connected layers.
```python
class MLP(layers.Layer):
"""Get the MLP layer for each shift block.
Args:
mlp_expand_ratio (int): The ratio with which the first feature map is expanded.
mlp_dropout_rate (float): The rate for dropout.
"""
def __init__(self, mlp_expand_ratio, mlp_dropout_rate, **kwargs):
super().__init__(**kwargs)
self.mlp_expand_ratio = mlp_expand_ratio
self.mlp_dropout_rate = mlp_dropout_rate
def build(self, input_shape):
input_channels = input_shape[-1]
initial_filters = int(self.mlp_expand_ratio * input_channels)
self.mlp = keras.Sequential(
[
layers.Dense(
units=initial_filters,
activation=tf.nn.gelu,
),
layers.Dropout(rate=self.mlp_dropout_rate),
layers.Dense(units=input_channels),
layers.Dropout(rate=self.mlp_dropout_rate),
]
)
def call(self, x):
x = self.mlp(x)
return x
```
#### The DropPath layer
Stochastic depth is a regularization technique that randomly drops a set of
layers. During inference, the layers are kept as they are. It is very
similar to Dropout, but it operates on a block of layers rather
than on individual nodes present inside a layer.
```python
class DropPath(layers.Layer):
"""Drop Path also known as the Stochastic Depth layer.
    Reference:
- https://keras.io/examples/vision/cct/#stochastic-depth-for-regularization
- github.com:rwightman/pytorch-image-models
"""
def __init__(self, drop_path_prob, **kwargs):
super().__init__(**kwargs)
self.drop_path_prob = drop_path_prob
def call(self, x, training=False):
if training:
keep_prob = 1 - self.drop_path_prob
shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
random_tensor = tf.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
```
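A quick way to see the layer in action (illustration only; the probability and tensor below are arbitrary demo values):
```python
drop_path_demo = DropPath(drop_path_prob=0.5)
x_demo = tf.ones((4, 2, 2, 3))
# At inference the layer is the identity, so the mean stays 1.0.
print(float(tf.reduce_mean(drop_path_demo(x_demo, training=False))))
# During training each sample is either zeroed out entirely or rescaled by 1 / keep_prob,
# so the per-sample means come out as 0.0 or 2.0.
print(drop_path_demo(x_demo, training=True).numpy().mean(axis=(1, 2, 3)))
```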
#### Block
The most important operation in this paper is the **shift operation**. In this section,
we describe the shift operation and compare it with its original implementation provided
by the authors.
A generic feature map is assumed to have the shape `[N, H, W, C]`. Here we choose a
`num_div` parameter that decides the division size of the channels. The first 4 divisions
are shifted (1 pixel) in the left, right, up, and down direction. The remaining splits
are kept as is. After partial shifting, the shifted channels are padded and the overflowing
pixels are chopped off. This completes the partial shifting operation.
In the original implementation, the code is approximately:
```python
out[:, g * 0:g * 1, :, :-1] = x[:, g * 0:g * 1, :, 1:] # shift left
out[:, g * 1:g * 2, :, 1:] = x[:, g * 1:g * 2, :, :-1] # shift right
out[:, g * 2:g * 3, :-1, :] = x[:, g * 2:g * 3, 1:, :] # shift up
out[:, g * 3:g * 4, 1:, :] = x[:, g * 3:g * 4, :-1, :] # shift down
out[:, g * 4:, :, :] = x[:, g * 4:, :, :] # no shift
```
In TensorFlow it would be infeasible for us to assign shifted channels to a tensor in the
middle of the training process. This is why we have resorted to the following procedure:
1. Split the channels with the `num_div` parameter.
2. Select each of the first four splits and shift and pad them in the respective
directions.
3. After shifting and padding, we concatenate the channels back.
|  |
| :--: |
| Figure 4: The TensorFlow style shifting |
The entire procedure is explained in Fig. 4.
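As a small standalone illustration of the crop-then-pad trick (the `get_shift_pad` utility in the block below wraps the same idea with its own offset bookkeeping), here is the "shift left" line of the pseudocode above expressed with TensorFlow image utilities:
```python
import tensorflow as tf
x = tf.reshape(tf.range(1.0, 17.0), (1, 4, 4, 1))  # a 4 x 4 single-channel feature map
# Drop the first column (crop), then pad a zero column back on the right.
crop = tf.image.crop_to_bounding_box(
    x, offset_height=0, offset_width=1, target_height=4, target_width=3
)
shifted_left = tf.image.pad_to_bounding_box(
    crop, offset_height=0, offset_width=0, target_height=4, target_width=4
)
print(tf.squeeze(shifted_left).numpy())
# [[ 2.  3.  4.  0.]
#  [ 6.  7.  8.  0.]
#  [10. 11. 12.  0.]
#  [14. 15. 16.  0.]]
```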
```python
class ShiftViTBlock(layers.Layer):
"""A unit ShiftViT Block
Args:
        shift_pixel (int): The number of pixels to shift. Defaults to 1.
        mlp_expand_ratio (int): The ratio with which MLP features are
            expanded. Defaults to 2.
        mlp_dropout_rate (float): The dropout rate used in MLP.
        num_div (int): The number of divisions of the feature map's channels.
            In total, 4/num_div of the channels will be shifted. Defaults to 12.
epsilon (float): Epsilon constant.
drop_path_prob (float): The drop probability for drop path.
"""
def __init__(
self,
epsilon,
drop_path_prob,
mlp_dropout_rate,
num_div=12,
shift_pixel=1,
mlp_expand_ratio=2,
**kwargs,
):
super().__init__(**kwargs)
self.shift_pixel = shift_pixel
self.mlp_expand_ratio = mlp_expand_ratio
self.mlp_dropout_rate = mlp_dropout_rate
self.num_div = num_div
self.epsilon = epsilon
self.drop_path_prob = drop_path_prob
def build(self, input_shape):
self.H = input_shape[1]
self.W = input_shape[2]
self.C = input_shape[3]
self.layer_norm = layers.LayerNormalization(epsilon=self.epsilon)
self.drop_path = (
DropPath(drop_path_prob=self.drop_path_prob)
if self.drop_path_prob > 0.0
else layers.Activation("linear")
)
self.mlp = MLP(
mlp_expand_ratio=self.mlp_expand_ratio,
mlp_dropout_rate=self.mlp_dropout_rate,
)
def get_shift_pad(self, x, mode):
"""Shifts the channels according to the mode chosen."""
if mode == "left":
offset_height = 0
offset_width = 0
target_height = 0
target_width = self.shift_pixel
elif mode == "right":
offset_height = 0
offset_width = self.shift_pixel
target_height = 0
target_width = self.shift_pixel
elif mode == "up":
offset_height = 0
offset_width = 0
target_height = self.shift_pixel
target_width = 0
else:
offset_height = self.shift_pixel
offset_width = 0
target_height = self.shift_pixel
target_width = 0
crop = tf.image.crop_to_bounding_box(
x,
offset_height=offset_height,
offset_width=offset_width,
target_height=self.H - target_height,
target_width=self.W - target_width,
)
shift_pad = tf.image.pad_to_bounding_box(
crop,
offset_height=offset_height,
offset_width=offset_width,
target_height=self.H,
target_width=self.W,
)
return shift_pad
def call(self, x, training=False):
# Split the feature maps
x_splits = tf.split(x, num_or_size_splits=self.C // self.num_div, axis=-1)
# Shift the feature maps
x_splits[0] = self.get_shift_pad(x_splits[0], mode="left")
x_splits[1] = self.get_shift_pad(x_splits[1], mode="right")
x_splits[2] = self.get_shift_pad(x_splits[2], mode="up")
x_splits[3] = self.get_shift_pad(x_splits[3], mode="down")
# Concatenate the shifted and unshifted feature maps
x = tf.concat(x_splits, axis=-1)
# Add the residual connection
shortcut = x
x = shortcut + self.drop_path(self.mlp(self.layer_norm(x)), training=training)
return x
```
### The ShiftViT blocks
|  |
| :--: |
| Figure 5: Shift Blocks in the architecture. [Source](https://arxiv.org/abs/2201.10801) |
Each stage of the architecture has shift blocks as shown in Fig. 5. Each of these blocks
contains a variable number of stacked ShiftViT blocks (as built in the earlier section).
Shift blocks are followed by a PatchMerging layer that scales down feature inputs. The
PatchMerging layer helps in the pyramidal structure of the model.
#### The PatchMerging layer
This layer merges adjacent tokens. It helps scale the features down
spatially while increasing them channel-wise. We use a `Conv2D` layer to merge the
patches.
```python
class PatchMerging(layers.Layer):
"""The Patch Merging layer.
Args:
epsilon (float): The epsilon constant.
"""
def __init__(self, epsilon, **kwargs):
super().__init__(**kwargs)
self.epsilon = epsilon
def build(self, input_shape):
filters = 2 * input_shape[-1]
self.reduction = layers.Conv2D(
filters=filters, kernel_size=2, strides=2, padding="same", use_bias=False
)
self.layer_norm = layers.LayerNormalization(epsilon=self.epsilon)
def call(self, x):
# Apply the patch merging algorithm on the feature maps
x = self.layer_norm(x)
x = self.reduction(x)
return x
```
#### Stacked Shift Blocks
Each stage will have a variable number of stacked ShiftViT Blocks, as suggested in
the paper. This is a generic layer that contains the stacked ShiftViT blocks
along with the patch merging layer. Combining the two operations (ShiftViT
block and patch merging) is a design choice we made for better code reusability.
```python
# Note: This layer will have a different depth of stacking
# for different stages of the model.
class StackedShiftBlocks(layers.Layer):
"""The layer containing stacked ShiftViTBlocks.
Args:
epsilon (float): The epsilon constant.
mlp_dropout_rate (float): The dropout rate used in the MLP block.
num_shift_blocks (int): The number of shift vit blocks for this stage.
stochastic_depth_rate (float): The maximum drop path rate chosen.
is_merge (boolean): A flag that determines the use of the Patch Merge
layer after the shift vit blocks.
num_div (int): The division of channels of the feature map. Defaults to 12.
shift_pixel (int): The number of pixels to shift. Defaults to 1.
mlp_expand_ratio (int): The ratio with which the initial dense layer of
            the MLP is expanded. Defaults to 2.
"""
def __init__(
self,
epsilon,
mlp_dropout_rate,
num_shift_blocks,
stochastic_depth_rate,
is_merge,
num_div=12,
shift_pixel=1,
mlp_expand_ratio=2,
**kwargs,
):
super().__init__(**kwargs)
self.epsilon = epsilon
self.mlp_dropout_rate = mlp_dropout_rate
self.num_shift_blocks = num_shift_blocks
self.stochastic_depth_rate = stochastic_depth_rate
self.is_merge = is_merge
self.num_div = num_div
self.shift_pixel = shift_pixel
self.mlp_expand_ratio = mlp_expand_ratio
def build(self, input_shapes):
# Calculate stochastic depth probabilities.
# Reference: https://keras.io/examples/vision/cct/#the-final-cct-model
dpr = [
x
for x in np.linspace(
start=0, stop=self.stochastic_depth_rate, num=self.num_shift_blocks
)
]
# Build the shift blocks as a list of ShiftViT Blocks
self.shift_blocks = list()
for num in range(self.num_shift_blocks):
self.shift_blocks.append(
ShiftViTBlock(
num_div=self.num_div,
epsilon=self.epsilon,
drop_path_prob=dpr[num],
mlp_dropout_rate=self.mlp_dropout_rate,
shift_pixel=self.shift_pixel,
mlp_expand_ratio=self.mlp_expand_ratio,
)
)
if self.is_merge:
self.patch_merge = PatchMerging(epsilon=self.epsilon)
def call(self, x, training=False):
for shift_block in self.shift_blocks:
x = shift_block(x, training=training)
if self.is_merge:
x = self.patch_merge(x)
return x
# Since this is a custom layer, we need to overwrite get_config()
# so that model can be easily saved & loaded after training
def get_config(self):
config = super().get_config()
config.update(
{
"epsilon": self.epsilon,
"mlp_dropout_rate": self.mlp_dropout_rate,
"num_shift_blocks": self.num_shift_blocks,
"stochastic_depth_rate": self.stochastic_depth_rate,
"is_merge": self.is_merge,
"num_div": self.num_div,
"shift_pixel": self.shift_pixel,
"mlp_expand_ratio": self.mlp_expand_ratio,
}
)
return config
```
---
## The ShiftViT model
Build the ShiftViT custom model.
```python
class ShiftViTModel(keras.Model):
"""The ShiftViT Model.
Args:
data_augmentation (keras.Model): A data augmentation model.
projected_dim (int): The dimension to which the patches of the image are
projected.
patch_size (int): The patch size of the images.
        num_shift_blocks_per_stages (list[int]): A list of the number of shift
blocks per stage.
epsilon (float): The epsilon constant.
mlp_dropout_rate (float): The dropout rate used in the MLP block.
stochastic_depth_rate (float): The maximum drop rate probability.
        num_div (int): The number of divisions of the channels of the feature
            map. Defaults to 12.
        shift_pixel (int): The number of pixels to shift. Defaults to 1.
        mlp_expand_ratio (int): The ratio with which the initial mlp dense layer
            is expanded. Defaults to 2.
"""
def __init__(
self,
data_augmentation,
projected_dim,
patch_size,
num_shift_blocks_per_stages,
epsilon,
mlp_dropout_rate,
stochastic_depth_rate,
num_div=12,
shift_pixel=1,
mlp_expand_ratio=2,
**kwargs,
):
super().__init__(**kwargs)
self.data_augmentation = data_augmentation
self.patch_projection = layers.Conv2D(
filters=projected_dim,
kernel_size=patch_size,
strides=patch_size,
padding="same",
)
self.stages = list()
for index, num_shift_blocks in enumerate(num_shift_blocks_per_stages):
if index == len(num_shift_blocks_per_stages) - 1:
# This is the last stage, do not use the patch merge here.
is_merge = False
else:
is_merge = True
# Build the stages.
self.stages.append(
StackedShiftBlocks(
epsilon=epsilon,
mlp_dropout_rate=mlp_dropout_rate,
num_shift_blocks=num_shift_blocks,
stochastic_depth_rate=stochastic_depth_rate,
is_merge=is_merge,
num_div=num_div,
shift_pixel=shift_pixel,
mlp_expand_ratio=mlp_expand_ratio,
)
)
self.global_avg_pool = layers.GlobalAveragePooling2D()
self.classifier = layers.Dense(config.num_classes)
def get_config(self):
config = super().get_config()
config.update(
{
"data_augmentation": self.data_augmentation,
"patch_projection": self.patch_projection,
"stages": self.stages,
"global_avg_pool": self.global_avg_pool,
"classifier": self.classifier,
}
)
return config
def _calculate_loss(self, data, training=False):
(images, labels) = data
# Augment the images
augmented_images = self.data_augmentation(images, training=training)
        # Create patches and project the patches.
projected_patches = self.patch_projection(augmented_images)
# Pass through the stages
x = projected_patches
for stage in self.stages:
x = stage(x, training=training)
# Get the logits.
x = self.global_avg_pool(x)
logits = self.classifier(x)
# Calculate the loss and return it.
total_loss = self.compiled_loss(labels, logits)
return total_loss, labels, logits
def train_step(self, inputs):
with tf.GradientTape() as tape:
total_loss, labels, logits = self._calculate_loss(
data=inputs, training=True
)
# Apply gradients.
train_vars = [
self.data_augmentation.trainable_variables,
self.patch_projection.trainable_variables,
self.global_avg_pool.trainable_variables,
self.classifier.trainable_variables,
]
train_vars = train_vars + [stage.trainable_variables for stage in self.stages]
# Optimize the gradients.
grads = tape.gradient(total_loss, train_vars)
trainable_variable_list = []
for (grad, var) in zip(grads, train_vars):
for g, v in zip(grad, var):
trainable_variable_list.append((g, v))
self.optimizer.apply_gradients(trainable_variable_list)
# Update the metrics
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):
_, labels, logits = self._calculate_loss(data=data, training=False)
# Update the metrics
self.compiled_metrics.update_state(labels, logits)
return {m.name: m.result() for m in self.metrics}
def call(self, images):
augmented_images = self.data_augmentation(images)
x = self.patch_projection(augmented_images)
for stage in self.stages:
x = stage(x, training=False)
x = self.global_avg_pool(x)
logits = self.classifier(x)
return logits
```
---
## Instantiate the model
```python
model = ShiftViTModel(
data_augmentation=get_augmentation_model(),
projected_dim=config.projected_dim,
patch_size=config.patch_size,
num_shift_blocks_per_stages=config.num_shift_blocks_per_stages,
epsilon=config.epsilon,
mlp_dropout_rate=config.mlp_dropout_rate,
stochastic_depth_rate=config.stochastic_depth_rate,
num_div=config.num_div,
shift_pixel=config.shift_pixel,
mlp_expand_ratio=config.mlp_expand_ratio,
)
```
---
## Learning rate schedule
In many experiments, we want to warm up the model with a slowly increasing learning rate
and then cool down the model with a slowly decaying learning rate. In the warmup cosine
decay, the learning rate linearly increases for the warmup steps and then decays with a
cosine decay.
```python
# Some code is taken from:
# https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2.
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule):
"""A LearningRateSchedule that uses a warmup cosine decay schedule."""
def __init__(self, lr_start, lr_max, warmup_steps, total_steps):
"""
Args:
lr_start: The initial learning rate
lr_max: The maximum learning rate to which lr should increase to in
the warmup steps
warmup_steps: The number of steps for which the model warms up
total_steps: The total number of steps for the model training
"""
super().__init__()
self.lr_start = lr_start
self.lr_max = lr_max
self.warmup_steps = warmup_steps
self.total_steps = total_steps
self.pi = tf.constant(np.pi)
def __call__(self, step):
# Check whether the total number of steps is larger than the warmup
# steps. If not, then throw a value error.
if self.total_steps < self.warmup_steps:
raise ValueError(
f"Total number of steps {self.total_steps} must be"
+ f"larger or equal to warmup steps {self.warmup_steps}."
)
# `cos_annealed_lr` is a graph that increases to 1 from the initial
# step to the warmup step. After that this graph decays to -1 at the
# final step mark.
cos_annealed_lr = tf.cos(
self.pi
* (tf.cast(step, tf.float32) - self.warmup_steps)
/ tf.cast(self.total_steps - self.warmup_steps, tf.float32)
)
        # Shift the mean of the `cos_annealed_lr` graph to 1. Now the graph goes
# from 0 to 2. Normalize the graph with 0.5 so that now it goes from 0
# to 1. With the normalized graph we scale it with `lr_max` such that
# it goes from 0 to `lr_max`
learning_rate = 0.5 * self.lr_max * (1 + cos_annealed_lr)
# Check whether warmup_steps is more than 0.
if self.warmup_steps > 0:
            # Check whether lr_max is larger than lr_start. If not, throw a value
# error.
if self.lr_max < self.lr_start:
raise ValueError(
f"lr_start {self.lr_start} must be smaller or"
+ f"equal to lr_max {self.lr_max}."
)
# Calculate the slope with which the learning rate should increase
            # in the warmup schedule. The formula for slope is m = ((b-a)/steps)
slope = (self.lr_max - self.lr_start) / self.warmup_steps
# With the formula for a straight line (y = mx+c) build the warmup
# schedule
warmup_rate = slope * tf.cast(step, tf.float32) + self.lr_start
            # When the current step is less than the warmup steps, get the line
# graph. When the current step is greater than the warmup steps, get
# the scaled cos graph.
learning_rate = tf.where(
step < self.warmup_steps, warmup_rate, learning_rate
)
        # When the current step is more than the total steps, return 0, else return
# the calculated graph.
return tf.where(
step > self.total_steps, 0.0, learning_rate, name="learning_rate"
)
def get_config(self):
config = {
"lr_start": self.lr_start,
"lr_max": self.lr_max,
"total_steps": self.total_steps,
"warmup_steps": self.warmup_steps,
}
return config
```
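Before using the schedule for training, we can optionally plot it as a quick sanity check (the step counts below are illustrative and not the values used for training):
```python
demo_schedule = WarmUpCosine(
    lr_start=1e-5, lr_max=1e-3, warmup_steps=300, total_steps=2000
)
steps = np.arange(0, 2500)
lrs = [demo_schedule(step).numpy() for step in steps]
plt.plot(steps, lrs)
plt.xlabel("Step")
plt.ylabel("Learning rate")
plt.show()
```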
---
## Compile and train the model
```python
# pass sample data to the model so that input shape is available at the time of
# saving the model
sample_ds, _ = next(iter(train_ds))
model(sample_ds, training=False)
# Get the total number of steps for training.
total_steps = int((len(x_train) / config.batch_size) * config.epochs)
# Calculate the number of steps for warmup.
warmup_epoch_percentage = 0.15
warmup_steps = int(total_steps * warmup_epoch_percentage)
# Initialize the warmupcosine schedule.
scheduled_lrs = WarmUpCosine(
lr_start=1e-5,
lr_max=1e-3,
warmup_steps=warmup_steps,
total_steps=total_steps,
)
# Get the optimizer.
optimizer = tfa.optimizers.AdamW(
learning_rate=scheduled_lrs, weight_decay=config.weight_decay
)
# Compile and pretrain the model.
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="accuracy"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
# Train the model
history = model.fit(
train_ds,
epochs=config.epochs,
validation_data=val_ds,
callbacks=[
keras.callbacks.EarlyStopping(
monitor="val_accuracy",
patience=5,
mode="auto",
)
],
)
# Evaluate the model with the test dataset.
print("TESTING")
loss, acc_top1, acc_top5 = model.evaluate(test_ds)
print(f"Loss: {loss:0.2f}")
print(f"Top 1 test accuracy: {acc_top1*100:0.2f}%")
print(f"Top 5 test accuracy: {acc_top5*100:0.2f}%")
```
<div class="k-default-codeblock">
```
Epoch 1/100
157/157 [==============================] - 72s 332ms/step - loss: 2.3844 - accuracy: 0.1444 - top-5-accuracy: 0.6051 - val_loss: 2.0984 - val_accuracy: 0.2610 - val_top-5-accuracy: 0.7638
Epoch 2/100
157/157 [==============================] - 49s 314ms/step - loss: 1.9457 - accuracy: 0.2893 - top-5-accuracy: 0.8103 - val_loss: 1.9459 - val_accuracy: 0.3356 - val_top-5-accuracy: 0.8614
Epoch 3/100
157/157 [==============================] - 50s 316ms/step - loss: 1.7093 - accuracy: 0.3810 - top-5-accuracy: 0.8761 - val_loss: 1.5349 - val_accuracy: 0.4585 - val_top-5-accuracy: 0.9045
Epoch 4/100
157/157 [==============================] - 49s 315ms/step - loss: 1.5473 - accuracy: 0.4374 - top-5-accuracy: 0.9090 - val_loss: 1.4257 - val_accuracy: 0.4862 - val_top-5-accuracy: 0.9298
Epoch 5/100
157/157 [==============================] - 50s 316ms/step - loss: 1.4316 - accuracy: 0.4816 - top-5-accuracy: 0.9243 - val_loss: 1.4032 - val_accuracy: 0.5092 - val_top-5-accuracy: 0.9362
Epoch 6/100
157/157 [==============================] - 50s 316ms/step - loss: 1.3588 - accuracy: 0.5131 - top-5-accuracy: 0.9333 - val_loss: 1.2893 - val_accuracy: 0.5411 - val_top-5-accuracy: 0.9457
Epoch 7/100
157/157 [==============================] - 50s 316ms/step - loss: 1.2894 - accuracy: 0.5385 - top-5-accuracy: 0.9410 - val_loss: 1.2922 - val_accuracy: 0.5416 - val_top-5-accuracy: 0.9432
Epoch 8/100
157/157 [==============================] - 49s 315ms/step - loss: 1.2388 - accuracy: 0.5568 - top-5-accuracy: 0.9468 - val_loss: 1.2100 - val_accuracy: 0.5733 - val_top-5-accuracy: 0.9545
Epoch 9/100
157/157 [==============================] - 49s 315ms/step - loss: 1.2043 - accuracy: 0.5698 - top-5-accuracy: 0.9491 - val_loss: 1.2166 - val_accuracy: 0.5675 - val_top-5-accuracy: 0.9520
Epoch 10/100
157/157 [==============================] - 49s 315ms/step - loss: 1.1694 - accuracy: 0.5861 - top-5-accuracy: 0.9528 - val_loss: 1.1738 - val_accuracy: 0.5883 - val_top-5-accuracy: 0.9541
Epoch 11/100
157/157 [==============================] - 50s 316ms/step - loss: 1.1290 - accuracy: 0.5994 - top-5-accuracy: 0.9575 - val_loss: 1.1161 - val_accuracy: 0.6064 - val_top-5-accuracy: 0.9618
Epoch 12/100
157/157 [==============================] - 50s 316ms/step - loss: 1.0861 - accuracy: 0.6157 - top-5-accuracy: 0.9602 - val_loss: 1.1220 - val_accuracy: 0.6133 - val_top-5-accuracy: 0.9576
Epoch 13/100
157/157 [==============================] - 49s 315ms/step - loss: 1.0766 - accuracy: 0.6178 - top-5-accuracy: 0.9612 - val_loss: 1.0108 - val_accuracy: 0.6402 - val_top-5-accuracy: 0.9681
Epoch 14/100
157/157 [==============================] - 49s 315ms/step - loss: 1.0179 - accuracy: 0.6416 - top-5-accuracy: 0.9658 - val_loss: 1.0196 - val_accuracy: 0.6405 - val_top-5-accuracy: 0.9667
Epoch 15/100
157/157 [==============================] - 50s 316ms/step - loss: 1.0028 - accuracy: 0.6470 - top-5-accuracy: 0.9678 - val_loss: 1.0113 - val_accuracy: 0.6415 - val_top-5-accuracy: 0.9672
Epoch 16/100
157/157 [==============================] - 50s 316ms/step - loss: 0.9613 - accuracy: 0.6611 - top-5-accuracy: 0.9710 - val_loss: 1.0516 - val_accuracy: 0.6406 - val_top-5-accuracy: 0.9596
Epoch 17/100
157/157 [==============================] - 50s 316ms/step - loss: 0.9262 - accuracy: 0.6740 - top-5-accuracy: 0.9729 - val_loss: 0.9010 - val_accuracy: 0.6844 - val_top-5-accuracy: 0.9750
Epoch 18/100
157/157 [==============================] - 50s 316ms/step - loss: 0.8768 - accuracy: 0.6916 - top-5-accuracy: 0.9769 - val_loss: 0.8862 - val_accuracy: 0.6908 - val_top-5-accuracy: 0.9767
Epoch 19/100
157/157 [==============================] - 49s 315ms/step - loss: 0.8595 - accuracy: 0.6984 - top-5-accuracy: 0.9768 - val_loss: 0.8732 - val_accuracy: 0.6982 - val_top-5-accuracy: 0.9738
Epoch 20/100
157/157 [==============================] - 50s 317ms/step - loss: 0.8252 - accuracy: 0.7103 - top-5-accuracy: 0.9793 - val_loss: 0.9330 - val_accuracy: 0.6745 - val_top-5-accuracy: 0.9718
Epoch 21/100
157/157 [==============================] - 51s 322ms/step - loss: 0.8003 - accuracy: 0.7180 - top-5-accuracy: 0.9814 - val_loss: 0.8912 - val_accuracy: 0.6948 - val_top-5-accuracy: 0.9728
Epoch 22/100
157/157 [==============================] - 51s 326ms/step - loss: 0.7651 - accuracy: 0.7317 - top-5-accuracy: 0.9829 - val_loss: 0.7894 - val_accuracy: 0.7277 - val_top-5-accuracy: 0.9791
Epoch 23/100
157/157 [==============================] - 52s 328ms/step - loss: 0.7372 - accuracy: 0.7415 - top-5-accuracy: 0.9843 - val_loss: 0.7752 - val_accuracy: 0.7284 - val_top-5-accuracy: 0.9804
Epoch 24/100
157/157 [==============================] - 51s 327ms/step - loss: 0.7324 - accuracy: 0.7423 - top-5-accuracy: 0.9852 - val_loss: 0.7949 - val_accuracy: 0.7340 - val_top-5-accuracy: 0.9792
Epoch 25/100
157/157 [==============================] - 51s 323ms/step - loss: 0.7051 - accuracy: 0.7512 - top-5-accuracy: 0.9858 - val_loss: 0.7967 - val_accuracy: 0.7280 - val_top-5-accuracy: 0.9787
Epoch 26/100
157/157 [==============================] - 51s 323ms/step - loss: 0.6832 - accuracy: 0.7577 - top-5-accuracy: 0.9870 - val_loss: 0.7840 - val_accuracy: 0.7322 - val_top-5-accuracy: 0.9807
Epoch 27/100
157/157 [==============================] - 51s 322ms/step - loss: 0.6609 - accuracy: 0.7654 - top-5-accuracy: 0.9877 - val_loss: 0.7447 - val_accuracy: 0.7434 - val_top-5-accuracy: 0.9816
Epoch 28/100
157/157 [==============================] - 50s 319ms/step - loss: 0.6495 - accuracy: 0.7724 - top-5-accuracy: 0.9883 - val_loss: 0.7885 - val_accuracy: 0.7280 - val_top-5-accuracy: 0.9817
Epoch 29/100
157/157 [==============================] - 50s 317ms/step - loss: 0.6491 - accuracy: 0.7707 - top-5-accuracy: 0.9885 - val_loss: 0.7539 - val_accuracy: 0.7458 - val_top-5-accuracy: 0.9821
Epoch 30/100
157/157 [==============================] - 50s 317ms/step - loss: 0.6213 - accuracy: 0.7823 - top-5-accuracy: 0.9888 - val_loss: 0.7571 - val_accuracy: 0.7470 - val_top-5-accuracy: 0.9815
Epoch 31/100
157/157 [==============================] - 50s 318ms/step - loss: 0.5976 - accuracy: 0.7902 - top-5-accuracy: 0.9906 - val_loss: 0.7430 - val_accuracy: 0.7508 - val_top-5-accuracy: 0.9817
Epoch 32/100
157/157 [==============================] - 50s 318ms/step - loss: 0.5932 - accuracy: 0.7898 - top-5-accuracy: 0.9910 - val_loss: 0.7545 - val_accuracy: 0.7469 - val_top-5-accuracy: 0.9793
Epoch 33/100
157/157 [==============================] - 50s 318ms/step - loss: 0.5977 - accuracy: 0.7850 - top-5-accuracy: 0.9913 - val_loss: 0.7200 - val_accuracy: 0.7569 - val_top-5-accuracy: 0.9830
Epoch 34/100
157/157 [==============================] - 50s 317ms/step - loss: 0.5552 - accuracy: 0.8041 - top-5-accuracy: 0.9920 - val_loss: 0.7377 - val_accuracy: 0.7552 - val_top-5-accuracy: 0.9818
Epoch 35/100
157/157 [==============================] - 50s 319ms/step - loss: 0.5509 - accuracy: 0.8056 - top-5-accuracy: 0.9921 - val_loss: 0.8125 - val_accuracy: 0.7331 - val_top-5-accuracy: 0.9782
Epoch 36/100
157/157 [==============================] - 50s 317ms/step - loss: 0.5296 - accuracy: 0.8116 - top-5-accuracy: 0.9933 - val_loss: 0.6900 - val_accuracy: 0.7680 - val_top-5-accuracy: 0.9849
Epoch 37/100
157/157 [==============================] - 50s 316ms/step - loss: 0.5151 - accuracy: 0.8170 - top-5-accuracy: 0.9941 - val_loss: 0.7275 - val_accuracy: 0.7610 - val_top-5-accuracy: 0.9841
Epoch 38/100
157/157 [==============================] - 50s 317ms/step - loss: 0.5069 - accuracy: 0.8217 - top-5-accuracy: 0.9936 - val_loss: 0.7067 - val_accuracy: 0.7703 - val_top-5-accuracy: 0.9835
Epoch 39/100
157/157 [==============================] - 50s 318ms/step - loss: 0.4771 - accuracy: 0.8304 - top-5-accuracy: 0.9945 - val_loss: 0.7110 - val_accuracy: 0.7668 - val_top-5-accuracy: 0.9836
Epoch 40/100
157/157 [==============================] - 50s 317ms/step - loss: 0.4675 - accuracy: 0.8350 - top-5-accuracy: 0.9956 - val_loss: 0.7130 - val_accuracy: 0.7688 - val_top-5-accuracy: 0.9829
Epoch 41/100
157/157 [==============================] - 50s 319ms/step - loss: 0.4586 - accuracy: 0.8382 - top-5-accuracy: 0.9959 - val_loss: 0.7331 - val_accuracy: 0.7598 - val_top-5-accuracy: 0.9806
Epoch 42/100
157/157 [==============================] - 50s 318ms/step - loss: 0.4558 - accuracy: 0.8380 - top-5-accuracy: 0.9959 - val_loss: 0.7187 - val_accuracy: 0.7722 - val_top-5-accuracy: 0.9832
Epoch 43/100
157/157 [==============================] - 50s 320ms/step - loss: 0.4356 - accuracy: 0.8450 - top-5-accuracy: 0.9958 - val_loss: 0.7162 - val_accuracy: 0.7693 - val_top-5-accuracy: 0.9850
Epoch 44/100
157/157 [==============================] - 49s 314ms/step - loss: 0.4425 - accuracy: 0.8433 - top-5-accuracy: 0.9958 - val_loss: 0.7061 - val_accuracy: 0.7698 - val_top-5-accuracy: 0.9853
Epoch 45/100
157/157 [==============================] - 49s 314ms/step - loss: 0.4072 - accuracy: 0.8551 - top-5-accuracy: 0.9967 - val_loss: 0.7025 - val_accuracy: 0.7820 - val_top-5-accuracy: 0.9848
Epoch 46/100
157/157 [==============================] - 49s 314ms/step - loss: 0.3865 - accuracy: 0.8644 - top-5-accuracy: 0.9970 - val_loss: 0.7178 - val_accuracy: 0.7740 - val_top-5-accuracy: 0.9844
Epoch 47/100
157/157 [==============================] - 49s 313ms/step - loss: 0.3718 - accuracy: 0.8694 - top-5-accuracy: 0.9973 - val_loss: 0.7216 - val_accuracy: 0.7768 - val_top-5-accuracy: 0.9828
Epoch 48/100
157/157 [==============================] - 49s 314ms/step - loss: 0.3733 - accuracy: 0.8673 - top-5-accuracy: 0.9970 - val_loss: 0.7440 - val_accuracy: 0.7713 - val_top-5-accuracy: 0.9841
Epoch 49/100
157/157 [==============================] - 49s 313ms/step - loss: 0.3531 - accuracy: 0.8741 - top-5-accuracy: 0.9979 - val_loss: 0.7220 - val_accuracy: 0.7738 - val_top-5-accuracy: 0.9848
Epoch 50/100
157/157 [==============================] - 49s 314ms/step - loss: 0.3502 - accuracy: 0.8738 - top-5-accuracy: 0.9980 - val_loss: 0.7245 - val_accuracy: 0.7734 - val_top-5-accuracy: 0.9836
TESTING
40/40 [==============================] - 2s 56ms/step - loss: 0.7336 - accuracy: 0.7638 - top-5-accuracy: 0.9855
Loss: 0.73
Top 1 test accuracy: 76.38%
Top 5 test accuracy: 98.55%
```
</div>
---
## Save trained model
Since we created the model by subclassing, we can't save the model in HDF5 format.
It can only be saved in the TF SavedModel format. In general, this is the recommended format for saving models as well.
```python
model.save("ShiftViT")
```
---
## Model inference
**Download sample data for inference**
```python
!wget -q 'https://tinyurl.com/2p9483sw' -O inference_set.zip
!unzip -q inference_set.zip
```
**Load saved model**
```python
# Custom objects are not included when the model is saved.
# At loading time, these objects need to be passed for reconstruction of the model
saved_model = tf.keras.models.load_model(
"ShiftViT",
custom_objects={"WarmUpCosine": WarmUpCosine, "AdamW": tfa.optimizers.AdamW},
)
```
**Utility functions for inference**
```python
def process_image(img_path):
# read image file from string path
img = tf.io.read_file(img_path)
# decode jpeg to uint8 tensor
img = tf.io.decode_jpeg(img, channels=3)
# resize image to match input size accepted by model
# use `method` as `nearest` to preserve dtype of input passed to `resize()`
img = tf.image.resize(
img, [config.input_shape[0], config.input_shape[1]], method="nearest"
)
return img
def create_tf_dataset(image_dir):
data_dir = pathlib.Path(image_dir)
# create tf.data dataset using directory of images
predict_ds = tf.data.Dataset.list_files(str(data_dir / "*.jpg"), shuffle=False)
# use map to convert string paths to uint8 image tensors
    # setting `num_parallel_calls` helps in processing multiple images in parallel
predict_ds = predict_ds.map(process_image, num_parallel_calls=AUTO)
# create a Prefetch Dataset for better latency & throughput
predict_ds = predict_ds.batch(config.tf_ds_batch_size).prefetch(AUTO)
return predict_ds
def predict(predict_ds):
# ShiftViT model returns logits (non-normalized predictions)
logits = saved_model.predict(predict_ds)
# normalize predictions by calling softmax()
probabilities = tf.nn.softmax(logits)
return probabilities
def get_predicted_class(probabilities):
pred_label = np.argmax(probabilities)
predicted_class = config.label_map[pred_label]
return predicted_class
def get_confidence_scores(probabilities):
# get the indices of the probability scores sorted in descending order
labels = np.argsort(probabilities)[::-1]
confidences = {
config.label_map[label]: np.round((probabilities[label]) * 100, 2)
for label in labels
}
return confidences
```
**Get predictions**
```python
img_dir = "inference_set"
predict_ds = create_tf_dataset(img_dir)
probabilities = predict(predict_ds)
print(f"probabilities: {probabilities[0]}")
confidences = get_confidence_scores(probabilities[0])
print(confidences)
```
<div class="k-default-codeblock">
```
1/1 [==============================] - 2s 2s/step
probabilities: [8.7329084e-01 1.3162658e-03 6.1781306e-05 1.9132349e-05 4.4482469e-05
1.8182898e-06 2.2834571e-05 1.1466043e-05 1.2504059e-01 1.9084632e-04]
{'airplane': 87.33, 'ship': 12.5, 'automobile': 0.13, 'truck': 0.02, 'bird': 0.01, 'deer': 0.0, 'frog': 0.0, 'cat': 0.0, 'horse': 0.0, 'dog': 0.0}
```
</div>
**View predictions**
```python
plt.figure(figsize=(10, 10))
for images in predict_ds:
for i in range(min(6, probabilities.shape[0])):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
predicted_class = get_predicted_class(probabilities[i])
plt.title(predicted_class)
plt.axis("off")
```

---
## Conclusion
The most impactful contribution of the paper is not the novel architecture, but
the idea that hierarchical ViTs trained with no attention can perform quite well. This
opens up the question of how essential attention is to the performance of ViTs.
For curious minds, we would suggest reading the
[ConvNeXt](https://arxiv.org/abs/2201.03545) paper, which focuses more on the training
paradigms and architectural details of ViTs rather than providing a novel architecture
based on attention.
Acknowledgements:
- We would like to thank [PyImageSearch](https://pyimagesearch.com) for providing us with
resources that helped in the completion of this project.
- We would like to thank [JarvisLabs.ai](https://jarvislabs.ai/) for providing us with the
GPU credits.
- We would like to thank [Manim Community](https://www.manim.community/) for the manim
library.
- A personal note of thanks to [Puja Roychowdhury](https://twitter.com/pleb_talks) for
helping us with the Learning Rate Schedule.
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [](https://huggingface.co/keras-io/shiftvit) | [](https://huggingface.co/spaces/keras-io/shiftvit) |
| keras-io/examples/vision/md/shiftvit.md/0 | {
"file_path": "keras-io/examples/vision/md/shiftvit.md",
"repo_id": "keras-io",
"token_count": 19128
} | 112 |
"""
Title: Point cloud classification with PointNet
Author: [David Griffiths](https://dgriffiths3.github.io)
Date created: 2020/05/25
Last modified: 2024/01/09
Description: Implementation of PointNet for ModelNet10 classification.
Accelerator: GPU
"""
"""
# Point cloud classification
"""
"""
## Introduction
Classification, detection and segmentation of unordered 3D point sets, i.e. point clouds,
is a core problem in computer vision. This example implements the seminal point cloud
deep learning paper [PointNet (Qi et al., 2017)](https://arxiv.org/abs/1612.00593). For a
detailed introduction to PointNet, see [this blog
post](https://medium.com/@luis_gonzales/an-in-depth-look-at-pointnet-111d7efdaa1a).
"""
"""
## Setup
If using Colab, first install trimesh with `!pip install trimesh`.
"""
import os
import glob
import trimesh
import numpy as np
from tensorflow import data as tf_data
from keras import ops
import keras
from keras import layers
from matplotlib import pyplot as plt
keras.utils.set_random_seed(seed=42)
"""
## Load dataset
We use the ModelNet10 model dataset, the smaller 10 class version of the ModelNet40
dataset. First download the data:
"""
DATA_DIR = keras.utils.get_file(
"modelnet.zip",
"http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip",
extract=True,
)
DATA_DIR = os.path.join(os.path.dirname(DATA_DIR), "ModelNet10")
"""
We can use the `trimesh` package to read and visualize the `.off` mesh files.
"""
mesh = trimesh.load(os.path.join(DATA_DIR, "chair/train/chair_0001.off"))
mesh.show()
"""
To convert a mesh file to a point cloud we first need to sample points on the mesh
surface. `.sample()` performs a uniform random sampling. Here we sample at 2048 locations
and visualize in `matplotlib`.
"""
points = mesh.sample(2048)
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(111, projection="3d")
ax.scatter(points[:, 0], points[:, 1], points[:, 2])
ax.set_axis_off()
plt.show()
"""
To generate a `tf.data.Dataset()` we need to first parse through the ModelNet data
folders. Each mesh is loaded and sampled into a point cloud before being added to a
standard python list and converted to a `numpy` array. We also store the current
enumerate index value as the object label and use a dictionary to recall this later.
"""
def parse_dataset(num_points=2048):
train_points = []
train_labels = []
test_points = []
test_labels = []
class_map = {}
folders = glob.glob(os.path.join(DATA_DIR, "[!README]*"))
for i, folder in enumerate(folders):
print("processing class: {}".format(os.path.basename(folder)))
# store folder name with ID so we can retrieve later
class_map[i] = folder.split("/")[-1]
# gather all files
train_files = glob.glob(os.path.join(folder, "train/*"))
test_files = glob.glob(os.path.join(folder, "test/*"))
for f in train_files:
train_points.append(trimesh.load(f).sample(num_points))
train_labels.append(i)
for f in test_files:
test_points.append(trimesh.load(f).sample(num_points))
test_labels.append(i)
return (
np.array(train_points),
np.array(test_points),
np.array(train_labels),
np.array(test_labels),
class_map,
)
"""
Set the number of points to sample and the batch size, and parse the dataset. This can take
~5 minutes to complete.
"""
NUM_POINTS = 2048
NUM_CLASSES = 10
BATCH_SIZE = 32
train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset(
NUM_POINTS
)
"""
Our data can now be read into a `tf.data.Dataset()` object. We set the shuffle buffer
size to the entire size of the dataset since, prior to this, the data is ordered by class.
Data augmentation is important when working with point cloud data. We create an
augmentation function to jitter and shuffle the train dataset.
"""
def augment(points, label):
# jitter points
points += keras.random.uniform(points.shape, -0.005, 0.005, dtype="float64")
# shuffle points
points = keras.random.shuffle(points)
return points, label
train_size = 0.8
dataset = tf_data.Dataset.from_tensor_slices((train_points, train_labels))
test_dataset = tf_data.Dataset.from_tensor_slices((test_points, test_labels))
train_dataset_size = int(len(dataset) * train_size)
dataset = dataset.shuffle(len(train_points)).map(augment)
test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE)
train_dataset = dataset.take(train_dataset_size).batch(BATCH_SIZE)
validation_dataset = dataset.skip(train_dataset_size).batch(BATCH_SIZE)
"""
### Build a model
Each convolution and fully-connected layer (with the exception of the end layers) consists of
Convolution / Dense -> Batch Normalization -> ReLU Activation.
"""
def conv_bn(x, filters):
x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x)
x = layers.BatchNormalization(momentum=0.0)(x)
return layers.Activation("relu")(x)
def dense_bn(x, filters):
x = layers.Dense(filters)(x)
x = layers.BatchNormalization(momentum=0.0)(x)
return layers.Activation("relu")(x)
"""
PointNet consists of two core components: the primary MLP network, and the transformer
net (T-net). The T-net aims to learn an affine transformation matrix with its own mini
network. The T-net is used twice. The first time to transform the input features (n, 3)
into a canonical representation. The second is an affine transformation for alignment in
feature space (n, 3). As per the original paper we constrain the transformation to be
close to an orthogonal matrix (i.e. ||X*X^T - I|| = 0).
"""
class OrthogonalRegularizer(keras.regularizers.Regularizer):
def __init__(self, num_features, l2reg=0.001):
self.num_features = num_features
self.l2reg = l2reg
self.eye = ops.eye(num_features)
def __call__(self, x):
x = ops.reshape(x, (-1, self.num_features, self.num_features))
xxt = ops.tensordot(x, x, axes=(2, 2))
xxt = ops.reshape(xxt, (-1, self.num_features, self.num_features))
return ops.sum(self.l2reg * ops.square(xxt - self.eye))
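"""
A quick sanity check (illustration only, not part of the training pipeline): a flattened
identity transform incurs a near-zero orthogonality penalty, while a random matrix does not.
"""
demo_reg = OrthogonalRegularizer(num_features=3)
identity_weights = ops.convert_to_tensor(np.eye(3).flatten()[None, :].astype("float32"))
random_weights = ops.convert_to_tensor(np.random.uniform(size=(1, 9)).astype("float32"))
print(float(demo_reg(identity_weights)))  # ~0.0
print(float(demo_reg(random_weights)))  # > 0.0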
"""
We can then define a general function to build T-net layers.
"""
def tnet(inputs, num_features):
# Initialise bias as the identity matrix
bias = keras.initializers.Constant(np.eye(num_features).flatten())
reg = OrthogonalRegularizer(num_features)
x = conv_bn(inputs, 32)
x = conv_bn(x, 64)
x = conv_bn(x, 512)
x = layers.GlobalMaxPooling1D()(x)
x = dense_bn(x, 256)
x = dense_bn(x, 128)
x = layers.Dense(
num_features * num_features,
kernel_initializer="zeros",
bias_initializer=bias,
activity_regularizer=reg,
)(x)
feat_T = layers.Reshape((num_features, num_features))(x)
# Apply affine transformation to input features
return layers.Dot(axes=(2, 1))([inputs, feat_T])
"""
The main network can then be implemented in the same manner, where the T-net mini models
can be dropped in as layers in the graph. Here we replicate the network architecture
published in the original paper, but with half the number of weights at each layer, as we
are using the smaller 10-class ModelNet dataset.
"""
inputs = keras.Input(shape=(NUM_POINTS, 3))
x = tnet(inputs, 3)
x = conv_bn(x, 32)
x = conv_bn(x, 32)
x = tnet(x, 32)
x = conv_bn(x, 32)
x = conv_bn(x, 64)
x = conv_bn(x, 512)
x = layers.GlobalMaxPooling1D()(x)
x = dense_bn(x, 256)
x = layers.Dropout(0.3)(x)
x = dense_bn(x, 128)
x = layers.Dropout(0.3)(x)
outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs, name="pointnet")
model.summary()
"""
### Train model
Once the model is defined it can be trained like any other standard classification model
using `.compile()` and `.fit()`.
"""
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=0.001),
metrics=["sparse_categorical_accuracy"],
)
model.fit(train_dataset, epochs=20, validation_data=validation_dataset)
"""
## Visualize predictions
We can use matplotlib to visualize our trained model performance.
"""
data = test_dataset.take(1)
points, labels = list(data)[0]
points = points[:8, ...]
labels = labels[:8, ...]
# run test data through model
preds = model.predict(points)
preds = ops.argmax(preds, -1)
points = points.numpy()
# plot points with predicted class and label
fig = plt.figure(figsize=(15, 10))
for i in range(8):
ax = fig.add_subplot(2, 4, i + 1, projection="3d")
ax.scatter(points[i, :, 0], points[i, :, 1], points[i, :, 2])
ax.set_title(
"pred: {:}, label: {:}".format(
CLASS_MAP[preds[i].numpy()], CLASS_MAP[labels.numpy()[i]]
)
)
ax.set_axis_off()
plt.show()
| keras-io/examples/vision/pointnet.py/0 | {
"file_path": "keras-io/examples/vision/pointnet.py",
"repo_id": "keras-io",
"token_count": 3264
} | 113 |
"""
Title: Image classification with Swin Transformers
Author: [Rishit Dagli](https://twitter.com/rishit_dagli)
Date created: 2021/09/08
Last modified: 2021/09/08
Description: Image classification using Swin Transformers, a general-purpose backbone for computer vision.
Accelerator: GPU
"""
"""
This example implements
[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)
by Liu et al. for image classification, and demonstrates it on the
[CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
Swin Transformer (**S**hifted **Win**dow Transformer) can serve as a
general-purpose backbone for computer vision. Swin Transformer is a hierarchical
Transformer whose representations are computed with _shifted windows_. The
shifted window scheme brings greater efficiency by limiting self-attention
computation to non-overlapping local windows while also allowing for
cross-window connections. This architecture has the flexibility to model
information at various scales and has a linear computational complexity with
respect to image size.
This example requires TensorFlow 2.5 or higher.
"""
"""
## Setup
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf # For tf.data and preprocessing only.
import keras
from keras import layers
from keras import ops
"""
## Configure the hyperparameters
A key parameter to pick is the `patch_size`, the size of the input patches.
In order to use each pixel as an individual input, you can set `patch_size` to
`(1, 1)`. Below, we take inspiration from the original paper settings for
training on ImageNet-1K, keeping most of the original settings for this example.
"""
num_classes = 100
input_shape = (32, 32, 3)
patch_size = (2, 2) # 2-by-2 sized patches
dropout_rate = 0.03 # Dropout rate
num_heads = 8 # Attention heads
embed_dim = 64 # Embedding dimension
num_mlp = 256 # MLP layer size
# Convert embedded patches to query, key, and values with a learnable additive
# value
qkv_bias = True
window_size = 2 # Size of attention window
shift_size = 1 # Size of shifting window
image_dimension = 32 # Initial image size
num_patch_x = input_shape[0] // patch_size[0]
num_patch_y = input_shape[1] // patch_size[1]
learning_rate = 1e-3
batch_size = 128
num_epochs = 40
validation_split = 0.1
weight_decay = 0.0001
label_smoothing = 0.1
"""
## Prepare the data
We load the CIFAR-100 dataset through `keras.datasets`,
normalize the images, and convert the integer labels to one-hot encoded vectors.
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
num_train_samples = int(len(x_train) * (1 - validation_split))
num_val_samples = len(x_train) - num_train_samples
x_train, x_val = np.split(x_train, [num_train_samples])
y_train, y_val = np.split(y_train, [num_train_samples])
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i])
plt.show()
"""
## Helper functions
We create two helper functions to help us partition the feature map into a sequence of
non-overlapping windows and to merge the windows back into the original feature map.
"""
def window_partition(x, window_size):
_, height, width, channels = x.shape
patch_num_y = height // window_size
patch_num_x = width // window_size
x = ops.reshape(
x,
(
-1,
patch_num_y,
window_size,
patch_num_x,
window_size,
channels,
),
)
x = ops.transpose(x, (0, 1, 3, 2, 4, 5))
windows = ops.reshape(x, (-1, window_size, window_size, channels))
return windows
def window_reverse(windows, window_size, height, width, channels):
patch_num_y = height // window_size
patch_num_x = width // window_size
x = ops.reshape(
windows,
(
-1,
patch_num_y,
patch_num_x,
window_size,
window_size,
channels,
),
)
x = ops.transpose(x, (0, 1, 3, 2, 4, 5))
x = ops.reshape(x, (-1, height, width, channels))
return x
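"""
As a quick sanity check (an illustrative sketch, not required for training), we can verify
that `window_partition` and `window_reverse` are inverses of each other on a tiny feature map.
"""
demo_map = ops.reshape(ops.arange(0, 16, dtype="float32"), (1, 4, 4, 1))
demo_windows = window_partition(demo_map, window_size=2)  # -> (4, 2, 2, 1)
demo_restored = window_reverse(demo_windows, window_size=2, height=4, width=4, channels=1)
print(bool(ops.all(demo_restored == demo_map)))  # True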
"""
## Window based multi-head self-attention
Usually Transformers perform global self-attention, where the relationships
between a token and all other tokens are computed. The global computation leads
to quadratic complexity with respect to the number of tokens. Here, as the
[original paper](https://arxiv.org/abs/2103.14030) suggests, we compute
self-attention within local windows, in a non-overlapping manner. Global
self-attention leads to quadratic computational complexity in the number of
patches, whereas window-based self-attention leads to linear complexity and is
easily scalable.
"""
class WindowAttention(layers.Layer):
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
self.scale = (dim // num_heads) ** -0.5
self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias)
self.dropout = layers.Dropout(dropout_rate)
self.proj = layers.Dense(dim)
num_window_elements = (2 * self.window_size[0] - 1) * (
2 * self.window_size[1] - 1
)
self.relative_position_bias_table = self.add_weight(
shape=(num_window_elements, self.num_heads),
initializer=keras.initializers.Zeros(),
trainable=True,
)
coords_h = np.arange(self.window_size[0])
coords_w = np.arange(self.window_size[1])
coords_matrix = np.meshgrid(coords_h, coords_w, indexing="ij")
coords = np.stack(coords_matrix)
coords_flatten = coords.reshape(2, -1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.transpose([1, 2, 0])
relative_coords[:, :, 0] += self.window_size[0] - 1
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1)
self.relative_position_index = keras.Variable(
initializer=relative_position_index,
shape=relative_position_index.shape,
dtype="int",
trainable=False,
)
def call(self, x, mask=None):
_, size, channels = x.shape
head_dim = channels // self.num_heads
x_qkv = self.qkv(x)
x_qkv = ops.reshape(x_qkv, (-1, size, 3, self.num_heads, head_dim))
x_qkv = ops.transpose(x_qkv, (2, 0, 3, 1, 4))
q, k, v = x_qkv[0], x_qkv[1], x_qkv[2]
q = q * self.scale
k = ops.transpose(k, (0, 1, 3, 2))
attn = q @ k
num_window_elements = self.window_size[0] * self.window_size[1]
relative_position_index_flat = ops.reshape(self.relative_position_index, (-1,))
relative_position_bias = ops.take(
self.relative_position_bias_table,
relative_position_index_flat,
axis=0,
)
relative_position_bias = ops.reshape(
relative_position_bias,
(num_window_elements, num_window_elements, -1),
)
relative_position_bias = ops.transpose(relative_position_bias, (2, 0, 1))
attn = attn + ops.expand_dims(relative_position_bias, axis=0)
if mask is not None:
nW = mask.shape[0]
mask_float = ops.cast(
ops.expand_dims(ops.expand_dims(mask, axis=1), axis=0),
"float32",
)
attn = ops.reshape(attn, (-1, nW, self.num_heads, size, size)) + mask_float
attn = ops.reshape(attn, (-1, self.num_heads, size, size))
attn = keras.activations.softmax(attn, axis=-1)
else:
attn = keras.activations.softmax(attn, axis=-1)
attn = self.dropout(attn)
x_qkv = attn @ v
x_qkv = ops.transpose(x_qkv, (0, 2, 1, 3))
x_qkv = ops.reshape(x_qkv, (-1, size, channels))
x_qkv = self.proj(x_qkv)
x_qkv = self.dropout(x_qkv)
return x_qkv
"""
## The complete Swin Transformer model
Finally, we put together the complete Swin Transformer by replacing the standard
multi-head attention (MHA) with shifted windows attention. As suggested in the
original paper, we create a model comprising a shifted window-based MHA
layer, followed by a 2-layer MLP with GELU nonlinearity in between, applying
`LayerNormalization` before each MSA layer and each MLP, and a residual
connection after each of these layers.
Notice that we only create a simple MLP with 2 Dense and
2 Dropout layers. Often you will see models using ResNet-50 as the MLP which is
quite standard in the literature. However, in this paper, the authors use a
2-layer MLP with GELU nonlinearity in between.
"""
class SwinTransformer(layers.Layer):
def __init__(
self,
dim,
num_patch,
num_heads,
window_size=7,
shift_size=0,
num_mlp=1024,
qkv_bias=True,
dropout_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.dim = dim # number of input dimensions
self.num_patch = num_patch # number of embedded patches
self.num_heads = num_heads # number of attention heads
self.window_size = window_size # size of window
self.shift_size = shift_size # size of window shift
self.num_mlp = num_mlp # number of MLP nodes
self.norm1 = layers.LayerNormalization(epsilon=1e-5)
self.attn = WindowAttention(
dim,
window_size=(self.window_size, self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)
self.drop_path = layers.Dropout(dropout_rate)
self.norm2 = layers.LayerNormalization(epsilon=1e-5)
self.mlp = keras.Sequential(
[
layers.Dense(num_mlp),
layers.Activation(keras.activations.gelu),
layers.Dropout(dropout_rate),
layers.Dense(dim),
layers.Dropout(dropout_rate),
]
)
if min(self.num_patch) < self.window_size:
self.shift_size = 0
self.window_size = min(self.num_patch)
def build(self, input_shape):
if self.shift_size == 0:
self.attn_mask = None
else:
height, width = self.num_patch
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
mask_array = np.zeros((1, height, width, 1))
count = 0
for h in h_slices:
for w in w_slices:
mask_array[:, h, w, :] = count
count += 1
mask_array = ops.convert_to_tensor(mask_array)
# mask array to windows
mask_windows = window_partition(mask_array, self.window_size)
mask_windows = ops.reshape(
mask_windows, [-1, self.window_size * self.window_size]
)
attn_mask = ops.expand_dims(mask_windows, axis=1) - ops.expand_dims(
mask_windows, axis=2
)
attn_mask = ops.where(attn_mask != 0, -100.0, attn_mask)
attn_mask = ops.where(attn_mask == 0, 0.0, attn_mask)
self.attn_mask = keras.Variable(
initializer=attn_mask,
shape=attn_mask.shape,
dtype=attn_mask.dtype,
trainable=False,
)
def call(self, x, training=False):
height, width = self.num_patch
_, num_patches_before, channels = x.shape
x_skip = x
x = self.norm1(x)
x = ops.reshape(x, (-1, height, width, channels))
if self.shift_size > 0:
shifted_x = ops.roll(
x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2]
)
else:
shifted_x = x
x_windows = window_partition(shifted_x, self.window_size)
x_windows = ops.reshape(
x_windows, (-1, self.window_size * self.window_size, channels)
)
attn_windows = self.attn(x_windows, mask=self.attn_mask)
attn_windows = ops.reshape(
attn_windows,
(-1, self.window_size, self.window_size, channels),
)
shifted_x = window_reverse(
attn_windows, self.window_size, height, width, channels
)
if self.shift_size > 0:
x = ops.roll(
shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2]
)
else:
x = shifted_x
x = ops.reshape(x, (-1, height * width, channels))
x = self.drop_path(x, training=training)
x = x_skip + x
x_skip = x
x = self.norm2(x)
x = self.mlp(x)
        x = self.drop_path(x, training=training)
x = x_skip + x
return x
"""
## Model training and evaluation
### Extract and embed patches
We first create three helpers to extract, embed and merge patches from the
images: a `patch_extract` function plus the `PatchEmbedding` and `PatchMerging`
layers. On top of these we will later use the Swin Transformer class we built.
"""
# Using tf ops since it is only used in tf.data.
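# Shape sketch, assuming 32x32 RGB inputs and patch_size = (2, 2) as configured
# in this example: images (batch, 32, 32, 3) -> patches (batch, 16, 16, 12)
# -> output (batch, 256, 12), i.e. 16 * 16 = 256 patches of 2 * 2 * 3 = 12 values.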
def patch_extract(images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=(1, patch_size[0], patch_size[1], 1),
strides=(1, patch_size[0], patch_size[1], 1),
rates=(1, 1, 1, 1),
padding="VALID",
)
patch_dim = patches.shape[-1]
patch_num = patches.shape[1]
return tf.reshape(patches, (batch_size, patch_num * patch_num, patch_dim))
class PatchEmbedding(layers.Layer):
def __init__(self, num_patch, embed_dim, **kwargs):
super().__init__(**kwargs)
self.num_patch = num_patch
self.proj = layers.Dense(embed_dim)
self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim)
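    # `call` projects each flattened patch to `embed_dim` and adds a learned
    # positional embedding (one embedding vector per patch index).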
def call(self, patch):
pos = ops.arange(start=0, stop=self.num_patch)
return self.proj(patch) + self.pos_embed(pos)
class PatchMerging(keras.layers.Layer):
def __init__(self, num_patch, embed_dim):
super().__init__()
self.num_patch = num_patch
self.embed_dim = embed_dim
self.linear_trans = layers.Dense(2 * embed_dim, use_bias=False)
def call(self, x):
height, width = self.num_patch
_, _, C = x.shape
x = ops.reshape(x, (-1, height, width, C))
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
x = ops.concatenate((x0, x1, x2, x3), axis=-1)
x = ops.reshape(x, (-1, (height // 2) * (width // 2), 4 * C))
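        # Concatenating each 2x2 neighborhood quadruples the channel count; the
        # linear projection below brings it down to 2 * embed_dim, so patch merging
        # halves the spatial resolution while doubling the channel dimension.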
return self.linear_trans(x)
"""
### Prepare the tf.data.Dataset
We perform all the steps that do not involve trainable weights with `tf.data`,
and prepare the training, validation and testing sets.
"""
def augment(x):
x = tf.image.random_crop(x, size=(image_dimension, image_dimension, 3))
x = tf.image.random_flip_left_right(x)
return x
dataset = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.map(lambda x, y: (augment(x), y))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
dataset_val = (
tf.data.Dataset.from_tensor_slices((x_val, y_val))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
dataset_test = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.batch(batch_size=batch_size)
.map(lambda x, y: (patch_extract(x), y))
.prefetch(tf.data.experimental.AUTOTUNE)
)
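# Note that `patch_extract` runs inside `tf.data`, so the model below receives
# sequences of flattened patches of shape (256, 12) rather than raw images.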
"""
### Build the model
We put together the Swin Transformer model: patch embedding, two Swin Transformer
blocks (the first with regular windows, the second with shifted windows), patch
merging, global average pooling, and a softmax classification head.
"""
input = layers.Input(shape=(256, 12))
x = PatchEmbedding(num_patch_x * num_patch_y, embed_dim)(input)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=0,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = SwinTransformer(
dim=embed_dim,
num_patch=(num_patch_x, num_patch_y),
num_heads=num_heads,
window_size=window_size,
shift_size=shift_size,
num_mlp=num_mlp,
qkv_bias=qkv_bias,
dropout_rate=dropout_rate,
)(x)
x = PatchMerging((num_patch_x, num_patch_y), embed_dim=embed_dim)(x)
x = layers.GlobalAveragePooling1D()(x)
output = layers.Dense(num_classes, activation="softmax")(x)
"""
### Train on CIFAR-100
We train the model on CIFAR-100. Here, we only train the model
for 40 epochs to keep the training time short in this example.
In practice, you should train for 150 epochs to reach convergence.
"""
model = keras.Model(input, output)
model.compile(
loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing),
optimizer=keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
),
metrics=[
keras.metrics.CategoricalAccuracy(name="accuracy"),
keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"),
],
)
history = model.fit(
dataset,
batch_size=batch_size,
epochs=num_epochs,
validation_data=dataset_val,
)
"""
Let's visualize the training progress of the model.
"""
plt.plot(history.history["loss"], label="train_loss")
plt.plot(history.history["val_loss"], label="val_loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Train and Validation Losses Over Epochs", fontsize=14)
plt.legend()
plt.grid()
plt.show()
"""
Let's display the final results of the training on CIFAR-100.
"""
loss, accuracy, top_5_accuracy = model.evaluate(dataset_test)
print(f"Test loss: {round(loss, 2)}")
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
"""
The Swin Transformer model we just trained has only 152K parameters, and it gets
us to ~75% test top-5 accuracy within just 40 epochs without any signs of
overfitting, as seen in the graph above. This means we can train this network
for longer (perhaps with a bit more regularization) and obtain even better
performance. This performance can be improved further with additional techniques
such as a cosine-decay learning rate schedule and other data augmentation methods.
While experimenting, I tried training the model for 150 epochs with a slightly
higher dropout and a larger embedding dimension, which pushes the performance up
to ~72% test accuracy on CIFAR-100, as you can see in the screenshot below.

The authors report a top-1 accuracy of 87.3% on ImageNet. They also present a
number of experiments studying how input size, optimizers, etc. affect the final
performance of this model, and further apply it to object detection, semantic
segmentation and instance segmentation, reporting competitive results. You are
strongly advised to also check out the
[original paper](https://arxiv.org/abs/2103.14030).
This example takes inspiration from the official
[PyTorch](https://github.com/microsoft/Swin-Transformer) and
[TensorFlow](https://github.com/VcampSoldiers/Swin-Transformer-Tensorflow)
implementations.
"""
| keras-io/examples/vision/swin_transformers.py/0 | {
"file_path": "keras-io/examples/vision/swin_transformers.py",
"repo_id": "keras-io",
"token_count": 8723
} | 114 |
<jupyter_start><jupyter_text>Semantic Segmentation with KerasCV**Author:** [Divyashree Sreepathihalli](https://github.com/divyashreepathihalli), [Ian Stenbit](https://github.com/ianstenbit)**Date created:** 2023/08/22**Last modified:** 2023/08/24**Description:** Train and use DeepLabv3+ segmentation model with KerasCV. BackgroundSemantic segmentation is a type of computer vision task that involves assigning a class label such as person, bike, or background to each individual pixel of an image, effectively dividing the image into regions that correspond to different object classes or categories.KerasCV offers the DeepLabv3+ model developed by Google for semantic segmentation. This guide demonstrates how to finetune and use the DeepLabv3+ model for image semantic segmentation with KerasCV. Its architecture combines atrous convolutions, contextual information aggregation, and powerful backbones to achieve accurate and detailed semantic segmentation. The DeepLabv3+ model has been shown to achieve state-of-the-art results on a variety of image segmentation benchmarks. References[Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1802.02611)[Rethinking Atrous Convolution for Semantic Image Segmentation](https://arxiv.org/abs/1706.05587) Setup and ImportsLet's install the dependencies and import the necessary modules. To run this tutorial, you will need to install the following packages:* `keras-cv`* `keras-core`<jupyter_code>!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3.<jupyter_output><empty_output><jupyter_text>After installing `keras-core` and `keras-cv`, set the backend for `keras-core`.This guide can be run with any backend (Tensorflow, JAX, PyTorch).```import osos.environ["KERAS_BACKEND"] = "jax"```<jupyter_code>import keras
from keras import ops
import keras_cv
import numpy as np
from keras_cv.datasets.pascal_voc.segmentation import load as load_voc<jupyter_output><empty_output><jupyter_text>Perform semantic segmentation with a pretrained DeepLabv3+ modelThe highest level API in the KerasCV semantic segmentation API is the `keras_cv.models`API. This API includes fully pretrained semantic segmentation models, such as`keras_cv.models.DeepLabV3Plus`.Let's get started by constructing a DeepLabv3+ pretrained on the pascalvoc dataset.<jupyter_code>model = keras_cv.models.DeepLabV3Plus.from_preset(
"deeplab_v3_plus_resnet50_pascalvoc",
num_classes=21,
input_shape=[512, 512, 3],
)<jupyter_output><empty_output><jupyter_text>Let us visualize the results of this pretrained model<jupyter_code>filepath = keras.utils.get_file(origin="https://i.imgur.com/gCNcJJI.jpg")
image = keras.utils.load_img(filepath)
resize = keras_cv.layers.Resizing(height=512, width=512)
image = resize(image)
image = keras.ops.expand_dims(np.array(image), axis=0)
preds = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
keras_cv.visualization.plot_segmentation_mask_gallery(
image,
value_range=(0, 255),
num_classes=1,
y_true=None,
y_pred=preds,
scale=3,
rows=1,
cols=1,
)<jupyter_output><empty_output><jupyter_text>Train a custom semantic segmentation modelIn this guide, we'll assemble a full training pipeline for a KerasCV DeepLabV3 semanticsegmentation model. This includes data loading, augmentation, training, metricevaluation, and inference! Download the dataWe download[Pascal VOC dataset](https://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz)with KerasCV datasets and split them into train dataset `train_ds` and `eval_ds`.<jupyter_code>train_ds = load_voc(split="sbd_train")
eval_ds = load_voc(split="sbd_eval")<jupyter_output><empty_output><jupyter_text>Preprocess the dataThe `preprocess_tfds_inputs` utility function preprocesses the inputs to a dictionary of`images` and `segmentation_masks`. The images and segmentation masks are resized to512x512. The resulting dataset is then batched into groups of 4 image and segmentationmask pairs.A batch of this preprocessed input training data can be visualized using the`keras_cv.visualization.plot_segmentation_mask_gallery` function. This function takes abatch of images and segmentation masks as input and displays them in a grid.<jupyter_code>def preprocess_tfds_inputs(inputs):
def unpackage_tfds_inputs(tfds_inputs):
return {
"images": tfds_inputs["image"],
"segmentation_masks": tfds_inputs["class_segmentation"],
}
outputs = inputs.map(unpackage_tfds_inputs)
outputs = outputs.map(keras_cv.layers.Resizing(height=512, width=512))
outputs = outputs.batch(4, drop_remainder=True)
return outputs
train_ds = preprocess_tfds_inputs(train_ds)
batch = train_ds.take(1).get_single_element()
keras_cv.visualization.plot_segmentation_mask_gallery(
batch["images"],
value_range=(0, 255),
num_classes=21, # The number of classes for the oxford iiit pet dataset. The VOC dataset also includes 1 class for the background.
y_true=batch["segmentation_masks"],
scale=3,
rows=2,
cols=2,
)<jupyter_output><empty_output><jupyter_text>The preprocessing is applied to the evaluation dataset `eval_ds`.<jupyter_code>eval_ds = preprocess_tfds_inputs(eval_ds)<jupyter_output><empty_output><jupyter_text>Data AugmentationKerasCV provides a variety of image augmentation options. In this example, we will usethe `RandomFlip` augmentation to augment the training dataset. The `RandomFlip`augmentation randomly flips the images in the training dataset horizontally orvertically. This can help to improve the model's robustness to changes in the orientationof the objects in the images.<jupyter_code>train_ds = train_ds.map(keras_cv.layers.RandomFlip())
batch = train_ds.take(1).get_single_element()
keras_cv.visualization.plot_segmentation_mask_gallery(
batch["images"],
value_range=(0, 255),
num_classes=21,
y_true=batch["segmentation_masks"],
scale=3,
rows=2,
cols=2,
)<jupyter_output><empty_output><jupyter_text>Model ConfigurationPlease feel free to modify the configurations for model training and note how the training results change. This is a great exercise to get a better understanding of the training pipeline.The learning rate schedule is used by the optimizer to calculate the learning rate for each epoch. The optimizer then uses the learning rate to update the weights of the model.In this case, the learning rate schedule uses a cosine decay function. A cosine decay function starts high and then decreases over time, eventually reaching zero. The cardinality of the VOC dataset is 2124 with a batch size of 4. The dataset cardinality is important for learning rate decay because it determines how many steps the model will train for. The initial learning rate is proportional to 0.007 and the decay steps are 2124. This means that the learning rate will start at `INITIAL_LR` and then decrease to zero over 2124 steps.<jupyter_code>BATCH_SIZE = 4
INITIAL_LR = 0.007 * BATCH_SIZE / 16
EPOCHS = 1
NUM_CLASSES = 21
learning_rate = keras.optimizers.schedules.CosineDecay(
INITIAL_LR,
decay_steps=EPOCHS * 2124,
)<jupyter_output><empty_output><jupyter_text>We instantiate a DeepLabV3+ model with a ResNet50 backbone pretrained on ImageNet classification:`resnet50_v2_imagenet` pre-trained weights will be used as the backbone featureextractor for the DeepLabV3Plus model. The `num_classes` parameter specifies the number ofclasses that the model will be trained to segment.<jupyter_code>model = keras_cv.models.DeepLabV3Plus.from_preset(
"resnet50_v2_imagenet", num_classes=NUM_CLASSES
)<jupyter_output><empty_output><jupyter_text>Compile the modelThe model.compile() function sets up the training process for the model. It defines the- optimization algorithm - Stochastic Gradient Descent (SGD)- the loss function - categorical cross-entropy- the evaluation metrics - Mean IoU and categorical accuracySemantic segmentation evaluation metrics:Mean Intersection over Union (MeanIoU):MeanIoU measures how well a semantic segmentation model accurately identifiesand delineates different objects or regions in an image. It calculates theoverlap between predicted and actual object boundaries, providing a scorebetween 0 and 1, where 1 represents a perfect match.Categorical Accuracy:Categorical Accuracy measures the proportion of correctly classified pixels inan image. It gives a simple percentage indicating how accurately the modelpredicts the categories of pixels in the entire image.In essence, MeanIoU emphasizes the accuracy of identifying specific objectboundaries, while Categorical Accuracy gives a broad overview of overallpixel-level correctness.<jupyter_code>model.compile(
optimizer=keras.optimizers.SGD(
learning_rate=learning_rate, weight_decay=0.0001, momentum=0.9, clipnorm=10.0
),
loss=keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=[
keras.metrics.MeanIoU(
num_classes=NUM_CLASSES, sparse_y_true=False, sparse_y_pred=False
),
keras.metrics.CategoricalAccuracy(),
],
)
model.summary()<jupyter_output><empty_output><jupyter_text>The utility function `dict_to_tuple` effectively transforms the dictionaries of trainingand validation datasets into tuples of images and one-hot encoded segmentation masks,which is used during training and evaluation of the DeepLabv3+ model.<jupyter_code>def dict_to_tuple(x):
import tensorflow as tf
return x["images"], tf.one_hot(
tf.cast(tf.squeeze(x["segmentation_masks"], axis=-1), "int32"), 21
)
train_ds = train_ds.map(dict_to_tuple)
eval_ds = eval_ds.map(dict_to_tuple)
model.fit(train_ds, validation_data=eval_ds, epochs=EPOCHS)<jupyter_output><empty_output><jupyter_text>Predictions with trained modelNow that the model training of DeepLabv3+ has completed, let's test it by making predictions on a few sample images.<jupyter_code>test_ds = load_voc(split="sbd_eval")
test_ds = preprocess_tfds_inputs(test_ds)
images, masks = next(iter(train_ds.take(1)))
images = ops.convert_to_tensor(images)
masks = ops.convert_to_tensor(masks)
preds = ops.expand_dims(ops.argmax(model(images), axis=-1), axis=-1)
masks = ops.expand_dims(ops.argmax(masks, axis=-1), axis=-1)
keras_cv.visualization.plot_segmentation_mask_gallery(
images,
value_range=(0, 255),
num_classes=21,
y_true=masks,
y_pred=preds,
scale=3,
rows=1,
cols=4,
)<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_cv/semantic_segmentation_deeplab_v3_plus.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_cv/semantic_segmentation_deeplab_v3_plus.ipynb",
"repo_id": "keras-io",
"token_count": 3377
} | 115 |
<jupyter_start><jupyter_text>Training & evaluation with the built-in methods**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2019/03/01**Last modified:** 2023/06/25**Description:** Complete guide to training & evaluation with `fit()` and `evaluate()`. Setup<jupyter_code># We import torch & TF so as to use torch Dataloaders & tf.data.Datasets.
import torch
import tensorflow as tf
import os
import numpy as np
import keras
from keras import layers
from keras import ops<jupyter_output><empty_output><jupyter_text>IntroductionThis guide covers training, evaluation, and prediction (inference) modelswhen using built-in APIs for training & validation (such as `Model.fit()`,`Model.evaluate()` and `Model.predict()`).If you are interested in leveraging `fit()` while specifying yourown training step function, see the guides on customizing what happens in `fit()`:- [Writing a custom train step with TensorFlow](/guides/custom_train_step_in_tensorflow/)- [Writing a custom train step with JAX](/guides/custom_train_step_in_jax/)- [Writing a custom train step with PyTorch](/guides/custom_train_step_in_torch/)If you are interested in writing your own training & evaluation loops fromscratch, see the guides on writing training loops:- [Writing a training loop with TensorFlow](/guides/writing_a_custom_training_loop_in_tensorflow/)- [Writing a training loop with JAX](/guides/writing_a_custom_training_loop_in_jax/)- [Writing a training loop with PyTorch](/guides/writing_a_custom_training_loop_in_torch/)In general, whether you are using built-in loops or writing your own, model training &evaluation works strictly in the same way across every kind of Keras model --Sequential models, models built with the Functional API, and models written fromscratch via model subclassing. API overview: a first end-to-end exampleWhen passing data to the built-in training loops of a model, you should either use:- NumPy arrays (if your data is small and fits in memory)- Subclasses of `keras.utils.PyDataset`- `tf.data.Dataset` objects- PyTorch `DataLoader` instancesIn the next few paragraphs, we'll use the MNIST dataset as NumPy arrays, inorder to demonstrate how to use optimizers, losses, and metrics. Afterwards, we'lltake a close look at each of the other options.Let's consider the following model (here, we build in with the Functional API, but itcould be a Sequential model or a subclassed model as well):<jupyter_code>inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)<jupyter_output><empty_output><jupyter_text>Here's what the typical end-to-end workflow looks like, consisting of:- Training- Validation on a holdout set generated from the original training data- Evaluation on the test dataWe'll use MNIST data for this example.<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]<jupyter_output><empty_output><jupyter_text>We specify the training configuration (optimizer, loss, metrics):<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)<jupyter_output><empty_output><jupyter_text>We call `fit()`, which will train the model by slicing the data into "batches" of size`batch_size`, and repeatedly iterating over the entire dataset for a given number of`epochs`.<jupyter_code>print("Fit model on training data")
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)<jupyter_output><empty_output><jupyter_text>The returned `history` object holds a record of the loss values and metric valuesduring training:<jupyter_code>print(history.history)<jupyter_output><empty_output><jupyter_text>We evaluate the model on the test data via `evaluate()`:<jupyter_code># Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)<jupyter_output><empty_output><jupyter_text>Now, let's review each piece of this workflow in detail. The `compile()` method: specifying a loss, metrics, and an optimizerTo train a model with `fit()`, you need to specify a loss function, an optimizer, andoptionally, some metrics to monitor.You pass these to the model as arguments to the `compile()` method:<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)<jupyter_output><empty_output><jupyter_text>The `metrics` argument should be a list -- your model can have any number of metrics.If your model has multiple outputs, you can specify different losses and metrics foreach output, and you can modulate the contribution of each output to the total loss ofthe model. You will find more details about this in the **Passing data to multi-input,multi-output models** section.Note that if you're satisfied with the default settings, in many cases the optimizer,loss, and metrics can be specified via string identifiers as a shortcut:<jupyter_code>model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)<jupyter_output><empty_output><jupyter_text>For later reuse, let's put our model definition and compile step in functions; we willcall them several times across different examples in this guide.<jupyter_code>def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model<jupyter_output><empty_output><jupyter_text>Many built-in optimizers, losses, and metrics are availableIn general, you won't have to create your own losses, metrics, or optimizersfrom scratch, because what you need is likely to be already part of the Keras API:Optimizers:- `SGD()` (with or without momentum)- `RMSprop()`- `Adam()`- etc.Losses:- `MeanSquaredError()`- `KLDivergence()`- `CosineSimilarity()`- etc.Metrics:- `AUC()`- `Precision()`- `Recall()`- etc. Custom lossesIf you need to create a custom loss, Keras provides three ways to do so.The first method involves creating a function that accepts inputs `y_true` and`y_pred`. The following example shows a loss function that computes the mean squarederror between the real data and the predictions:<jupyter_code>def custom_mean_squared_error(y_true, y_pred):
return ops.mean(ops.square(y_true - y_pred), axis=-1)
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)
# We need to one-hot encode the labels to use MSE
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)<jupyter_output><empty_output><jupyter_text>If you need a loss function that takes in parameters beside `y_true` and `y_pred`, youcan subclass the `keras.losses.Loss` class and implement the following two methods:- `__init__(self)`: accept parameters to pass during the call of your loss function- `call(self, y_true, y_pred)`: use the targets (y_true) and the model predictions(y_pred) to compute the model's lossLet's say you want to use mean squared error, but with an added term thatwill de-incentivize prediction values far from 0.5 (we assume that the categoricaltargets are one-hot encoded and take values between 0 and 1). Thiscreates an incentive for the model not to be too confident, which may helpreduce overfitting (we won't know if it works until we try!).Here's how you would do it:<jupyter_code>class CustomMSE(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = ops.mean(ops.square(y_true - y_pred), axis=-1)
reg = ops.mean(ops.square(0.5 - y_pred), axis=-1)
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)<jupyter_output><empty_output><jupyter_text>Custom metricsIf you need a metric that isn't part of the API, you can easily create custom metricsby subclassing the `keras.metrics.Metric` class. You will need to implement 4methods:- `__init__(self)`, in which you will create state variables for your metric.- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targetsy_true and the model predictions y_pred to update the state variables.- `result(self)`, which uses the state variables to compute the final results.- `reset_state(self)`, which reinitializes the state of the metric.State update and results computation are kept separate (in `update_state()` and`result()`, respectively) because in some cases, the results computation might be veryexpensive and would only be done periodically.Here's a simple example showing how to implement a `CategoricalTruePositives` metricthat counts how many samples were correctly classified as belonging to a given class:<jupyter_code>class CategoricalTruePositives(keras.metrics.Metric):
def __init__(self, name="categorical_true_positives", **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_variable(
shape=(), name="ctp", initializer="zeros"
)
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = ops.reshape(ops.argmax(y_pred, axis=1), (-1, 1))
values = ops.cast(y_true, "int32") == ops.cast(y_pred, "int32")
values = ops.cast(values, "float32")
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, "float32")
values = ops.multiply(values, sample_weight)
self.true_positives.assign_add(ops.sum(values))
def result(self):
return self.true_positives.value
def reset_state(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.0)
model = get_uncompiled_model()
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)<jupyter_output><empty_output><jupyter_text>Handling losses and metrics that don't fit the standard signatureThe overwhelming majority of losses and metrics can be computed from `y_true` and`y_pred`, where `y_pred` is an output of your model -- but not all of them. Forinstance, a regularization loss may only require the activation of a layer (there areno targets in this case), and this activation may not be a model output.In such cases, you can call `self.add_loss(loss_value)` from inside the call method ofa custom layer. Losses added in this way get added to the "main" loss during training(the one passed to `compile()`). Here's a simple example that adds activityregularization (note that activity regularization is built-in in all Keras layers --this layer is just for the sake of providing a concrete example):<jupyter_code>class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(ops.sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)<jupyter_output><empty_output><jupyter_text>Note that when you pass losses via `add_loss()`, it becomes possible to call`compile()` without a loss function, since the model already has a loss to minimize.Consider the following `LogisticEndpoint` layer: it takes as inputstargets & logits, and it tracks a crossentropy loss via `add_loss()`.<jupyter_code>class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super().__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Return the inference-time prediction tensor (for `.predict()`).
return ops.softmax(logits)<jupyter_output><empty_output><jupyter_text>You can use it in a model with two inputs (input data & targets), compiled without a`loss` argument, like this:<jupyter_code>inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(targets, logits)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam") # No loss argument!
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)<jupyter_output><empty_output><jupyter_text>For more information about training multi-input models, see the section **Passing datato multi-input, multi-output models**. Automatically setting apart a validation holdout setIn the first end-to-end example you saw, we used the `validation_data` argument to passa tuple of NumPy arrays `(x_val, y_val)` to the model for evaluating a validation lossand validation metrics at the end of each epoch.Here's another option: the argument `validation_split` allows you to automaticallyreserve part of your training data for validation. The argument value represents thefraction of the data to be reserved for validation, so it should be set to a numberhigher than 0 and lower than 1. For instance, `validation_split=0.2` means "use 20% ofthe data for validation", and `validation_split=0.6` means "use 60% of the data forvalidation".The way the validation is computed is by taking the last x% samples of the arraysreceived by the `fit()` call, before any shuffling.Note that you can only use `validation_split` when training with NumPy data.<jupyter_code>model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)<jupyter_output><empty_output><jupyter_text>Training & evaluation using `tf.data` DatasetsIn the past few paragraphs, you've seen how to handle losses, metrics, and optimizers,and you've seen how to use the `validation_data` and `validation_split` arguments in`fit()`, when your data is passed as NumPy arrays.Another option is to use an iterator-like, such as a `tf.data.Dataset`, aPyTorch `DataLoader`, or a Keras `PyDataset`. Let's take look at the former.The `tf.data` API is a set of utilities in TensorFlow 2.0 for loading and preprocessingdata in a way that's fast and scalable. For a complete guide about creating `Datasets`,see the [tf.data documentation](https://www.tensorflow.org/guide/data).**You can use `tf.data` to train your Kerasmodels regardless of the backend you're using --whether it's JAX, PyTorch, or TensorFlow.**You can pass a `Dataset` instance directly to the methods `fit()`, `evaluate()`, and`predict()`:<jupyter_code>model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))<jupyter_output><empty_output><jupyter_text>Note that the Dataset is reset at the end of each epoch, so it can be reused for the next epoch.If you want to run training only on a specific number of batches from this Dataset, you can pass the `steps_per_epoch` argument, which specifies how many training steps the model should run using this Dataset before moving on to the next epoch.<jupyter_code>model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)<jupyter_output><empty_output><jupyter_text>You can also pass a `Dataset` instance as the `validation_data` argument in `fit()`:<jupyter_code>model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=1, validation_data=val_dataset)<jupyter_output><empty_output><jupyter_text>At the end of each epoch, the model will iterate over the validation dataset andcompute the validation loss and validation metrics.If you want to run validation only on a specific number of batches from this dataset,you can pass the `validation_steps` argument, which specifies how many validationsteps the model should run with the validation dataset before interrupting validationand moving on to the next epoch:<jupyter_code>model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(
train_dataset,
epochs=1,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset,
validation_steps=10,
)<jupyter_output><empty_output><jupyter_text>Note that the validation dataset will be reset after each use (so that you will alwaysbe evaluating on the same samples from epoch to epoch).The argument `validation_split` (generating a holdout set from the training data) isnot supported when training from `Dataset` objects, since this feature requires theability to index the samples of the datasets, which is not possible in general withthe `Dataset` API. Training & evaluation using `PyDataset` instances`keras.utils.PyDataset` is a utility that you can subclass to obtaina Python generator with two important properties:- It works well with multiprocessing.- It can be shuffled (e.g. when passing `shuffle=True` in `fit()`).A `PyDataset` must implement two methods:- `__getitem__`- `__len__`The method `__getitem__` should return a complete batch.If you want to modify your dataset between epochs, you may implement `on_epoch_end`.Here's a quick example:<jupyter_code>class ExamplePyDataset(keras.utils.PyDataset):
def __init__(self, x, y, batch_size, **kwargs):
super().__init__(**kwargs)
self.x = x
self.y = y
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size : (idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size : (idx + 1) * self.batch_size]
return batch_x, batch_y
train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32)<jupyter_output><empty_output><jupyter_text>To fit the model, pass the dataset instead as the `x` argument (no need for a `y`argument since the dataset includes the targets), and pass the validation datasetas the `validation_data` argument. And no need for the `batch_size` argument, sincethe dataset is already batched!<jupyter_code>model = get_compiled_model()
model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)<jupyter_output><empty_output><jupyter_text>Evaluating the model is just as easy:<jupyter_code>model.evaluate(val_py_dataset)<jupyter_output><empty_output><jupyter_text>Importantly, `PyDataset` objects support three common constructor argumentsthat handle the parallel processing configuration:- `workers`: Number of workers to use in multithreading or multiprocessing. Typically, you'd set it to the number of cores on your CPU.- `use_multiprocessing`: Whether to use Python multiprocessing for parallelism. Setting this to `True` means that your dataset will be replicated in multiple forked processes. This is necessary to gain compute-level (rather than I/O level) benefits from parallelism. However it can only be set to `True` if your dataset can be safely pickled.- `max_queue_size`: Maximum number of batches to keep in the queue when iterating over the dataset in a multithreaded or multipricessed setting. You can reduce this value to reduce the CPU memory consumption of your dataset. It defaults to 10.By default, multiprocessing is disabled (`use_multiprocessing=False`) and onlyone thread is used. You should make sure to only turn on `use_multiprocessing` ifyour code is running inside a Python `if __name__ == "__main__":` block in orderto avoid issues.Here's a 4-thread, non-multiprocessed example:<jupyter_code>train_py_dataset = ExamplePyDataset(x_train, y_train, batch_size=32, workers=4)
val_py_dataset = ExamplePyDataset(x_val, y_val, batch_size=32, workers=4)
model = get_compiled_model()
model.fit(train_py_dataset, batch_size=64, validation_data=val_py_dataset, epochs=1)<jupyter_output><empty_output><jupyter_text>Training & evaluation using PyTorch `DataLoader` objectsAll built-in training and evaluation APIs are also compatible with `torch.utils.data.Dataset` and`torch.utils.data.DataLoader` objects -- regardless of whether you're using the PyTorch backend,or the JAX or TensorFlow backends. Let's take a look at a simple example.Unlike `PyDataset` which are batch-centric, PyTorch `Dataset` objects are sample-centric:the `__len__` method returns the number of samples,and the `__getitem__` method returns a specific sample.<jupyter_code>class ExampleTorchDataset(torch.utils.data.Dataset):
def __init__(self, x, y):
self.x = x
self.y = y
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
train_torch_dataset = ExampleTorchDataset(x_train, y_train)
val_torch_dataset = ExampleTorchDataset(x_val, y_val)<jupyter_output><empty_output><jupyter_text>To use a PyTorch Dataset, you need to wrap it into a `Dataloader` which takes careof batching and shuffling:<jupyter_code>train_dataloader = torch.utils.data.DataLoader(
train_torch_dataset, batch_size=32, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_torch_dataset, batch_size=32, shuffle=True
)<jupyter_output><empty_output><jupyter_text>Now you can use them in the Keras API just like any other iterator:<jupyter_code>model = get_compiled_model()
model.fit(train_dataloader, batch_size=64, validation_data=val_dataloader, epochs=1)
model.evaluate(val_dataloader)<jupyter_output><empty_output><jupyter_text>Using sample weighting and class weightingWith the default settings the weight of a sample is decided by its frequencyin the dataset. There are two methods to weight the data, independent ofsample frequency:* Class weights* Sample weights Class weightsThis is set by passing a dictionary to the `class_weight` argument to`Model.fit()`. This dictionary maps class indices to the weight that shouldbe used for samples belonging to this class.This can be used to balance classes without resampling, or to train amodel that gives more importance to a particular class.For instance, if class "0" is half as represented as class "1" in your data,you could use `Model.fit(..., class_weight={0: 1., 1: 0.5})`. Here's a NumPy example where we use class weights or sample weights togive more importance to the correct classification of class 5 (whichis the digit "5" in the MNIST dataset).<jupyter_code>class_weight = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.0,
6: 1.0,
7: 1.0,
8: 1.0,
9: 1.0,
}
print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)<jupyter_output><empty_output><jupyter_text>Sample weightsFor fine grained control, or if you are not building a classifier,you can use "sample weights".- When training from NumPy data: Pass the `sample_weight` argument to `Model.fit()`.- When training from `tf.data` or any other sort of iterator: Yield `(input_batch, label_batch, sample_weight_batch)` tuples.A "sample weights" array is an array of numbers that specify how much weighteach sample in a batch should have in computing the total loss. It is commonlyused in imbalanced classification problems (the idea being to give more weightto rarely-seen classes).When the weights used are ones and zeros, the array can be used as a *mask* forthe loss function (entirely discarding the contribution of certain samples tothe total loss).<jupyter_code>sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)<jupyter_output><empty_output><jupyter_text>Here's a matching `Dataset` example:<jupyter_code>sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=1)<jupyter_output><empty_output><jupyter_text>Passing data to multi-input, multi-output modelsIn the previous examples, we were considering a model with a single input (a tensor ofshape `(764,)`) and a single output (a prediction tensor of shape `(10,)`). But whatabout models that have multiple inputs or outputs?Consider the following model, which has an image input of shape `(32, 32, 3)` (that's`(height, width, channels)`) and a time series input of shape `(None, 10)` (that's`(timesteps, features)`). Our model will have two outputs computed from thecombination of these inputs: a "score" (of shape `(1,)`) and a probabilitydistribution over five classes (of shape `(5,)`).<jupyter_code>image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)
model = keras.Model(
inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)<jupyter_output><empty_output><jupyter_text>Let's plot this model, so you can clearly see what we're doing here (note that theshapes shown in the plot are batch shapes, rather than per-sample shapes).<jupyter_code>keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)<jupyter_output><empty_output><jupyter_text>At compilation time, we can specify different losses to different outputs, by passingthe loss functions as a list:<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)<jupyter_output><empty_output><jupyter_text>If we only passed a single loss function to the model, the same loss function would beapplied to every output (which is not appropriate here).Likewise for metrics:<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
metrics=[
[
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
[keras.metrics.CategoricalAccuracy()],
],
)<jupyter_output><empty_output><jupyter_text>Since we gave names to our output layers, we could also specify per-output losses andmetrics via a dict:<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
)<jupyter_output><empty_output><jupyter_text>We recommend the use of explicit names and dicts if you have more than 2 outputs.It's possible to give different weights to different output-specific losses (forinstance, one might wish to privilege the "score" loss in our example, by giving to 2xthe importance of the class loss), using the `loss_weights` argument:<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
loss_weights={"score_output": 2.0, "class_output": 1.0},
)<jupyter_output><empty_output><jupyter_text>You could also choose not to compute a loss for certain outputs, if these outputs aremeant for prediction but not for training:<jupyter_code># List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()],
)
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={"class_output": keras.losses.CategoricalCrossentropy()},
)<jupyter_output><empty_output><jupyter_text>Passing data to a multi-input or multi-output model in `fit()` works in a similar way asspecifying a loss function in compile: you can pass **lists of NumPy arrays** (with1:1 mapping to the outputs that received a loss function) or **dicts mapping outputnames to NumPy arrays**.<jupyter_code>model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[
keras.losses.MeanSquaredError(),
keras.losses.CategoricalCrossentropy(),
],
)
# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1)
# Alternatively, fit on dicts
model.fit(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
batch_size=32,
epochs=1,
)<jupyter_output><empty_output><jupyter_text>Here's the `Dataset` use case: similarly as what we did for NumPy arrays, the `Dataset`should return a tuple of dicts.<jupyter_code>train_dataset = tf.data.Dataset.from_tensor_slices(
(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
)
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=1)<jupyter_output><empty_output><jupyter_text>Using callbacksCallbacks in Keras are objects that are called at different points during training (atthe start of an epoch, at the end of a batch, at the end of an epoch, etc.). Theycan be used to implement certain behaviors, such as:- Doing validation at different points during training (beyond the built-in per-epochvalidation)- Checkpointing the model at regular intervals or when it exceeds a certain accuracythreshold- Changing the learning rate of the model when training seems to be plateauing- Doing fine-tuning of the top layers when training seems to be plateauing- Sending email or instant message notifications when training ends or where a certainperformance threshold is exceeded- Etc.Callbacks can be passed as a list to your call to `fit()`:<jupyter_code>model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)<jupyter_output><empty_output><jupyter_text>Many built-in callbacks are availableThere are many built-in callbacks already available in Keras, such as:- `ModelCheckpoint`: Periodically save the model.- `EarlyStopping`: Stop training when training is no longer improving the validationmetrics.- `TensorBoard`: periodically write model logs that can be visualized in[TensorBoard](https://www.tensorflow.org/tensorboard) (more details in the section"Visualization").- `CSVLogger`: streams loss and metrics data to a CSV file.- etc.See the [callbacks documentation](/api/callbacks/) for the complete list. Writing your own callbackYou can create a custom callback by extending the base class`keras.callbacks.Callback`. A callback has access to its associated model through theclass property `self.model`.Make sure to read the[complete guide to writing custom callbacks](/guides/writing_your_own_callbacks/).Here's a simple example saving a list of per-batch loss values during training:<jupyter_code>class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))<jupyter_output><empty_output><jupyter_text>Checkpointing modelsWhen you're training model on relatively large datasets, it's crucial to savecheckpoints of your model at frequent intervals.The easiest way to achieve this is with the `ModelCheckpoint` callback:<jupyter_code>model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath="mymodel_{epoch}.keras",
save_best_only=True, # Only save a model if `val_loss` has improved.
monitor="val_loss",
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=2,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)<jupyter_output><empty_output><jupyter_text>The `ModelCheckpoint` callback can be used to implement fault-tolerance:the ability to restart training from the last saved state of the model in case traininggets randomly interrupted. Here's a basic example:<jupyter_code># Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
model = make_or_restore_model()
callbacks = [
# This callback saves the model every 100 batches.
# We include the training loss in the saved model name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/model-loss={loss:.2f}.keras", save_freq=100
)
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)<jupyter_output><empty_output><jupyter_text>You call also write your own callback for saving and restoring models.For a complete guide on serialization and saving, see the[guide to saving and serializing Models](/guides/serialization_and_saving/). Using learning rate schedulesA common pattern when training deep learning models is to gradually reduce the learningas training progresses. This is generally known as "learning rate decay".The learning decay schedule could be static (fixed in advance, as a function of thecurrent epoch or the current batch index), or dynamic (responding to the currentbehavior of the model, in particular the validation loss). Passing a schedule to an optimizerYou can easily use a static learning rate decay schedule by passing a schedule objectas the `learning_rate` argument in your optimizer:<jupyter_code>initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)<jupyter_output><empty_output><jupyter_text>Several built-in schedules are available: `ExponentialDecay`, `PiecewiseConstantDecay`,`PolynomialDecay`, and `InverseTimeDecay`. Using callbacks to implement a dynamic learning rate scheduleA dynamic learning rate schedule (for instance, decreasing the learning rate when thevalidation loss is no longer improving) cannot be achieved with these schedule objects,since the optimizer does not have access to validation metrics.However, callbacks do have access to all metrics, including validation metrics! You canthus achieve this pattern by using a callback that modifies the current learning rateon the optimizer. In fact, this is even built-in as the `ReduceLROnPlateau` callback. Visualizing loss and metrics during training with TensorBoardThe best way to keep an eye on your model during training is to use[TensorBoard](https://www.tensorflow.org/tensorboard) -- a browser-based applicationthat you can run locally that provides you with:- Live plots of the loss and metrics for training and evaluation- (optionally) Visualizations of the histograms of your layer activations- (optionally) 3D visualizations of the embedding spaces learned by your `Embedding`layersIf you have installed TensorFlow with pip, you should be able to launch TensorBoardfrom the command line:```tensorboard --logdir=/full_path_to_your_logs``` Using the TensorBoard callbackThe easiest way to use TensorBoard with a Keras model and the `fit()` method is the`TensorBoard` callback.In the simplest case, just specify where you want the callback to write logs, andyou're good to go:<jupyter_code>keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_logs",
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
update_freq="epoch",
) # How often to write logs (default: once per epoch)<jupyter_output><empty_output> | keras-io/guides/ipynb/training_with_built_in_methods.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/training_with_built_in_methods.ipynb",
"repo_id": "keras-io",
"token_count": 13746
} | 116 |
# SimSiam Training with TensorFlow Similarity and KerasCV
**Author:** [lukewood](https://lukewood.xyz), Ian Stenbit, Owen Vallis<br>
**Date created:** 2023/01/22<br>
**Last modified:** 2023/01/22<br>
**Description:** Train a KerasCV model using unlabelled data with SimSiam.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/simsiam_with_kerascv.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/simsiam_with_kerascv.py)
---
## Overview
[TensorFlow similarity](https://github.com/tensorflow/similarity) makes it easy to train
KerasCV models on unlabelled corpuses of data using contrastive learning algorithms such
as SimCLR, SimSiam, and Barlow Twins. In this guide, we will train a KerasCV model
using the SimSiam implementation from TensorFlow Similarity.
---
## Background
Self-supervised learning is an approach to pre-training models using unlabeled data.
This approach drastically increases accuracy when you have very few labeled examples but
a lot of unlabelled data.
The key insight is that you can train a self-supervised model to learn data
representations by contrasting multiple augmented views of the same example.
These learned representations capture data invariants, e.g., object translation, color
jitter, noise, etc. Training a simple linear classifier on top of the frozen
representations is easier and requires fewer labels because the pre-trained model
already produces meaningful and generally useful features.
Overall, self-supervised pre-training learns representations which are [more generic and
robust than other approaches to augmented training and pre-training](https://arxiv.org/abs/2002.05709).
An overview of the general contrastive learning process is shown below:

In this tutorial, we will use the [SimSiam](https://arxiv.org/abs/2011.10566) algorithm
for contrastive learning. As of 2022, SimSiam is the state-of-the-art algorithm for
contrastive learning, achieving unprecedented scores on CIFAR-100 and other datasets.
You may need to install:
```
pip -q install tensorflow_similarity
pip -q install keras-cv
```
To get started, we will sort out some imports.
```python
import resource
import gc
import os
import random
import time
import tensorflow_addons as tfa
import keras_cv
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from tabulate import tabulate
import tensorflow_similarity as tfsim # main package
import tensorflow as tf
from keras_cv import layers as cv_layers
import tensorflow_datasets as tfds
```
<div class="k-default-codeblock">
```
You do not have Waymo Open Dataset installed, so KerasCV Waymo metrics are not available.
Your CPU supports instructions that this binary was not compiled to use: SSE3 SSE4.1 SSE4.2 AVX AVX2
For maximum performance, you can install NMSLIB from sources
pip install --no-binary :all: nmslib
```
</div>
Let's sort out some high-level config issues and define some constants.
The resource limit increase is required to load STL-10, `tfsim.utils.tf_cap_memory()`
prevents TensorFlow from hogging the GPU memory in a cluster, and
`tfds.disable_progress_bar()` makes tfds less noisy.
```python
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
tfsim.utils.tf_cap_memory() # Avoid GPU memory blow up
tfds.disable_progress_bar()
BATCH_SIZE = 512
PRE_TRAIN_EPOCHS = 50
VAL_STEPS_PER_EPOCH = 20
WEIGHT_DECAY = 5e-4
INIT_LR = 3e-2 * int(BATCH_SIZE / 256)
WARMUP_LR = 0.0
WARMUP_STEPS = 0
DIM = 2048
```
---
## Data loading
Next, we will load the STL-10 dataset. STL-10 is a dataset consisting of 100k unlabelled
images, 5k labelled training images, and 8k labelled test images. Due to this distribution,
STL-10 is commonly used as a benchmark for contrastive learning models.
First, let's load our unlabelled data.
```python
train_ds = tfds.load("stl10", split="unlabelled")
train_ds = train_ds.map(
lambda entry: entry["image"], num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.map(
lambda image: tf.cast(image, tf.float32), num_parallel_calls=tf.data.AUTOTUNE
)
train_ds = train_ds.shuffle(buffer_size=8 * BATCH_SIZE, reshuffle_each_iteration=True)
```
<div class="k-default-codeblock">
```
[1mDownloading and preparing dataset 2.46 GiB (download: 2.46 GiB, generated: 1.86 GiB, total: 4.32 GiB) to ~/tensorflow_datasets/stl10/1.0.0...[0m
[1mDataset stl10 downloaded and prepared to ~/tensorflow_datasets/stl10/1.0.0. Subsequent calls will reuse this data.[0m
WARNING:tensorflow:From /home/lukewood/.local/lib/python3.7/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.
Instructions for updating:
Lambda functions will be no more assumed to be used in the statement where they are used, or at least in the same block. https://github.com/tensorflow/tensorflow/issues/56089
WARNING:tensorflow:From /home/lukewood/.local/lib/python3.7/site-packages/tensorflow/python/autograph/pyct/static_analysis/liveness.py:83: Analyzer.lamba_check (from tensorflow.python.autograph.pyct.static_analysis.liveness) is deprecated and will be removed after 2023-09-23.
Instructions for updating:
Lambda functions will be no more assumed to be used in the statement where they are used, or at least in the same block. https://github.com/tensorflow/tensorflow/issues/56089
```
</div>
Next, we need to prepare some labelled samples.
This is done so that TensorFlow similarity can probe the learned embedding to ensure
that the model is learning appropriately.
```python
(x_raw_train, y_raw_train), ds_info = tfds.load(
"stl10", split="train", as_supervised=True, batch_size=-1, with_info=True
)
x_raw_train, y_raw_train = tf.cast(x_raw_train, tf.float32), tf.cast(
y_raw_train, tf.float32
)
x_test, y_test = tfds.load(
"stl10",
split="test",
as_supervised=True,
batch_size=-1,
)
x_test, y_test = tf.cast(x_test, tf.float32), tf.cast(y_test, tf.float32)
```
In self-supervised learning, the query and index sets are labelled subsets of the dataset
used to evaluate the quality of the produced latent embedding. The following code
assembles these datasets:
```python
# Compute the indices for query, index, val, and train splits
query_idxs, index_idxs, val_idxs, train_idxs = [], [], [], []
for cid in range(ds_info.features["label"].num_classes):
idxs = tf.random.shuffle(tf.where(y_raw_train == cid))
idxs = tf.reshape(idxs, (-1,))
    query_idxs.extend(idxs[:100])  # 100 query examples per class
    index_idxs.extend(idxs[100:200])  # 100 index examples per class
val_idxs.extend(idxs[200:300]) # 100 validation examples per class
train_idxs.extend(idxs[300:]) # The remaining are used for training
random.shuffle(query_idxs)
random.shuffle(index_idxs)
random.shuffle(val_idxs)
random.shuffle(train_idxs)
def create_split(idxs: list) -> tuple:
x, y = [], []
for idx in idxs:
x.append(x_raw_train[int(idx)])
y.append(y_raw_train[int(idx)])
return tf.convert_to_tensor(np.array(x), dtype=tf.float32), tf.convert_to_tensor(
np.array(y), dtype=tf.int64
)
x_query, y_query = create_split(query_idxs)
x_index, y_index = create_split(index_idxs)
x_val, y_val = create_split(val_idxs)
x_train, y_train = create_split(train_idxs)
PRE_TRAIN_STEPS_PER_EPOCH = tf.data.experimental.cardinality(train_ds) // BATCH_SIZE
PRE_TRAIN_STEPS_PER_EPOCH = int(PRE_TRAIN_STEPS_PER_EPOCH.numpy())
print(
tabulate(
[
["train", tf.data.experimental.cardinality(train_ds), None],
["val", x_val.shape, y_val.shape],
["query", x_query.shape, y_query.shape],
["index", x_index.shape, y_index.shape],
["test", x_test.shape, y_test.shape],
],
headers=["# of Examples", "Labels"],
)
)
```
<div class="k-default-codeblock">
```
# of Examples Labels
----- ----------------- --------
train 100000
val (1000, 96, 96, 3) (1000,)
query (1000, 96, 96, 3) (1000,)
index (1000, 96, 96, 3) (1000,)
test (8000, 96, 96, 3) (8000,)
```
</div>
---
## Augmentations
Self-supervised networks require at least two augmented "views" of each example.
These can be created using a dataset and an augmentation function.
The dataset treats each example in the batch as its own class and then the augment
function produces two separate views for each example.
This means the resulting batch will yield tuples containing the two views, i.e.,
Tuple[(BATCH_SIZE, 96, 96, 3), (BATCH_SIZE, 96, 96, 3)].
Using KerasCV, it is trivial to construct an augmenter that performs as the one
described in the original SimSiam paper. Let's do that below.
```python
target_size = (96, 96)
crop_area_factor = (0.08, 1)
aspect_ratio_factor = (3 / 4, 4 / 3)
grayscale_rate = 0.2
color_jitter_rate = 0.8
brightness_factor = 0.2
contrast_factor = 0.8
saturation_factor = (0.3, 0.7)
hue_factor = 0.2
augmenter = keras.Sequential(
[
cv_layers.RandomFlip("horizontal"),
cv_layers.RandomCropAndResize(
target_size,
crop_area_factor=crop_area_factor,
aspect_ratio_factor=aspect_ratio_factor,
),
cv_layers.RandomApply(
cv_layers.Grayscale(output_channels=3), rate=grayscale_rate
),
cv_layers.RandomApply(
cv_layers.RandomColorJitter(
value_range=(0, 255),
brightness_factor=brightness_factor,
contrast_factor=contrast_factor,
saturation_factor=saturation_factor,
hue_factor=hue_factor,
),
rate=color_jitter_rate,
),
],
)
```
Next, let's pass our images through this pipeline.
Note that KerasCV supports batched augmentation, so batching before
augmentation dramatically improves performance.
```python
@tf.function()
def process(img):
return augmenter(img), augmenter(img)
def prepare_dataset(dataset):
dataset = dataset.repeat()
dataset = dataset.shuffle(1024)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.map(process, num_parallel_calls=tf.data.AUTOTUNE)
return dataset.prefetch(tf.data.AUTOTUNE)
train_ds = prepare_dataset(train_ds)
val_ds = tf.data.Dataset.from_tensor_slices(x_val)
val_ds = prepare_dataset(val_ds)
print("train_ds", train_ds)
print("val_ds", val_ds)
```
<div class="k-default-codeblock">
```
train_ds <PrefetchDataset element_spec=(TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None))>
val_ds <PrefetchDataset element_spec=(TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None))>
```
</div>
Let's visualize our pairs using the `tfsim.visualization` utility package.
```python
display_imgs = next(train_ds.as_numpy_iterator())
max_pixel = np.max([display_imgs[0].max(), display_imgs[1].max()])
min_pixel = np.min([display_imgs[0].min(), display_imgs[1].min()])
tfsim.visualization.visualize_views(
views=display_imgs,
num_imgs=16,
views_per_col=8,
max_pixel_value=max_pixel,
min_pixel_value=min_pixel,
)
```

---
## Model Creation
Now that our data and augmentation pipeline is set up, we can move on to
constructing the contrastive learning pipeline. First, let's produce a backbone.
For this task, we will use a KerasCV ResNet18 model as the backbone.
```python
def get_backbone(input_shape):
inputs = layers.Input(shape=input_shape)
x = inputs
x = keras_cv.models.ResNet18(
input_shape=input_shape,
include_rescaling=True,
include_top=False,
pooling="avg",
)(x)
return tfsim.models.SimilarityModel(inputs, x)
backbone = get_backbone((96, 96, 3))
backbone.summary()
```
<div class="k-default-codeblock">
```
Model: "similarity_model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 96, 96, 3)] 0
resnet18 (Functional) (None, 512) 11186112
=================================================================
Total params: 11,186,112
Trainable params: 11,176,512
Non-trainable params: 9,600
_________________________________________________________________
```
</div>
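As a quick sanity check, we can confirm that the backbone maps images to 512-dimensional
feature vectors; the random batch below is used purely for illustration:

```python
sample_batch = tf.random.uniform((4, 96, 96, 3), maxval=255.0)  # random "images", just for a shape check
sample_features = backbone(sample_batch)
print(sample_features.shape)  # (4, 512) -- the features that are reused for downstream tasks
```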
Next, we build the projector model. This MLP is common to all the self-supervised models and is typically a stack of 3
layers of the same size. However, SimSiam only uses 2 layers for the smaller CIFAR
images. Having too much capacity in the models can make it difficult for the loss to
stabilize and converge.
Note: This is the model output that is returned by `ContrastiveModel.predict()` and
represents the distance-based embedding. This embedding can be used for the KNN
lookups and matching classification metrics. However, when using the pre-trained
model for downstream tasks, only the `ContrastiveModel.backbone` is used.
```python
def get_projector(input_dim, dim, activation="relu", num_layers: int = 3):
inputs = tf.keras.layers.Input((input_dim,), name="projector_input")
x = inputs
for i in range(num_layers - 1):
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name=f"projector_layer_{i}",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name=f"batch_normalization_{i}"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_{i}")(
x
)
x = tf.keras.layers.Dense(
dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="projector_output",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5,
center=False, # Page:5, Paragraph:2 of SimSiam paper
scale=False, # Page:5, Paragraph:2 of SimSiam paper
name=f"batch_normalization_ouput",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="proj_std")(x)
projector = tf.keras.Model(inputs, o, name="projector")
return projector
projector = get_projector(input_dim=backbone.output.shape[-1], dim=DIM, num_layers=2)
projector.summary()
```
<div class="k-default-codeblock">
```
Model: "projector"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
projector_input (InputLayer [(None, 512)] 0
)
projector_layer_0 (Dense) (None, 2048) 1048576
batch_normalization_0 (Batc (None, 2048) 8192
hNormalization)
relu_activation_0 (Activati (None, 2048) 0
on)
projector_output (Dense) (None, 2048) 4194304
batch_normalization_ouput ( (None, 2048) 4096
BatchNormalization)
proj_std (ActivationStdLogg (None, 2048) 0
ingLayer)
=================================================================
Total params: 5,255,168
Trainable params: 5,246,976
Non-trainable params: 8,192
_________________________________________________________________
```
</div>
Finally, we must construct the predictor. The predictor is used in SimSiam, and is a
simple stack of two MLP layers, containing a bottleneck in the hidden layer.
```python
def get_predictor(input_dim, hidden_dim=512, activation="relu"):
inputs = tf.keras.layers.Input(shape=(input_dim,), name="predictor_input")
x = inputs
x = tf.keras.layers.Dense(
hidden_dim,
use_bias=False,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_layer_0",
)(x)
x = tf.keras.layers.BatchNormalization(
epsilon=1.001e-5, name="batch_normalization_0"
)(x)
x = tf.keras.layers.Activation(activation, name=f"{activation}_activation_0")(x)
x = tf.keras.layers.Dense(
input_dim,
kernel_initializer=tf.keras.initializers.LecunUniform(),
name="predictor_output",
)(x)
# Metric Logging layer. Monitors the std of the layer activations.
    # Degenerate solutions collapse to 0 while valid solutions will move
# towards something like 0.0220. The actual number will depend on the layer size.
o = tfsim.layers.ActivationStdLoggingLayer(name="pred_std")(x)
predictor = tf.keras.Model(inputs, o, name="predictor")
return predictor
predictor = get_predictor(input_dim=DIM, hidden_dim=512)
predictor.summary()
```
<div class="k-default-codeblock">
```
Model: "predictor"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
predictor_input (InputLayer [(None, 2048)] 0
)
predictor_layer_0 (Dense) (None, 512) 1048576
batch_normalization_0 (Batc (None, 512) 2048
hNormalization)
relu_activation_0 (Activati (None, 512) 0
on)
predictor_output (Dense) (None, 2048) 1050624
pred_std (ActivationStdLogg (None, 2048) 0
ingLayer)
=================================================================
Total params: 2,101,248
Trainable params: 2,100,224
Non-trainable params: 1,024
_________________________________________________________________
```
</div>
---
## Training
First, we need to initialize our training model, loss, and optimizer.
```python
loss = tfsim.losses.SimSiamLoss(projection_type="cosine_distance", name="simsiam")
contrastive_model = tfsim.models.ContrastiveModel(
backbone=backbone,
projector=projector,
    predictor=predictor,  # NOTE: SimSiam requires the predictor model.
algorithm="simsiam",
name="simsiam",
)
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=INIT_LR,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
wd_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate=WEIGHT_DECAY,
decay_steps=PRE_TRAIN_EPOCHS * PRE_TRAIN_STEPS_PER_EPOCH,
)
optimizer = tfa.optimizers.SGDW(
learning_rate=lr_decayed_fn, weight_decay=wd_decayed_fn, momentum=0.9
)
```
Next we can compile the model the same way you compile any other Keras model.
```python
contrastive_model.compile(
optimizer=optimizer,
loss=loss,
)
```
We track the training using `EvalCallback`.
`EvalCallback` creates an index at the end of each epoch and provides a proxy for the
nearest neighbor matching classification using `binary_accuracy`.
It calculates how often the query label matches the derived lookup label.
Accuracy is technically (TP+TN)/(TP+FP+TN+FN), but here we filter all
queries above the distance threshold. In the case of binary matching, this
makes all the TPs and FPs below the distance threshold and all the TNs and
FNs above the distance threshold.
As we are only concerned with the matches below the distance threshold, the
accuracy simplifies to TP/(TP+FP) and is equivalent to the precision with
respect to the unfiltered queries. However, we also want to consider the
query coverage at the distance threshold, i.e., the percentage of queries
that return a match, computed as (TP+FP)/(TP+FP+TN+FN). Therefore, we can
take $precision \times query\_coverage$ to produce a measure that captures
the precision scaled by the query coverage. This simplifies down to the
binary accuracy presented here, giving TP/(TP+FP+TN+FN).
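As a quick worked example with made-up counts: suppose that out of 1000 queries, 400 return
a match below the distance threshold and 310 of those matches carry the correct label.

```python
# Hypothetical counts: TPs/FPs are matches below the distance threshold,
# TNs/FNs are queries above the threshold that return no match.
tp, fp, tn, fn = 310, 90, 0, 600
precision = tp / (tp + fp)  # 0.775
query_coverage = (tp + fp) / (tp + fp + tn + fn)  # 0.4
binary_accuracy = tp / (tp + fp + tn + fn)  # 0.31
assert abs(precision * query_coverage - binary_accuracy) < 1e-12
```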
```python
DATA_PATH = Path("./")
log_dir = DATA_PATH / "models" / "logs" / f"{loss.name}_{time.time()}"
chkpt_dir = DATA_PATH / "models" / "checkpoints" / f"{loss.name}_{time.time()}"
callbacks = [
tfsim.callbacks.EvalCallback(
tf.cast(x_query, tf.float32),
y_query,
tf.cast(x_index, tf.float32),
y_index,
metrics=["binary_accuracy"],
k=1,
tb_logdir=log_dir,
),
tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=1,
update_freq=100,
),
tf.keras.callbacks.ModelCheckpoint(
filepath=chkpt_dir,
monitor="val_loss",
mode="min",
save_best_only=True,
save_weights_only=True,
),
]
```
<div class="k-default-codeblock">
```
TensorBoard logging enable in models/logs/simsiam_1674516693.2898047/index
```
</div>
All that is left to do is run fit()!
```python
print(train_ds)
print(val_ds)
history = contrastive_model.fit(
train_ds,
epochs=PRE_TRAIN_EPOCHS,
steps_per_epoch=PRE_TRAIN_STEPS_PER_EPOCH,
validation_data=val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
callbacks=callbacks,
)
```
<div class="k-default-codeblock">
```
<PrefetchDataset element_spec=(TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None))>
<PrefetchDataset element_spec=(TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None, 96, 96, 3), dtype=tf.float32, name=None))>
Epoch 1/50
195/195 [==============================] - ETA: 0s - loss: 0.3137 - proj_std: 0.0122 - pred_std: 0.0076binary_accuracy: 0.2270
195/195 [==============================] - 90s 398ms/step - loss: 0.3137 - proj_std: 0.0122 - pred_std: 0.0076 - val_loss: 0.0764 - val_proj_std: 0.0068 - val_pred_std: 0.0026 - binary_accuracy: 0.2270
Epoch 2/50
195/195 [==============================] - ETA: 0s - loss: 0.1469 - proj_std: 0.0101 - pred_std: 0.0048binary_accuracy: 0.2260
195/195 [==============================] - 76s 390ms/step - loss: 0.1469 - proj_std: 0.0101 - pred_std: 0.0048 - val_loss: 0.3514 - val_proj_std: 0.0162 - val_pred_std: 0.0096 - binary_accuracy: 0.2260
Epoch 3/50
195/195 [==============================] - ETA: 0s - loss: 0.2779 - proj_std: 0.0145 - pred_std: 0.0102binary_accuracy: 0.2550
195/195 [==============================] - 74s 379ms/step - loss: 0.2779 - proj_std: 0.0145 - pred_std: 0.0102 - val_loss: 0.4038 - val_proj_std: 0.0158 - val_pred_std: 0.0122 - binary_accuracy: 0.2550
Epoch 4/50
195/195 [==============================] - ETA: 0s - loss: 0.2763 - proj_std: 0.0167 - pred_std: 0.0145binary_accuracy: 0.2630
195/195 [==============================] - 75s 385ms/step - loss: 0.2763 - proj_std: 0.0167 - pred_std: 0.0145 - val_loss: 0.1668 - val_proj_std: 0.0135 - val_pred_std: 0.0114 - binary_accuracy: 0.2630
Epoch 5/50
195/195 [==============================] - ETA: 0s - loss: 0.2235 - proj_std: 0.0174 - pred_std: 0.0154binary_accuracy: 0.2530
195/195 [==============================] - 63s 326ms/step - loss: 0.2235 - proj_std: 0.0174 - pred_std: 0.0154 - val_loss: 0.1847 - val_proj_std: 0.0144 - val_pred_std: 0.0127 - binary_accuracy: 0.2530
Epoch 6/50
195/195 [==============================] - ETA: 0s - loss: 0.2091 - proj_std: 0.0181 - pred_std: 0.0165binary_accuracy: 0.2580
195/195 [==============================] - 68s 350ms/step - loss: 0.2091 - proj_std: 0.0181 - pred_std: 0.0165 - val_loss: 0.2072 - val_proj_std: 0.0189 - val_pred_std: 0.0176 - binary_accuracy: 0.2580
Epoch 7/50
195/195 [==============================] - ETA: 0s - loss: 0.2385 - proj_std: 0.0191 - pred_std: 0.0178binary_accuracy: 0.2700
195/195 [==============================] - 69s 354ms/step - loss: 0.2385 - proj_std: 0.0191 - pred_std: 0.0178 - val_loss: 0.4700 - val_proj_std: 0.0186 - val_pred_std: 0.0199 - binary_accuracy: 0.2700
Epoch 8/50
195/195 [==============================] - ETA: 0s - loss: 0.1986 - proj_std: 0.0186 - pred_std: 0.0174binary_accuracy: 0.2750
195/195 [==============================] - 68s 350ms/step - loss: 0.1986 - proj_std: 0.0186 - pred_std: 0.0174 - val_loss: 0.3135 - val_proj_std: 0.0192 - val_pred_std: 0.0181 - binary_accuracy: 0.2750
Epoch 9/50
195/195 [==============================] - ETA: 0s - loss: 0.2182 - proj_std: 0.0191 - pred_std: 0.0180binary_accuracy: 0.2670
195/195 [==============================] - 68s 350ms/step - loss: 0.2182 - proj_std: 0.0191 - pred_std: 0.0180 - val_loss: 0.2822 - val_proj_std: 0.0155 - val_pred_std: 0.0135 - binary_accuracy: 0.2670
Epoch 10/50
195/195 [==============================] - ETA: 0s - loss: 0.1991 - proj_std: 0.0185 - pred_std: 0.0173binary_accuracy: 0.3090
195/195 [==============================] - 69s 353ms/step - loss: 0.1991 - proj_std: 0.0185 - pred_std: 0.0173 - val_loss: 0.1550 - val_proj_std: 0.0134 - val_pred_std: 0.0117 - binary_accuracy: 0.3090
Epoch 11/50
195/195 [==============================] - ETA: 0s - loss: 0.2080 - proj_std: 0.0185 - pred_std: 0.0175binary_accuracy: 0.2840
195/195 [==============================] - 69s 353ms/step - loss: 0.2080 - proj_std: 0.0185 - pred_std: 0.0175 - val_loss: 0.2511 - val_proj_std: 0.0185 - val_pred_std: 0.0181 - binary_accuracy: 0.2840
Epoch 12/50
195/195 [==============================] - ETA: 0s - loss: 0.1934 - proj_std: 0.0186 - pred_std: 0.0176binary_accuracy: 0.2980
195/195 [==============================] - 68s 352ms/step - loss: 0.1934 - proj_std: 0.0186 - pred_std: 0.0176 - val_loss: 0.1785 - val_proj_std: 0.0122 - val_pred_std: 0.0104 - binary_accuracy: 0.2980
Epoch 13/50
195/195 [==============================] - ETA: 0s - loss: 0.1945 - proj_std: 0.0190 - pred_std: 0.0181binary_accuracy: 0.3020
195/195 [==============================] - 69s 356ms/step - loss: 0.1945 - proj_std: 0.0190 - pred_std: 0.0181 - val_loss: 0.1189 - val_proj_std: 0.0118 - val_pred_std: 0.0107 - binary_accuracy: 0.3020
Epoch 14/50
195/195 [==============================] - ETA: 0s - loss: 0.2009 - proj_std: 0.0194 - pred_std: 0.0187binary_accuracy: 0.3320
195/195 [==============================] - 68s 350ms/step - loss: 0.2009 - proj_std: 0.0194 - pred_std: 0.0187 - val_loss: 0.1736 - val_proj_std: 0.0127 - val_pred_std: 0.0114 - binary_accuracy: 0.3320
Epoch 15/50
195/195 [==============================] - ETA: 0s - loss: 0.2029 - proj_std: 0.0194 - pred_std: 0.0186binary_accuracy: 0.3320
195/195 [==============================] - 69s 353ms/step - loss: 0.2029 - proj_std: 0.0194 - pred_std: 0.0186 - val_loss: 0.1638 - val_proj_std: 0.0154 - val_pred_std: 0.0148 - binary_accuracy: 0.3320
Epoch 16/50
195/195 [==============================] - ETA: 0s - loss: 0.1972 - proj_std: 0.0196 - pred_std: 0.0190binary_accuracy: 0.3060
195/195 [==============================] - 68s 348ms/step - loss: 0.1972 - proj_std: 0.0196 - pred_std: 0.0190 - val_loss: 0.2987 - val_proj_std: 0.0203 - val_pred_std: 0.0202 - binary_accuracy: 0.3060
Epoch 17/50
195/195 [==============================] - ETA: 0s - loss: 0.1885 - proj_std: 0.0196 - pred_std: 0.0190binary_accuracy: 0.3050
195/195 [==============================] - 68s 350ms/step - loss: 0.1885 - proj_std: 0.0196 - pred_std: 0.0190 - val_loss: 0.1805 - val_proj_std: 0.0161 - val_pred_std: 0.0150 - binary_accuracy: 0.3050
Epoch 18/50
195/195 [==============================] - ETA: 0s - loss: 0.1933 - proj_std: 0.0196 - pred_std: 0.0189binary_accuracy: 0.3270
195/195 [==============================] - 68s 352ms/step - loss: 0.1933 - proj_std: 0.0196 - pred_std: 0.0189 - val_loss: 0.1917 - val_proj_std: 0.0159 - val_pred_std: 0.0151 - binary_accuracy: 0.3270
Epoch 19/50
195/195 [==============================] - ETA: 0s - loss: 0.1934 - proj_std: 0.0196 - pred_std: 0.0189binary_accuracy: 0.3260
195/195 [==============================] - 69s 353ms/step - loss: 0.1934 - proj_std: 0.0196 - pred_std: 0.0189 - val_loss: 0.1808 - val_proj_std: 0.0171 - val_pred_std: 0.0165 - binary_accuracy: 0.3260
Epoch 20/50
195/195 [==============================] - ETA: 0s - loss: 0.1757 - proj_std: 0.0194 - pred_std: 0.0187binary_accuracy: 0.3180
195/195 [==============================] - 68s 349ms/step - loss: 0.1757 - proj_std: 0.0194 - pred_std: 0.0187 - val_loss: 0.1957 - val_proj_std: 0.0176 - val_pred_std: 0.0167 - binary_accuracy: 0.3180
Epoch 21/50
195/195 [==============================] - ETA: 0s - loss: 0.1752 - proj_std: 0.0194 - pred_std: 0.0188binary_accuracy: 0.3000
195/195 [==============================] - 78s 403ms/step - loss: 0.1752 - proj_std: 0.0194 - pred_std: 0.0188 - val_loss: 0.2070 - val_proj_std: 0.0178 - val_pred_std: 0.0172 - binary_accuracy: 0.3000
Epoch 22/50
195/195 [==============================] - ETA: 0s - loss: 0.1743 - proj_std: 0.0195 - pred_std: 0.0190binary_accuracy: 0.3280
195/195 [==============================] - 62s 317ms/step - loss: 0.1743 - proj_std: 0.0195 - pred_std: 0.0190 - val_loss: 0.2240 - val_proj_std: 0.0181 - val_pred_std: 0.0176 - binary_accuracy: 0.3280
Epoch 23/50
195/195 [==============================] - ETA: 0s - loss: 0.1692 - proj_std: 0.0193 - pred_std: 0.0188binary_accuracy: 0.3140
195/195 [==============================] - 68s 348ms/step - loss: 0.1692 - proj_std: 0.0193 - pred_std: 0.0188 - val_loss: 0.1892 - val_proj_std: 0.0186 - val_pred_std: 0.0181 - binary_accuracy: 0.3140
Epoch 24/50
195/195 [==============================] - ETA: 0s - loss: 0.1529 - proj_std: 0.0190 - pred_std: 0.0184binary_accuracy: 0.3280
195/195 [==============================] - 69s 353ms/step - loss: 0.1529 - proj_std: 0.0190 - pred_std: 0.0184 - val_loss: 0.2405 - val_proj_std: 0.0196 - val_pred_std: 0.0194 - binary_accuracy: 0.3280
Epoch 25/50
195/195 [==============================] - ETA: 0s - loss: 0.1425 - proj_std: 0.0187 - pred_std: 0.0182binary_accuracy: 0.3560
195/195 [==============================] - 75s 384ms/step - loss: 0.1425 - proj_std: 0.0187 - pred_std: 0.0182 - val_loss: 0.1602 - val_proj_std: 0.0181 - val_pred_std: 0.0178 - binary_accuracy: 0.3560
Epoch 26/50
195/195 [==============================] - ETA: 0s - loss: 0.1277 - proj_std: 0.0186 - pred_std: 0.0182binary_accuracy: 0.3080
195/195 [==============================] - 63s 322ms/step - loss: 0.1277 - proj_std: 0.0186 - pred_std: 0.0182 - val_loss: 0.1815 - val_proj_std: 0.0193 - val_pred_std: 0.0192 - binary_accuracy: 0.3080
Epoch 27/50
195/195 [==============================] - ETA: 0s - loss: 0.1326 - proj_std: 0.0189 - pred_std: 0.0185binary_accuracy: 0.3540
195/195 [==============================] - 69s 357ms/step - loss: 0.1326 - proj_std: 0.0189 - pred_std: 0.0185 - val_loss: 0.1919 - val_proj_std: 0.0177 - val_pred_std: 0.0174 - binary_accuracy: 0.3540
Epoch 28/50
195/195 [==============================] - ETA: 0s - loss: 0.1383 - proj_std: 0.0187 - pred_std: 0.0183binary_accuracy: 0.4060
195/195 [==============================] - 75s 388ms/step - loss: 0.1383 - proj_std: 0.0187 - pred_std: 0.0183 - val_loss: 0.1795 - val_proj_std: 0.0170 - val_pred_std: 0.0165 - binary_accuracy: 0.4060
Epoch 29/50
195/195 [==============================] - ETA: 0s - loss: 0.1348 - proj_std: 0.0177 - pred_std: 0.0172binary_accuracy: 0.3410
195/195 [==============================] - 61s 312ms/step - loss: 0.1348 - proj_std: 0.0177 - pred_std: 0.0172 - val_loss: 0.2115 - val_proj_std: 0.0187 - val_pred_std: 0.0185 - binary_accuracy: 0.3410
Epoch 30/50
195/195 [==============================] - ETA: 0s - loss: 0.1198 - proj_std: 0.0178 - pred_std: 0.0174binary_accuracy: 0.3520
195/195 [==============================] - 78s 401ms/step - loss: 0.1198 - proj_std: 0.0178 - pred_std: 0.0174 - val_loss: 0.1277 - val_proj_std: 0.0124 - val_pred_std: 0.0115 - binary_accuracy: 0.3520
Epoch 31/50
195/195 [==============================] - ETA: 0s - loss: 0.1185 - proj_std: 0.0180 - pred_std: 0.0176binary_accuracy: 0.3840
195/195 [==============================] - 68s 349ms/step - loss: 0.1185 - proj_std: 0.0180 - pred_std: 0.0176 - val_loss: 0.1637 - val_proj_std: 0.0187 - val_pred_std: 0.0185 - binary_accuracy: 0.3840
Epoch 32/50
195/195 [==============================] - ETA: 0s - loss: 0.1228 - proj_std: 0.0181 - pred_std: 0.0177binary_accuracy: 0.3790
195/195 [==============================] - 61s 312ms/step - loss: 0.1228 - proj_std: 0.0181 - pred_std: 0.0177 - val_loss: 0.1381 - val_proj_std: 0.0185 - val_pred_std: 0.0182 - binary_accuracy: 0.3790
Epoch 33/50
195/195 [==============================] - ETA: 0s - loss: 0.1180 - proj_std: 0.0176 - pred_std: 0.0173binary_accuracy: 0.4050
195/195 [==============================] - 70s 358ms/step - loss: 0.1180 - proj_std: 0.0176 - pred_std: 0.0173 - val_loss: 0.1273 - val_proj_std: 0.0188 - val_pred_std: 0.0186 - binary_accuracy: 0.4050
Epoch 34/50
195/195 [==============================] - ETA: 0s - loss: 0.1145 - proj_std: 0.0176 - pred_std: 0.0173binary_accuracy: 0.3880
195/195 [==============================] - 67s 342ms/step - loss: 0.1145 - proj_std: 0.0176 - pred_std: 0.0173 - val_loss: 0.1958 - val_proj_std: 0.0191 - val_pred_std: 0.0193 - binary_accuracy: 0.3880
Epoch 35/50
195/195 [==============================] - ETA: 0s - loss: 0.1112 - proj_std: 0.0175 - pred_std: 0.0172binary_accuracy: 0.3840
195/195 [==============================] - 68s 348ms/step - loss: 0.1112 - proj_std: 0.0175 - pred_std: 0.0172 - val_loss: 0.1372 - val_proj_std: 0.0186 - val_pred_std: 0.0185 - binary_accuracy: 0.3840
Epoch 36/50
195/195 [==============================] - ETA: 0s - loss: 0.1149 - proj_std: 0.0173 - pred_std: 0.0171binary_accuracy: 0.4030
195/195 [==============================] - 67s 343ms/step - loss: 0.1149 - proj_std: 0.0173 - pred_std: 0.0171 - val_loss: 0.1284 - val_proj_std: 0.0165 - val_pred_std: 0.0163 - binary_accuracy: 0.4030
Epoch 37/50
195/195 [==============================] - ETA: 0s - loss: 0.1108 - proj_std: 0.0174 - pred_std: 0.0171binary_accuracy: 0.4100
195/195 [==============================] - 71s 366ms/step - loss: 0.1108 - proj_std: 0.0174 - pred_std: 0.0171 - val_loss: 0.1387 - val_proj_std: 0.0145 - val_pred_std: 0.0141 - binary_accuracy: 0.4100
Epoch 38/50
195/195 [==============================] - ETA: 0s - loss: 0.1028 - proj_std: 0.0174 - pred_std: 0.0172binary_accuracy: 0.4180
195/195 [==============================] - 66s 338ms/step - loss: 0.1028 - proj_std: 0.0174 - pred_std: 0.0172 - val_loss: 0.1183 - val_proj_std: 0.0182 - val_pred_std: 0.0180 - binary_accuracy: 0.4180
Epoch 39/50
195/195 [==============================] - ETA: 0s - loss: 0.1011 - proj_std: 0.0171 - pred_std: 0.0170binary_accuracy: 0.4020
195/195 [==============================] - 69s 357ms/step - loss: 0.1011 - proj_std: 0.0171 - pred_std: 0.0170 - val_loss: 0.1056 - val_proj_std: 0.0177 - val_pred_std: 0.0176 - binary_accuracy: 0.4020
Epoch 40/50
195/195 [==============================] - ETA: 0s - loss: 0.1081 - proj_std: 0.0167 - pred_std: 0.0165binary_accuracy: 0.4670
195/195 [==============================] - 67s 346ms/step - loss: 0.1081 - proj_std: 0.0167 - pred_std: 0.0165 - val_loss: 0.1144 - val_proj_std: 0.0182 - val_pred_std: 0.0182 - binary_accuracy: 0.4670
Epoch 41/50
195/195 [==============================] - ETA: 0s - loss: 0.1060 - proj_std: 0.0166 - pred_std: 0.0165binary_accuracy: 0.4280
195/195 [==============================] - 68s 349ms/step - loss: 0.1060 - proj_std: 0.0166 - pred_std: 0.0165 - val_loss: 0.1180 - val_proj_std: 0.0175 - val_pred_std: 0.0174 - binary_accuracy: 0.4280
Epoch 42/50
195/195 [==============================] - ETA: 0s - loss: 0.1063 - proj_std: 0.0163 - pred_std: 0.0162binary_accuracy: 0.4220
195/195 [==============================] - 69s 356ms/step - loss: 0.1063 - proj_std: 0.0163 - pred_std: 0.0162 - val_loss: 0.1143 - val_proj_std: 0.0173 - val_pred_std: 0.0171 - binary_accuracy: 0.4220
Epoch 43/50
195/195 [==============================] - ETA: 0s - loss: 0.1050 - proj_std: 0.0162 - pred_std: 0.0161binary_accuracy: 0.4310
195/195 [==============================] - 69s 353ms/step - loss: 0.1050 - proj_std: 0.0162 - pred_std: 0.0161 - val_loss: 0.1171 - val_proj_std: 0.0169 - val_pred_std: 0.0168 - binary_accuracy: 0.4310
Epoch 44/50
195/195 [==============================] - ETA: 0s - loss: 0.1013 - proj_std: 0.0159 - pred_std: 0.0157binary_accuracy: 0.4140
195/195 [==============================] - 75s 386ms/step - loss: 0.1013 - proj_std: 0.0159 - pred_std: 0.0157 - val_loss: 0.1106 - val_proj_std: 0.0161 - val_pred_std: 0.0159 - binary_accuracy: 0.4140
Epoch 45/50
195/195 [==============================] - ETA: 0s - loss: 0.1035 - proj_std: 0.0160 - pred_std: 0.0159binary_accuracy: 0.4350
195/195 [==============================] - 63s 324ms/step - loss: 0.1035 - proj_std: 0.0160 - pred_std: 0.0159 - val_loss: 0.1086 - val_proj_std: 0.0171 - val_pred_std: 0.0171 - binary_accuracy: 0.4350
Epoch 46/50
195/195 [==============================] - ETA: 0s - loss: 0.0999 - proj_std: 0.0157 - pred_std: 0.0157binary_accuracy: 0.4510
195/195 [==============================] - 69s 354ms/step - loss: 0.0999 - proj_std: 0.0157 - pred_std: 0.0157 - val_loss: 0.1000 - val_proj_std: 0.0164 - val_pred_std: 0.0164 - binary_accuracy: 0.4510
Epoch 47/50
195/195 [==============================] - ETA: 0s - loss: 0.1002 - proj_std: 0.0157 - pred_std: 0.0156binary_accuracy: 0.4680
195/195 [==============================] - 68s 351ms/step - loss: 0.1002 - proj_std: 0.0157 - pred_std: 0.0156 - val_loss: 0.1067 - val_proj_std: 0.0163 - val_pred_std: 0.0163 - binary_accuracy: 0.4680
Epoch 48/50
195/195 [==============================] - ETA: 0s - loss: 0.0980 - proj_std: 0.0155 - pred_std: 0.0153binary_accuracy: 0.4410
195/195 [==============================] - 68s 352ms/step - loss: 0.0980 - proj_std: 0.0155 - pred_std: 0.0153 - val_loss: 0.0986 - val_proj_std: 0.0159 - val_pred_std: 0.0159 - binary_accuracy: 0.4410
Epoch 49/50
195/195 [==============================] - ETA: 0s - loss: 0.0944 - proj_std: 0.0155 - pred_std: 0.0154binary_accuracy: 0.4520
195/195 [==============================] - 69s 355ms/step - loss: 0.0944 - proj_std: 0.0155 - pred_std: 0.0154 - val_loss: 0.0949 - val_proj_std: 0.0164 - val_pred_std: 0.0163 - binary_accuracy: 0.4520
Epoch 50/50
195/195 [==============================] - ETA: 0s - loss: 0.0937 - proj_std: 0.0155 - pred_std: 0.0154binary_accuracy: 0.4570
195/195 [==============================] - 67s 347ms/step - loss: 0.0937 - proj_std: 0.0155 - pred_std: 0.0154 - val_loss: 0.0978 - val_proj_std: 0.0166 - val_pred_std: 0.0165 - binary_accuracy: 0.4570
```
</div>
---
## Plotting and Evaluation
```python
plt.figure(figsize=(15, 4))
plt.subplot(1, 3, 1)
plt.plot(history.history["loss"])
plt.grid()
plt.title(f"{loss.name} - loss")
plt.subplot(1, 3, 2)
plt.plot(history.history["proj_std"], label="proj")
if "pred_std" in history.history:
plt.plot(history.history["pred_std"], label="pred")
plt.grid()
plt.title(f"{loss.name} - std metrics")
plt.legend()
plt.subplot(1, 3, 3)
plt.plot(history.history["binary_accuracy"], label="acc")
plt.grid()
plt.title(f"{loss.name} - match metrics")
plt.legend()
plt.show()
```

---
## Fine Tuning on the Labelled Data
As a final step, we will fine-tune a classifier on 10% of the training data. This will
allow us to evaluate the quality of our learned representation. First, we handle data
loading:
```python
eval_augmenter = keras.Sequential(
[
keras_cv.layers.RandomCropAndResize(
(96, 96), crop_area_factor=(0.8, 1.0), aspect_ratio_factor=(1.0, 1.0)
),
keras_cv.layers.RandomFlip(mode="horizontal"),
]
)
eval_train_ds = tf.data.Dataset.from_tensor_slices(
(x_raw_train, tf.keras.utils.to_categorical(y_raw_train, 10))
)
eval_train_ds = eval_train_ds.repeat()
eval_train_ds = eval_train_ds.shuffle(1024)
eval_train_ds = eval_train_ds.map(lambda x, y: (eval_augmenter(x), y), tf.data.AUTOTUNE)
eval_train_ds = eval_train_ds.batch(BATCH_SIZE)
eval_train_ds = eval_train_ds.prefetch(tf.data.AUTOTUNE)
eval_val_ds = tf.data.Dataset.from_tensor_slices(
(x_test, tf.keras.utils.to_categorical(y_test, 10))
)
eval_val_ds = eval_val_ds.repeat()
eval_val_ds = eval_val_ds.shuffle(1024)
eval_val_ds = eval_val_ds.batch(BATCH_SIZE)
eval_val_ds = eval_val_ds.prefetch(tf.data.AUTOTUNE)
```
---
## Benchmark Against a Naive Model
Finally, let's set up a naive model that does not leverage the unlabelled data corpus.
```python
TEST_EPOCHS = 50
TEST_STEPS_PER_EPOCH = x_raw_train.shape[0] // BATCH_SIZE
def get_eval_model(img_size, backbone, total_steps, trainable=True, lr=1.8):
backbone.trainable = trainable
inputs = tf.keras.layers.Input((img_size, img_size, 3), name="eval_input")
x = backbone(inputs, training=trainable)
o = tf.keras.layers.Dense(10, activation="softmax")(x)
model = tf.keras.Model(inputs, o)
cosine_decayed_lr = tf.keras.experimental.CosineDecay(
initial_learning_rate=lr, decay_steps=total_steps
)
opt = tf.keras.optimizers.SGD(cosine_decayed_lr, momentum=0.9)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["acc"])
return model
no_pt_eval_model = get_eval_model(
img_size=96,
backbone=get_backbone((96, 96, 3)),
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=True,
lr=1e-3,
)
no_pt_history = no_pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)
```
<div class="k-default-codeblock">
```
Epoch 1/50
9/9 [==============================] - 6s 249ms/step - loss: 2.4969 - acc: 0.1302 - val_loss: 2.2889 - val_acc: 0.1669
Epoch 2/50
9/9 [==============================] - 1s 139ms/step - loss: 2.2002 - acc: 0.1888 - val_loss: 2.1074 - val_acc: 0.2160
Epoch 3/50
9/9 [==============================] - 1s 139ms/step - loss: 2.0066 - acc: 0.2619 - val_loss: 1.9138 - val_acc: 0.2968
Epoch 4/50
9/9 [==============================] - 1s 139ms/step - loss: 1.8394 - acc: 0.3227 - val_loss: 1.7825 - val_acc: 0.3326
Epoch 5/50
9/9 [==============================] - 1s 140ms/step - loss: 1.7191 - acc: 0.3585 - val_loss: 1.7004 - val_acc: 0.3545
Epoch 6/50
9/9 [==============================] - 1s 140ms/step - loss: 1.6458 - acc: 0.3806 - val_loss: 1.6473 - val_acc: 0.3734
Epoch 7/50
9/9 [==============================] - 1s 139ms/step - loss: 1.5798 - acc: 0.4030 - val_loss: 1.6009 - val_acc: 0.3907
Epoch 8/50
9/9 [==============================] - 1s 139ms/step - loss: 1.5244 - acc: 0.4332 - val_loss: 1.5696 - val_acc: 0.4029
Epoch 9/50
9/9 [==============================] - 1s 140ms/step - loss: 1.4977 - acc: 0.4325 - val_loss: 1.5416 - val_acc: 0.4126
Epoch 10/50
9/9 [==============================] - 1s 139ms/step - loss: 1.4555 - acc: 0.4559 - val_loss: 1.5087 - val_acc: 0.4271
Epoch 11/50
9/9 [==============================] - 1s 140ms/step - loss: 1.4294 - acc: 0.4627 - val_loss: 1.4897 - val_acc: 0.4384
Epoch 12/50
9/9 [==============================] - 1s 139ms/step - loss: 1.4031 - acc: 0.4820 - val_loss: 1.4759 - val_acc: 0.4410
Epoch 13/50
9/9 [==============================] - 1s 141ms/step - loss: 1.3625 - acc: 0.4941 - val_loss: 1.4501 - val_acc: 0.4486
Epoch 14/50
9/9 [==============================] - 1s 140ms/step - loss: 1.3443 - acc: 0.5026 - val_loss: 1.4390 - val_acc: 0.4525
Epoch 15/50
9/9 [==============================] - 1s 139ms/step - loss: 1.3235 - acc: 0.5067 - val_loss: 1.4308 - val_acc: 0.4578
Epoch 16/50
9/9 [==============================] - 1s 139ms/step - loss: 1.2863 - acc: 0.5328 - val_loss: 1.4089 - val_acc: 0.4650
Epoch 17/50
9/9 [==============================] - 1s 140ms/step - loss: 1.2851 - acc: 0.5339 - val_loss: 1.3944 - val_acc: 0.4700
Epoch 18/50
9/9 [==============================] - 1s 141ms/step - loss: 1.2501 - acc: 0.5464 - val_loss: 1.3887 - val_acc: 0.4773
Epoch 19/50
9/9 [==============================] - 1s 139ms/step - loss: 1.2324 - acc: 0.5510 - val_loss: 1.3783 - val_acc: 0.4820
Epoch 20/50
9/9 [==============================] - 1s 140ms/step - loss: 1.2223 - acc: 0.5562 - val_loss: 1.3655 - val_acc: 0.4848
Epoch 21/50
9/9 [==============================] - 1s 140ms/step - loss: 1.2070 - acc: 0.5664 - val_loss: 1.3579 - val_acc: 0.4867
Epoch 22/50
9/9 [==============================] - 1s 141ms/step - loss: 1.1820 - acc: 0.5738 - val_loss: 1.3482 - val_acc: 0.4913
Epoch 23/50
9/9 [==============================] - 1s 139ms/step - loss: 1.1688 - acc: 0.5790 - val_loss: 1.3375 - val_acc: 0.4964
Epoch 24/50
9/9 [==============================] - 1s 141ms/step - loss: 1.1514 - acc: 0.5896 - val_loss: 1.3403 - val_acc: 0.4966
Epoch 25/50
9/9 [==============================] - 1s 138ms/step - loss: 1.1307 - acc: 0.5961 - val_loss: 1.3321 - val_acc: 0.5025
Epoch 26/50
9/9 [==============================] - 1s 139ms/step - loss: 1.1341 - acc: 0.6009 - val_loss: 1.3220 - val_acc: 0.5035
Epoch 27/50
9/9 [==============================] - 1s 139ms/step - loss: 1.1177 - acc: 0.5987 - val_loss: 1.3149 - val_acc: 0.5074
Epoch 28/50
9/9 [==============================] - 1s 139ms/step - loss: 1.1078 - acc: 0.6068 - val_loss: 1.3089 - val_acc: 0.5137
Epoch 29/50
9/9 [==============================] - 1s 141ms/step - loss: 1.0929 - acc: 0.6046 - val_loss: 1.3015 - val_acc: 0.5139
Epoch 30/50
9/9 [==============================] - 1s 138ms/step - loss: 1.0915 - acc: 0.6139 - val_loss: 1.3064 - val_acc: 0.5149
Epoch 31/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0634 - acc: 0.6254 - val_loss: 1.2955 - val_acc: 0.5123
Epoch 32/50
9/9 [==============================] - 1s 141ms/step - loss: 1.0675 - acc: 0.6254 - val_loss: 1.2979 - val_acc: 0.5167
Epoch 33/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0595 - acc: 0.6289 - val_loss: 1.2911 - val_acc: 0.5186
Epoch 34/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0397 - acc: 0.6328 - val_loss: 1.2906 - val_acc: 0.5208
Epoch 35/50
9/9 [==============================] - 1s 139ms/step - loss: 1.0415 - acc: 0.6378 - val_loss: 1.2863 - val_acc: 0.5222
Epoch 36/50
9/9 [==============================] - 1s 139ms/step - loss: 1.0435 - acc: 0.6257 - val_loss: 1.2830 - val_acc: 0.5215
Epoch 37/50
9/9 [==============================] - 1s 144ms/step - loss: 1.0242 - acc: 0.6461 - val_loss: 1.2820 - val_acc: 0.5268
Epoch 38/50
9/9 [==============================] - 1s 141ms/step - loss: 1.0212 - acc: 0.6421 - val_loss: 1.2766 - val_acc: 0.5259
Epoch 39/50
9/9 [==============================] - 1s 141ms/step - loss: 1.0213 - acc: 0.6385 - val_loss: 1.2770 - val_acc: 0.5259
Epoch 40/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0224 - acc: 0.6428 - val_loss: 1.2742 - val_acc: 0.5262
Epoch 41/50
9/9 [==============================] - 1s 142ms/step - loss: 0.9994 - acc: 0.6510 - val_loss: 1.2755 - val_acc: 0.5238
Epoch 42/50
9/9 [==============================] - 1s 141ms/step - loss: 1.0154 - acc: 0.6474 - val_loss: 1.2784 - val_acc: 0.5244
Epoch 43/50
9/9 [==============================] - 1s 139ms/step - loss: 1.0176 - acc: 0.6441 - val_loss: 1.2680 - val_acc: 0.5247
Epoch 44/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0101 - acc: 0.6471 - val_loss: 1.2711 - val_acc: 0.5288
Epoch 45/50
9/9 [==============================] - 1s 139ms/step - loss: 1.0080 - acc: 0.6536 - val_loss: 1.2691 - val_acc: 0.5275
Epoch 46/50
9/9 [==============================] - 1s 143ms/step - loss: 1.0038 - acc: 0.6428 - val_loss: 1.2706 - val_acc: 0.5302
Epoch 47/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0070 - acc: 0.6573 - val_loss: 1.2678 - val_acc: 0.5293
Epoch 48/50
9/9 [==============================] - 1s 140ms/step - loss: 1.0030 - acc: 0.6450 - val_loss: 1.2723 - val_acc: 0.5278
Epoch 49/50
9/9 [==============================] - 1s 139ms/step - loss: 1.0080 - acc: 0.6447 - val_loss: 1.2691 - val_acc: 0.5252
Epoch 50/50
9/9 [==============================] - 1s 142ms/step - loss: 1.0093 - acc: 0.6497 - val_loss: 1.2712 - val_acc: 0.5278
```
</div>
Pretty bad results! Let's try fine-tuning our SimSiam pretrained model:
```python
pt_eval_model = get_eval_model(
img_size=96,
backbone=contrastive_model.backbone,
total_steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
trainable=False,
lr=30.0,
)
pt_eval_model.summary()
pt_history = pt_eval_model.fit(
eval_train_ds,
batch_size=BATCH_SIZE,
epochs=TEST_EPOCHS,
steps_per_epoch=TEST_STEPS_PER_EPOCH,
validation_data=eval_val_ds,
validation_steps=VAL_STEPS_PER_EPOCH,
)
```
<div class="k-default-codeblock">
```
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
eval_input (InputLayer) [(None, 96, 96, 3)] 0
similarity_model (Similarit (None, 512) 11186112
yModel)
dense_1 (Dense) (None, 10) 5130
=================================================================
Total params: 11,191,242
Trainable params: 5,130
Non-trainable params: 11,186,112
_________________________________________________________________
Epoch 1/50
9/9 [==============================] - 3s 172ms/step - loss: 18.2303 - acc: 0.2563 - val_loss: 16.9489 - val_acc: 0.3463
Epoch 2/50
9/9 [==============================] - 1s 109ms/step - loss: 24.5528 - acc: 0.3498 - val_loss: 19.1886 - val_acc: 0.4050
Epoch 3/50
9/9 [==============================] - 1s 110ms/step - loss: 18.3920 - acc: 0.4477 - val_loss: 20.0611 - val_acc: 0.4456
Epoch 4/50
9/9 [==============================] - 1s 113ms/step - loss: 15.4172 - acc: 0.4993 - val_loss: 12.2465 - val_acc: 0.5116
Epoch 5/50
9/9 [==============================] - 1s 110ms/step - loss: 10.5517 - acc: 0.5217 - val_loss: 8.5560 - val_acc: 0.5474
Epoch 6/50
9/9 [==============================] - 1s 112ms/step - loss: 7.4812 - acc: 0.5395 - val_loss: 7.9182 - val_acc: 0.5053
Epoch 7/50
9/9 [==============================] - 1s 112ms/step - loss: 8.4429 - acc: 0.5024 - val_loss: 7.7339 - val_acc: 0.5071
Epoch 8/50
9/9 [==============================] - 1s 113ms/step - loss: 8.6143 - acc: 0.5109 - val_loss: 10.4784 - val_acc: 0.5157
Epoch 9/50
9/9 [==============================] - 1s 111ms/step - loss: 8.7506 - acc: 0.5061 - val_loss: 7.8201 - val_acc: 0.4914
Epoch 10/50
9/9 [==============================] - 1s 111ms/step - loss: 8.7927 - acc: 0.4996 - val_loss: 9.6188 - val_acc: 0.4668
Epoch 11/50
9/9 [==============================] - 1s 114ms/step - loss: 7.2190 - acc: 0.4844 - val_loss: 8.8605 - val_acc: 0.4240
Epoch 12/50
9/9 [==============================] - 1s 111ms/step - loss: 8.0435 - acc: 0.4681 - val_loss: 8.2731 - val_acc: 0.4425
Epoch 13/50
9/9 [==============================] - 1s 112ms/step - loss: 7.1718 - acc: 0.5048 - val_loss: 5.4667 - val_acc: 0.5485
Epoch 14/50
9/9 [==============================] - 1s 111ms/step - loss: 6.7500 - acc: 0.5111 - val_loss: 5.5898 - val_acc: 0.5158
Epoch 15/50
9/9 [==============================] - 1s 110ms/step - loss: 5.1562 - acc: 0.5467 - val_loss: 3.7606 - val_acc: 0.5587
Epoch 16/50
9/9 [==============================] - 1s 111ms/step - loss: 3.5923 - acc: 0.5814 - val_loss: 5.0881 - val_acc: 0.5336
Epoch 17/50
9/9 [==============================] - 1s 110ms/step - loss: 5.1907 - acc: 0.5221 - val_loss: 7.7393 - val_acc: 0.3609
Epoch 18/50
9/9 [==============================] - 1s 112ms/step - loss: 8.0532 - acc: 0.4768 - val_loss: 7.2504 - val_acc: 0.5265
Epoch 19/50
9/9 [==============================] - 1s 111ms/step - loss: 6.5527 - acc: 0.5221 - val_loss: 6.8659 - val_acc: 0.4729
Epoch 20/50
9/9 [==============================] - 1s 113ms/step - loss: 7.0188 - acc: 0.4924 - val_loss: 6.5774 - val_acc: 0.4729
Epoch 21/50
9/9 [==============================] - 1s 112ms/step - loss: 4.8837 - acc: 0.5293 - val_loss: 4.5986 - val_acc: 0.5568
Epoch 22/50
9/9 [==============================] - 1s 113ms/step - loss: 4.5787 - acc: 0.5536 - val_loss: 4.9848 - val_acc: 0.5343
Epoch 23/50
9/9 [==============================] - 1s 111ms/step - loss: 5.3264 - acc: 0.5501 - val_loss: 6.1620 - val_acc: 0.5257
Epoch 24/50
9/9 [==============================] - 1s 118ms/step - loss: 4.6995 - acc: 0.5681 - val_loss: 2.9108 - val_acc: 0.6004
Epoch 25/50
9/9 [==============================] - 1s 111ms/step - loss: 3.0915 - acc: 0.6024 - val_loss: 2.9674 - val_acc: 0.6097
Epoch 26/50
9/9 [==============================] - 1s 112ms/step - loss: 2.9893 - acc: 0.5940 - val_loss: 2.7857 - val_acc: 0.5975
Epoch 27/50
9/9 [==============================] - 1s 112ms/step - loss: 3.0031 - acc: 0.5990 - val_loss: 3.3214 - val_acc: 0.5661
Epoch 28/50
9/9 [==============================] - 1s 110ms/step - loss: 2.4497 - acc: 0.6118 - val_loss: 2.5389 - val_acc: 0.5864
Epoch 29/50
9/9 [==============================] - 1s 112ms/step - loss: 2.2352 - acc: 0.6222 - val_loss: 2.6069 - val_acc: 0.5891
Epoch 30/50
9/9 [==============================] - 1s 110ms/step - loss: 2.0529 - acc: 0.6230 - val_loss: 2.2986 - val_acc: 0.6147
Epoch 31/50
9/9 [==============================] - 1s 113ms/step - loss: 2.1396 - acc: 0.6337 - val_loss: 2.3893 - val_acc: 0.6115
Epoch 32/50
9/9 [==============================] - 1s 110ms/step - loss: 2.0879 - acc: 0.6309 - val_loss: 2.0767 - val_acc: 0.6139
Epoch 33/50
9/9 [==============================] - 1s 111ms/step - loss: 1.9498 - acc: 0.6417 - val_loss: 2.5760 - val_acc: 0.6166
Epoch 34/50
9/9 [==============================] - 1s 111ms/step - loss: 2.0624 - acc: 0.6456 - val_loss: 2.2055 - val_acc: 0.6306
Epoch 35/50
9/9 [==============================] - 1s 113ms/step - loss: 1.9772 - acc: 0.6573 - val_loss: 1.8998 - val_acc: 0.6148
Epoch 36/50
9/9 [==============================] - 1s 110ms/step - loss: 1.7421 - acc: 0.6411 - val_loss: 1.7790 - val_acc: 0.6320
Epoch 37/50
9/9 [==============================] - 1s 112ms/step - loss: 1.6005 - acc: 0.6493 - val_loss: 1.7596 - val_acc: 0.6132
Epoch 38/50
9/9 [==============================] - 1s 111ms/step - loss: 1.4635 - acc: 0.6623 - val_loss: 1.8133 - val_acc: 0.6142
Epoch 39/50
9/9 [==============================] - 1s 112ms/step - loss: 1.4952 - acc: 0.6517 - val_loss: 1.8677 - val_acc: 0.5960
Epoch 40/50
9/9 [==============================] - 1s 113ms/step - loss: 1.4972 - acc: 0.6519 - val_loss: 1.7388 - val_acc: 0.6311
Epoch 41/50
9/9 [==============================] - 1s 113ms/step - loss: 1.4158 - acc: 0.6693 - val_loss: 1.6358 - val_acc: 0.6398
Epoch 42/50
9/9 [==============================] - 1s 110ms/step - loss: 1.3600 - acc: 0.6721 - val_loss: 1.5624 - val_acc: 0.6381
Epoch 43/50
9/9 [==============================] - 1s 112ms/step - loss: 1.2960 - acc: 0.6812 - val_loss: 1.5512 - val_acc: 0.6380
Epoch 44/50
9/9 [==============================] - 1s 111ms/step - loss: 1.3473 - acc: 0.6727 - val_loss: 1.4881 - val_acc: 0.6448
Epoch 45/50
9/9 [==============================] - 1s 111ms/step - loss: 1.1990 - acc: 0.6892 - val_loss: 1.4914 - val_acc: 0.6437
Epoch 46/50
9/9 [==============================] - 1s 111ms/step - loss: 1.2816 - acc: 0.6823 - val_loss: 1.4654 - val_acc: 0.6466
Epoch 47/50
9/9 [==============================] - 1s 113ms/step - loss: 1.2525 - acc: 0.6838 - val_loss: 1.4802 - val_acc: 0.6479
Epoch 48/50
9/9 [==============================] - 1s 111ms/step - loss: 1.2661 - acc: 0.6799 - val_loss: 1.4692 - val_acc: 0.6447
Epoch 49/50
9/9 [==============================] - 1s 111ms/step - loss: 1.2389 - acc: 0.6866 - val_loss: 1.4733 - val_acc: 0.6436
Epoch 50/50
9/9 [==============================] - 1s 113ms/step - loss: 1.2166 - acc: 0.6875 - val_loss: 1.4666 - val_acc: 0.6444
```
</div>
All that is left to do is evaluate the models:
```python
print(
"no pretrain",
no_pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)
print(
"pretrained",
pt_eval_model.evaluate(
eval_val_ds,
steps=TEST_EPOCHS * TEST_STEPS_PER_EPOCH,
),
)
```
<div class="k-default-codeblock">
```
450/450 [==============================] - 14s 30ms/step - loss: 1.2648 - acc: 0.5311
no pretrain [1.2647558450698853, 0.5310590267181396]
450/450 [==============================] - 12s 26ms/step - loss: 1.4653 - acc: 0.6474
pretrained [1.465279221534729, 0.6474305391311646]
```
</div>
Awesome! Our pretrained model stomped the non-pretrained model.
This accuracy is quite good for a ResNet18 on the STL-10 dataset.
For better results, try using an EfficientNetV2B0 instead.
Unfortunately, this will require a higher-end graphics card, as
SimSiam has a minimum batch size of 512.
---
## Conclusion
TensorFlow Similarity can be used to easily train KerasCV models using
contrastive algorithms such as SimCLR, SimSiam and BarlowTwins.
This allows you to leverage large corpuses of unlabelled data in your
model training pipeline.
Some follow-up exercises to this tutorial:
- Train a [`keras_cv.models.EfficientNetV2B0`](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/efficientnet_v2.py)
on STL-10 (a minimal sketch is shown below)
- Experiment with other data augmentation techniques in pretraining
- Train a model using the [BarlowTwins implementation](https://github.com/tensorflow/similarity/blob/master/examples/unsupervised_hello_world.ipynb) in TensorFlow similarity
- Try pretraining on your own dataset
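For the first exercise, a minimal sketch of the backbone swap might look like the following;
the constructor arguments mirror the ResNet18 usage earlier in this guide and may differ
slightly depending on your KerasCV version:

```python
def get_efficientnet_backbone(input_shape):
    inputs = layers.Input(shape=input_shape)
    x = keras_cv.models.EfficientNetV2B0(
        input_shape=input_shape,
        include_rescaling=True,
        include_top=False,
        pooling="avg",
    )(inputs)
    return tfsim.models.SimilarityModel(inputs, x)


# backbone = get_efficientnet_backbone((96, 96, 3))
# The projector, predictor, and training loop above stay unchanged.
```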
| keras-io/guides/md/keras_cv/simsiam_with_kerascv.md/0 | {
"file_path": "keras-io/guides/md/keras_cv/simsiam_with_kerascv.md",
"repo_id": "keras-io",
"token_count": 24838
} | 117 |
# Understanding masking & padding
**Authors:** Scott Zhu, Francois Chollet<br>
**Date created:** 2019/07/16<br>
**Last modified:** 2023/07/10<br>
**Description:** Complete guide to using mask-aware sequence layers in Keras.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/understanding_masking_and_padding.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/understanding_masking_and_padding.py)
---
## Setup
```python
import numpy as np
import tensorflow as tf
import keras
from keras import layers
```
---
## Introduction
**Masking** is a way to tell sequence-processing layers that certain timesteps
in an input are missing, and thus should be skipped when processing the data.
**Padding** is a special form of masking where the masked steps are at the start or
the end of a sequence. Padding comes from the need to encode sequence data into
contiguous batches: in order to make all sequences in a batch fit a given standard
length, it is necessary to pad or truncate some sequences.
Let's take a close look.
---
## Padding sequence data
When processing sequence data, it is very common for individual samples to have
different lengths. Consider the following example (text tokenized as words):
```
[
["Hello", "world", "!"],
["How", "are", "you", "doing", "today"],
["The", "weather", "will", "be", "nice", "tomorrow"],
]
```
After vocabulary lookup, the data might be vectorized as integers, e.g.:
```
[
[71, 1331, 4231]
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
```
The data is a nested list where individual samples have length 3, 5, and 6,
respectively. Since the input data for a deep learning model must be a single tensor
(of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter
than the longest item need to be padded with some placeholder value (alternatively,
one might also truncate long samples before padding short samples).
Keras provides a utility function to truncate and pad Python lists to a common length:
`tf.keras.utils.pad_sequences`.
```python
raw_inputs = [
[711, 632, 71],
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could use "pre" padding (at the beginning) or
# "post" padding (at the end).
# We recommend using "post" padding when working with RNN layers
# (in order to be able to use the
# CuDNN implementation of the layers).
padded_inputs = tf.keras.utils.pad_sequences(raw_inputs, padding="post")
print(padded_inputs)
```
<div class="k-default-codeblock">
```
[[ 711 632 71 0 0 0]
[ 73 8 3215 55 927 0]
[ 83 91 1 645 1253 927]]
```
</div>
---
## Masking
Now that all samples have a uniform length, the model must be informed that some part
of the data is actually padding and should be ignored. That mechanism is **masking**.
There are three ways to introduce input masks in Keras models:
- Add a `keras.layers.Masking` layer.
- Configure a `keras.layers.Embedding` layer with `mask_zero=True`.
- Pass a `mask` argument manually when calling layers that support this argument (e.g.
RNN layers).
---
## Mask-generating layers: `Embedding` and `Masking`
Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch,
sequence_length)`), and attach it to the tensor output returned by the `Masking` or
`Embedding` layer.
```python
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
masked_output = embedding(padded_inputs)
print(masked_output._keras_mask)
masking_layer = layers.Masking()
# Simulate the embedding lookup by expanding the 2D input to 3D,
# with embedding dimension of 10.
unmasked_embedding = tf.cast(
tf.tile(tf.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), tf.float32
)
masked_embedding = masking_layer(unmasked_embedding)
print(masked_embedding._keras_mask)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[ True True True False False False]
[ True True True True True False]
[ True True True True True True]], shape=(3, 6), dtype=bool)
tf.Tensor(
[[ True True True False False False]
[ True True True True True False]
[ True True True True True True]], shape=(3, 6), dtype=bool)
```
</div>
As you can see from the printed result, the mask is a 2D boolean tensor with shape
`(batch_size, sequence_length)`, where each individual `False` entry indicates that
the corresponding timestep should be ignored during processing.
---
## Mask propagation in the Functional API and Sequential API
When using the Functional API or the Sequential API, a mask generated by an `Embedding`
or `Masking` layer will be propagated through the network for any layer that is
capable of using them (for example, RNN layers). Keras will automatically fetch the
mask corresponding to an input and pass it to any layer that knows how to use it.
For instance, in the following Sequential model, the `LSTM` layer will automatically
receive a mask, which means it will ignore padded values:
```python
model = keras.Sequential(
[
layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True),
layers.LSTM(32),
]
)
```
This is also the case for the following Functional API model:
```python
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)
model = keras.Model(inputs, outputs)
```
---
## Passing mask tensors directly to layers
Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their
`__call__` method.
Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input,
previous_mask)` method which you can call.
Thus, you can pass the output of the `compute_mask()` method of a mask-producing layer
to the `__call__` method of a mask-consuming layer, like this:
```python
class MyLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
self.lstm = layers.LSTM(32)
def call(self, inputs):
x = self.embedding(inputs)
# Note that you could also prepare a `mask` tensor manually.
# It only needs to be a boolean tensor
# with the right shape, i.e. (batch_size, timesteps).
mask = self.embedding.compute_mask(inputs)
output = self.lstm(x, mask=mask) # The layer will ignore the masked values
return output
layer = MyLayer()
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
```
<div class="k-default-codeblock">
```
<tf.Tensor: shape=(32, 32), dtype=float32, numpy=
array([[ 9.7771059e-04, -3.1520566e-04, -1.3653996e-03, ...,
6.5285452e-03, 1.9427658e-03, -2.5479761e-03],
[-4.3904074e-03, -4.5490772e-03, 3.8578152e-04, ...,
-1.0272469e-02, -1.0101046e-02, 2.7427098e-03],
[ 4.7074426e-03, 8.2715852e-03, -6.1138147e-05, ...,
-3.1140861e-03, 5.4810117e-03, 1.5133659e-03],
...,
[-1.6761322e-03, -6.4350553e-03, -2.0772957e-03, ...,
-6.4317961e-03, -1.2476714e-02, -4.9613118e-03],
[ 4.6702973e-03, 2.0292797e-03, 1.3188898e-04, ...,
-4.3562236e-03, -7.7877212e-03, -1.4023182e-03],
[-2.1285783e-03, 3.0295136e-03, -9.2550175e-04, ...,
-8.2980031e-03, -2.0799299e-03, 6.9086310e-03]], dtype=float32)>
```
</div>
---
## Supporting masking in your custom layers
Sometimes, you may need to write layers that generate a mask (like `Embedding`), or
layers that need to modify the current mask.
For instance, any layer that produces a tensor with a different time dimension than its
input, such as a `Concatenate` layer that concatenates on the time dimension, will
need to modify the current mask so that downstream layers will be able to properly
take masked timesteps into account.
To do this, your layer should implement the `layer.compute_mask()` method, which
produces a new mask given the input and the current mask.
Here is an example of a `TemporalSplit` layer that needs to modify the current mask.
```python
class TemporalSplit(keras.layers.Layer):
"""Split the input tensor into 2 tensors along the time dimension."""
def call(self, inputs):
# Expect the input to be 3D and mask to be 2D, split the input tensor into 2
# subtensors along the time axis (axis 1).
return tf.split(inputs, 2, axis=1)
def compute_mask(self, inputs, mask=None):
# Also split the mask into 2 if it is present.
if mask is None:
return None
return tf.split(mask, 2, axis=1)
first_half, second_half = TemporalSplit()(masked_embedding)
print(first_half._keras_mask)
print(second_half._keras_mask)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[ True True True]
[ True True True]
[ True True True]], shape=(3, 3), dtype=bool)
tf.Tensor(
[[False False False]
[ True True False]
[ True True True]], shape=(3, 3), dtype=bool)
```
</div>
Here is another example of a `CustomEmbedding` layer that is capable of generating a
mask from input values:
```python
class CustomEmbedding(keras.layers.Layer):
def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.mask_zero = mask_zero
def build(self, input_shape):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer="random_normal",
dtype="float32",
)
def call(self, inputs):
return tf.nn.embedding_lookup(self.embeddings, inputs)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return tf.not_equal(inputs, 0)
layer = CustomEmbedding(10, 32, mask_zero=True)
x = np.random.random((3, 10)) * 9
x = x.astype("int32")
y = layer(x)
mask = layer.compute_mask(x)
print(mask)
```
<div class="k-default-codeblock">
```
tf.Tensor(
[[ True False True True True True True True True True]
[ True True False True True True True True True True]
[ True False True False True True True True True True]], shape=(3, 10), dtype=bool)
```
</div>
Note: For more details about format limitations related to masking, see the
[serialization guide](/guides/serialization_and_saving).
---
## Opting-in to mask propagation on compatible layers
Most layers don't modify the time dimension, so don't need to modify the current mask.
However, they may still want to be able to **propagate** the current mask, unchanged,
to the next layer. **This is an opt-in behavior.** By default, a custom layer will
destroy the current mask (since the framework has no way to tell whether propagating
the mask is safe to do).
If you have a custom layer that does not modify the time dimension, and if you want it
to be able to propagate the current input mask, you should set `self.supports_masking
= True` in the layer constructor. In this case, the default behavior of
`compute_mask()` is to just pass the current mask through.
Here's an example of a layer that is whitelisted for mask propagation:
```python
@keras.saving.register_keras_serializable()
class MyActivation(keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Signal that the layer is safe for mask propagation
self.supports_masking = True
def call(self, inputs):
return tf.nn.relu(inputs)
```
You can now use this custom layer in-between a mask-generating layer (like `Embedding`)
and a mask-consuming layer (like `LSTM`), and it will pass the mask along so that it
reaches the mask-consuming layer.
```python
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
x = MyActivation()(x) # Will pass the mask along
print("Mask found:", x._keras_mask)
outputs = layers.LSTM(32)(x) # Will receive the mask
model = keras.Model(inputs, outputs)
```
<div class="k-default-codeblock">
```
Mask found: KerasTensor(type_spec=TensorSpec(shape=(None, None), dtype=tf.bool, name=None), name='Placeholder_1:0')
```
</div>
---
## Writing layers that need mask information
Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to
determine whether to skip certain time steps.
To write such a layer, you can simply add a `mask=None` argument in your `call`
signature. The mask associated with the inputs will be passed to your layer whenever
it is available.
Here's a simple example below: a layer that computes a softmax over the time dimension
(axis 1) of an input sequence, while discarding masked timesteps.
```python
@keras.saving.register_keras_serializable()
class TemporalSoftmax(keras.layers.Layer):
def call(self, inputs, mask=None):
broadcast_float_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
inputs_exp = tf.exp(inputs) * broadcast_float_mask
inputs_sum = tf.reduce_sum(inputs_exp, axis=1, keepdims=True)
return inputs_exp / inputs_sum
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs)
x = layers.Dense(1)(x)
outputs = TemporalSoftmax()(x)
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 10, size=(32, 100)))
```
---
## Summary
That is all you need to know about padding & masking in Keras. To recap:
- "Masking" is how layers are able to know when to skip / ignore certain timesteps in
sequence inputs.
- Some layers are mask-generators: `Embedding` can generate a mask from input values
(if `mask_zero=True`), and so can the `Masking` layer.
- Some layers are mask-consumers: they expose a `mask` argument in their `__call__`
method. This is the case for RNN layers.
- In the Functional API and Sequential API, mask information is propagated
automatically.
- When using layers in a standalone way, you can pass the `mask` arguments to layers
manually.
- You can easily write layers that modify the current mask, that generate a new mask,
or that consume the mask associated with the inputs.
| keras-io/guides/md/understanding_masking_and_padding.md/0 | {
"file_path": "keras-io/guides/md/understanding_masking_and_padding.md",
"repo_id": "keras-io",
"token_count": 5196
} | 118 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/activations/'" />
| keras-io/redirects/activations/index.html/0 | {
"file_path": "keras-io/redirects/activations/index.html",
"repo_id": "keras-io",
"token_count": 35
} | 119 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/saving/serialization_utils/'" />
| keras-io/redirects/api/utils/serialization_utils/index.html/0 | {
"file_path": "keras-io/redirects/api/utils/serialization_utils/index.html",
"repo_id": "keras-io",
"token_count": 36
} | 120 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/'" />
| keras-io/redirects/layers/about-keras-layers/index.html/0 | {
"file_path": "keras-io/redirects/layers/about-keras-layers/index.html",
"repo_id": "keras-io",
"token_count": 32
} | 121 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/models/model/'" />
| keras-io/redirects/models/model/index.html/0 | {
"file_path": "keras-io/redirects/models/model/index.html",
"repo_id": "keras-io",
"token_count": 33
} | 122 |
AUGMENTATION_MASTER = {
"path": "augmentation/",
"title": "Augmentation layers",
"toc": True,
"children": [
{
"path": "auto_contrast",
"title": "AutoContrast layer",
"generate": ["keras_cv.layers.AutoContrast"],
},
{
"path": "aug_mix",
"title": "AugMix layer",
"generate": ["keras_cv.layers.AugMix"],
},
{
"path": "channel_shuffle",
"title": "ChannelShuffle layer",
"generate": ["keras_cv.layers.ChannelShuffle"],
},
{
"path": "cut_mix",
"title": "CutMix layer",
"generate": ["keras_cv.layers.CutMix"],
},
{
"path": "fourier_mix",
"title": "FourierMix layer",
"generate": ["keras_cv.layers.FourierMix"],
},
{
"path": "grid_mask",
"title": "GridMask layer",
"generate": ["keras_cv.layers.GridMask"],
},
{
"path": "jittered_resize",
"title": "JitteredResize layer",
"generate": ["keras_cv.layers.JitteredResize"],
},
{
"path": "mix_up",
"title": "MixUp layer",
"generate": ["keras_cv.layers.MixUp"],
},
{
"path": "rand_augment",
"title": "RandAugment layer",
"generate": ["keras_cv.layers.RandAugment"],
},
{
"path": "random_augmentation_pipeline",
"title": "RandomAugmentationPipeline layer",
"generate": ["keras_cv.layers.RandomAugmentationPipeline"],
},
{
"path": "random_channel_shift",
"title": "RandomChannelShift layer",
"generate": ["keras_cv.layers.RandomChannelShift"],
},
{
"path": "random_color_degeneration",
"title": "RandomColorDegeneration layer",
"generate": ["keras_cv.layers.RandomColorDegeneration"],
},
{
"path": "random_cutout",
"title": "RandomCutout layer",
"generate": ["keras_cv.layers.RandomCutout"],
},
{
"path": "random_hue",
"title": "RandomHue layer",
"generate": ["keras_cv.layers.RandomHue"],
},
{
"path": "random_saturation",
"title": "RandomSaturation layer",
"generate": ["keras_cv.layers.RandomSaturation"],
},
{
"path": "random_sharpness",
"title": "RandomSharpness layer",
"generate": ["keras_cv.layers.RandomSharpness"],
},
{
"path": "random_shear",
"title": "RandomShear layer",
"generate": ["keras_cv.layers.RandomShear"],
},
{
"path": "solarization",
"title": "Solarization layer",
"generate": ["keras_cv.layers.Solarization"],
},
],
}
PREPROCESSING_MASTER = {
"path": "preprocessing/",
"title": "Preprocessing layers",
"toc": True,
"children": [
{
"path": "resizing",
"title": "Resizing layer",
"generate": ["keras_cv.layers.Resizing"],
},
{
"path": "grayscale",
"title": "Grayscale layer",
"generate": ["keras_cv.layers.Grayscale"],
},
{
"path": "equalization",
"title": "Equalization layer",
"generate": ["keras_cv.layers.Equalization"],
},
{
"path": "posterization",
"title": "Posterization layer",
"generate": ["keras_cv.layers.Posterization"],
},
],
}
BOUNDING_BOX_FORMATS = {
"path": "formats",
"title": "Bounding box formats",
"generate": [
"keras_cv.bounding_box.CENTER_XYWH",
"keras_cv.bounding_box.XYWH",
"keras_cv.bounding_box.REL_XYWH",
"keras_cv.bounding_box.XYXY",
"keras_cv.bounding_box.REL_XYXY",
"keras_cv.bounding_box.YXYX",
"keras_cv.bounding_box.REL_YXYX",
],
}
BOUNDING_BOX_UTILS = {
"path": "utils/",
"title": "Bounding box utilities",
"toc": True,
"children": [
{
"path": "convert_format",
"title": "Convert bounding box formats",
"generate": ["keras_cv.bounding_box.convert_format"],
},
{
"path": "compute_iou",
"title": "Compute intersection over union of bounding boxes",
"generate": ["keras_cv.bounding_box.compute_iou"],
},
{
"path": "clip_to_image",
"title": "Clip bounding boxes to be within the bounds of provided images",
"generate": ["keras_cv.bounding_box.clip_to_image"],
},
# {
# "path": "to_dense",
# "title": "Convert a bounding box dictionary to -1 padded Dense tensors",
# "generate": ["keras_cv.bounding_box.to_dense"],
# },
# {
# "path": "to_ragged",
# "title": "Convert a bounding box dictionary batched Ragged tensors",
# "generate": ["keras_cv.bounding_box.to_ragged"],
# },
# {
# "path": "validate_format",
# "title": "Ensure that your bounding boxes comply with the bounding box spec",
# "generate": ["keras_cv.bounding_box.validate_format"],
# },
],
}
BOUNDING_BOX_MASTER = {
"path": "bounding_box/",
"title": "Bounding box formats and utilities",
"toc": True,
"children": [BOUNDING_BOX_FORMATS, BOUNDING_BOX_UTILS],
}
REGULARIZATION_MASTER = {
"path": "regularization/",
"title": "Regularization layers",
"toc": True,
"children": [
{
"path": "dropblock2d",
"title": "DropBlock2D layer",
"generate": [
"keras_cv.layers.DropBlock2D",
],
},
{
"path": "drop_path",
"title": "DropPath layer",
"generate": [
"keras_cv.layers.DropPath",
],
},
{
"path": "squeeze_and_excite_2d",
"title": "SqueezeAndExcite2D layer",
"generate": [
"keras_cv.layers.SqueezeAndExcite2D",
],
},
{
"path": "stochastic_depth",
"title": "StochasticDepth layer",
"generate": [
"keras_cv.layers.StochasticDepth",
],
},
],
}
LAYERS_MASTER = {
"path": "layers/",
"title": "Layers",
"toc": True,
"children": [AUGMENTATION_MASTER, PREPROCESSING_MASTER, REGULARIZATION_MASTER],
}
#
# METRICS_MASTER = {
# "path": "metrics/",
# "title": "Metrics",
# "toc": True,
# "children": [
# # Temporarily remove COCO metrics
# # {
# # "path": "coco_mean_average_precision",
# # "title": "COCOMeanAveragePrecision metric",
# # "generate": [
# # "keras_cv.metrics.COCOMeanAveragePrecision",
# # ],
# # },
# # {
# # "path": "coco_recall",
# # "title": "COCORecall metric",
# # "generate": [
# # "keras_cv.metrics.COCORecall",
# # ],
# # },
# ],
# }
BACKBONES_MASTER = {
"path": "backbones/",
"title": "Backbones",
"toc": True,
"children": [
{
"path": "resnet_v1",
"title": "ResNetV1 backbones",
"generate": [
"keras_cv.models.ResNetBackbone",
"keras_cv.models.ResNetBackbone.from_preset",
"keras_cv.models.ResNet18Backbone",
"keras_cv.models.ResNet34Backbone",
"keras_cv.models.ResNet50Backbone",
"keras_cv.models.ResNet101Backbone",
"keras_cv.models.ResNet152Backbone",
],
},
{
"path": "resnet_v2",
"title": "ResNetV2 backbones",
"generate": [
"keras_cv.models.ResNetV2Backbone",
"keras_cv.models.ResNetV2Backbone.from_preset",
"keras_cv.models.ResNet18V2Backbone",
"keras_cv.models.ResNet34V2Backbone",
"keras_cv.models.ResNet50V2Backbone",
"keras_cv.models.ResNet101V2Backbone",
"keras_cv.models.ResNet152V2Backbone",
],
},
# {
# "path": "csp_darknet",
# "title": "CSPDarkNet backbones",
# "generate": [
# "keras_cv.models.CSPDarkNetBackbone",
# "keras_cv.models.CSPDarkNetBackbone.from_preset",
# "keras_cv.models.CSPDarkNetTinyBackbone",
# "keras_cv.models.CSPDarkNetSBackbone",
# "keras_cv.models.CSPDarkNetMBackbone",
# "keras_cv.models.CSPDarkNetLBackbone",
# "keras_cv.models.CSPDarkNetXLBackbone",
# ],
# },
{
"path": "yolo_v8",
"title": "YOLOV8 backbones",
"generate": [
"keras_cv.models.YOLOV8Backbone",
"keras_cv.models.YOLOV8Backbone.from_preset",
],
},
{
"path": "mobilenetv3",
"title": "MobileNetV3 backbones",
"generate": [
"keras_cv.models.MobileNetV3Backbone",
"keras_cv.models.MobileNetV3Backbone.from_preset",
"keras_cv.models.MobileNetV3SmallBackbone",
"keras_cv.models.MobileNetV3LargeBackbone",
],
},
{
"path": "efficientnetv2",
"title": "EfficientNetV2 models",
"generate": [
"keras_cv.models.EfficientNetV2Backbone",
"keras_cv.models.EfficientNetV2Backbone.from_preset",
"keras_cv.models.EfficientNetV2B0Backbone",
"keras_cv.models.EfficientNetV2B1Backbone",
"keras_cv.models.EfficientNetV2B2Backbone",
"keras_cv.models.EfficientNetV2B3Backbone",
"keras_cv.models.EfficientNetV2SBackbone",
"keras_cv.models.EfficientNetV2MBackbone",
"keras_cv.models.EfficientNetV2LBackbone",
],
},
],
}
TASKS_MASTER = {
"path": "tasks/",
"title": "Tasks",
"toc": True,
"children": [
{
"path": "stable_diffusion",
"title": "StableDiffusion image-generation model",
"generate": [
"keras_cv.models.StableDiffusion",
],
},
{
"path": "retinanet",
"title": "The RetinaNet model",
"generate": [
"keras_cv.models.RetinaNet",
"keras_cv.models.RetinaNet.from_preset",
],
},
{
"path": "image_classifier",
"title": "The ImageClassifier model",
"generate": [
"keras_cv.models.ImageClassifier",
"keras_cv.models.ImageClassifier.from_preset",
],
},
{
"path": "yolo_v8_detector",
"title": "The YOLOV8Detector model",
"generate": [
"keras_cv.models.YOLOV8Detector",
"keras_cv.models.YOLOV8Detector.from_preset",
],
},
],
}
MODELS_MASTER = {
"path": "models/",
"title": "Models",
"toc": True,
"children": [TASKS_MASKTER, BACKBONES_MASTER],
}
CV_API_MASTER = {
"path": "keras_cv/",
"title": "KerasCV",
"toc": True,
"children": [LAYERS_MASTER, MODELS_MASTER, BOUNDING_BOX_MASTER],
}
| keras-io/scripts/cv_api_master.py/0 | {
"file_path": "keras-io/scripts/cv_api_master.py",
"repo_id": "keras-io",
"token_count": 6799
} | 123 |
# Callbacks API
A callback is an object that can perform actions at various stages of training
(e.g. at the start or end of an epoch, before or after a single batch, etc).
You can use callbacks to:
- Write TensorBoard logs after every batch of training to monitor your metrics
- Periodically save your model to disk
- Do early stopping
- Get a view on internal states and statistics of a model during training
- ...and more
---
## Usage of callbacks via the built-in `fit()` loop
You can pass a list of callbacks (as the keyword argument `callbacks`) to the `.fit()` method of a model:
```python
my_callbacks = [
keras.callbacks.EarlyStopping(patience=2),
keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5'),
keras.callbacks.TensorBoard(log_dir='./logs'),
]
model.fit(dataset, epochs=10, callbacks=my_callbacks)
```
The relevant methods of the callbacks will then be called at each stage of the training.
---
## Using custom callbacks
Creating new callbacks is a simple and powerful way to customize a training loop.
Learn more about creating new callbacks in the guide
[Writing your own Callbacks](/guides/writing_your_own_callbacks), and refer to
the documentation for [the base `Callback` class](base_callback).
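As an illustrative sketch only (the callback name below is made up, and `model` and `dataset` are reused from the snippet above), a minimal custom callback can be as simple as overriding one of the `on_*` methods:
```python
class LossAndMetricsLogger(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # `logs` is a dict containing the loss and metric values for the epoch.
        print(f"Finished epoch {epoch}, logs: {logs}")

model.fit(dataset, epochs=10, callbacks=[LossAndMetricsLogger()])
```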
---
## Available callbacks
{{toc}}
| keras-io/templates/api/callbacks/index.md/0 | {
"file_path": "keras-io/templates/api/callbacks/index.md",
"repo_id": "keras-io",
"token_count": 390
} | 124 |
# XLM-RoBERTa
Models, tokenizers, and preprocessing layers for XLM-Roberta,
as described in ["Unsupervised Cross-lingual Representation Learning at Scale"](https://arxiv.org/abs/1911.02116).
For a full list of available **presets**, see the
[models page](/api/keras_nlp/models).
{{toc}}
| keras-io/templates/api/keras_nlp/models/xlm_roberta/index.md/0 | {
"file_path": "keras-io/templates/api/keras_nlp/models/xlm_roberta/index.md",
"repo_id": "keras-io",
"token_count": 98
} | 125 |
# Probabilistic metrics
Metrics estimating distances between probability distributions.
{{autogenerated}} | keras-io/templates/api/metrics/probabilistic_metrics/index.md/0 | {
"file_path": "keras-io/templates/api/metrics/probabilistic_metrics/index.md",
"repo_id": "keras-io",
"token_count": 23
} | 126 |
# Developer guides
Our developer guides are deep-dives into specific topics such as layer subclassing, fine-tuning, or model saving.
They're one of the best ways to become a Keras expert.
Most of our guides are written as Jupyter notebooks and can be run in one click in [Google Colab](https://colab.research.google.com/notebooks/welcome.ipynb),
a hosted notebook environment that requires no setup and runs in the cloud. Google Colab includes GPU and TPU runtimes.
## Available guides
{{toc}}
| keras-io/templates/guides/index.md/0 | {
"file_path": "keras-io/templates/guides/index.md",
"repo_id": "keras-io",
"token_count": 131
} | 127 |
{% if legend %}
<p><div class="example-highlight">★</div> = Good starter example</p>
<p><div class="example-highlight"><b>V3</b></div> = Keras 3 example</p>
{% endif %}
{% for category in categories %}
<h2><a href="{{category.path}}">{{category.title}}</a></h2>
{% if category.subcategories %}
{% for subcategory in category.subcategories %}
<h3 class="example-subcategory-title">{{subcategory.title}}</h3>
{% for example in subcategory.examples %}
<a href="{{example.path}}">
<div class="example-card">
{% if example.highlight %}
<div class="example-highlight">★</div>
{% endif %}
{% if example.keras_3 %}
<div class="example-highlight"><b>V3</b></div>
{% else %}
<div class="example-highlight">V2</div>
{% endif %}
<div class="example-card-title">
{{example.title}}
</div>
</div>
</a>
{% endfor %}
{% endfor %}
{% else %}
{% for example in category.examples %}
<a href="{{example.path}}">
<div class="example-card">
<div class="example-card-title">
{{example.title}}
</div>
</div>
</a>
{% endfor %}
{% endif %}
<hr>
{% endfor %}
| keras-io/theme/examples.html/0 | {
"file_path": "keras-io/theme/examples.html",
"repo_id": "keras-io",
"token_count": 540
} | 128 |
# Model Contribution Guide
KerasNLP has a plethora of pre-trained large language models
ranging from BERT to OPT. We are always looking for more models and are always
open to contributions!
In this guide, we will walk you through the steps one needs to take in order to
contribute a new pre-trained model to KerasNLP. For illustration purposes, let's
assume that you want to contribute the DistilBERT model. Before we dive in, we encourage you to go through
[our getting started guide](https://keras.io/guides/keras_nlp/getting_started/)
for an introduction to the library, and our
[contribution guide](https://github.com/keras-team/keras-nlp/blob/master/CONTRIBUTING.md).
## Checklist
This to-do list is a brief outline of how a model can be contributed.
Keep this checklist handy!
### Step 1: Open an issue/find an issue
- [ ] Open an issue or find an issue to contribute a backbone model.
### Step 2: PR #1 - Add XXBackbone
- [ ] An `xx/xx_backbone.py` file which has the model graph \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_backbone.py)\].
- [ ] An `xx/xx_backbone_test.py` file which has unit tests for the backbone \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_backbone_test.py)\].
- [ ] A Colab notebook link in the PR description which matches the outputs of the implemented backbone model with the original source \[[Example](https://colab.research.google.com/drive/1SeZWJorKWmwWJax8ORSdxKrxE25BfhHa?usp=sharing)\].
### Step 3: PR #2 - Add XXTokenizer
- [ ] An `xx/xx_tokenizer.py` file which has the tokenizer for the model \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_tokenizer.py)\].
- [ ] An `xx/xx_tokenizer_test.py` file which has unit tests for the model tokenizer \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py)\].
- [ ] A Colab notebook link in the PR description, demonstrating that the output of the tokenizer matches the original tokenizer \[[Example](https://colab.research.google.com/drive/1MH_rpuFB1Nz_NkKIAvVtVae2HFLjXZDA?usp=sharing)].
### Step 4: PR #3 - Add XX Presets
- [ ] An `xx/xx_presets.py` file with links to weights uploaded to a personal GCP bucket/Google Drive \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_presets.py)\].
- [ ] A `tools/checkpoint_conversion/convert_xx_checkpoints.py` which is reusable script for converting checkpoints \[[Example](https://github.com/keras-team/keras-nlp/blob/master/tools/checkpoint_conversion/convert_distilbert_checkpoints.py)\].
- [ ] A Colab notebook link in the PR description, showing an end-to-end task such as text classification, etc. The task model can be built using the backbone model, with the task head on top \[[Example](https://gist.github.com/mattdangerw/bf0ca07fb66b6738150c8b56ee5bab4e)\].
### Step 5: PR #4 and Beyond - Add XX Tasks and Preprocessors
This PR is optional.
- [ ] An `xx/xx_<task>.py` file for adding a task model like classifier, masked LM, etc. \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_classifier.py)\]
- [ ] An `xx/xx_<task>_preprocessor.py` file which has the preprocessor and can be used to get inputs suitable for the task model \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_preprocessor.py)\].
- [ ] `xx/xx_<task>_test.py` file and `xx/xx_<task>_preprocessor_test.py` files which have unit tests for the above two modules \[[Example 1](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_classifier_test.py) and [Example 2](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py)\].
- [ ] A Colab notebook link in the PR description, demonstrating that the output of the preprocessor matches the output of the original preprocessor \[[Example](https://colab.research.google.com/drive/1GFFC7Y1I_2PtYlWDToqKvzYhHWv1b3nC?usp=sharing)].
## Detailed Instructions
This section discusses, in detail, every necessary step.
### Step 1: Open an issue/Find an open issue
Before getting started with the code, it's important to check if there are any
[open issues](https://github.com/keras-team/keras-nlp/issues?q=is%3Aissue+is%3Aopen+label%3Amodel-contribution)
related to the model you wish to contribute. If there is an open issue, you can
claim it by commenting on the issue and letting us know that you're interested
in working on it. This helps us keep track of who is working on what and avoid
duplicated effort.
If there aren't any open issues, you can create one by clicking the "New Issue"
button on our repository page.
Note that you need not have all the answers or complete knowledge of the inner
workings of the model at the time of opening the issue. But it is appreciated if
you can furnish as much detail as possible to enable us to help you with the
contribution! 🙂
### Step 2: PR #1 - Add XXBackbone
#### Add the backbone class
Once you are done identifying all the required layers, you should implement the
model backbone class.
To keep the code simple and readable, we follow
[Keras' functional style model](https://keras.io/guides/functional_api/) wrapped
around by a class to implement our models.
A model is typically split into three/four sections. We recommend that you
compare this side-by-side with the
[`keras_nlp.models.DistilBertBackbone` source code](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_backbone.py)!
**Inputs to the model**
Generally, the standard inputs to any text model are:
- `token_ids`: tokenized inputs (an integer representation of the text sequence).
- `padding_mask`: Masks the padding tokens.
**Embedding layer(s)**
Standard layers used: `keras.layers.Embedding`,
`keras_nlp.layers.PositionEmbedding`, `keras_nlp.layers.TokenAndPositionEmbedding`.
**Encoder layers**
Standard layers used: `keras_nlp.layers.TransformerEncoder`, `keras_nlp.layers.FNetEncoder`.
**Decoder layers (possibly)**
Standard layers used: `keras_nlp.layers.TransformerDecoder`.
**Other layers which might be used**
`keras.layers.LayerNorm`, `keras.layers.Dropout`, `keras.layers.Conv1D`, etc.
<br/>
The standard layers provided in Keras and KerasNLP are generally enough for
most use cases, and it is recommended to do a thorough search
[here](https://keras.io/api/layers/) and [here](https://keras.io/api/keras_nlp/layers/).
However, sometimes, models have small tweaks/paradigm changes in their architecture.
This is when things might slightly get complicated.
If the model introduces a paradigm shift, such as using relative attention instead
of vanilla attention, the contributor will have to implement complete custom layers. A case
in point is `keras_nlp.models.DebertaV3Backbone` where we had to [implement layers
from scratch](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/models/deberta_v3).
On the other hand, if the model has a small tweak, something simpler can be done.
For instance, in the Whisper model, the self-attention and cross-attention mechanism
is exactly the same as vanilla attention, with the exception that the key projection
layer does not have a bias term. In this case, we can inherit the custom layer
from one of the standard layers and make minor modifications. See [this PR](https://github.com/keras-team/keras-nlp/pull/801/files#diff-8533ae3a7755c0dbe95ccbb71f85c677297f687bf3884fadefc64f1d0fdce51aR22) for
more details.
Since the first PR is only to add the model backbone class, you should omit the
`from_preset()` function; this will be added at a later stage when you open a PR
for adding presets.
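To make the overall structure concrete, here is a deliberately tiny, hypothetical backbone skeleton in the functional style described above. The class name, layer counts and dimensions are made up; a real contribution should mirror the source architecture exactly and subclass the library's backbone base class (see the DistilBERT backbone linked above for the real thing).
```python
import keras
import keras_nlp


class ToyBackbone(keras.Model):
    """A minimal, hypothetical backbone sketch (not a real KerasNLP model)."""

    def __init__(
        self,
        vocabulary_size=1000,
        num_layers=2,
        num_heads=2,
        hidden_dim=64,
        intermediate_dim=128,
        max_sequence_length=128,
        **kwargs,
    ):
        # Standard inputs for a text backbone.
        token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids")
        padding_mask = keras.Input(shape=(None,), dtype="int32", name="padding_mask")
        # Embedding section.
        x = keras_nlp.layers.TokenAndPositionEmbedding(
            vocabulary_size=vocabulary_size,
            sequence_length=max_sequence_length,
            embedding_dim=hidden_dim,
        )(token_ids)
        # Encoder section.
        for _ in range(num_layers):
            x = keras_nlp.layers.TransformerEncoder(
                intermediate_dim=intermediate_dim,
                num_heads=num_heads,
            )(x, padding_mask=padding_mask)
        # Wrap the functional graph in the class.
        super().__init__(
            inputs={"token_ids": token_ids, "padding_mask": padding_mask},
            outputs=x,
            **kwargs,
        )
```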
#### Convert weights from the original source and check output!
Before you open a PR for adding the model backbone class, it is essential to check
whether the model has been implemented exactly as in the source implementation. This
also helps in adding model "presets" at a later stage.
The preferred way of doing this is to add a Colab link in the PR description, which
1) converts the original preset weights to our format, and
2) checks whether the outputs of the original model and your implemented model are close enough.
It is okay if you demonstrate it for one preset at this stage; you can do the conversion
for the other presets when you officially add presets to the library at a later stage.
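The numeric check in the Colab can be as simple as the sketch below. The arrays here are dummies used only to show the comparison call and tolerance; in the real notebook they would be the final hidden states produced by the reference implementation and by your KerasNLP backbone on the same tokenized batch.
```python
import numpy as np

# Stand-ins for the outputs of the original model and your KerasNLP backbone.
original_outputs = np.random.rand(2, 8, 64).astype("float32")
keras_nlp_outputs = original_outputs + 1e-7
# The two sets of outputs should match up to small numerical differences.
np.testing.assert_allclose(original_outputs, keras_nlp_outputs, atol=1e-5)
```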
#### Add Unit Tests
It is essential to add units tests. These unit tests are basic and mostly check
whether the forward pass goes through successfully, whether the model can be saved
and loaded correctly, etc.
### Step 3: PR #2 - Add XXTokenizer
#### Tokenizer
Most text models nowadays use subword tokenizers such as WordPiece, SentencePiece
and BPE Tokenizer. Since KerasNLP has implementations of most of the popular
subword tokenizers, the model tokenizer layer typically inherits from a base
tokenizer class.
For example, DistilBERT uses the WordPiece tokenizer. So, we can introduce a new
class, `DistilBertTokenizer`, which inherits from `keras_nlp.tokenizers.WordPieceTokenizer`.
All the underlying actual tokenization will be taken care of by the superclass.
The important thing here is adding "special tokens". Most models have
special tokens such as beginning-of-sequence token, end-of-sequence token,
mask token, pad token, etc. These have to be
[added as member attributes](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_tokenizer.py#L91-L105)
to the tokenizer class. These member attributes are then accessed by the
preprocessor layers.
For a full list of the tokenizers KerasNLP offers, please visit
[this link](https://keras.io/api/keras_nlp/tokenizers/) and make use of the
tokenizer your model uses!
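As a rough, hypothetical sketch of this pattern (the class name, vocabulary and special tokens below are illustrative only; see the DistilBERT tokenizer linked above for the real implementation):
```python
import keras_nlp


class ToyTokenizer(keras_nlp.tokenizers.WordPieceTokenizer):
    """A hypothetical WordPiece-based model tokenizer."""

    def __init__(self, vocabulary, **kwargs):
        super().__init__(vocabulary=vocabulary, **kwargs)
        # Check for the special tokens and store their ids as member
        # attributes so that preprocessor layers can access them later.
        for token in ["[CLS]", "[SEP]", "[PAD]"]:
            if token not in self.get_vocabulary():
                raise ValueError(f"Missing special token {token} in the vocabulary.")
        self.cls_token_id = self.token_to_id("[CLS]")
        self.sep_token_id = self.token_to_id("[SEP]")
        self.pad_token_id = self.token_to_id("[PAD]")


vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "the", "quick", "brown", "fox"]
tokenizer = ToyTokenizer(vocabulary=vocab)
print(tokenizer("the quick brown fox"))
```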
#### Unit Tests
The last step here is to add unit tests for the tokenizer. A dummy vocabulary is
created, and the output of the tokenizer is verified, including tokenization,
detokenization, etc.
### Step 4: PR #3 - Add XX Presets
Once the backbone and tokenizer PRs have been merged, you can open a PR for
adding presets. For every model, we have a separate file where we mention our
preset configurations. This preset configuration has model-specific arguments
such as number of layers, number of attention heads; preprocessor-specific
arguments such as whether we want to lowercase the input text; checkpoint and
vocabulary file URLs, etc. In the PR description, you can add
Google Drive/personal GCP bucket links to the checkpoint and the vocabulary
files. These files will then be uploaded to GCP by us!
After wrapping up the preset configuration file, you need to
add the `from_preset` function to both classes, i.e., `DistilBertBackbone`
and `DistilBertTokenizer`. Here is an
[example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_backbone.py#L187-L189).
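As a loose sketch only (the field names below are placeholders; copy the exact schema from an existing `*_presets.py` file in the repository), a preset entry looks roughly like this:
```python
# Hypothetical preset entry; mirror an existing `*_presets.py` file for the
# exact schema (architecture config, checkpoint/vocabulary URLs and hashes).
backbone_presets = {
    "xx_base_en": {
        "metadata": {
            "description": "Short human-readable description of the preset.",
        },
        # Architecture arguments, weights/vocabulary links, etc. go here.
    },
}

# Once the presets are wired up, users can instantiate models like:
# model = keras_nlp.models.DistilBertBackbone.from_preset("distil_bert_base_en_uncased")
```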
The testing for presets is divided into two: "large" and "extra large".
For "large" tests, we pick the smallest preset (in terms of number of parameters)
and verify whether the output is correct. For "extra large tests", we loop over
all the presets and just check whether the backbone and the tokenizer can
be called without any error.
Additionally, a checkpoint conversion script should be added. This script
demonstrates that the outputs of our backbone model and outputs of the source
model match. This should be done for all presets.
### Step 5: PR #4 and Beyond: Add XXTasks and XXPreprocessors
Once you are finished with Steps 1-4, you can add "task" models and
preprocessors.
#### Task model
Task models are essentially models which have "task heads" on top of the backbone
models. For instance, for the text classification task, you can have a
feedforward layer on top of a backbone model like DistilBERT. Task models are
essential, since pretrained models are used extensively for downstream tasks
like text classification, token classification, text summarization, neural
machine translation, etc.
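For illustration only, here is a minimal, hypothetical classification head stacked on a small, randomly initialized DistilBERT backbone. The argument values are made up, and the head is deliberately naive; the real `DistilBertClassifier` in the library also handles preprocessing, pools on the `[CLS]` position, respects padding, and supports `from_preset`.
```python
import keras
import keras_nlp

# A small, randomly initialized backbone used only for this sketch.
backbone = keras_nlp.models.DistilBertBackbone(
    vocabulary_size=30522,
    num_layers=2,
    num_heads=2,
    hidden_dim=64,
    intermediate_dim=128,
    max_sequence_length=128,
)
# A simple task head: average-pool the sequence output and classify into 2 classes.
pooled = keras.layers.GlobalAveragePooling1D()(backbone.output)
outputs = keras.layers.Dense(2, activation="softmax")(pooled)
classifier = keras.Model(backbone.input, outputs)
```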
#### Preprocessor
The preprocessor class is responsible for making the inputs suitable for
consumption by the model - it packs multiple inputs together, i.e., given
multiple input texts, it will add appropriate special tokens, pad the inputs
and return the dictionary in the form expected by the model.
The preprocessor class might have a few intricacies depending on the model. For example,
the DeBERTaV3 tokenizer does not have the `[MASK]` token in the provided sentencepiece
proto file, and we had to make some modifications [here](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py). Secondly, we have
a separate preprocessor class for every task. This is because different tasks
might require different input formats. For instance, we have a [separate preprocessor](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py)
for masked language modeling (MLM) for DistilBERT.
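A rough sketch of the core packing step is shown below. The vocabulary and sequence length are made up, and a real preprocessor would also return the `padding_mask` and wrap this logic in a preprocessor layer with a matching model tokenizer.
```python
import keras_nlp

vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "the", "quick", "brown", "fox"]
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)
packer = keras_nlp.layers.MultiSegmentPacker(
    sequence_length=8,
    start_value=vocab.index("[CLS]"),
    end_value=vocab.index("[SEP]"),
    pad_value=vocab.index("[PAD]"),
)
# Pack a batch of one tokenized sentence: adds [CLS]/[SEP] and pads to length 8.
token_ids, segment_ids = packer(tokenizer(["the quick brown fox"]))
print(token_ids, segment_ids)
```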
## Conclusion
Once all three PRs (and optionally, the fourth PR) have been merged, you have
successfully contributed a model to KerasNLP. Congratulations! 🔥
| keras-nlp/CONTRIBUTING_MODELS.md/0 | {
"file_path": "keras-nlp/CONTRIBUTING_MODELS.md",
"repo_id": "keras-nlp",
"token_count": 3901
} | 129 |
# Copyright 2023 The KerasNLP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT.
This script will create TFRecord files containing BERT training examples with
both word masking and next sentence prediction.
This script will load the entire dataset into memory to set up the next sentence
prediction task, so it is recommended to run this on shards of data at a time to
avoid memory issues.
By default, it will duplicate the input data 10 times with different masks and
sentence pairs, as did the original paper. So a 20GB source of Wikipedia and
BooksCorpus will result in a 400GB dataset.
This script is adapted from the original BERT repository:
https://github.com/google-research/bert/blob/master/create_pretraining_data.py
Usage:
python create_pretraining_data.py \
--input_files ~/datasets/bert-sentence-split-data/shard_0.txt \
--output_directory ~/datasets/bert-pretraining-data/shard_0.txt \
--vocab_file vocab.txt
"""
import collections
import os
import random
import sys
import tensorflow as tf
import tensorflow_text as tf_text
from absl import app
from absl import flags
from examples.bert_pretraining.bert_config import PREPROCESSING_CONFIG
from examples.utils.scripting_utils import list_filenames_for_arg
# Tokenization will happen with tensorflow and can easily OOM a GPU.
# Restrict the script to run CPU as GPU will not offer speedup here anyway.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input_files",
None,
"Comma seperated list of directories, globs or files.",
)
flags.DEFINE_string(
"output_file",
None,
"Output TF record file.",
)
flags.DEFINE_string(
"vocab_file",
None,
"The vocabulary file for tokenization.",
)
flags.DEFINE_bool(
"do_lower_case",
True,
"Whether to lower case the input text.",
)
flags.DEFINE_integer(
"random_seed",
12345,
"Random seed for data generation.",
)
def convert_to_unicode(text):
"""Converts text to Unicode if it's not already, assuming utf-8 input."""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def printable_text(text):
"""Returns text encoded in a way suitable for print."""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
# This tuple holds a complete training instance of data ready for serialization.
TrainingInstance = collections.namedtuple(
"TrainingInstance",
[
"tokens",
"segment_ids",
"is_random_next",
"masked_lm_positions",
"masked_lm_labels",
],
)
def write_instance_to_example_files(
instances, vocab, max_seq_length, max_predictions_per_seq, output_filename
):
"""Create TF example files from `TrainingInstance`s."""
writer = tf.io.TFRecordWriter(output_filename)
total_written = 0
lookup = dict(zip(vocab, range(len(vocab))))
for inst_index, instance in enumerate(instances):
token_ids = [lookup[x] for x in instance.tokens]
padding_mask = [1] * len(token_ids)
segment_ids = list(instance.segment_ids)
assert len(token_ids) <= max_seq_length
while len(token_ids) < max_seq_length:
token_ids.append(0)
padding_mask.append(0)
segment_ids.append(0)
assert len(token_ids) == max_seq_length
assert len(padding_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = [lookup[x] for x in instance.masked_lm_labels]
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features = collections.OrderedDict()
features["token_ids"] = int_feature(token_ids)
features["padding_mask"] = int_feature(padding_mask)
features["segment_ids"] = int_feature(segment_ids)
features["masked_lm_positions"] = int_feature(masked_lm_positions)
features["masked_lm_ids"] = int_feature(masked_lm_ids)
features["masked_lm_weights"] = float_feature(masked_lm_weights)
features["next_sentence_labels"] = int_feature([next_sentence_label])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features)
)
writer.write(tf_example.SerializeToString())
total_written += 1
writer.close()
print(f"Wrote {total_written} total instances")
def int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
def float_feature(values):
return tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
def create_training_instances(
input_filenames,
tokenizer,
vocab,
max_seq_length,
dupe_factor,
short_seq_prob,
masked_lm_prob,
max_predictions_per_seq,
rng,
):
"""Create `TrainingInstance`s from raw text."""
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
dataset = tf.data.TextLineDataset(input_filenames)
dataset = dataset.map(
lambda x: tokenizer.tokenize(x).flat_values,
num_parallel_calls=tf.data.AUTOTUNE,
)
all_documents = []
current_document = []
for line in dataset.as_numpy_iterator():
if line.size == 0 and current_document:
all_documents.append(current_document)
current_document = []
else:
line = [x.decode("utf-8") for x in line]
if line:
current_document.append(line)
rng.shuffle(all_documents)
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents,
document_index,
max_seq_length,
short_seq_prob,
masked_lm_prob,
max_predictions_per_seq,
vocab,
rng,
)
)
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents,
document_index,
max_seq_length,
short_seq_prob,
masked_lm_prob,
max_predictions_per_seq,
vocab_words,
rng,
):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the
# `A` (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for
# large corpora. However, just to be careful, we try to make
# sure that the random document is not the same as the
# document we're processing.
for _ in range(10):
random_document_index = rng.randint(
0, len(all_documents) - 1
)
if random_document_index != document_index:
break
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them
# back" so they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(
tokens,
masked_lm_positions,
masked_lm_labels,
) = create_masked_lm_predictions(
tokens,
masked_lm_prob,
max_predictions_per_seq,
vocab_words,
rng,
)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels,
)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple(
"MaskedLmInstance", ["index", "label"]
)
def create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng
):
"""Creates the predictions for the masked LM objective."""
# TODO(jbischof): replace with keras_nlp.layers.MaskedLMMaskGenerator
# (Issue #166)
cand_indexes = []
for i, token in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append([i])
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(
max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))),
)
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[
rng.randint(0, len(vocab_words) - 1)
]
output_tokens[index] = masked_token
masked_lms.append(
MaskedLmInstance(index=index, label=tokens[index])
)
assert len(masked_lms) <= num_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main(_):
print(f"Reading input data from {FLAGS.input_files}")
input_filenames = list_filenames_for_arg(FLAGS.input_files)
if not input_filenames:
print("No input files found. Check `input_files` flag.")
sys.exit(1)
# Load the vocabulary.
vocab = []
with open(FLAGS.vocab_file, "r") as vocab_file:
for line in vocab_file:
vocab.append(line.strip())
tokenizer = tf_text.BertTokenizer(
FLAGS.vocab_file,
lower_case=FLAGS.do_lower_case,
token_out_type=tf.string,
)
rng = random.Random(FLAGS.random_seed)
instances = create_training_instances(
input_filenames,
tokenizer,
vocab,
PREPROCESSING_CONFIG["max_seq_length"],
PREPROCESSING_CONFIG["dupe_factor"],
PREPROCESSING_CONFIG["short_seq_prob"],
PREPROCESSING_CONFIG["masked_lm_prob"],
PREPROCESSING_CONFIG["max_predictions_per_seq"],
rng,
)
print(f"Outputting to {FLAGS.output_file}.")
output_directory = os.path.dirname(FLAGS.output_file)
if not os.path.exists(output_directory):
os.mkdir(output_directory)
write_instance_to_example_files(
instances,
vocab,
PREPROCESSING_CONFIG["max_seq_length"],
PREPROCESSING_CONFIG["max_predictions_per_seq"],
FLAGS.output_file,
)
if __name__ == "__main__":
flags.mark_flag_as_required("input_files")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("vocab_file")
app.run(main)
| keras-nlp/examples/bert_pretraining/bert_create_pretraining_data.py/0 | {
"file_path": "keras-nlp/examples/bert_pretraining/bert_create_pretraining_data.py",
"repo_id": "keras-nlp",
"token_count": 7927
} | 130 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.backend import ops
@keras_nlp_export("keras_nlp.layers.AlibiBias")
class AlibiBias(keras.layers.Layer):
"""A layer that adds the alibi bias to attention scores.
This layer adds the alibi bias to the attention scores. Alibi bias is a
linear, non-learned bias, defined and formalized in
[Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation](https://arxiv.org/abs/2108.12409).
This layer takes the attention scores as input and returns the attention
scores with the alibi bias added. The output will have the same
shape as the input.
Args:
alibi_bias_max: int. This value will be used to compute the slope of
each head. The heads' slopes are a geometric sequence that starts at
`2**(-alibi_bias_max/num_heads)` and uses that same value as its
ratio. Defaults to 8.
Call arguments:
attention_scores: The result of multiplying the query and the key of the
multi-head attention layer of the transformer, to which the alibi bias
will be added. With shape `(batch_size, num_heads, query_length, key_length)`.
Examples:
```python
query_length = 10
key_length = 10
num_heads = 4
batch_size = 2
hidden_dim = 8
# Create new alibi layer.
alibi_layer = keras_nlp.layers.AlibiBias()
query = np.zeros((batch_size, num_heads, query_length, hidden_dim))
key = np.zeros((batch_size, num_heads, hidden_dim, key_length))
attention_scores = keras.ops.matmul(query, key)
# Add alibi bias to attention scores.
attention_scores = alibi_layer(attention_scores)
```
References:
- [Press et al., 2021](https://arxiv.org/abs/2108.12409)
"""
def __init__(
self,
alibi_bias_max=8,
**kwargs,
):
super().__init__(**kwargs)
self.alibi_bias_max = alibi_bias_max
def call(self, attention_scores):
shape = ops.shape(attention_scores)
if len(shape) != 4:
raise ValueError(
"Expected `attention_scores` shape to be "
"`(batch_size, num_heads, query_length, key_Length)`."
f" Recived shape={shape}"
)
key_length = shape[-1]
num_heads = shape[-3]
alibi_bias = self._get_alibi_bias(num_heads, key_length)
return ops.add(attention_scores, alibi_bias)
def _get_alibi_bias(self, num_heads, key_length):
slopes = ops.convert_to_tensor(
self._get_slopes(num_heads), dtype=self.compute_dtype
)
slopes = ops.expand_dims(slopes, 1)
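        # `slopes` now has shape `(num_heads, 1)` and broadcasts against the
        # `(1, key_length)` position range computed below.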
seq_range = ops.expand_dims(ops.arange(1 - key_length, 1), 0)
seq_range = ops.cast(seq_range, dtype=self.compute_dtype)
alibi_bias = ops.multiply(slopes, seq_range)
alibi_bias = ops.expand_dims(alibi_bias, 1)
# return shape is `(1, num_heads, 1, key_length)`
return ops.expand_dims(alibi_bias, 0)
def _get_slopes(self, num_heads):
        # This function is adapted from the original Alibi implementation.
# https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
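        # For example, `num_heads=4` with `alibi_bias_max=8` gives slopes
        # `[1/4, 1/16, 1/64, 1/256]`.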
def get_slopes_power_of_2(n):
start = 2 ** (
-(2 ** -(math.log2(n) - math.log2(self.alibi_bias_max)))
)
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(num_heads).is_integer():
return get_slopes_power_of_2(num_heads)
else:
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
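            # For a non-power-of-2 head count, use the slopes of the closest
            # smaller power of 2, then fill the remaining heads by taking
            # every other slope computed for twice that many heads.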
return (
get_slopes_power_of_2(closest_power_of_2)
+ self._get_slopes(2 * closest_power_of_2)[0::2][
: num_heads - closest_power_of_2
]
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
"alibi_bias_max": self.alibi_bias_max,
}
)
return config
| keras-nlp/keras_nlp/layers/modeling/alibi_bias.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/alibi_bias.py",
"repo_id": "keras-nlp",
"token_count": 2119
} | 131 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import keras
from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
from keras_nlp.utils.keras_utils import clone_initializer
@keras_nlp_export("keras_nlp.layers.TokenAndPositionEmbedding")
class TokenAndPositionEmbedding(keras.layers.Layer):
"""A layer which sums a token and position embedding.
Token and position embeddings are ways of representing words and their order
in a sentence. This layer creates a `keras.layers.Embedding` token embedding
and a `keras_nlp.layers.PositionEmbedding` position embedding and sums their
output when called. This layer assumes that the last dimension in the input
corresponds to the sequence dimension.
Args:
vocabulary_size: The size of the vocabulary.
        sequence_length: The maximum length of input sequences.
        embedding_dim: The output dimension of the embedding layers.
        tie_weights: Boolean, whether or not the matrix for embedding and
            the linear transformation should be shared.
        embeddings_initializer: The initializer to use for the embedding
            layers.
        mask_zero: Boolean, whether or not the input value 0 is a special
            "padding" value that should be masked out.
            This is useful when using recurrent layers which may take variable
            length input. If this is `True`, then all subsequent layers in the
            model need to support masking or an exception will be raised.
            If `mask_zero` is set to `True`, as a consequence, index 0 cannot
            be used in the vocabulary (`vocabulary_size` should then equal
            size of vocabulary + 1).
Examples:
```python
inputs = np.ones(shape=(1, 50), dtype="int32")
embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding(
vocabulary_size=10_000,
sequence_length=50,
embedding_dim=128,
)
outputs = embedding_layer(inputs)
```
"""
def __init__(
self,
vocabulary_size,
sequence_length,
embedding_dim,
tie_weights=True,
embeddings_initializer="uniform",
mask_zero=False,
**kwargs
):
super().__init__(**kwargs)
if vocabulary_size is None:
raise ValueError(
"`vocabulary_size` must be an Integer, received `None`."
)
if sequence_length is None:
raise ValueError(
"`sequence_length` must be an Integer, received `None`."
)
if embedding_dim is None:
raise ValueError(
"`embedding_dim` must be an Integer, received `None`."
)
self.vocabulary_size = int(vocabulary_size)
self.sequence_length = int(sequence_length)
self.embedding_dim = int(embedding_dim)
self.embeddings_initializer = keras.initializers.get(
embeddings_initializer
)
self.token_embedding = ReversibleEmbedding(
vocabulary_size,
embedding_dim,
tie_weights=tie_weights,
embeddings_initializer=clone_initializer(
self.embeddings_initializer
),
mask_zero=mask_zero,
dtype=self.dtype_policy,
name="token_embedding",
)
self.position_embedding = PositionEmbedding(
sequence_length=sequence_length,
initializer=clone_initializer(self.embeddings_initializer),
dtype=self.dtype_policy,
name="position_embedding",
)
self.supports_masking = self.token_embedding.supports_masking
def build(self, input_shape):
input_shape = tuple(input_shape)
self.token_embedding.build(input_shape)
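        # The position embedding operates on the token embedding's output, so
        # it is built with the embedding feature axis appended.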
self.position_embedding.build(input_shape + (self.embedding_dim,))
self.built = True
def get_config(self):
config = super().get_config()
config.update(
{
"vocabulary_size": self.vocabulary_size,
"sequence_length": self.sequence_length,
"embedding_dim": self.embedding_dim,
"embeddings_initializer": keras.initializers.serialize(
self.embeddings_initializer
),
"tie_weights": self.token_embedding.tie_weights,
"mask_zero": self.token_embedding.mask_zero,
}
)
return config
def call(self, inputs, start_index=0):
embedded_tokens = self.token_embedding(inputs)
embedded_positions = self.position_embedding(
embedded_tokens,
start_index=start_index,
)
outputs = embedded_tokens + embedded_positions
return outputs
def compute_mask(self, inputs, mask=None):
return self.token_embedding.compute_mask(inputs, mask=mask)
def compute_output_shape(self, input_shape):
return tuple(input_shape) + (self.embedding_dim,)
| keras-nlp/keras_nlp/layers/modeling/token_and_position_embedding.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/token_and_position_embedding.py",
"repo_id": "keras-nlp",
"token_count": 2305
} | 132 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.preprocessing_layer import (
PreprocessingLayer,
)
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import is_int_dtype
from keras_nlp.utils.tensor_utils import is_string_dtype
@keras_nlp_export("keras_nlp.layers.RandomSwap")
class RandomSwap(PreprocessingLayer):
"""Augments input by randomly swapping words.
This layer comes in handy when you need to generate new data using swap
augmentations as described in the paper [EDA: Easy Data Augmentation
Techniques for Boosting Performance on Text Classification Tasks]
(https://arxiv.org/pdf/1901.11196.pdf). The layer expects the inputs to be
pre-split into token level inputs. This allows control over the level of
augmentation, you can split by character for character level swaps, or by
word for word level swaps.
Input data should be passed as tensors, `tf.RaggedTensor`s, or lists. For
batched input, inputs should be a list of lists or a rank two tensor. For
unbatched inputs, each element should be a list or a rank one tensor.
Args:
rate: The probability of a given token being chosen to be swapped
with another random token.
max_swaps: The maximum number of swaps to be performed.
        skip_list: A list of token values that should not be considered
            candidates for swapping.
        skip_fn: A function that takes as input a scalar tensor token and
            returns as output a scalar tensor True/False value. A value of
            True indicates that the token should not be considered a
            candidate for swapping. This function must be traceable--it
            should consist of tensorflow operations.
        skip_py_fn: A function that takes as input a python token value and
            returns as output `True` or `False`. A value of True
            indicates that the token should not be considered a candidate
            for swapping. Unlike the `skip_fn` argument, this argument need
            not be traceable--it can be any python function.
seed: A seed for the random number generator.
Examples:
Word level usage.
>>> keras.utils.set_random_seed(1337)
>>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"])
>>> augmenter=keras_nlp.layers.RandomSwap(rate=0.4, seed=42)
>>> augmented=augmenter(inputs)
>>> tf.strings.reduce_join(augmented, separator=" ", axis=-1)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'like I Hey', b'and Keras Tensorflow'], dtype=object)>
Character level usage.
>>> keras.utils.set_random_seed(1337)
>>> inputs=tf.strings.unicode_split(["Hey Dude", "Speed Up"], "UTF-8")
>>> augmenter=keras_nlp.layers.RandomSwap(rate=0.4, seed=42)
>>> augmented=augmenter(inputs)
>>> tf.strings.reduce_join(augmented, axis=-1)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'deD yuHe', b'SUede pp'], dtype=object)>
Usage with skip_list.
>>> keras.utils.set_random_seed(1337)
>>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"])
>>> augmenter=keras_nlp.layers.RandomSwap(rate=0.4,
... skip_list=["Keras"], seed=42)
>>> augmented=augmenter(inputs)
>>> tf.strings.reduce_join(augmented, separator=" ", axis=-1)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'like I Hey', b'Keras and Tensorflow'], dtype=object)>
Usage with skip_fn.
>>> def skip_fn(word):
... return tf.strings.regex_full_match(word, r"[I, a].*")
>>> keras.utils.set_random_seed(1337)
>>> inputs=tf.strings.split(["Hey I like", "Keras and Tensorflow"])
>>> augmenter=keras_nlp.layers.RandomSwap(rate=0.9, max_swaps=3,
... skip_fn=skip_fn, seed=11)
>>> augmented=augmenter(inputs)
>>> tf.strings.reduce_join(augmented, separator=" ", axis=-1)
<tf.Tensor: shape=(2,), dtype=string,
numpy=array([b'like I Hey', b'Keras and Tensorflow'], dtype=object)>
Usage with skip_py_fn.
>>> def skip_py_fn(word):
... return len(word) < 4
>>> keras.utils.set_random_seed(1337)
>>> inputs=tf.strings.split(["He was drifting along", "With the wind"])
>>> augmenter=keras_nlp.layers.RandomSwap(rate=0.8, max_swaps=2,
... skip_py_fn=skip_py_fn, seed=15)
>>> augmented=augmenter(inputs)
>>> tf.strings.reduce_join(augmented, separator=" ", axis=-1)
<tf.Tensor: shape=(2,), dtype=string, numpy=array([b'He was along drifting',
b'wind the With'], dtype=object)>
"""
def __init__(
self,
rate,
max_swaps=None,
skip_list=None,
skip_fn=None,
skip_py_fn=None,
seed=None,
name=None,
dtype="int32",
**kwargs,
):
if not is_int_dtype(dtype) and not is_string_dtype(dtype):
raise ValueError(
"Output dtype must be an integer type or a string. "
f"Received: dtype={dtype}"
)
super().__init__(name=name, dtype=dtype, **kwargs)
self.rate = rate
self.max_swaps = max_swaps
        self.seed = random.randint(1, int(1e9)) if seed is None else seed
self._generator = tf.random.Generator.from_seed(self.seed)
self.skip_list = skip_list
self.skip_fn = skip_fn
self.skip_py_fn = skip_py_fn
if self.max_swaps is not None and self.max_swaps < 0:
raise ValueError(
"max_swaps must be non-negative."
f"Received max_swaps={max_swaps}."
)
if [self.skip_list, self.skip_fn, self.skip_py_fn].count(None) < 2:
raise ValueError(
"Exactly one of skip_list, skip_fn, skip_py_fn must be "
"provided."
)
if self.skip_list:
self.StaticHashTable = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
tf.convert_to_tensor(self.skip_list),
tf.convert_to_tensor([True] * len(self.skip_list)),
),
default_value=False,
)
def call(self, inputs):
inputs, unbatched, _ = convert_to_ragged_batch(inputs)
skip_masks = None
if self.skip_list:
skip_masks = self.StaticHashTable.lookup(inputs.flat_values)
elif self.skip_fn:
skip_masks = tf.map_fn(
self.skip_fn, inputs.flat_values, fn_output_signature="bool"
)
elif self.skip_py_fn:
def string_fn(token):
return self.skip_py_fn(token.numpy().decode("utf-8"))
def int_fn(token):
return self.skip_py_fn(token.numpy())
py_fn = string_fn if inputs.dtype == tf.string else int_fn
skip_masks = tf.map_fn(
lambda x: tf.py_function(py_fn, [x], "bool"),
inputs.flat_values,
fn_output_signature="bool",
)
positions = tf.ragged.range(inputs.row_lengths())
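        # `positions` holds, per example, the indices of candidate tokens;
        # skipped tokens are filtered out below.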
if skip_masks is not None:
skip_masks = tf.logical_not(skip_masks)
skip_masks.set_shape([None])
positions = tf.ragged.boolean_mask(
positions, inputs.with_flat_values(skip_masks)
)
# Figure out how many we are going to select.
token_counts = tf.cast(positions.row_lengths(), "float32")
num_to_select = tf.random.stateless_binomial(
shape=tf.shape(token_counts),
seed=self._generator.make_seeds()[:, 0],
counts=token_counts,
probs=self.rate,
)
if self.max_swaps is not None:
num_to_select = tf.math.minimum(num_to_select, self.max_swaps)
num_to_select = tf.math.minimum(
num_to_select, tf.cast(positions.row_lengths(), "int32")
)
num_to_select = tf.cast(num_to_select, "int64")
def _swap(x):
positions, inputs, num_to_select = x
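            # Sample two candidate positions uniformly (with replacement) per
            # swap; occasionally both indices match and the swap is a no-op.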
for _ in range(num_to_select):
index = tf.random.stateless_uniform(
shape=[2],
minval=0,
maxval=tf.size(positions),
dtype="int32",
seed=self._generator.make_seeds()[:, 0],
)
index1, index2 = positions[index[0]], positions[index[1]]
# swap items at the sampled indices with each other
inputs = tf.tensor_scatter_nd_update(
inputs,
[[index1], [index2]],
[inputs[index2], inputs[index1]],
)
return inputs
swapped = tf.map_fn(
_swap,
(positions, inputs, num_to_select),
fn_output_signature=tf.RaggedTensorSpec(
ragged_rank=positions.ragged_rank - 1, dtype=inputs.dtype
),
)
swapped.flat_values.set_shape([None])
if unbatched:
swapped = tf.squeeze(swapped, axis=0)
return swapped
def get_config(self):
config = super().get_config()
config.update(
{
"rate": self.rate,
"max_swaps": self.max_swaps,
"seed": self.seed,
"skip_list": self.skip_list,
"skip_fn": self.skip_fn,
"skip_py_fn": self.skip_py_fn,
}
)
return config
def compute_output_shape(self, inputs_shape):
inputs_shape = list(inputs_shape)
inputs_shape[-1] = None
return tuple(inputs_shape)
| keras-nlp/keras_nlp/layers/preprocessing/random_swap.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/preprocessing/random_swap.py",
"repo_id": "keras-nlp",
"token_count": 4726
} | 133 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.models.albert.albert_backbone import AlbertBackbone
from keras_nlp.models.albert.albert_classifier import AlbertClassifier
from keras_nlp.models.albert.albert_masked_lm import AlbertMaskedLM
from keras_nlp.models.albert.albert_masked_lm_preprocessor import (
AlbertMaskedLMPreprocessor,
)
from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor
from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer
from keras_nlp.models.bart.bart_backbone import BartBackbone
from keras_nlp.models.bart.bart_preprocessor import BartPreprocessor
from keras_nlp.models.bart.bart_seq_2_seq_lm import BartSeq2SeqLM
from keras_nlp.models.bart.bart_seq_2_seq_lm_preprocessor import (
BartSeq2SeqLMPreprocessor,
)
from keras_nlp.models.bart.bart_tokenizer import BartTokenizer
from keras_nlp.models.bert.bert_backbone import BertBackbone
from keras_nlp.models.bert.bert_classifier import BertClassifier
from keras_nlp.models.bert.bert_masked_lm import BertMaskedLM
from keras_nlp.models.bert.bert_masked_lm_preprocessor import (
BertMaskedLMPreprocessor,
)
from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor
from keras_nlp.models.bert.bert_tokenizer import BertTokenizer
from keras_nlp.models.bloom.bloom_backbone import BloomBackbone
from keras_nlp.models.bloom.bloom_tokenizer import BloomTokenizer
from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone
from keras_nlp.models.deberta_v3.deberta_v3_classifier import (
DebertaV3Classifier,
)
from keras_nlp.models.deberta_v3.deberta_v3_masked_lm import DebertaV3MaskedLM
from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import (
DebertaV3MaskedLMPreprocessor,
)
from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import (
DebertaV3Preprocessor,
)
from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer
from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone
from keras_nlp.models.distil_bert.distil_bert_classifier import (
DistilBertClassifier,
)
from keras_nlp.models.distil_bert.distil_bert_masked_lm import (
DistilBertMaskedLM,
)
from keras_nlp.models.distil_bert.distil_bert_masked_lm_preprocessor import (
DistilBertMaskedLMPreprocessor,
)
from keras_nlp.models.distil_bert.distil_bert_preprocessor import (
DistilBertPreprocessor,
)
from keras_nlp.models.distil_bert.distil_bert_tokenizer import (
DistilBertTokenizer,
)
from keras_nlp.models.electra.electra_backbone import ElectraBackbone
from keras_nlp.models.electra.electra_tokenizer import ElectraTokenizer
from keras_nlp.models.f_net.f_net_backbone import FNetBackbone
from keras_nlp.models.f_net.f_net_classifier import FNetClassifier
from keras_nlp.models.f_net.f_net_masked_lm import FNetMaskedLM
from keras_nlp.models.f_net.f_net_masked_lm_preprocessor import (
FNetMaskedLMPreprocessor,
)
from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor
from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer
from keras_nlp.models.gemma.gemma_backbone import GemmaBackbone
from keras_nlp.models.gemma.gemma_causal_lm import GemmaCausalLM
from keras_nlp.models.gemma.gemma_causal_lm_preprocessor import (
GemmaCausalLMPreprocessor,
)
from keras_nlp.models.gemma.gemma_preprocessor import GemmaPreprocessor
from keras_nlp.models.gemma.gemma_tokenizer import GemmaTokenizer
from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone
from keras_nlp.models.gpt2.gpt2_causal_lm import GPT2CausalLM
from keras_nlp.models.gpt2.gpt2_causal_lm_preprocessor import (
GPT2CausalLMPreprocessor,
)
from keras_nlp.models.gpt2.gpt2_preprocessor import GPT2Preprocessor
from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
from keras_nlp.models.gpt_neo_x.gpt_neo_x_backbone import GPTNeoXBackbone
from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm import GPTNeoXCausalLM
from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm_preprocessor import (
GPTNeoXCausalLMPreprocessor,
)
from keras_nlp.models.gpt_neo_x.gpt_neo_x_preprocessor import (
GPTNeoXPreprocessor,
)
from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer
from keras_nlp.models.llama.llama_backbone import LlamaBackbone
from keras_nlp.models.mistral.mistral_backbone import MistralBackbone
from keras_nlp.models.mistral.mistral_causal_lm import MistralCausalLM
from keras_nlp.models.mistral.mistral_causal_lm_preprocessor import (
MistralCausalLMPreprocessor,
)
from keras_nlp.models.mistral.mistral_preprocessor import MistralPreprocessor
from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer
from keras_nlp.models.opt.opt_backbone import OPTBackbone
from keras_nlp.models.opt.opt_causal_lm import OPTCausalLM
from keras_nlp.models.opt.opt_causal_lm_preprocessor import (
OPTCausalLMPreprocessor,
)
from keras_nlp.models.opt.opt_preprocessor import OPTPreprocessor
from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer
from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone
from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier
from keras_nlp.models.roberta.roberta_masked_lm import RobertaMaskedLM
from keras_nlp.models.roberta.roberta_masked_lm_preprocessor import (
RobertaMaskedLMPreprocessor,
)
from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor
from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer
from keras_nlp.models.t5.t5_backbone import T5Backbone
from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer
from keras_nlp.models.whisper.whisper_audio_feature_extractor import (
WhisperAudioFeatureExtractor,
)
from keras_nlp.models.whisper.whisper_backbone import WhisperBackbone
from keras_nlp.models.whisper.whisper_preprocessor import WhisperPreprocessor
from keras_nlp.models.whisper.whisper_tokenizer import WhisperTokenizer
from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone
from keras_nlp.models.xlm_roberta.xlm_roberta_classifier import (
XLMRobertaClassifier,
)
from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm import (
XLMRobertaMaskedLM,
)
from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm_preprocessor import (
XLMRobertaMaskedLMPreprocessor,
)
from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor import (
XLMRobertaPreprocessor,
)
from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import (
XLMRobertaTokenizer,
)
from keras_nlp.models.xlnet.xlnet_backbone import XLNetBackbone
| keras-nlp/keras_nlp/models/__init__.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/__init__.py",
"repo_id": "keras-nlp",
"token_count": 2707
} | 134 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.transformer_layer_utils import (
compute_causal_mask,
)
from keras_nlp.layers.modeling.transformer_layer_utils import (
merge_padding_and_attention_mask,
)
from keras_nlp.models.bloom.bloom_attention import BloomAttention
from keras_nlp.utils.keras_utils import clone_initializer
class BloomDecoder(keras.layers.Layer):
def __init__(
self,
num_heads,
intermediate_dim,
dropout=0.0,
layer_norm_epsilon=1e-5,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
**kwargs,
):
super().__init__(**kwargs)
self.num_heads = num_heads
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
def build(self, decoder_sequence_shape):
hidden_dim = decoder_sequence_shape[-1]
head_dim = int(hidden_dim // self.num_heads)
if head_dim * self.num_heads != hidden_dim:
raise ValueError(
f"`hidden_dim` must be divisible by num_heads (got `hidden_dim`"
f": {hidden_dim} and `num_heads`: {self.num_heads})."
)
self._pre_attention_layernorm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="pre_attention_layernorm",
)
self._pre_attention_layernorm.build(decoder_sequence_shape)
self._self_attention_layer = BloomAttention(
num_heads=self.num_heads,
dropout=self.dropout,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="self_attention",
)
self._self_attention_layer.build(decoder_sequence_shape)
self._post_attention_layernorm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="post_attention_layernorm",
)
self._post_attention_layernorm.build(decoder_sequence_shape)
self._mlp_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="mlp_intermediate_dense",
)
self._mlp_intermediate_dense.build(decoder_sequence_shape)
self._mlp_output_dense = keras.layers.Dense(
hidden_dim,
kernel_initializer=clone_initializer(self.kernel_initializer),
bias_initializer=clone_initializer(self.bias_initializer),
dtype=self.dtype_policy,
name="mlp_output_dense",
)
intermediate_shape = list(decoder_sequence_shape)
intermediate_shape[-1] = self.intermediate_dim
self._mlp_output_dense.build(tuple(intermediate_shape))
self._dropout_layer = keras.layers.Dropout(
rate=self.dropout, dtype=self.dtype_policy, name="dropout"
)
self.built = True
def call(
self,
decoder_sequence,
decoder_padding_mask=None,
decoder_attention_mask=None,
attention_cache=None,
attention_cache_update_index=None,
use_causal_mask=True,
):
self_attention_mask = self._compute_attention_mask(
decoder_sequence=decoder_sequence,
decoder_padding_mask=decoder_padding_mask,
decoder_attention_mask=decoder_attention_mask,
use_causal_mask=use_causal_mask,
attention_cache=attention_cache,
attention_cache_update_index=attention_cache_update_index,
)
residual = decoder_sequence
x = self._pre_attention_layernorm(decoder_sequence)
attention_output = self._self_attention_layer(
hidden_states=x,
attention_mask=self_attention_mask,
cache=attention_cache,
cache_update_index=attention_cache_update_index,
)
if attention_cache is None:
x = attention_output
else:
x, attention_cache = attention_output
x = x + residual
residual = x
x = self._post_attention_layernorm(x)
x = self._mlp_intermediate_dense(x)
x = keras.activations.gelu(x, approximate=True)
x = self._mlp_output_dense(x)
x = self._dropout_layer(x)
x = x + residual
if attention_cache is not None:
return x, attention_cache
else:
return x
def _compute_attention_mask(
self,
decoder_sequence,
decoder_padding_mask,
decoder_attention_mask,
use_causal_mask,
attention_cache,
attention_cache_update_index,
):
decoder_mask = merge_padding_and_attention_mask(
decoder_sequence, decoder_padding_mask, decoder_attention_mask
)
if use_causal_mask:
batch_size = ops.shape(decoder_sequence)[0]
input_length = output_length = ops.shape(decoder_sequence)[1]
if attention_cache is not None:
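                # When decoding with a cache, keys/values span the full cache
                # length while the query covers only the new positions.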
input_length = ops.shape(attention_cache)[2]
causal_mask = compute_causal_mask(
batch_size,
input_length,
output_length,
(
0
if attention_cache_update_index is None
else attention_cache_update_index
),
)
return (
ops.minimum(decoder_mask, causal_mask)
if decoder_mask is not None
else causal_mask
)
return decoder_mask
def get_config(self):
config = super().get_config()
config.update(
{
"num_heads": self.num_heads,
"intermediate_dim": self.intermediate_dim,
"dropout": self.dropout,
"layer_norm_epsilon": self.layer_norm_epsilon,
"kernel_initializer": keras.initializers.serialize(
self.kernel_initializer
),
"bias_initializer": keras.initializers.serialize(
self.bias_initializer
),
}
)
return config
def compute_output_shape(self, decoder_sequence_shape):
return decoder_sequence_shape
| keras-nlp/keras_nlp/models/bloom/bloom_decoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bloom/bloom_decoder.py",
"repo_id": "keras-nlp",
"token_count": 3519
} | 135 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.multi_segment_packer import (
MultiSegmentPacker,
)
from keras_nlp.models.distil_bert.distil_bert_presets import backbone_presets
from keras_nlp.models.distil_bert.distil_bert_tokenizer import (
DistilBertTokenizer,
)
from keras_nlp.models.preprocessor import Preprocessor
from keras_nlp.utils.keras_utils import (
convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.DistilBertPreprocessor")
class DistilBertPreprocessor(Preprocessor):
"""A DistilBERT preprocessing layer which tokenizes and packs inputs.
This preprocessing layer will do three things:
1. Tokenize any number of input segments using the `tokenizer`.
    2. Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`
       with the appropriate `"[CLS]"`, `"[SEP]"` and `"[PAD]"` tokens.
    3. Construct a dictionary with keys `"token_ids"` and `"padding_mask"`,
that can be passed directly to a DistilBERT model.
This layer can be used directly with `tf.data.Dataset.map` to preprocess
string data in the `(x, y, sample_weight)` format used by
`keras.Model.fit`.
Args:
tokenizer: A `keras_nlp.models.DistilBertTokenizer` instance.
sequence_length: The length of the packed inputs.
truncate: string. The algorithm to truncate a list of batched segments
to fit within `sequence_length`. The value can be either
`round_robin` or `waterfall`:
- `"round_robin"`: Available space is assigned one token at a
time in a round-robin fashion to the inputs that still need
some, until the limit is reached.
- `"waterfall"`: The allocation of the budget is done using a
"waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run
out of budget. It supports an arbitrary number of segments.
Call arguments:
x: A tensor of single string sequences, or a tuple of multiple
tensor sequences to be packed together. Inputs may be batched or
unbatched. For single sequences, raw python inputs will be converted
to tensors. For multiple sequences, pass tensors directly.
y: Any label data. Will be passed through unaltered.
sample_weight: Any label weight data. Will be passed through unaltered.
Examples:
Directly calling the layer on data.
```python
preprocessor = keras_nlp.models.DistilBertPreprocessor.from_preset(
"distil_bert_base_en_uncased"
)
preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
# Custom vocabulary.
vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
vocab += ["The", "quick", "brown", "fox", "jumped", "."]
tokenizer = keras_nlp.models.DistilBertTokenizer(vocabulary=vocab)
preprocessor = keras_nlp.models.DistilBertPreprocessor(tokenizer)
preprocessor("The quick brown fox jumped.")
```
Mapping with `tf.data.Dataset`.
```python
preprocessor = keras_nlp.models.DistilBertPreprocessor.from_preset(
"distil_bert_base_en_uncased"
)
first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
second = tf.constant(["The fox tripped.", "Oh look, a whale."])
label = tf.constant([1, 1])
# Map labeled single sentences.
ds = tf.data.Dataset.from_tensor_slices((first, label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled single sentences.
ds = tf.data.Dataset.from_tensor_slices(first)
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map labeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices((first, second))
# Watch out for tf.data's default unpacking of tuples here!
# Best to invoke the `preprocessor` directly in this case.
ds = ds.map(
lambda first, second: preprocessor(x=(first, second)),
num_parallel_calls=tf.data.AUTOTUNE,
)
```
"""
def __init__(
self,
tokenizer,
sequence_length=512,
truncate="round_robin",
**kwargs,
):
super().__init__(**kwargs)
self.tokenizer = tokenizer
self.packer = None
self.sequence_length = sequence_length
self.truncate = truncate
def build(self, input_shape):
super().build(input_shape)
        # Defer packer creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.cls_token_id,
end_value=self.tokenizer.sep_token_id,
pad_value=self.tokenizer.pad_token_id,
truncate=self.truncate,
sequence_length=self.sequence_length,
)
def call(self, x, y=None, sample_weight=None):
x = convert_inputs_to_list_of_tensor_segments(x)
x = [self.tokenizer(segment) for segment in x]
token_ids, _ = self.packer(x)
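        # DistilBERT has no segment embedding, so the packer's segment ids
        # are discarded.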
x = {
"token_ids": token_ids,
"padding_mask": token_ids != self.tokenizer.pad_token_id,
}
return pack_x_y_sample_weight(x, y, sample_weight)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"truncate": self.truncate,
}
)
return config
@property
def sequence_length(self):
"""The padded length of model input sequences."""
return self._sequence_length
@sequence_length.setter
def sequence_length(self, value):
self._sequence_length = value
if self.packer is not None:
self.packer.sequence_length = value
@classproperty
def tokenizer_cls(cls):
return DistilBertTokenizer
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/distil_bert/distil_bert_preprocessor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/distil_bert/distil_bert_preprocessor.py",
"repo_id": "keras-nlp",
"token_count": 2800
} | 136 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.layers.modeling.transformer_layer_utils import (
compute_causal_mask,
)
from keras_nlp.layers.modeling.transformer_layer_utils import (
merge_padding_and_attention_mask,
)
from keras_nlp.models.gemma.gemma_attention import CachedGemmaAttention
from keras_nlp.models.gemma.rms_normalization import RMSNormalization
class GemmaDecoderBlock(keras.layers.Layer):
def __init__(
self,
hidden_dim,
intermediate_dim,
head_dim,
num_query_heads,
num_key_value_heads,
layer_norm_epsilon=1e-6,
dropout=0,
**kwargs,
):
super().__init__(**kwargs)
self.intermediate_dim = intermediate_dim
self.hidden_dim = hidden_dim
self.num_query_heads = num_query_heads
self.num_key_value_heads = num_key_value_heads
self.head_dim = head_dim
self.layer_norm_epsilon = layer_norm_epsilon
self.dropout = dropout
self.pre_attention_norm = RMSNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="pre_attention_norm",
)
self.attention = CachedGemmaAttention(
head_dim=head_dim,
num_query_heads=num_query_heads,
num_key_value_heads=num_key_value_heads,
dropout=dropout,
dtype=self.dtype_policy,
name="attention",
)
if self.dropout > 0:
self.attention_dropout = keras.layers.Dropout(rate=dropout)
self.feedforward_dropout = keras.layers.Dropout(rate=dropout)
self.pre_ffw_norm = RMSNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="pre_ffw_norm",
)
self.gating_ffw = keras.layers.EinsumDense(
equation="btd,df->btf",
output_shape=(None, self.intermediate_dim // 2),
dtype=self.dtype_policy,
name="ffw_gating",
)
self.gating_ffw_2 = keras.layers.EinsumDense(
equation="btd,df->btf",
output_shape=(None, self.intermediate_dim // 2),
dtype=self.dtype_policy,
name="ffw_gating_2",
)
self.ffw_linear = keras.layers.EinsumDense(
equation="btf,fd->btd",
output_shape=(None, self.hidden_dim),
dtype=self.dtype_policy,
name="ffw_linear",
)
def build(self, input_shape):
self.pre_attention_norm.build(input_shape)
self.attention.build(input_shape)
shape = input_shape
self.pre_ffw_norm.build(shape)
self.gating_ffw.build(shape)
self.gating_ffw_2.build(shape)
shape = self.gating_ffw.compute_output_shape(shape)
self.ffw_linear.build(shape)
self.built = True
def compute_output_shape(self, input_shape):
# Isometric
return input_shape
def _compute_attention_mask(
self, x, padding_mask, cache, cache_update_index
):
decoder_mask = merge_padding_and_attention_mask(
inputs=x, padding_mask=padding_mask, attention_mask=None
)
batch_size = ops.shape(x)[0]
input_length = output_length = ops.shape(x)[1]
if cache is not None:
input_length = ops.shape(cache)[2]
causal_mask = compute_causal_mask(
batch_size=batch_size,
input_length=input_length,
output_length=output_length,
cache_index=cache_update_index,
)
return (
ops.minimum(decoder_mask, causal_mask)
if decoder_mask is not None
else causal_mask
)
def call(
self,
x,
padding_mask=None,
cache=None,
cache_update_index=0,
):
normalized_x = self.pre_attention_norm(x)
attention_mask = self._compute_attention_mask(
normalized_x, padding_mask, cache, cache_update_index
)
if cache is not None:
attention, new_cache = self.attention(
normalized_x,
attention_mask=attention_mask,
cache=cache,
cache_update_index=cache_update_index,
)
else:
attention = self.attention(
normalized_x,
attention_mask=attention_mask,
)
if self.dropout:
attention = self.attention_dropout(attention)
attention_x = x + attention
normalized_x = self.pre_ffw_norm(attention_x)
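        # Gated feed-forward (GeGLU-style): gelu(x @ W_gate) * (x @ W_up),
        # then project back to `hidden_dim`.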
x1 = self.gating_ffw(normalized_x)
x2 = self.gating_ffw_2(normalized_x)
x = keras.activations.gelu(x1, approximate=True) * x2
x = self.ffw_linear(x)
x = x + attention_x
if cache is not None:
return x, new_cache
return x
def get_config(self):
config = super().get_config()
config.update(
{
"hidden_dim": self.hidden_dim,
"intermediate_dim": self.intermediate_dim,
"head_dim": self.head_dim,
"num_query_heads": self.num_query_heads,
"num_key_value_heads": self.num_key_value_heads,
"layer_norm_epsilon": self.layer_norm_epsilon,
"dropout": self.dropout,
}
)
return config
| keras-nlp/keras_nlp/models/gemma/gemma_decoder_block.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_decoder_block.py",
"repo_id": "keras-nlp",
"token_count": 2954
} | 137 |
# Copyright 2022 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker
from keras_nlp.models.gpt2.gpt2_presets import backbone_presets
from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
from keras_nlp.models.preprocessor import Preprocessor
from keras_nlp.utils.keras_utils import (
convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.GPT2Preprocessor")
class GPT2Preprocessor(Preprocessor):
"""GPT2 preprocessing layer which tokenizes and packs inputs.
This preprocessing layer will do 2 things:
- Tokenize the inputs using the `tokenizer`.
    - Construct a dictionary with keys `"token_ids"` and `"padding_mask"` that can
be passed directly to a `keras_nlp.models.GPT2Backbone`.
This layer can be used directly with `tf.data.Dataset.map` to preprocess
string data in the `(x, y, sample_weight)` format used by
`keras.Model.fit`.
The call method of this layer accepts three arguments, `x`, `y`, and
`sample_weight`. `x` can be a python string or tensor representing a single
segment, a list of python strings representing a batch of single segments,
or a list of tensors representing multiple segments to be packed together.
`y` and `sample_weight` are both optional, can have any format, and will be
passed through unaltered.
`GPT2Preprocessor` forces the input to have only one segment, as GPT2 is
mainly used for generation tasks. For tasks having multi-segment inputs
like "glue/mnli", please use a model designed for classification purposes
such as BERT or RoBERTa.
Args:
tokenizer: A `keras_nlp.models.GPT2Tokenizer` instance.
sequence_length: The length of the packed inputs.
add_start_token: If `True`, the preprocessor will prepend the tokenizer
start token to each input sequence.
add_end_token: If `True`, the preprocessor will append the tokenizer
end token to each input sequence.
Call arguments:
x: A string, `tf.Tensor` or list of python strings.
y: Any label data. Will be passed through unaltered.
sample_weight: Any label weight data. Will be passed through unaltered.
sequence_length: Pass to override the configured `sequence_length` of
the layer.
Examples:
Directly calling the layer on data.
```python
preprocessor = keras_nlp.models.GPT2Preprocessor.from_preset("gpt2_base_en")
# Tokenize and pack a single sentence.
preprocessor("The quick brown fox jumped.")
# Tokenize a batch of single sentences.
preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
# Custom vocabulary.
features = ["a quick fox.", "a fox quick."]
vocab = {"<|endoftext|>": 0, "a": 4, "Ġquick": 5, "Ġfox": 6}
merges = ["Ġ q", "u i", "c k", "ui ck", "Ġq uick"]
merges += ["Ġ f", "o x", "Ġf ox"]
tokenizer = keras_nlp.models.GPT2Tokenizer(
vocabulary=vocab,
merges=merges,
)
preprocessor = keras_nlp.models.GPT2Preprocessor(tokenizer=tokenizer)
preprocessor("The quick brown fox jumped.")
```
Mapping with `tf.data.Dataset`.
```python
preprocessor = keras_nlp.models.GPT2Preprocessor.from_preset("gpt2_base_en")
text = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
label = tf.constant([1, 1])
# Map labeled single sentences.
ds = tf.data.Dataset.from_tensor_slices((text, label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled single sentences.
ds = tf.data.Dataset.from_tensor_slices(text)
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
```
"""
def __init__(
self,
tokenizer,
sequence_length=1024,
add_start_token=True,
add_end_token=True,
**kwargs,
):
super().__init__(**kwargs)
self.tokenizer = tokenizer
self.packer = None
self.sequence_length = sequence_length
self.add_start_token = add_start_token
self.add_end_token = add_end_token
def build(self, input_shape):
# Defer packer creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.packer = StartEndPacker(
start_value=self.tokenizer.start_token_id,
end_value=self.tokenizer.end_token_id,
pad_value=self.tokenizer.pad_token_id,
sequence_length=self.sequence_length,
return_padding_mask=True,
)
self.built = True
def call(
self,
x,
y=None,
sample_weight=None,
sequence_length=None,
):
x = convert_inputs_to_list_of_tensor_segments(x)
if len(x) != 1:
raise ValueError(
"GPT2 requires each input feature to contain only "
f"one segment, but received {len(x)}. If you are using GPT2 "
"for a multi-segment classification task, please refer to "
"classification models like BERT or RoBERTa."
)
sequence_length = sequence_length or self.sequence_length
token_ids, padding_mask = self.packer(
self.tokenizer(x[0]),
sequence_length=sequence_length,
add_start_value=self.add_start_token,
add_end_value=self.add_end_token,
)
x = {
"token_ids": token_ids,
"padding_mask": padding_mask,
}
return pack_x_y_sample_weight(x, y, sample_weight)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"add_start_token": self.add_start_token,
"add_end_token": self.add_end_token,
}
)
return config
@property
def sequence_length(self):
"""The padded length of model input sequences."""
return self._sequence_length
@sequence_length.setter
def sequence_length(self, value):
self._sequence_length = value
if self.packer is not None:
self.packer.sequence_length = value
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
@classproperty
def tokenizer_cls(cls):
return GPT2Tokenizer
| keras-nlp/keras_nlp/models/gpt2/gpt2_preprocessor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt2/gpt2_preprocessor.py",
"repo_id": "keras-nlp",
"token_count": 2865
} | 138 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer
@keras_nlp_export("keras_nlp.models.GPTNeoXTokenizer")
class GPTNeoXTokenizer(BytePairTokenizer):
"""A GPTNeoX tokenizer using Byte-Pair Encoding subword segmentation.
This tokenizer class will tokenize raw strings into integer sequences and
is based on `keras_nlp.tokenizers.BytePairTokenizer`. Unlike the
underlying tokenizer, it will check for all special tokens needed by GPTNeoX
models and provides a `from_preset()` method to automatically download
a matching vocabulary for a GPTNeoX preset.
This tokenizer does not provide truncation or padding of inputs.
If input is a batch of strings (rank > 0), the layer will output a
`tf.RaggedTensor` where the last dimension of the output is ragged.
If input is a scalar string (rank == 0), the layer will output a dense
`tf.Tensor` with static shape `[None]`.
Args:
vocabulary: string or dict, maps token to integer ids. If it is a
string, it should be the file path to a json file.
merges: string or list, contains the merge rule. If it is a string,
it should be the file path to merge rules. The merge rule file
should have one merge rule per line. Every merge rule contains
merge entities separated by a space.
"""
def __init__(
self,
vocabulary=None,
merges=None,
**kwargs,
):
# GPTNeoX uses the same start as end token, i.e., "<|endoftext|>".
self.end_token = self.start_token = "<|endoftext|>"
super().__init__(
vocabulary=vocabulary,
merges=merges,
unsplittable_tokens=[self.end_token],
**kwargs,
)
def set_vocabulary_and_merges(self, vocabulary, merges):
super().set_vocabulary_and_merges(vocabulary, merges)
if vocabulary is not None:
# Check for necessary special tokens.
if self.end_token not in self.get_vocabulary():
raise ValueError(
f"Cannot find token `'{self.end_token}'` in the provided "
f"`vocabulary`. Please provide `'{self.end_token}'` in "
"your `vocabulary` or use a pretrained `vocabulary` name."
)
self.end_token_id = self.token_to_id(self.end_token)
self.start_token_id = self.end_token_id
self.pad_token_id = 0
else:
self.end_token_id = None
self.start_token_id = None
self.pad_token_id = None
def get_config(self):
config = super().get_config()
# In the constructor, we pass the list of special tokens to the
# `unsplittable_tokens` arg of the superclass' constructor. Hence, we
# delete it from the config here.
del config["unsplittable_tokens"]
return config
| keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py",
"repo_id": "keras-nlp",
"token_count": 1379
} | 139 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from keras_nlp.models.mistral.mistral_causal_lm_preprocessor import (
MistralCausalLMPreprocessor,
)
from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer
from keras_nlp.tests.test_case import TestCase
class MistralCausalLMPreprocessorTest(TestCase):
def setUp(self):
self.tokenizer = MistralTokenizer(
# Generated using create_mistral_test_proto.py
proto=os.path.join(
self.get_test_data_dir(), "mistral_test_vocab.spm"
)
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"sequence_length": 8,
}
self.input_data = (["the quick brown fox"],)
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=MistralCausalLMPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"token_ids": [[1, 3, 8, 4, 6, 0, 0, 0]],
"padding_mask": [[1, 1, 1, 1, 1, 0, 0, 0]],
},
[[3, 8, 4, 6, 0, 0, 0, 0]], # Pass through labels.
[[1, 1, 1, 1, 0, 0, 0, 0]], # Pass through sample_weights.
),
)
def test_no_start_end_token(self):
input_data = ["the quick brown fox"] * 4
preprocessor = MistralCausalLMPreprocessor(
**self.init_kwargs,
add_start_token=False,
add_end_token=False,
)
x, y, sw = preprocessor(input_data)
self.assertAllEqual(x["token_ids"], [[3, 8, 4, 6, 0, 0, 0, 0]] * 4)
self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 0, 0, 0, 0]] * 4)
self.assertAllEqual(y, [[8, 4, 6, 0, 0, 0, 0, 0]] * 4)
self.assertAllEqual(sw, [[1, 1, 1, 0, 0, 0, 0, 0]] * 4)
def test_generate_preprocess(self):
input_data = "the quick brown fox"
preprocessor = MistralCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_preprocess(input_data)
self.assertAllEqual(x["token_ids"], [1, 3, 8, 4, 6, 0, 0, 0])
self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 0, 0, 0])
def test_generate_postprocess(self):
input_data = {
"token_ids": [1, 3, 8, 4, 6, 0, 0, 0],
"padding_mask": [1, 1, 1, 1, 1, 0, 0, 0],
}
preprocessor = MistralCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_postprocess(input_data)
self.assertAllEqual(x, "the quick brown fox")
@pytest.mark.extra_large
def test_all_presets(self):
for preset in MistralCausalLMPreprocessor.presets:
self.run_preset_test(
cls=MistralCausalLMPreprocessor,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/mistral/mistral_causal_lm_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/mistral/mistral_causal_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1615
} | 140 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.preprocessing_layer import (
PreprocessingLayer,
)
from keras_nlp.models.whisper.whisper_presets import backbone_presets
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring
@keras_nlp_export("keras_nlp.models.WhisperAudioFeatureExtractor")
class WhisperAudioFeatureExtractor(PreprocessingLayer):
"""
Whisper audio feature extractor layer.
This layer takes in a batch of audio tensors, and computes the log-mel
spectrogram features for each audio tensor.
The input audio tensor can either be of shape `(length_of_audio,)` or
`(batch_size, length_of_audio)`. The output is a tensor of shape
`(batch_size, num_frames, num_mels)`, where `num_frames` is
`(max_audio_length * sampling_rate) / stride`.
Args:
num_mels: int. The number of mel-frequency filters. Defaults to `80`.
num_fft_bins: int. The size of the Fourier Transform in STFT.
Defaults to `400`.
stride: int. The distance between neighboring
sliding window frames while computing STFT.
Defaults to `160`.
sampling_rate: int. The sample rate of the audio. Defaults to `16000`.
max_audio_length: int. The length of each audio chunk in
seconds. The input audio tensor will be padded/trimmed to
`max_audio_length * sampling_rate`. Defaults to `30`.
Examples:
```python
audio_tensor = tf.ones((8000,), dtype="float32")
# Compute the log-mel spectrogram.
whisper_audio_feature_extractor = keras_nlp.models.WhisperAudioFeatureExtractor()
whisper_audio_feature_extractor(audio_tensor)
# Compute the log-mel spectrogram for a batch of audio tensors.
audio_tensor_1 = tf.ones((8000,), dtype="float32")
    audio_tensor_2 = tf.ones((10000,), dtype="float32")
audio_tensor = tf.ragged.stack([audio_tensor_1, audio_tensor_2], axis=0)
whisper_audio_feature_extractor(audio_tensor)
```
"""
def __init__(
self,
num_mels=80,
num_fft_bins=400,
stride=160,
sampling_rate=16000,
max_audio_length=30,
**kwargs,
):
# Check dtype and provide a default.
if "dtype" not in kwargs or kwargs["dtype"] is None:
kwargs["dtype"] = "float32"
else:
dtype = tf.dtypes.as_dtype(kwargs["dtype"])
if not dtype.is_floating:
raise ValueError(
f"dtype must be a floating type. Received: dtype={dtype}"
)
super().__init__(**kwargs)
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
self.built = True
self.num_mels = num_mels
self.num_fft_bins = num_fft_bins
self.stride = stride
self.sampling_rate = sampling_rate
self.max_audio_length = max_audio_length
self.num_samples = self.sampling_rate * self.max_audio_length
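        # With the defaults this is 480,000 samples per clip, which yields
        # `num_samples / stride = 3000` output frames.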
# After transposition, `self.mel_filters`'s shape is
# `(num_fft_bins // 2 + 1, num_mels).`
self.mel_filters = self._get_mel_filters()
def _get_mel_filters(self):
"""
Adapted from Hugging Face
(https://github.com/huggingface/transformers/blob/v4.27.1/src/transformers/models/whisper/feature_extraction_whisper.py#L86)
"""
# TODO: Convert to TensorFlow ops (if possible).
dtype = np.float32
# Initialize the weights
weights = np.zeros(
(self.num_mels, int(1 + self.num_fft_bins // 2)), dtype=dtype
)
# Center freqs of each FFT bin
fftfreqs = np.fft.rfftfreq(
n=self.num_fft_bins, d=1.0 / self.sampling_rate
)
# 'Center freqs' of mel bands - uniformly spaced between limits
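        # `max_mel` below is the Slaney-scale mel value of 8000 Hz, the
        # Nyquist frequency at the default 16 kHz sampling rate.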
min_mel = 0.0
max_mel = 45.245640471924965
mels = np.linspace(min_mel, max_mel, self.num_mels + 2)
mels = np.asanyarray(mels)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0 # beginning of log region (Hz)
min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels)
logstep = np.log(6.4) / 27.0 # step size for log region
# If we have vector data, vectorize
log_t = mels >= min_log_mel
freqs[log_t] = min_log_hz * np.exp(
logstep * (mels[log_t] - min_log_mel)
)
mel_f = freqs
fdiff = np.diff(mel_f)
ramps = np.subtract.outer(mel_f, fftfreqs)
for i in range(self.num_mels):
# lower and upper slopes for all bins
lower = -ramps[i] / fdiff[i]
upper = ramps[i + 2] / fdiff[i + 1]
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper))
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (mel_f[2 : self.num_mels + 2] - mel_f[: self.num_mels])
weights *= enorm[:, np.newaxis]
weights = np.transpose(weights)
return tf.constant(weights, dtype=self.compute_dtype)
def _extract_audio_features(self, audio):
audio = tf.cast(audio, self.compute_dtype)
# Use "reflection" padding - `tf.signal.stft` uses symmetric padding
# internally.
audio = tf.pad(
audio,
paddings=[[0, 0], [self.num_fft_bins // 2, self.num_fft_bins // 2]],
mode="REFLECT",
)
# Compute the mel spectrogram.
stft = tf.signal.stft(
audio,
frame_length=self.num_fft_bins,
frame_step=self.stride,
fft_length=self.num_fft_bins,
)
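        # `stft` has shape `(batch_size, num_frames, num_fft_bins // 2 + 1)`;
        # below, the last frame is dropped and the squared magnitudes are
        # projected onto the mel filter bank.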
magnitudes = tf.square(tf.abs(stft[:, :-1, :]))
mel_spec = tf.matmul(
magnitudes,
self.mel_filters,
)
def tf_log10(x):
"""
Computes log base 10 of input tensor using TensorFlow's natural log operator.
"""
numerator = tf.math.log(x)
denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
# Clamp the values to a minimum value of 1e-10. This is done to avoid
# taking the log of 0, i.e., for numerical stability.
mel_spec = tf.maximum(mel_spec, 1e-10)
# Calculate the log mel spectrogram.
log_spec = tf_log10(mel_spec)
# Dynamic range compression.
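        # Clamp each example's log-spectrogram so that no value is more than
        # 8 (log10 units) below that example's maximum.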
log_spec_shape = tf.shape(log_spec)
max_value_minus_eight = tf.math.subtract(
tf.math.reduce_max(log_spec, axis=[1, 2]),
tf.cast(8, dtype=log_spec.dtype),
)
max_value_minus_eight = tf.expand_dims(max_value_minus_eight, axis=1)
max_value_minus_eight = tf.repeat(
max_value_minus_eight,
repeats=log_spec_shape[1] * log_spec_shape[2],
axis=1,
)
max_value_minus_eight = tf.reshape(
max_value_minus_eight, shape=log_spec_shape
)
log_spec = tf.maximum(log_spec, max_value_minus_eight)
# Normalization.
type_cast_four = tf.cast(4, dtype=log_spec.dtype)
log_spec = tf.math.divide(
tf.math.add(log_spec, type_cast_four),
type_cast_four,
)
return log_spec
def call(self, audio):
if not isinstance(audio, (tf.Tensor, tf.RaggedTensor)):
audio = tf.convert_to_tensor(audio)
rank_1_input = audio.shape.rank == 1
if rank_1_input:
audio = tf.expand_dims(audio, 0)
# Convert the tensor to a Ragged Tensor.
if isinstance(audio, tf.Tensor):
audio = tf.RaggedTensor.from_tensor(audio)
# Pad audio.
audio_shape = audio.shape.as_list()
audio_shape[-1] = self.num_samples
audio = audio.to_tensor(shape=audio_shape)
# Find the log mel spectrogram.
log_spec = self._extract_audio_features(audio)
if rank_1_input:
log_spec = tf.squeeze(log_spec, 0)
return log_spec
def get_config(self):
config = super().get_config()
config.update(
{
"num_mels": self.num_mels,
"num_fft_bins": self.num_fft_bins,
"stride": self.stride,
"sampling_rate": self.sampling_rate,
"max_audio_length": self.max_audio_length,
}
)
return config
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
@classmethod
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate Whisper audio feature extractor from a given preset.
Args:
preset: string. Must be one of "{{preset_names}}".
Examples:
```python
    # Load a preset audio feature extractor.
audio_feature_extractor = WhisperAudioFeatureExtractor.from_preset(
"{{example_preset_name}}"
)
# Compute the log-mel spectrogram.
audio_tensor = tf.ones((8000,), dtype=tf.float32)
audio_feature_extractor(audio_tensor)
```
"""
if not cls.presets:
raise NotImplementedError(
"No presets have been created for this class"
)
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
config = cls.presets[preset]["audio_feature_extractor_config"]
return cls.from_config({**config, **kwargs})
format_docstring(
example_preset_name=next(iter(backbone_presets), ""),
preset_names='", "'.join(backbone_presets),
)(WhisperAudioFeatureExtractor.from_preset.__func__)
| keras-nlp/keras_nlp/models/whisper/whisper_audio_feature_extractor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/whisper/whisper_audio_feature_extractor.py",
"repo_id": "keras-nlp",
"token_count": 4942
} | 141 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from keras_nlp.tests.test_case import TestCase
from keras_nlp.tokenizers.word_piece_tokenizer_trainer import (
compute_word_piece_vocabulary,
)
class WordPieceTokenizerTrainerTest(TestCase):
def test_dataset_input(self):
test_text = ["baa maa caa saa aaa"]
test_output = ["a", "b", "c", "m", "s", "##aa", "##a", "##b"]
data = tf.data.Dataset.from_tensor_slices(test_text)
vocab = compute_word_piece_vocabulary(data, 8, reserved_tokens=[])
self.assertAllEqual(vocab, test_output)
def test_filenames_input(self):
test_text = "baa maa caa saa aaa"
input_file = os.path.join(self.get_temp_dir(), "test.txt")
with open(input_file, "w+") as f:
f.write(test_text + "\n")
test_output = ["a", "b", "c", "m", "s", "##aa", "##a", "##b"]
vocab = compute_word_piece_vocabulary(
[input_file],
8,
reserved_tokens=[],
)
self.assertAllEqual(vocab, test_output)
def test_filenames_without_split(self):
test_text = "baa maa caa saa aaa"
input_file = os.path.join(self.get_temp_dir(), "test.txt")
with open(input_file, "w+") as f:
f.write(test_text + "\n")
with self.assertRaisesRegex(
ValueError,
"When learning a vocab from files, `split` must be `True`. "
"To compute a vocabulary with custom split rules, load your "
"data as a dataset, split it, and pass it to "
r"`compute_word_piece_vocabulary\(\)` with split=False.",
):
compute_word_piece_vocabulary(["test.txt"], 10, split=False)
def test_invalid_input(self):
test_text_invalid = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4])
with self.assertRaisesRegex(
ValueError,
"The dataset elements in `data` must have string dtype. "
"Received: <dtype: 'int32'>.",
):
compute_word_piece_vocabulary(test_text_invalid, 10)
with self.assertRaisesRegex(
ValueError,
"The `data` argument must be either `tf.data.Dataset` or `list`. "
"Received: <class 'int'>.",
):
compute_word_piece_vocabulary(4, 4)
def test_lowercase(self):
test_text = tf.data.Dataset.from_tensor_slices(["BaA Maa Caa Saa AAa"])
test_output = ["a", "b", "c", "m", "s", "##aa", "##a", "##b"]
vocab = compute_word_piece_vocabulary(
test_text, 8, lowercase=True, reserved_tokens=[]
)
self.assertAllEqual(vocab, test_output)
def test_skip_lowercase(self):
test_text = tf.data.Dataset.from_tensor_slices(["BAA MAA CAA SAA AAA"])
test_output = ["A", "B", "C", "M", "S", "##AA", "##A", "##B"]
vocab = compute_word_piece_vocabulary(
test_text, 8, lowercase=False, reserved_tokens=[]
)
self.assertAllEqual(vocab, test_output)
def test_split(self):
test_text = tf.data.Dataset.from_tensor_slices(
["This string: would be split up."]
)
test_text_split = tf.data.Dataset.from_tensor_slices(
["This", "string", ":", "would", "be", "split", "up", "."]
)
output_vocab_1 = compute_word_piece_vocabulary(
test_text, 20, split=True, lowercase=False, strip_accents=False
)
output_vocab_2 = compute_word_piece_vocabulary(
test_text_split,
20,
split=False,
lowercase=False,
strip_accents=False,
)
self.assertAllEqual(output_vocab_1, output_vocab_2)
def test_split_on_cjk(self):
test_text = tf.data.Dataset.from_tensor_slices(["ah半推zz"])
test_text_split = tf.data.Dataset.from_tensor_slices(
["ah", "半", "推", "zz"]
)
output_vocab_1 = compute_word_piece_vocabulary(
test_text,
4,
split=True,
split_on_cjk=True,
lowercase=False,
strip_accents=False,
)
output_vocab_2 = compute_word_piece_vocabulary(
test_text_split,
4,
split=False,
split_on_cjk=False,
lowercase=False,
strip_accents=False,
)
self.assertAllEqual(output_vocab_1, output_vocab_2)
def test_skip_split(self):
test_text = tf.data.Dataset.from_tensor_slices(
[
"This is a long line that isn't split up, and it exceeds maximum length."
]
)
# The token would be removed for being too long.
vocab = compute_word_piece_vocabulary(
test_text, 20, split=False, reserved_tokens=[]
)
self.assertAllEqual(vocab, [])
def test_strip_accents(self):
test_text = tf.data.Dataset.from_tensor_slices(
["áááá éááá íááá óááá úááá"]
)
output = ["a", "e", "i", "o", "u", "##aaa", "##a", "##e"]
vocab = compute_word_piece_vocabulary(
test_text, 8, strip_accents=True, reserved_tokens=[]
)
self.assertAllEqual(vocab, output)
def test_skip_strip_accents(self):
test_text = tf.data.Dataset.from_tensor_slices(
["áááá éááá íááá óááá úááá"]
)
output = ["á", "é", "í", "ó", "ú", "##ááá", "##á", "##é"]
vocab = compute_word_piece_vocabulary(
test_text, 8, strip_accents=False, reserved_tokens=[]
)
self.assertAllEqual(vocab, output)
def test_output_file(self):
test_text = tf.data.Dataset.from_tensor_slices(["BaA Maa Caa Saa AAa"])
test_output = ["a", "b", "c", "m", "s", "##aa", "##a", "##b"]
vocab_file = os.path.join(self.get_temp_dir(), "test.txt")
compute_word_piece_vocabulary(
test_text,
8,
vocab_file,
lowercase=True,
reserved_tokens=[],
)
vocab_from_file = []
with open(vocab_file, "r", encoding="utf-8") as f:
for line in f:
vocab_from_file.append(line.strip())
self.assertAllEqual(vocab_from_file, test_output)
def test_reserved_tokens(self):
# This dummy text/token would be removed for being too long.
test_text = tf.data.Dataset.from_tensor_slices(
[
"The learner requires at least one input here, but this should be removed."
]
)
output = ["token1", "token2", "token3", "token4"]
vocab = compute_word_piece_vocabulary(
test_text, 20, reserved_tokens=output, split=False
)
self.assertAllEqual(vocab, output)
def test_suffix_indicator(self):
test_text = tf.data.Dataset.from_tensor_slices(["baa maa caa saa aaa"])
test_output = ["a", "b", "c", "m", "s", "@@aa", "@@a", "@@b"]
vocab = compute_word_piece_vocabulary(
test_text, 8, suffix_indicator="@@", reserved_tokens=[]
)
self.assertAllEqual(vocab, test_output)
| keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py",
"repo_id": "keras-nlp",
"token_count": 3786
} | 142 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import numpy as np
import tensorflow as tf
import torch
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import extract_files_from_archive
from checkpoint_conversion_utils import get_md5_checksum
from tensorflow import keras
import keras_nlp
PRESET_MAP = {
"roberta_base_en": ("roberta.base", "roberta-base"),
"roberta_large_en": ("roberta.large", "roberta-large"),
}
DOWNLOAD_SCRIPT_URL = "https://dl.fbaipublicfiles.com/fairseq/models/{}.tar.gz"
EXTRACT_DIR = "./{}"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def download_model(size, hf_model_name):
print("-> Download original weights.")
extract_dir = EXTRACT_DIR.format(size)
archive_file_path = keras.utils.get_file(
fname=None,
origin=DOWNLOAD_SCRIPT_URL.format(size),
cache_subdir=os.path.join("checkpoint_conversion", FLAGS.preset),
)
extract_files_from_archive(archive_file_path)
# The original `tar.gz` file does not have the vocab files. Let's fetch
# them from HF.
vocabulary_path = keras.utils.get_file(
fname=None,
origin=f"https://huggingface.co/{hf_model_name}/raw/main/vocab.json",
)
shutil.copy(vocabulary_path, extract_dir)
merges_path = keras.utils.get_file(
fname=None,
origin=f"https://huggingface.co/{hf_model_name}/raw/main/merges.txt",
)
shutil.copy(merges_path, extract_dir)
def convert_checkpoints(size):
print("\n-> Convert original weights to KerasNLP format.")
# RoBERTa paths.
extract_dir = EXTRACT_DIR.format(size)
checkpoint_path = os.path.join(extract_dir, "model.pt")
# Load PyTorch RoBERTa checkpoint.
pt_ckpt = torch.load(checkpoint_path, map_location=torch.device("cpu"))
pt_cfg = pt_ckpt["args"]
pt_model = pt_ckpt["model"]
cfg = {
"num_layers": pt_cfg.encoder_layers,
"num_heads": pt_cfg.encoder_attention_heads,
"hidden_dim": pt_cfg.encoder_embed_dim,
"intermediate_dim": pt_cfg.encoder_ffn_embed_dim,
"dropout": pt_cfg.dropout,
"max_sequence_length": pt_cfg.max_positions,
"vocab_size": (
pt_model["decoder.sentence_encoder.embed_tokens.weight"]
.numpy()
.shape[0]
),
}
print("Config:", cfg)
keras_nlp_model = keras_nlp.models.RobertaBackbone.from_preset(
FLAGS.preset, load_weights=False
)
# Embedding Layer.
keras_nlp_model.get_layer("embeddings").token_embedding.embeddings.assign(
pt_model["decoder.sentence_encoder.embed_tokens.weight"].numpy()
)
keras_nlp_model.get_layer(
"embeddings"
).position_embedding.position_embeddings.assign(
pt_model["decoder.sentence_encoder.embed_positions.weight"].numpy()[
2:, :
]
)
# Embedding LayerNorm.
keras_nlp_model.get_layer("embeddings_layer_norm").gamma.assign(
pt_model["decoder.sentence_encoder.emb_layer_norm.weight"].numpy()
)
keras_nlp_model.get_layer("embeddings_layer_norm").beta.assign(
pt_model["decoder.sentence_encoder.emb_layer_norm.bias"].numpy()
)
# The QKV weights in the original checkpoint are present as one single
# dense layer of shape `(3 * cfg["hidden_dim"], cfg["hidden_dim"])`. Our
# model has three separate dense layers for each of QKV. Hence, we need to
# split the original QKV weights into three chunks.
range_1 = (0, cfg["hidden_dim"])
range_2 = (cfg["hidden_dim"], 2 * cfg["hidden_dim"])
range_3 = (2 * cfg["hidden_dim"], 3 * cfg["hidden_dim"])
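    # For example, with `roberta_base_en` (hidden_dim=768, num_heads=12), the
    # transposed in-proj weight has shape (768, 2304); each 768-column slice
    # becomes the Q, K or V kernel and is reshaped to (768, 12, 64).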
# Transformer layers.
for i in range(keras_nlp_model.num_layers):
q_k_v_wts = (
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn.in_proj_weight"
]
.numpy()
.T
)
q_k_v_bias = (
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn.in_proj_bias"
]
.numpy()
.T
)
# Query
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
q_k_v_wts[:, range_1[0] : range_1[1]].reshape(
(cfg["hidden_dim"], cfg["num_heads"], -1)
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
q_k_v_bias[range_1[0] : range_1[1]].reshape((cfg["num_heads"], -1))
)
# Key
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
q_k_v_wts[:, range_2[0] : range_2[1]].reshape(
(cfg["hidden_dim"], cfg["num_heads"], -1)
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
q_k_v_bias[range_2[0] : range_2[1]].reshape((cfg["num_heads"], -1))
)
# Value
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
q_k_v_wts[:, range_3[0] : range_3[1]].reshape(
(cfg["hidden_dim"], cfg["num_heads"], -1)
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
q_k_v_bias[range_3[0] : range_3[1]].reshape((cfg["num_heads"], -1))
)
# Attention output
attn_output_wts = (
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn.out_proj.weight"
]
.numpy()
.T
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
attn_output_wts.reshape((cfg["num_heads"], -1, cfg["hidden_dim"]))
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn.out_proj.bias"
].numpy()
)
# Attention LayerNorm
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn_layer_norm.weight"
].numpy()
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(
pt_model[
f"decoder.sentence_encoder.layers.{i}.self_attn_layer_norm.bias"
].numpy()
)
# Intermediate FF layer
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
pt_model[f"decoder.sentence_encoder.layers.{i}.fc1.weight"]
.numpy()
.T
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
pt_model[f"decoder.sentence_encoder.layers.{i}.fc1.bias"].numpy()
)
# Output dense layer
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
pt_model[f"decoder.sentence_encoder.layers.{i}.fc2.weight"]
.numpy()
.T
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
pt_model[f"decoder.sentence_encoder.layers.{i}.fc2.bias"].numpy()
)
# FF LayerNorm
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
pt_model[
f"decoder.sentence_encoder.layers.{i}.final_layer_norm.weight"
].numpy()
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(
pt_model[
f"decoder.sentence_encoder.layers.{i}.final_layer_norm.bias"
].numpy()
)
# Save the model.
print(f"\n-> Save KerasNLP model weights to `{FLAGS.preset}.h5`.")
keras_nlp_model.save_weights(f"{FLAGS.preset}.h5")
return keras_nlp_model
def define_preprocessor(hf_model_name, size):
print("\n-> Define the tokenizers.")
extract_dir = EXTRACT_DIR.format(size)
vocabulary_path = os.path.join(extract_dir, "vocab.json")
merges_path = os.path.join(extract_dir, "merges.txt")
keras_nlp_tokenizer = keras_nlp.models.RobertaTokenizer(
vocabulary=vocabulary_path, merges=merges_path
)
keras_nlp_preprocessor = keras_nlp.models.RobertaPreprocessor(
keras_nlp_tokenizer
)
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_model_name)
print("\n-> Print MD5 checksum of the vocab files.")
print(f"`{vocabulary_path}` md5sum: ", get_md5_checksum(vocabulary_path))
print(f"`{merges_path}` md5sum: ", get_md5_checksum(merges_path))
return keras_nlp_preprocessor, hf_tokenizer
def check_output(
keras_nlp_model,
keras_nlp_preprocessor,
hf_model,
hf_tokenizer,
):
print("\n-> Check the outputs.")
input_str = ["the quick brown fox ran, galloped and jumped."]
# KerasNLP
keras_nlp_inputs = keras_nlp_preprocessor(tf.constant(input_str))
keras_nlp_output = keras_nlp_model.predict(keras_nlp_inputs)
# HF
hf_inputs = hf_tokenizer(
input_str, padding="max_length", return_tensors="pt"
)
hf_output = hf_model(**hf_inputs).last_hidden_state
print("KerasNLP output:", keras_nlp_output[0, 0, :10])
print("HF output:", hf_output[0, 0, :10])
print("Difference:", np.mean(keras_nlp_output - hf_output.detach().numpy()))
# Show the MD5 checksum of the model weights.
print("Model md5sum: ", get_md5_checksum(f"./{FLAGS.preset}.h5"))
return keras_nlp_output
def main(_):
assert (
FLAGS.preset in PRESET_MAP.keys()
), f'Invalid preset {FLAGS.preset}. Must be one of {",".join(PRESET_MAP.keys())}'
size = PRESET_MAP[FLAGS.preset][0]
hf_model_name = PRESET_MAP[FLAGS.preset][1]
download_model(size, hf_model_name)
keras_nlp_model = convert_checkpoints(size)
print("\n-> Load HF model.")
hf_model = transformers.AutoModel.from_pretrained(hf_model_name)
hf_model.eval()
keras_nlp_preprocessor, hf_tokenizer = define_preprocessor(
hf_model_name, size
)
check_output(
keras_nlp_model,
keras_nlp_preprocessor,
hf_model,
hf_tokenizer,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_roberta_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_roberta_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 5652
} | 143 |
"""Utilities for performing affine transformations on image data.
"""
import numpy as np
from .utils import array_to_img, img_to_array
try:
import scipy
# scipy.ndimage cannot be accessed until explicitly imported
from scipy import ndimage
except ImportError:
scipy = None
try:
from PIL import Image as pil_image
from PIL import ImageEnhance
except ImportError:
pil_image = None
ImageEnhance = None
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Rotated Numpy image tensor.
"""
theta = np.random.uniform(-rg, rg)
x = apply_affine_transform(x,
theta=theta,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
x = apply_affine_transform(x,
tx=tx,
ty=ty,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
x = apply_affine_transform(x,
shear=shear,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., interpolation_order=1):
"""Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
interpolation_order: int, order of spline interpolation.
see `ndimage.interpolation.affine_transform`
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two'
' floats. Received: %s' % (zoom_range,))
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
x = apply_affine_transform(x,
zx=zx,
zy=zy,
row_axis=row_axis,
col_axis=col_axis,
channel_axis=channel_axis,
fill_mode=fill_mode,
cval=cval,
order=interpolation_order)
return x
def apply_channel_shift(x, intensity, channel_axis=0):
"""Performs a channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
x = np.rollaxis(x, channel_axis, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [
np.clip(x_channel + intensity,
min_x,
max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def random_channel_shift(x, intensity_range, channel_axis=0):
"""Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity_range: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
"""
intensity = np.random.uniform(-intensity_range, intensity_range)
return apply_channel_shift(x, intensity, channel_axis=channel_axis)
def apply_brightness_shift(x, brightness, scale=True):
"""Performs a brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness: Float. The new brightness value.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively.
Default: True.
# Returns
Numpy image tensor.
# Raises
ImportError: if PIL is not available.
"""
if ImageEnhance is None:
raise ImportError('Using brightness shifts requires PIL. '
'Install PIL or Pillow.')
x_min, x_max = np.min(x), np.max(x)
local_scale = (x_min < 0) or (x_max > 255)
x = array_to_img(x, scale=local_scale or scale)
    imgenhancer_Brightness = ImageEnhance.Brightness(x)
    x = imgenhancer_Brightness.enhance(brightness)
x = img_to_array(x)
if not scale and local_scale:
x = x / 255 * (x_max - x_min) + x_min
return x
def random_brightness(x, brightness_range, scale=True):
"""Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively.
Default: True.
# Returns
Numpy image tensor.
# Raises
ValueError if `brightness_range` isn't a tuple.
"""
if len(brightness_range) != 2:
raise ValueError(
            '`brightness_range` should be tuple or list of two floats. '
'Received: %s' % (brightness_range,))
u = np.random.uniform(brightness_range[0], brightness_range[1])
return apply_brightness_shift(x, u, scale)
def transform_matrix_offset_center(matrix, x, y):
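    """Offsets the given transform `matrix` so that it is applied about the
    center of an image of size `x` by `y` instead of the origin."""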
o_x = float(x) / 2 - 0.5
o_y = float(y) / 2 - 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1,
row_axis=1, col_axis=2, channel_axis=0,
fill_mode='nearest', cval=0., order=1):
"""Applies an affine transformation specified by the parameters given.
# Arguments
x: 3D numpy array - a 2D image with one or more channels.
theta: Rotation angle in degrees.
tx: Width shift.
        ty: Height shift.
        shear: Shear angle in degrees.
        zx: Zoom in x direction.
        zy: Zoom in y direction.
row_axis: Index of axis for rows (aka Y axis) in the input image.
Direction: left to right.
col_axis: Index of axis for columns (aka X axis) in the input image.
Direction: top to bottom.
channel_axis: Index of axis for channels in the input image.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
order: int, order of interpolation
# Returns
The transformed version of the input.
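    # Example
        A minimal sketch (assuming a channels-first image, which matches the
        default `row_axis=1`, `col_axis=2`, `channel_axis=0`):
    ```python
    x = np.random.random((3, 64, 64))
    x_transformed = apply_affine_transform(x, theta=30., zx=1.2, zy=1.2)
    ```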
"""
if scipy is None:
raise ImportError('Image transformations require SciPy. '
'Install SciPy.')
# Input sanity checks:
    # 1. x must be a 2D image with one or more channels (i.e., a 3D tensor)
# 2. channels must be either first or last dimension
if np.unique([row_axis, col_axis, channel_axis]).size != 3:
raise ValueError("'row_axis', 'col_axis', and 'channel_axis'"
" must be distinct")
# TODO: shall we support negative indices?
valid_indices = set([0, 1, 2])
actual_indices = set([row_axis, col_axis, channel_axis])
if actual_indices != valid_indices:
        raise ValueError(
            f"Invalid axis indices: {actual_indices - valid_indices}")
if x.ndim != 3:
raise ValueError("Input arrays must be multi-channel 2D images.")
if channel_axis not in [0, 2]:
raise ValueError("Channels are allowed and the first and last dimensions.")
transform_matrix = None
if theta != 0:
theta = np.deg2rad(theta)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shift_matrix
else:
transform_matrix = np.dot(transform_matrix, shift_matrix)
if shear != 0:
shear = np.deg2rad(shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = shear_matrix
else:
transform_matrix = np.dot(transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if transform_matrix is None:
transform_matrix = zoom_matrix
else:
transform_matrix = np.dot(transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(
transform_matrix, h, w)
x = np.rollaxis(x, channel_axis, 0)
# Matrix construction assumes that coordinates are x, y (in that order).
# However, regular numpy arrays use y,x (aka i,j) indexing.
# Possible solution is:
# 1. Swap the x and y axes.
# 2. Apply transform.
# 3. Swap the x and y axes again to restore image-like data ordering.
# Mathematically, it is equivalent to the following transformation:
# M' = PMP, where P is the permutation matrix, M is the original
# transformation matrix.
if col_axis > row_axis:
transform_matrix[:, [0, 1]] = transform_matrix[:, [1, 0]]
transform_matrix[[0, 1]] = transform_matrix[[1, 0]]
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=order,
mode=fill_mode,
cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
| keras-preprocessing/keras_preprocessing/image/affine_transformations.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/affine_transformations.py",
"repo_id": "keras-preprocessing",
"token_count": 7028
} | 144 |
import numpy as np
import pytest
from PIL import Image
from keras_preprocessing.image import numpy_array_iterator, utils
from keras_preprocessing.image.image_data_generator import ImageDataGenerator
@pytest.fixture(scope='module')
def all_test_images():
img_w = img_h = 20
rgb_images = []
rgba_images = []
gray_images = []
for n in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 4) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGBA')
rgba_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = Image.fromarray(
imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
return [rgb_images, rgba_images, gray_images]
@pytest.fixture(scope='module')
def image_data_generator():
return ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
brightness_range=(1, 5),
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True,
interpolation_order=1
)
def test_numpy_array_iterator(image_data_generator, all_test_images, tmpdir):
for test_images in all_test_images:
img_list = []
for im in test_images:
img_list.append(utils.img_to_array(im)[None, ...])
images = np.vstack(img_list)
dsize = images.shape[0]
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
save_to_dir=str(tmpdir),
batch_size=3
)
x, y = next(iterator)
assert x.shape == images[:3].shape
assert list(y) == [0, 1, 2]
# Test with sample weights
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=False,
sample_weight=np.arange(images.shape[0]) + 1,
save_to_dir=str(tmpdir),
batch_size=3
)
x, y, w = iterator.next()
assert x.shape == images[:3].shape
assert list(y) == [0, 1, 2]
assert list(w) == [1, 2, 3]
# Test with `shuffle=True`
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
image_data_generator,
shuffle=True,
save_to_dir=str(tmpdir),
batch_size=3,
seed=42
)
x, y = iterator.next()
assert x.shape == images[:3].shape
# Check that the sequence is shuffled.
assert list(y) != [0, 1, 2]
# Test without y
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
None,
image_data_generator,
shuffle=True,
save_to_dir=str(tmpdir),
batch_size=3
)
x = iterator.next()
assert type(x) is np.ndarray
assert x.shape == images[:3].shape
# Test with a single miscellaneous input data array
x_misc1 = np.random.random(dsize)
iterator = numpy_array_iterator.NumpyArrayIterator(
(images, x_misc1),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2
)
for i, (x, y) in enumerate(iterator):
assert x[0].shape == images[:2].shape
assert (x[1] == x_misc1[(i * 2):((i + 1) * 2)]).all()
if i == 2:
break
# Test with two miscellaneous inputs
x_misc2 = np.random.random((dsize, 3, 3))
iterator = numpy_array_iterator.NumpyArrayIterator(
(images, [x_misc1, x_misc2]),
np.arange(dsize),
image_data_generator,
shuffle=False,
batch_size=2
)
for i, (x, y) in enumerate(iterator):
assert x[0].shape == images[:2].shape
assert (x[1] == x_misc1[(i * 2):((i + 1) * 2)]).all()
assert (x[2] == x_misc2[(i * 2):((i + 1) * 2)]).all()
if i == 2:
break
# Test cases with `y = None`
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
None,
image_data_generator,
batch_size=3
)
x = iterator.next()
assert type(x) is np.ndarray
assert x.shape == images[:3].shape
iterator = numpy_array_iterator.NumpyArrayIterator(
(images, x_misc1),
None,
image_data_generator,
batch_size=3,
shuffle=False
)
x = iterator.next()
assert type(x) is list
assert x[0].shape == images[:3].shape
assert (x[1] == x_misc1[:3]).all()
iterator = numpy_array_iterator.NumpyArrayIterator(
(images, [x_misc1, x_misc2]),
None,
image_data_generator,
batch_size=3,
shuffle=False
)
x = iterator.next()
assert type(x) is list
assert x[0].shape == images[:3].shape
assert (x[1] == x_misc1[:3]).all()
assert (x[2] == x_misc2[:3]).all()
# Test with validation split
generator = ImageDataGenerator(validation_split=0.2)
iterator = numpy_array_iterator.NumpyArrayIterator(
images,
None,
generator,
batch_size=3
)
x = iterator.next()
assert isinstance(x, np.ndarray)
assert x.shape == images[:3].shape
# Test some failure cases:
x_misc_err = np.random.random((dsize + 1, 3, 3))
with pytest.raises(ValueError) as e_info:
numpy_array_iterator.NumpyArrayIterator(
(images, x_misc_err),
np.arange(dsize),
generator,
batch_size=3
)
assert str(e_info.value).find('All of the arrays in') != -1
with pytest.raises(ValueError) as e_info:
numpy_array_iterator.NumpyArrayIterator(
(images, x_misc1),
np.arange(dsize + 1),
generator,
batch_size=3
)
assert str(e_info.value).find('`x` (images tensor) and `y` (labels) ') != -1
# Test `flow` behavior as Sequence
seq = numpy_array_iterator.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=False, save_to_dir=str(tmpdir),
batch_size=3
)
assert len(seq) == images.shape[0] // 3 + 1
x, y = seq[0]
assert x.shape == images[:3].shape
assert list(y) == [0, 1, 2]
# Test with `shuffle=True`
seq = numpy_array_iterator.NumpyArrayIterator(
images,
np.arange(images.shape[0]),
generator,
shuffle=True,
save_to_dir=str(tmpdir),
batch_size=3,
seed=123
)
x, y = seq[0]
# Check that the sequence is shuffled.
assert list(y) != [0, 1, 2]
# `on_epoch_end` should reshuffle the sequence.
seq.on_epoch_end()
x2, y2 = seq[0]
assert list(y) != list(y2)
# test order_interpolation
labels = np.array([[2, 2, 0, 2, 2],
[1, 3, 2, 3, 1],
[2, 1, 0, 1, 2],
[3, 1, 0, 2, 0],
[3, 1, 3, 2, 1]])
label_generator = ImageDataGenerator(
rotation_range=90.,
interpolation_order=0
)
labels_gen = numpy_array_iterator.NumpyArrayIterator(
labels[np.newaxis, ..., np.newaxis],
None,
label_generator,
seed=123
)
assert (np.unique(labels) == np.unique(next(labels_gen))).all()
| keras-preprocessing/tests/image/numpy_array_iterator_test.py/0 | {
"file_path": "keras-preprocessing/tests/image/numpy_array_iterator_test.py",
"repo_id": "keras-preprocessing",
"token_count": 4446
} | 145 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HyperEfficientNet Model."""
import numpy as np
import pytest
from keras_tuner.applications import efficientnet
from keras_tuner.backend import config
from keras_tuner.backend import keras
from keras_tuner.engine import hypermodel as hm_module
from keras_tuner.engine import hyperparameters as hp_module
if config.backend() == "torch":
keras.backend.set_image_data_format("channels_first")
else:
keras.backend.set_image_data_format("channels_last")
if keras.backend.image_data_format() == "channels_last":
INPUT_SHAPE_32 = (32, 32, 3)
INPUT_SHAPE_224 = (224, 224, 3)
INPUT_SHAPE_256 = (256, 256, 3)
else:
INPUT_SHAPE_32 = (3, 32, 32)
INPUT_SHAPE_224 = (3, 224, 224)
INPUT_SHAPE_256 = (3, 256, 256)
@pytest.mark.parametrize("version", ["B0", "B1"])
@pytest.mark.skipif(
config.multi_backend(),
reason="The test is too slow.",
)
def test_model_construction(version):
hp = hp_module.HyperParameters()
hp.Choice("version", [version])
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_32, classes=10
)
model = hypermodel.build(hp)
assert hp.values["version"] == version
assert model.layers
assert model.name == "EfficientNet"
assert model.output_shape == (None, 10)
model.train_on_batch(np.ones((1,) + INPUT_SHAPE_32), np.ones((1, 10)))
out = model.predict(np.ones((1,) + INPUT_SHAPE_32))
assert out.shape == (1, 10)
def test_hyperparameter_existence_and_defaults():
hp = hp_module.HyperParameters()
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_224, classes=10
)
hypermodel.build(hp)
assert hp.get("version") == "B0"
assert hp.get("top_dropout_rate") == 0.2
assert hp.get("learning_rate") == 0.01
assert hp.get("pooling") == "avg"
def test_hyperparameter_override():
hp = hp_module.HyperParameters()
hp.Choice("version", ["B1"])
hp.Fixed("top_dropout_rate", 0.5)
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_256, classes=10
)
hypermodel.build(hp)
assert hp.get("version") == "B1"
assert hp.get("top_dropout_rate") == 0.5
def test_input_tensor():
hp = hp_module.HyperParameters()
inputs = keras.Input(shape=INPUT_SHAPE_256)
hypermodel = efficientnet.HyperEfficientNet(input_tensor=inputs, classes=10)
model = hypermodel.build(hp)
assert model.inputs == [inputs]
def test_override_compiling_phase():
class MyHyperEfficientNet(efficientnet.HyperEfficientNet):
def _compile(self, model, hp):
learning_rate = 0.1
optimizer_name = hp.Choice(
"optimizer", ["adam", "sgd"], default="adam"
)
if optimizer_name == "sgd":
optimizer = keras.optimizers.SGD(
momentum=0.1, learning_rate=learning_rate
)
elif optimizer_name == "adam":
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(
optimizer=optimizer,
loss="categorical_crossentropy",
metrics=["accuracy"],
)
hp = hp_module.HyperParameters()
hypermodel = MyHyperEfficientNet(input_shape=INPUT_SHAPE_32, classes=5)
hypermodel.build(hp)
assert "learning_rate" not in hp.values
assert hp.values["optimizer"] == "adam"
def test_augmentation_param_invalid_input():
with pytest.raises(ValueError):
efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_32, classes=10, augmentation_model=0
)
def test_augmentation_param_fixed_model():
hp = hp_module.HyperParameters()
aug_model = keras.Sequential([keras.layers.RandomRotation(1.0)], name="aug")
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_32, classes=10, augmentation_model=aug_model
)
model = hypermodel.build(hp)
assert model.layers[1].name == "aug"
def test_augmentation_param_hyper_model():
class HyperAug(hm_module.HyperModel):
def build(self, hp):
model = keras.Sequential(name="aug")
scaling_factor = hp.Choice("scaling_factor", [1])
model.add(keras.layers.Lambda(lambda x: x * scaling_factor))
return model
hp = hp_module.HyperParameters()
aug_hm = HyperAug()
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_32, classes=10, augmentation_model=aug_hm
)
model = hypermodel.build(hp)
assert model.layers[1].name == "aug"
assert hp.values["scaling_factor"] == 1
def test_pooling_is_max():
hp = hp_module.HyperParameters()
hp.values["pooling"] = "max"
hypermodel = efficientnet.HyperEfficientNet(
input_shape=INPUT_SHAPE_32, classes=10
)
hypermodel.build(hp)
def test_no_classes_raise_error():
with pytest.raises(ValueError, match="classes"):
efficientnet.HyperEfficientNet(input_shape=INPUT_SHAPE_32)
def test_no_input_shape_tensor_raise_error():
with pytest.raises(ValueError, match="input_tensor"):
efficientnet.HyperEfficientNet(classes=10)
| keras-tuner/keras_tuner/applications/efficientnet_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/applications/efficientnet_test.py",
"repo_id": "keras-tuner",
"token_count": 2323
} | 146 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OracleClient class."""
import os
import grpc
from keras_tuner import protos
from keras_tuner.engine import hyperparameters as hp_module
from keras_tuner.engine import trial as trial_module
TIMEOUT = 5 * 60 # 5 mins
class OracleClient:
"""Wraps an `Oracle` on a worker to send requests to the chief."""
def __init__(self, oracle):
self._oracle = oracle
ip_addr = os.environ["KERASTUNER_ORACLE_IP"]
port = os.environ["KERASTUNER_ORACLE_PORT"]
channel = grpc.insecure_channel(f"{ip_addr}:{port}")
self.stub = protos.get_service_grpc().OracleStub(channel)
self.tuner_id = os.environ["KERASTUNER_TUNER_ID"]
# In multi-worker mode, only the chief of each cluster should report
# results to the chief Oracle.
self.multi_worker = False
self.should_report = True
def __getattr__(self, name):
whitelisted_attrs = {
"objective",
"max_trials",
"allow_new_entries",
"tune_new_entries",
}
if name in whitelisted_attrs:
return getattr(self._oracle, name)
raise AttributeError(f'`OracleClient` object has no attribute "{name}"')
def get_space(self):
response = self.stub.GetSpace(
protos.get_service().GetSpaceRequest(),
wait_for_ready=True,
timeout=TIMEOUT,
)
return hp_module.HyperParameters.from_proto(response.hyperparameters)
def update_space(self, hyperparameters):
if self.should_report:
self.stub.UpdateSpace(
protos.get_service().UpdateSpaceRequest(
hyperparameters=hyperparameters.to_proto()
),
wait_for_ready=True,
timeout=TIMEOUT,
)
def create_trial(self, tuner_id):
response = self.stub.CreateTrial(
protos.get_service().CreateTrialRequest(tuner_id=tuner_id),
wait_for_ready=True,
timeout=TIMEOUT,
)
return trial_module.Trial.from_proto(response.trial)
def update_trial(self, trial_id, metrics, step=0):
# TODO: support early stopping in multi-worker.
if self.should_report:
response = self.stub.UpdateTrial(
protos.get_service().UpdateTrialRequest(
trial_id=trial_id, metrics=metrics, step=step
),
wait_for_ready=True,
timeout=TIMEOUT,
)
if not self.multi_worker:
return trial_module.Trial.from_proto(response.trial)
return trial_module.Trial(self.get_space(), status="RUNNING")
def end_trial(self, trial):
if self.should_report:
self.stub.EndTrial(
protos.get_service().EndTrialRequest(trial=trial.to_proto()),
wait_for_ready=True,
timeout=TIMEOUT,
)
def get_trial(self, trial_id):
response = self.stub.GetTrial(
protos.get_service().GetTrialRequest(trial_id=trial_id),
wait_for_ready=True,
timeout=TIMEOUT,
)
return trial_module.Trial.from_proto(response.trial)
def get_best_trials(self, num_trials=1):
response = self.stub.GetBestTrials(
protos.get_service().GetBestTrialsRequest(num_trials=num_trials),
wait_for_ready=True,
timeout=TIMEOUT,
)
return [
trial_module.Trial.from_proto(trial) for trial in response.trials
]
| keras-tuner/keras_tuner/distribute/oracle_client.py/0 | {
"file_path": "keras-tuner/keras_tuner/distribute/oracle_client.py",
"repo_id": "keras-tuner",
"token_count": 1878
} | 147 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from keras_tuner import protos
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import conditions as conditions_mod
from keras_tuner.engine.hyperparameters import hyperparameter
@keras_tuner_export("keras_tuner.engine.hyperparameters.Fixed")
class Fixed(hyperparameter.HyperParameter):
"""Fixed, untunable value.
Args:
        name: A string. The name of the parameter. Must be unique for each
            `HyperParameter` instance in the search space.
value: The value to use (can be any JSON-serializable Python type).
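    Example:
    A minimal usage sketch via the `HyperParameters` container:
    ```python
    hp = keras_tuner.HyperParameters()
    learning_rate = hp.Fixed("learning_rate", value=1e-4)
    ```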
"""
def __init__(self, name, value, **kwargs):
super().__init__(name=name, default=value, **kwargs)
self.name = name
if isinstance(value, bool):
value = bool(value)
elif isinstance(value, six.integer_types):
value = int(value)
elif isinstance(value, six.string_types):
value = str(value)
elif not isinstance(value, (float, str)):
raise ValueError(
"`Fixed` value must be an `int`, `float`, `str`, "
f"or `bool`, found {value}"
)
self.value = value
def __repr__(self):
return f"Fixed(name: {self.name}, value: {self.value})"
@property
def values(self):
return (self.value,)
def prob_to_value(self, prob):
return self.value
def value_to_prob(self, value):
return 0.5
@property
def default(self):
return self.value
def get_config(self):
config = super().get_config()
config["name"] = self.name
config.pop("default")
config["value"] = self.value
return config
@classmethod
def from_proto(cls, proto):
value = getattr(proto.value, proto.value.WhichOneof("kind"))
conditions = [
conditions_mod.Condition.from_proto(c) for c in proto.conditions
]
return cls(name=proto.name, value=value, conditions=conditions)
def to_proto(self):
if isinstance(self.value, bool):
# Check bool first as bool is subclass of int.
# So bool is also six.integer_types.
value = protos.get_proto().Value(boolean_value=self.value)
elif isinstance(self.value, six.integer_types):
value = protos.get_proto().Value(int_value=self.value)
elif isinstance(self.value, float):
value = protos.get_proto().Value(float_value=self.value)
elif isinstance(self.value, six.string_types):
value = protos.get_proto().Value(string_value=self.value)
return protos.get_proto().Fixed(
name=self.name,
value=value,
conditions=[c.to_proto() for c in self.conditions],
)
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/fixed_hp.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/fixed_hp.py",
"repo_id": "keras-tuner",
"token_count": 1367
} | 148 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import keras_tuner
from keras_tuner.engine import objective
def test_create_objective_with_str():
obj = objective.create_objective("accuracy")
assert obj.name == "accuracy" and obj.direction == "max"
def test_create_objective_with_objective():
obj = objective.create_objective("accuracy")
obj = objective.create_objective(keras_tuner.Objective("score", "min"))
assert obj.name == "score" and obj.direction == "min"
def test_create_objective_with_multi_objective():
obj = objective.create_objective(
[
keras_tuner.Objective("score", "max"),
keras_tuner.Objective("loss", "min"),
]
)
assert isinstance(obj, objective.MultiObjective)
assert (
obj.objectives[0].name == "score"
and obj.objectives[0].direction == "max"
)
assert (
obj.objectives[1].name == "loss"
and obj.objectives[1].direction == "min"
)
def test_create_objective_with_multi_str():
obj = objective.create_objective(["accuracy", "loss"])
assert isinstance(obj, objective.MultiObjective)
assert (
obj.objectives[0].name == "accuracy"
and obj.objectives[0].direction == "max"
)
assert (
obj.objectives[1].name == "loss"
and obj.objectives[1].direction == "min"
)
def test_objective_better_than_max():
obj = objective.create_objective("accuracy")
assert obj.better_than(1, 0)
assert not obj.better_than(0, 1)
assert not obj.better_than(0, 0)
def test_objective_better_than_min():
obj = objective.create_objective("loss")
assert obj.better_than(0, 1)
assert not obj.better_than(1, 0)
assert not obj.better_than(0, 0)
def test_objective_has_value():
obj = objective.create_objective("loss")
assert obj.has_value({"loss": 3.0})
assert not obj.has_value({"accuracy": 3.0})
def test_objective_get_value():
obj = objective.create_objective("loss")
assert obj.get_value({"accuracy": 3.0, "loss": 2.0}) == 2.0
def test_multi_objective_get_value():
obj = objective.create_objective(["accuracy", "loss"])
assert obj.get_value({"accuracy": 3.0, "loss": 2.0}) == -1.0
def test_objective_equal():
obj1 = objective.Objective(name="accuracy", direction="max")
obj2 = objective.Objective(name="accuracy", direction="max")
assert obj1 == obj2
def test_objective_not_equal_with_diff_name():
obj1 = objective.Objective(name="accuracy1", direction="max")
obj2 = objective.Objective(name="accuracy", direction="max")
assert obj1 != obj2
def test_objective_not_equal_with_diff_dir():
obj1 = objective.Objective(name="accuracy", direction="min")
obj2 = objective.Objective(name="accuracy", direction="max")
assert obj1 != obj2
def test_multi_objective_equal():
obj1 = objective.create_objective(["accuracy", "loss"])
obj2 = objective.create_objective(["loss", "accuracy"])
assert obj1 == obj2
def test_multi_objective_not_equal():
obj1 = objective.create_objective(["loss", "loss"])
obj2 = objective.create_objective(["loss", "accuracy"])
assert obj1 != obj2
def test_multi_objective_has_value():
obj = objective.create_objective(["loss", "accuracy"])
assert obj.has_value({"loss": 1.0, "accuracy": 1.0, "mse": 2.0})
assert not obj.has_value({"accuracy": 1.0, "mse": 2.0})
def test_objective_to_str():
obj = objective.Objective(name="accuracy", direction="min")
assert str(obj) == 'Objective(name="accuracy", direction="min")'
def test_multi_objective_to_str():
obj = objective.create_objective(["loss", "accuracy"])
assert (
str(obj) == 'MultiObjective(name="multi_objective", direction="min"): '
'[Objective(name="loss", direction="min"), '
'Objective(name="accuracy", direction="max")]'
)
def test_unknown_objective_type_error():
with pytest.raises(TypeError, match="not understood, expected str"):
objective.create_objective([3, "accuracy"])
| keras-tuner/keras_tuner/engine/objective_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/objective_test.py",
"repo_id": "keras-tuner",
"token_count": 1697
} | 149 |
[tool:pytest]
addopts=-vv
-p no:warnings
--durations=10
--log-cli-level=CRITICAL
# Do not run tests in the build folder
norecursedirs= build
[coverage:report]
exclude_lines =
pragma: no cover
@abstract
raise NotImplementedError
omit =
*test*
keras_tuner/protos/*
keras_tuner/api_export.py
keras_tuner/distribute/file_utils.py
[flake8]
ignore =
# Conflicts with black
E203
# defaults flake8 ignores
E121,E123,E126,E226,E24,E704,W503,W504
# Function name should be lowercase
N802
# Argument name should be lowercase
N803
# First argument of a method should be named
N805
# Argument name should be lowercase
N806
# lowercase ... imported as non lowercase
# Useful to ignore for "import keras.backend as K"
N812
# do not use bare 'except'
E722
# Escape characters check.
# Conflict with pytest error message regex.
W605
exclude =
*_pb2.py
*_pb2_grpc.py
#imported but unused in __init__.py, that's ok.
per-file-ignores = **/__init__.py:F401
max-line-length = 80
| keras-tuner/setup.cfg/0 | {
"file_path": "keras-tuner/setup.cfg",
"repo_id": "keras-tuner",
"token_count": 465
} | 150 |