Dataset columns: text (string, 5 to 261k characters), id (string, 16 to 106 characters), metadata (dict), __index_level_0__ (int64, values 0 to 266).
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This demo example shows how to use the RandomCropAndResize preprocessing
layer. Operates on images of elephants. In this script the images are loaded,
then passed through the preprocessing layer. Finally, they are shown using
matplotlib.
"""
import demo_utils

from keras_cv.layers import RandomCropAndResize


def main():
    many_elephants = demo_utils.load_elephant_tensor(output_size=(300, 300))
    layer = RandomCropAndResize(
        target_size=(224, 224),
        crop_area_factor=(0.8, 1.0),
        aspect_ratio_factor=(3.0 / 4.0, 4.0 / 3.0),
    )
    augmented = layer(many_elephants)
    demo_utils.gallery_show(augmented.numpy())

    layer = RandomCropAndResize(
        target_size=(224, 224),
        crop_area_factor=(0.01, 1.0),
        aspect_ratio_factor=(3.0 / 4.0, 4.0 / 3.0),
    )
    augmented = layer(many_elephants)
    demo_utils.gallery_show(augmented.numpy())


if __name__ == "__main__":
    main()
keras-cv/examples/layers/preprocessing/classification/random_crop_and_resize_demo.py/0
{ "file_path": "keras-cv/examples/layers/preprocessing/classification/random_crop_and_resize_demo.py", "repo_id": "keras-cv", "token_count": 528 }
43
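The demo above relies on `demo_utils` helpers that live next to it in the repository. As a hedged, self-contained sketch (a random image batch stands in for the elephant tensor; shapes and values are arbitrary), the same layer can be exercised like this:

```python
import tensorflow as tf

from keras_cv.layers import RandomCropAndResize

layer = RandomCropAndResize(
    target_size=(224, 224),
    crop_area_factor=(0.8, 1.0),
    aspect_ratio_factor=(3.0 / 4.0, 4.0 / 3.0),
)
# A random dense batch stands in for demo_utils.load_elephant_tensor().
images = tf.random.uniform((4, 300, 300, 3), maxval=255.0)
augmented = layer(images)
print(augmented.shape)  # (4, 224, 224, 3)
```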
""" Title: Plot a bounding box gallery Author: [lukewood](https://lukewood.xyz) Date created: 2023/03/22 Last modified: 2023/03/22 Description: Visualize bounding boxes for a given dataset. """ """ `keras_cv.visualization.plot_bounding_box_gallery()` is a function dedicated to the visualization of bounding boxes predicted by a `keras_cv` object detection model. """ import tensorflow as tf import tensorflow_datasets as tfds import keras_cv """ First, we load a dataset: """ train_ds = tfds.load( "voc/2007", split="train", with_info=False, shuffle_files=True ) def unpackage_tfds_inputs(inputs): image = inputs["image"] image = tf.cast(image, tf.float32) boxes = inputs["objects"]["bbox"] boxes = keras_cv.bounding_box.convert_format( boxes, images=image, source="rel_yxyx", target="xywh", ) classes = tf.cast(inputs["objects"]["label"], tf.float32) bounding_boxes = { "classes": classes, "confidence": tf.random.uniform(tf.shape(classes), minval=0, maxval=1), "boxes": boxes, } return image, bounding_boxes train_ds = train_ds.map(unpackage_tfds_inputs) train_ds = train_ds.ragged_batch(16) images, boxes = next(iter(train_ds.take(1))) """ You can give the utility class IDs to annotate the drawn bounding boxes: """ class_ids = [ "Aeroplane", "Bicycle", "Bird", "Boat", "Bottle", "Bus", "Car", "Cat", "Chair", "Cow", "Dining Table", "Dog", "Horse", "Motorbike", "Person", "Potted Plant", "Sheep", "Sofa", "Train", "Tvmonitor", "Total", ] class_mapping = dict(zip(range(len(class_ids)), class_ids)) """ The function accepts `y_true`, `y_pred`, or both to visualize boxes: """ keras_cv.visualization.plot_bounding_box_gallery( images, value_range=(0, 255), bounding_box_format="xywh", y_true=boxes, scale=5, rows=2, cols=2, line_thickness=4, font_scale=0.5, text_thickness=2, legend=True, class_mapping=class_mapping, )
keras-cv/examples/visualization/plot_bounding_box_gallery.py/0
{ "file_path": "keras-cv/examples/visualization/plot_bounding_box_gallery.py", "repo_id": "keras-cv", "token_count": 876 }
44
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
formats.py contains axis information for each supported format.
"""


class CENTER_XYZ_DXDYDZ_PHI:
    """CENTER_XYZ_DXDYDZ_PHI contains axis indices for the
    CENTER_XYZ_DXDYDZ_PHI format.

    CENTER_XYZ_DXDYDZ_PHI is a 3D box format that supports vertical boxes
    with a heading rotated around the Z axis.

    The CENTER_XYZ_DXDYDZ_PHI format consists of the following required
    indices:

    - X: X coordinate of the center of the bounding box
    - Y: Y coordinate of the center of the bounding box
    - Z: Z coordinate of the center of the bounding box
    - DX: size of the bounding box on the x-axis
    - DY: size of the bounding box on the y-axis
    - DZ: size of the bounding box on the z-axis
    - PHI: the rotation of the box with respect to the z axis, in radians

    and the following optional indices, used in some KerasCV components:

    - CLASS: class of the object contained in the bounding box
    """

    X = 0
    Y = 1
    Z = 2
    DX = 3
    DY = 4
    DZ = 5
    PHI = 6
    CLASS = 7
keras-cv/keras_cv/bounding_box_3d/formats.py/0
{ "file_path": "keras-cv/keras_cv/bounding_box_3d/formats.py", "repo_id": "keras-cv", "token_count": 520 }
45
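A short illustrative sketch (not part of the file above) of how the axis constants are meant to be used: indexing the last dimension of an `[N, 8]` box tensor instead of hard-coding positions. The box values are invented, and the import path is an assumption taken from the file location.

```python
import numpy as np

# Assumed module path, mirroring the file path above.
from keras_cv.bounding_box_3d.formats import CENTER_XYZ_DXDYDZ_PHI as F

# One made-up box: center (1, 2, 0.5), size 4 x 2 x 1.5, heading 0.1 rad, class 3.
boxes = np.array([[1.0, 2.0, 0.5, 4.0, 2.0, 1.5, 0.1, 3.0]])

centers = boxes[:, [F.X, F.Y, F.Z]]      # (N, 3) box centers
sizes = boxes[:, [F.DX, F.DY, F.DZ]]     # (N, 3) box dimensions
headings = boxes[:, F.PHI]               # (N,) rotation around the Z axis, radians
classes = boxes[:, F.CLASS].astype(int)  # (N,) optional class indices
```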
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data loader for Pascal VOC 2012 segmentation dataset. The image classification and object detection (bounding box) data is covered by existing TF datasets in https://www.tensorflow.org/datasets/catalog/voc. The segmentation data (both class segmentation and instance segmentation) are included in the VOC 2012, but not offered by TF-DS yet. This module is trying to fill this gap while TFDS team can address this feature (b/252870855, https://github.com/tensorflow/datasets/issues/27 and https://github.com/tensorflow/datasets/pull/1198). The schema design is similar to the existing design of TFDS, but trimmed to fit the need of Keras CV models. This module contains following functionalities: 1. Download and unpack original data from Pascal VOC. 2. Reprocess and build up dataset that include image, class label, object bounding boxes, class and instance segmentation masks. 3. Produce tfrecords from the dataset. 4. Load existing tfrecords from result in 3. """ import logging import multiprocessing import os.path import random import tarfile import xml import numpy as np import tensorflow as tf import tensorflow_datasets as tfds from tensorflow import keras from keras_cv.api_export import keras_cv_export VOC_URL = "https://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar" # noqa: E501 """ @InProceedings{{BharathICCV2011, author = "Bharath Hariharan and Pablo Arbelaez and Lubomir Bourdev and Subhransu Maji and Jitendra Malik", title = "Semantic Contours from Inverse Detectors", booktitle = "International Conference on Computer Vision (ICCV)", year = "2011"}} """ # noqa: E501 SBD_URL = "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz" # noqa: E501 # Note that this list doesn't contain the background class. In the # classification use case, the label is 0 based (aeroplane -> 0), whereas in # segmentation use case, the 0 is reserved for background, so aeroplane maps to # 1. CLASSES = [ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor", ] # This is used to map between string class to index. CLASS_TO_INDEX = {name: index for index, name in enumerate(CLASSES)} # For the mask data in the PNG file, the encoded raw pixel value need to be # converted to the proper class index. In the following map, [0, 0, 0] will be # convert to 0, and [128, 0, 0] will be converted to 1, so on so forth. Also # note that the mask class is 1 base since class 0 is reserved for the # background. The [128, 0, 0] (class 1) is mapped to `aeroplane`. 
VOC_PNG_COLOR_VALUE = [ [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128], ] # Will be populated by _maybe_populate_voc_color_mapping() below. VOC_PNG_COLOR_MAPPING = None def _maybe_populate_voc_color_mapping(): # Lazy creation of VOC_PNG_COLOR_MAPPING, which could take 64M memory. global VOC_PNG_COLOR_MAPPING if VOC_PNG_COLOR_MAPPING is None: VOC_PNG_COLOR_MAPPING = [0] * (256**3) for i, colormap in enumerate(VOC_PNG_COLOR_VALUE): VOC_PNG_COLOR_MAPPING[ (colormap[0] * 256 + colormap[1]) * 256 + colormap[2] ] = i # There is a special mapping with [224, 224, 192] -> 255 VOC_PNG_COLOR_MAPPING[224 * 256 * 256 + 224 * 256 + 192] = 255 VOC_PNG_COLOR_MAPPING = tf.constant(VOC_PNG_COLOR_MAPPING) return VOC_PNG_COLOR_MAPPING def _download_data_file( data_url, extracted_dir, local_dir_path=None, override_extract=False ): """Fetch the original VOC or Semantic Boundaries Dataset from remote URL. Args: data_url: string, the URL for the data to be downloaded, should be in a zipped tar package. local_dir_path: string, the local directory path to save the data. Returns: the path to the folder of extracted data. """ if not local_dir_path: # download to ~/.keras/datasets/fname cache_dir = os.path.join(os.path.expanduser("~"), ".keras/datasets") fname = os.path.join(cache_dir, os.path.basename(data_url)) else: # Make sure the directory exists if not os.path.exists(local_dir_path): os.makedirs(local_dir_path, exist_ok=True) # download to local_dir_path/fname fname = os.path.join(local_dir_path, os.path.basename(data_url)) data_directory = os.path.join(os.path.dirname(fname), extracted_dir) if not override_extract and os.path.exists(data_directory): logging.info("data directory %s already exist", data_directory) return data_directory data_file_path = keras.utils.get_file(fname=fname, origin=data_url) # Extra the data into the same directory as the tar file. data_directory = os.path.dirname(data_file_path) logging.info("Extract data into %s", data_directory) with tarfile.open(data_file_path) as f: f.extractall(data_directory) return os.path.join(data_directory, extracted_dir) def _parse_annotation_data(annotation_file_path): """Parse the annotation XML file for the image. The annotation contains the metadata, as well as the object bounding box information. """ with tf.io.gfile.GFile(annotation_file_path, "r") as f: root = xml.etree.ElementTree.parse(f).getroot() size = root.find("size") width = int(size.find("width").text) height = int(size.find("height").text) objects = [] for obj in root.findall("object"): # Get object's label name. label = CLASS_TO_INDEX[obj.find("name").text.lower()] # Get objects' pose name. 
pose = obj.find("pose").text.lower() is_truncated = obj.find("truncated").text == "1" is_difficult = obj.find("difficult").text == "1" bndbox = obj.find("bndbox") xmax = int(bndbox.find("xmax").text) xmin = int(bndbox.find("xmin").text) ymax = int(bndbox.find("ymax").text) ymin = int(bndbox.find("ymin").text) objects.append( { "label": label, "pose": pose, "bbox": [ymin, xmin, ymax, xmax], "is_truncated": is_truncated, "is_difficult": is_difficult, } ) return {"width": width, "height": height, "objects": objects} def _get_image_ids(data_dir, split): data_file_mapping = { "train": "train.txt", "eval": "val.txt", "trainval": "trainval.txt", # TODO(tanzhenyu): add diff dataset # "diff": "diff.txt", } with tf.io.gfile.GFile( os.path.join( data_dir, "ImageSets", "Segmentation", data_file_mapping[split] ), "r", ) as f: image_ids = f.read().splitlines() logging.info(f"Received {len(image_ids)} images for {split} dataset.") return image_ids def _get_sbd_image_ids(data_dir, split): data_file_mapping = {"sbd_train": "train.txt", "sbd_eval": "val.txt"} with tf.io.gfile.GFile( os.path.join(data_dir, data_file_mapping[split]), "r", ) as f: image_ids = f.read().splitlines() logging.info(f"Received {len(image_ids)} images for {split} dataset.") return image_ids def _parse_single_image(image_file_path): data_dir, image_file_name = os.path.split(image_file_path) data_dir = os.path.normpath(os.path.join(data_dir, os.path.pardir)) image_id, _ = os.path.splitext(image_file_name) class_segmentation_file_path = os.path.join( data_dir, "SegmentationClass", image_id + ".png" ) object_segmentation_file_path = os.path.join( data_dir, "SegmentationObject", image_id + ".png" ) annotation_file_path = os.path.join( data_dir, "Annotations", image_id + ".xml" ) image_annotations = _parse_annotation_data(annotation_file_path) result = { "image/filename": image_id + ".jpg", "image/file_path": image_file_path, "segmentation/class/file_path": class_segmentation_file_path, "segmentation/object/file_path": object_segmentation_file_path, } result.update(image_annotations) # Labels field should be same as the 'object.label' labels = list(set([o["label"] for o in result["objects"]])) result["labels"] = sorted(labels) return result def _parse_single_sbd_image(image_file_path): data_dir, image_file_name = os.path.split(image_file_path) data_dir = os.path.normpath(os.path.join(data_dir, os.path.pardir)) image_id, _ = os.path.splitext(image_file_name) class_segmentation_file_path = os.path.join( data_dir, "cls", image_id + ".mat" ) object_segmentation_file_path = os.path.join( data_dir, "inst", image_id + ".mat" ) result = { "image/filename": image_id + ".jpg", "image/file_path": image_file_path, "segmentation/class/file_path": class_segmentation_file_path, "segmentation/object/file_path": object_segmentation_file_path, } return result def _build_metadata(data_dir, image_ids): # Parallel process all the images. image_file_paths = [ os.path.join(data_dir, "JPEGImages", i + ".jpg") for i in image_ids ] pool_size = 10 if len(image_ids) > 10 else len(image_ids) with multiprocessing.Pool(pool_size) as p: metadata = p.map(_parse_single_image, image_file_paths) # Transpose the metadata which convert from list of dict to dict of list. 
keys = [ "image/filename", "image/file_path", "segmentation/class/file_path", "segmentation/object/file_path", "labels", "width", "height", ] result = {} for key in keys: values = [value[key] for value in metadata] result[key] = values # The ragged objects need some special handling for key in ["label", "pose", "bbox", "is_truncated", "is_difficult"]: values = [] objects = [value["objects"] for value in metadata] for object in objects: values.append([o[key] for o in object]) result["objects/" + key] = values return result def _build_sbd_metadata(data_dir, image_ids): # Parallel process all the images. image_file_paths = [ os.path.join(data_dir, "img", i + ".jpg") for i in image_ids ] pool_size = 10 if len(image_ids) > 10 else len(image_ids) with multiprocessing.Pool(pool_size) as p: metadata = p.map(_parse_single_sbd_image, image_file_paths) keys = [ "image/filename", "image/file_path", "segmentation/class/file_path", "segmentation/object/file_path", ] result = {} for key in keys: values = [value[key] for value in metadata] result[key] = values return result # With jit_compile=True, there will be 0.4 sec compilation overhead, but save # about 0.2 sec per 1000 images. See # https://github.com/keras-team/keras-cv/pull/943#discussion_r1001092882 # for more details. @tf.function(jit_compile=True) def _decode_png_mask(mask): """Decode the raw PNG image and convert it to 2D tensor with probably class.""" # Cast the mask to int32 since the original uint8 will overflow when # multiplied with 256 mask = tf.cast(mask, tf.int32) mask = mask[:, :, 0] * 256 * 256 + mask[:, :, 1] * 256 + mask[:, :, 2] mask = tf.expand_dims(tf.gather(VOC_PNG_COLOR_MAPPING, mask), -1) mask = tf.cast(mask, tf.uint8) return mask def _load_images(example): image_file_path = example.pop("image/file_path") segmentation_class_file_path = example.pop("segmentation/class/file_path") segmentation_object_file_path = example.pop("segmentation/object/file_path") image = tf.io.read_file(image_file_path) image = tf.image.decode_jpeg(image) segmentation_class_mask = tf.io.read_file(segmentation_class_file_path) segmentation_class_mask = tf.image.decode_png(segmentation_class_mask) segmentation_class_mask = _decode_png_mask(segmentation_class_mask) segmentation_object_mask = tf.io.read_file(segmentation_object_file_path) segmentation_object_mask = tf.image.decode_png(segmentation_object_mask) segmentation_object_mask = _decode_png_mask(segmentation_object_mask) example.update( { "image": image, "class_segmentation": segmentation_class_mask, "object_segmentation": segmentation_object_mask, } ) return example def _load_sbd_images(image_file_path, seg_cls_file_path, seg_obj_file_path): image = tf.io.read_file(image_file_path) image = tf.image.decode_jpeg(image) segmentation_class_mask = tfds.core.lazy_imports.scipy.io.loadmat( seg_cls_file_path ) segmentation_class_mask = segmentation_class_mask["GTcls"]["Segmentation"][ 0 ][0] segmentation_class_mask = segmentation_class_mask[..., np.newaxis] segmentation_object_mask = tfds.core.lazy_imports.scipy.io.loadmat( seg_obj_file_path ) segmentation_object_mask = segmentation_object_mask["GTinst"][ "Segmentation" ][0][0] segmentation_object_mask = segmentation_object_mask[..., np.newaxis] return { "image": image, "class_segmentation": segmentation_class_mask, "object_segmentation": segmentation_object_mask, } def _build_dataset_from_metadata(metadata): # The objects need some manual conversion to ragged tensor. 
metadata["labels"] = tf.ragged.constant(metadata["labels"]) metadata["objects/label"] = tf.ragged.constant(metadata["objects/label"]) metadata["objects/pose"] = tf.ragged.constant(metadata["objects/pose"]) metadata["objects/is_truncated"] = tf.ragged.constant( metadata["objects/is_truncated"] ) metadata["objects/is_difficult"] = tf.ragged.constant( metadata["objects/is_difficult"] ) metadata["objects/bbox"] = tf.ragged.constant( metadata["objects/bbox"], ragged_rank=1 ) dataset = tf.data.Dataset.from_tensor_slices(metadata) dataset = dataset.map(_load_images, num_parallel_calls=tf.data.AUTOTUNE) return dataset def _build_sbd_dataset_from_metadata(metadata): img_filepath = metadata["image/file_path"] cls_filepath = metadata["segmentation/class/file_path"] obj_filepath = metadata["segmentation/object/file_path"] def md_gen(): c = list(zip(img_filepath, cls_filepath, obj_filepath)) # random shuffling for each generator boosts up the quality. random.shuffle(c) for fp in c: img_fp, cls_fp, obj_fp = fp yield _load_sbd_images(img_fp, cls_fp, obj_fp) dataset = tf.data.Dataset.from_generator( md_gen, output_signature=( { "image": tf.TensorSpec(shape=(None, None, 3), dtype=tf.uint8), "class_segmentation": tf.TensorSpec( shape=(None, None, 1), dtype=tf.uint8 ), "object_segmentation": tf.TensorSpec( shape=(None, None, 1), dtype=tf.uint8 ), } ), ) return dataset @keras_cv_export( "keras_cv.datasets.pascal_voc.segmentation.load", package="keras_cv.datasets.pascal_voc_segmentation", ) def load( split="sbd_train", data_dir=None, ): """Load the Pacal VOC 2012 dataset. This function will download the data tar file from remote if needed, and untar to the local `data_dir`, and build dataset from it. It supports both VOC2012 and Semantic Boundaries Dataset (SBD). The returned segmentation masks will be int ranging from [0, num_classes), as well as 255 which is the boundary mask. Args: split: string, can be 'train', 'eval', 'trainval", 'sbd_train', or 'sbd_eval'. 'sbd_train' represents the training dataset for SBD dataset, while 'train' represents the training dataset for VOC2012 dataset. Defaults to `sbd_train`. data_dir: string, local directory path for the loaded data. This will be used to download the data file, and unzip. It will be used as a cache directory. Defaults to None, and `~/.keras/pascal_voc_2012` will be used. """ supported_split_value = [ "train", "eval", "trainval", "sbd_train", "sbd_eval", ] if split not in supported_split_value: raise ValueError( f"The support value for `split` are {supported_split_value}. " f"Got: {split}" ) if data_dir is not None: data_dir = os.path.expanduser(data_dir) if "sbd" in split: return _load_sbd(split, data_dir) else: return _load_voc(split, data_dir) def _load_voc( split="train", data_dir=None, ): extracted_dir = os.path.join("VOCdevkit", "VOC2012") data_dir = _download_data_file( VOC_URL, extracted_dir=extracted_dir, local_dir_path=data_dir ) image_ids = _get_image_ids(data_dir, split) # len(metadata) = #samples, metadata[i] is a dict. metadata = _build_metadata(data_dir, image_ids) _maybe_populate_voc_color_mapping() dataset = _build_dataset_from_metadata(metadata) return dataset def _load_sbd( split="sbd_train", data_dir=None, ): extracted_dir = os.path.join("benchmark_RELEASE", "dataset") data_dir = _download_data_file( SBD_URL, extracted_dir=extracted_dir, local_dir_path=data_dir ) image_ids = _get_sbd_image_ids(data_dir, split) # len(metadata) = #samples, metadata[i] is a dict. 
metadata = _build_sbd_metadata(data_dir, image_ids) dataset = _build_sbd_dataset_from_metadata(metadata) return dataset
keras-cv/keras_cv/datasets/pascal_voc/segmentation.py/0
{ "file_path": "keras-cv/keras_cv/datasets/pascal_voc/segmentation.py", "repo_id": "keras-cv", "token_count": 7954 }
46
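A hedged usage sketch for the loader above. The first call downloads and extracts the archive into `~/.keras/datasets` (or the given `data_dir`), so treat this as illustrative; the import path is assumed from the file location and the `keras_cv_export` name.

```python
from keras_cv.datasets.pascal_voc import segmentation

# "sbd_train"/"sbd_eval" use the Semantic Boundaries Dataset splits;
# "train"/"eval"/"trainval" use the original VOC 2012 segmentation splits.
ds = segmentation.load(split="sbd_train")

for example in ds.take(1):
    print(example["image"].shape)                # (H, W, 3) uint8 image
    print(example["class_segmentation"].shape)   # (H, W, 1) per-pixel class ids
    print(example["object_segmentation"].shape)  # (H, W, 1) per-pixel instance ids
```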
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from absl.testing import parameterized from keras_cv.keypoint.utils import filter_out_of_image from keras_cv.tests.test_case import TestCase class UtilsTestCase(TestCase): @parameterized.named_parameters( ( "all inside", np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]), np.ones([100, 100, 3]), tf.ragged.constant([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]), ), ( "some inside", np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]]), np.ones([50, 50, 3]), tf.ragged.constant([[10.0, 20.0], [30.0, 40.0]]), ), ( "ragged input", tf.RaggedTensor.from_row_lengths( [[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]], [2, 1] ), np.ones([50, 50, 3]), tf.RaggedTensor.from_row_lengths( [[10.0, 20.0], [30.0, 40.0]], [2, 0] ), ), ( "height - width confusion", np.array([[[10.0, 20.0]], [[40.0, 30.0]], [[30.0, 40.0]]]), np.ones((50, 40, 3)), tf.ragged.constant( [[[10.0, 20.0]], [], [[30.0, 40.0]]], ragged_rank=1 ), ), ) def test_result(self, keypoints, image, expected): self.assertAllClose(filter_out_of_image(keypoints, image), expected)
keras-cv/keras_cv/keypoint/utils_test.py/0
{ "file_path": "keras-cv/keras_cv/keypoint/utils_test.py", "repo_id": "keras-cv", "token_count": 979 }
47
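For context, a minimal sketch of the function under test, mirroring the "some inside" case above: keypoints that fall outside the image bounds are dropped and a ragged tensor is returned. The values come straight from the test.

```python
import numpy as np

from keras_cv.keypoint.utils import filter_out_of_image

keypoints = np.array([[10.0, 20.0], [30.0, 40.0], [50.0, 50.0]])
image = np.ones([50, 50, 3])

# [50.0, 50.0] falls outside a 50x50 image, so only the first two points remain.
print(filter_out_of_image(keypoints, image))
```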
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.backend.config import keras_3 @keras_cv_export("keras_cv.layers.MultiClassNonMaxSuppression") class MultiClassNonMaxSuppression(keras.layers.Layer): """A Keras layer that decodes predictions of an object detection model. Arguments: bounding_box_format: The format of bounding boxes of input dataset. Refer [to the keras.io docs](https://keras.io/api/keras_cv/bounding_box/formats/) for more details on supported bounding box formats. from_logits: boolean, True means input score is logits, False means confidence. iou_threshold: a float value in the range [0, 1] representing the minimum IoU threshold for two boxes to be considered same for suppression. Defaults to 0.5. confidence_threshold: a float value in the range [0, 1]. All boxes with confidence below this value will be discarded, defaults to 0.5. max_detections: the maximum detections to consider after nms is applied. A large number may trigger significant memory overhead, defaults to 100. max_detections_per_class: the maximum detections to consider per class after nms is applied, defaults to 100. """ # noqa: E501 def __init__( self, bounding_box_format, from_logits, iou_threshold=0.5, confidence_threshold=0.5, max_detections=100, max_detections_per_class=100, **kwargs, ): super().__init__(**kwargs) self.bounding_box_format = bounding_box_format self.from_logits = from_logits self.iou_threshold = iou_threshold self.confidence_threshold = confidence_threshold self.max_detections = max_detections self.max_detections_per_class = max_detections_per_class self.built = True def call( self, box_prediction, class_prediction, images=None, image_shape=None ): """Accepts images and raw predictions, and returns bounding box predictions. Args: box_prediction: Dense Tensor of shape [batch, boxes, 4] in the `bounding_box_format` specified in the constructor. class_prediction: Dense Tensor of shape [batch, boxes, num_classes]. """ if keras_3() and keras.backend.backend() != "tensorflow": raise NotImplementedError( "MultiClassNonMaxSuppression does not support non-TensorFlow " "backends. Consider using NonMaxSuppression instead." 
) target_format = "yxyx" if bounding_box.is_relative(self.bounding_box_format): target_format = bounding_box.as_relative(target_format) box_prediction = bounding_box.convert_format( box_prediction, source=self.bounding_box_format, target=target_format, images=images, image_shape=image_shape, ) if self.from_logits: class_prediction = ops.sigmoid(class_prediction) box_prediction = ops.expand_dims(box_prediction, axis=-2) ( box_prediction, confidence_prediction, class_prediction, valid_det, ) = tf.image.combined_non_max_suppression( boxes=box_prediction, scores=class_prediction, max_output_size_per_class=self.max_detections_per_class, max_total_size=self.max_detections, score_threshold=self.confidence_threshold, iou_threshold=self.iou_threshold, clip_boxes=False, ) box_prediction = bounding_box.convert_format( box_prediction, source=target_format, target=self.bounding_box_format, images=images, image_shape=image_shape, ) bounding_boxes = { "boxes": box_prediction, "confidence": confidence_prediction, "classes": class_prediction, "num_detections": valid_det, } # this is required to comply with KerasCV bounding box format. return bounding_box.mask_invalid_detections( bounding_boxes, output_ragged=False ) def get_config(self): config = { "bounding_box_format": self.bounding_box_format, "from_logits": self.from_logits, "iou_threshold": self.iou_threshold, "confidence_threshold": self.confidence_threshold, "max_detections_per_class": self.max_detections_per_class, "max_detections": self.max_detections, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection/multi_class_non_max_suppression.py", "repo_id": "keras-cv", "token_count": 2332 }
48
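A hedged sketch of calling the layer on dummy predictions (TensorFlow backend only, as enforced above). Shapes, class count, and box values are invented; because the boxes are absolute `xyxy`, no `images`/`image_shape` argument is needed for the internal format conversion.

```python
import tensorflow as tf

from keras_cv.layers import MultiClassNonMaxSuppression

layer = MultiClassNonMaxSuppression(
    bounding_box_format="xyxy",
    from_logits=True,
    confidence_threshold=0.3,
)

batch, num_boxes, num_classes = 2, 8, 4
# Build well-formed xyxy boxes (x1 <= x2, y1 <= y2) from random corners and sizes.
mins = tf.random.uniform((batch, num_boxes, 2), maxval=32.0)
sizes = tf.random.uniform((batch, num_boxes, 2), minval=1.0, maxval=32.0)
box_prediction = tf.concat([mins, mins + sizes], axis=-1)
class_prediction = tf.random.normal((batch, num_boxes, num_classes))  # logits

outputs = layer(box_prediction, class_prediction)
print(outputs["boxes"].shape)     # (2, 100, 4), padded to max_detections
print(outputs["num_detections"])  # valid detections per image
```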
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Sequence from typing import Tuple from typing import Union import numpy as np import tensorflow as tf from tensorflow import keras from keras_cv.api_export import keras_cv_export from keras_cv.backend import ops from keras_cv.backend import scope from keras_cv.backend.scope import tf_data from keras_cv.layers.object_detection_3d import voxel_utils # Infinite voxel size. INF_VOXEL_SIZE = 100 def _meshgrid( max_radius_in_voxels: Sequence[int], voxel_size: Sequence[float] ) -> np.ndarray: """Computes the mesh grid given number of points in each dimension. NOTE: this is a pure numpy function. Args: max_radius_in_voxels: max radius in each dimension in units of voxels. voxel_size: voxel size of each dimension. Returns: point tensor of shape [-1, len(voxel_size)]. """ m = max_radius_in_voxels dim = len(m) assert dim == 2 or dim == 3 if dim == 2: mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1] else: mesh = np.mgrid[-m[0] : m[0] + 1, -m[1] : m[1] + 1, -m[2] : m[2] + 1] mesh = np.concatenate(mesh[..., np.newaxis], axis=-1) mesh = np.reshape(mesh, [-1, dim]) return mesh * voxel_size @tf_data def compute_heatmap( box_3d: tf.Tensor, box_mask: tf.Tensor, voxel_size: Sequence[float], max_radius: Sequence[float], ) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]: """Compute heatmap for boxes. Args: box_3d: 3d boxes in xyz format, vehicle frame, [B, boxes, 7]. box_mask: box masking, [B, boxes] voxel_size: the size on each voxel dimension (xyz) max_radius: the maximum radius on each voxel dimension (xyz) Returns: point_xyz: the point location w.r.t. vehicle frame, [B, boxes, max_voxels_per_box, 3] mask: point mask, [B, boxes, max_voxels_per_box] heatmap: the returned heatmap w.r.t box frame, [B, boxes, max_voxels_per_box] box_id: the box id each point belongs to, [B, boxes, max_voxels_per_box] """ # convert radius from point unit to voxel unit. max_radius_in_voxels = [ math.ceil(mr / vs) for mr, vs in zip(max_radius, voxel_size) ] # get the mesh grid based on max radius w.r.t each box # [max_num_voxels_per_box, 3] points_numpy = _meshgrid(max_radius_in_voxels, voxel_size=voxel_size) box_center = box_3d[:, :, :3] # voxelize and de-voxelize point_xyz # This ensures that we are computing heatmap for each voxel with these # quantized x,y,z. # [B, N, max_num_voxels_per_box, 3] point_xyz = ( box_center[:, :, tf.newaxis, :] + tf.constant(points_numpy, dtype=tf.float32)[ tf.newaxis, tf.newaxis, :, : ] ) # [B, N, max_num_voxels_per_box, 3] point_xyz = voxel_utils.point_to_voxel_coord( point_xyz, voxel_size, dtype=tf.int32 ) # Map voxel back to xyz to get quantized version. # [B, N, max_num_voxels_per_box, 3] point_xyz = voxel_utils.voxel_coord_to_point( point_xyz, voxel_size, dtype=tf.float32 ) # Transforms these points to the box frame from vehicle frame. 
heading = box_3d[:, :, -1] # [B, N, 3, 3] rot = voxel_utils.get_yaw_rotation(heading) # [B, N, max_num_voxels_per_box, 3] point_xyz_rot = tf.linalg.matmul(point_xyz, rot) # convert from box frame to vehicle frame. # [B, N, max_num_voxels_per_box, 3] point_xyz_transform = ( point_xyz_rot + voxel_utils.inv_loc(rot, box_center)[:, :, tf.newaxis, :] ) # Due to the transform above, z=0 can be transformed to a non-zero value. # For 2d heatmap, we do not want to use z. if voxel_size[2] > INF_VOXEL_SIZE: point_xyz_transform = tf.concat( [ point_xyz_transform[..., :2], tf.zeros_like(point_xyz_transform[..., :1]), ], axis=-1, ) # The Gaussian radius is set as the dimension of the boxes # [B, N, 3] radius = box_3d[:, :, 3:6] # [B, N, 1, 3] radius = radius[:, :, tf.newaxis, :] # The Gaussian standard deviation is set as 1. # [B, N, 1, 3] sigma = tf.ones_like(radius, dtype=radius.dtype) # Compute point mask. Anything outside the radius is invalid. # [B, N, max_num_voxels_per_box, 3] mask = tf.math.less_equal(tf.math.abs(point_xyz_transform), radius) # [B, N, max_num_voxels_per_box] mask = tf.math.reduce_all(mask, axis=-1) # [B, N, max_num_voxels_per_box] mask = tf.logical_and(box_mask[:, :, tf.newaxis], mask) # [B, N, max_num_voxels_per_box] # Gaussian kernel p2 = point_xyz_transform * point_xyz_transform p2_sigma = p2 * (-0.5 / (sigma * sigma)) # in box frame. heatmap = tf.exp(tf.reduce_sum(p2_sigma, axis=-1)) ( batch_size, num_box, max_num_voxels_per_box, _, ) = ops.shape(point_xyz) box_id = tf.range(num_box, dtype=tf.int32) box_id = tf.tile( box_id[tf.newaxis, :, tf.newaxis], [batch_size, 1, max_num_voxels_per_box], ) point_xyz = tf.reshape( point_xyz, [batch_size, num_box * max_num_voxels_per_box, 3] ) heatmap = tf.reshape( heatmap, [batch_size, num_box * max_num_voxels_per_box] ) box_id = tf.reshape(box_id, [batch_size, num_box * max_num_voxels_per_box]) mask = tf.reshape(mask, [batch_size, num_box * max_num_voxels_per_box]) return point_xyz, mask, heatmap, box_id def scatter_to_dense_heatmap( point_xyz: tf.Tensor, point_mask: tf.Tensor, point_box_id: tf.Tensor, heatmap: tf.Tensor, voxel_size: Sequence[float], spatial_size: Sequence[float], ) -> Tuple[tf.Tensor, tf.Tensor]: """Scatter the heatmap to a dense grid. N = num_boxes * max_voxels_per_box Args: point_xyz: [B, N, 3] 3d points, point coordinate in vehicle frame. point_mask: [B, N] valid point mask. point_box_id: [B, N] box id of each point. The ID indexes into the input box tensors. See compute_heatmap for more details. heatmap: [B, N] heatmap value of each point. voxel_size: voxel size. spatial_size: the spatial size. Returns: dense_heatmap: [B, H, W] heatmap value. dense_box_id: [B, H, W] box id associated with each feature map pixel. Only pixels with positive heatmap value have valid box id set. Other locations have random values. """ # [B, N, 3] # convert to voxel units. point_voxel_xyz = voxel_utils.point_to_voxel_coord( point_xyz, voxel_size, dtype=tf.int32 ) # [3] voxel_origin = voxel_utils.compute_voxel_origin(spatial_size, voxel_size) # [B, N, 3] # shift point voxel coordinates to positive voxel index. 
point_voxel_xyz = point_voxel_xyz - voxel_origin[tf.newaxis, tf.newaxis, :] voxel_spatial_size = voxel_utils.compute_voxel_spatial_size( spatial_size, voxel_size ) # [B, N] point_voxel_valid_mask = tf.math.reduce_all( tf.math.logical_and( point_voxel_xyz >= 0, point_voxel_xyz < voxel_spatial_size ), axis=-1, ) # [B, N] point_voxel_valid_mask = tf.math.logical_and( point_voxel_valid_mask, point_mask ) # [B, N] point_voxel_xyz = point_voxel_xyz * tf.cast( point_voxel_valid_mask[..., tf.newaxis], dtype=point_voxel_xyz.dtype ) # [B, N] # filtered heatmap with out of range voxels. heatmap = heatmap * tf.cast(point_voxel_valid_mask, dtype=heatmap.dtype) # TODO(tanzheny): consider a batched implementation. def fn(args): """Calls scatter update.""" point_voxel_xyz_i, mask_i, heatmap_i, point_box_id_i = args mask_index = tf.where(mask_i) point_voxel_xyz_i = tf.cast( tf.gather_nd(point_voxel_xyz_i, mask_index), tf.int32 ) heatmap_i = tf.gather_nd(heatmap_i, mask_index) point_box_id_i = tf.gather_nd(point_box_id_i, mask_index) # scatter from local heatmap to global heatmap based on point_xyz voxel # units dense_heatmap_i = tf.tensor_scatter_nd_update( tf.zeros(voxel_spatial_size, dtype=heatmap_i.dtype), point_voxel_xyz_i, heatmap_i, ) dense_box_id_i = tf.tensor_scatter_nd_update( tf.zeros(voxel_spatial_size, dtype=tf.int32), point_voxel_xyz_i, point_box_id_i, ) return dense_heatmap_i, dense_box_id_i dense_heatmap, dense_box_id = tf.map_fn( fn, elems=[point_voxel_xyz, point_voxel_valid_mask, heatmap, point_box_id], fn_output_signature=(heatmap.dtype, point_box_id.dtype), ) return dense_heatmap, dense_box_id def decode_tensor( t: tf.Tensor, dims: Sequence[Union[tf.Tensor, int]] ) -> tf.Tensor: """ Args: t: int32 or int64 tensor of shape [shape], [B, k] dims: list of ints., [H, W, Z] Returns: t_decoded: int32 or int64 decoded tensor of shape [shape, len(dims)], [B, k, 3] """ with tf.name_scope("decode_tensor"): multipliers = [] multiplier = 1 assert dims for d in reversed(dims): multipliers.append(multiplier) multiplier = multiplier * d multipliers = list(reversed(multipliers)) t_decoded_list = [] remainder = t for m in multipliers: t_decoded_list.append(tf.math.floordiv(remainder, m)) remainder = tf.math.floormod(remainder, m) return tf.stack(t_decoded_list, axis=-1) @tf_data def compute_top_k_heatmap_idx(heatmap: tf.Tensor, k: int) -> tf.Tensor: """Computes the top_k heatmap indices. Args: heatmap: [B, H, W] for 2 dimension or [B, H, W, Z] for 3 dimensions k: integer, represent top_k Returns: top_k_index: [B, k, 2] for 2 dimensions or [B, k, 3] for 3 dimensions """ shape = ops.shape(heatmap) # [B, H*W*Z] heatmap_reshape = tf.reshape(heatmap, [shape[0], -1]) # [B, k] # each index in the range of [0, H*W*Z) _, indices = tf.math.top_k(heatmap_reshape, k=k, sorted=False) # [B, k, 2] or [B, k, 3] # shape[1:] = [H, W, Z], convert the indices from 1 dimension to 3 # dimensions in the range of [0, H), [0, W), [0, Z) res = decode_tensor(indices, shape[1:]) return res @keras_cv_export("keras_cv.layers.CenterNetLabelEncoder") class CenterNetLabelEncoder(keras.layers.Layer): """Transforms the raw sparse labels into class specific dense training labels. This layer takes the box locations, box classes and box masks, voxelizes and compute the Gaussian radius for each box, then computes class specific heatmap for classification and class specific box offset w.r.t to feature map for regression. Args: voxel_size: the x, y, z dimension (in meters) of each voxel. 
max_radius: maximum Gaussian radius in each dimension in meters. spatial_size: the x, y, z boundary of voxels num_classes: number of object classes. top_k_heatmap: A sequence of integers, top k for each class. Can be None. """ def __init__( self, voxel_size: Sequence[float], max_radius: Sequence[float], spatial_size: Sequence[float], num_classes: int, top_k_heatmap: Sequence[int], **kwargs, ): super().__init__(**kwargs) self._voxel_size = voxel_size self._max_radius = max_radius self._spatial_size = spatial_size self._num_classes = num_classes self._top_k_heatmap = top_k_heatmap def call(self, inputs): """ Args: inputs: dictionary of Tensors representing a batch of data. Must contain 3D box targets under the key "3d_boxes". Returns: A dictionary of Tensors with all of the original inputs, plus, for each class, a new key with encoded CenterNet targets in the format: ``` "class_{class_index}": { "heatmap": float Tensor [B, H, W, Z] or [B, H, W] "boxes": float Tensor [B, H, W, Z, 7] or [B, H, W, 7] "tok_k_index": int Tensor [B, k, 3] or [B, k, 2] } ``` where: H: number of voxels in y dimension W: number of voxels in x dimension Z: number of voxels in z dimension k: `top_k_heatmap` slice """ with scope.TFDataScope(): box_3d = inputs["3d_boxes"]["boxes"] box_mask = inputs["3d_boxes"]["mask"] box_classes = inputs["3d_boxes"]["classes"] # point_xyz - [B, num_boxes * max_num_voxels_per_box, 3] # heatmap - [B, num_boxes * max_num_voxels_per_box] # compute localized heatmap around its radius. point_xyz, point_mask, heatmap, box_id = compute_heatmap( box_3d, box_mask, self._voxel_size, self._max_radius, ) # heatmap - [B, H, W, Z] # scatter the localized heatmap to global heatmap in vehicle frame. dense_heatmap, dense_box_id = scatter_to_dense_heatmap( point_xyz, point_mask, box_id, heatmap, self._voxel_size, self._spatial_size, ) b, h, w, z = ops.shape(dense_box_id) # [B, H * W * Z] dense_box_id = tf.reshape(dense_box_id, [b, h * w * z]) # mask out invalid boxes to 0, which represents background box_classes = box_classes * tf.cast(box_mask, box_classes.dtype) # [B, H, W, Z] dense_box_classes = tf.reshape( tf.gather(box_classes, dense_box_id, batch_dims=1), [b, h, w, z] ) # [B, H, W, Z, 7] in vehicle frame. dense_box_3d = tf.reshape( tf.gather(box_3d, dense_box_id, batch_dims=1), [b, h, w, z, -1] ) global_xyz = tf.zeros([b, 3], dtype=point_xyz.dtype) # [B, H, W, Z, 3] feature_map_ref_xyz = voxel_utils.compute_feature_map_ref_xyz( self._voxel_size, self._spatial_size, global_xyz ) # convert from global box point xyz to offset w.r.t center of # feature map. # [B, H, W, Z, 3] dense_box_3d_center = dense_box_3d[..., :3] - tf.cast( feature_map_ref_xyz, dense_box_3d.dtype ) # [B, H, W, Z, 7] dense_box_3d = tf.concat( [dense_box_3d_center, dense_box_3d[..., 3:]], axis=-1 ) centernet_targets = {} for i in range(self._num_classes): # Object class is 1-indexed (0 is background). dense_box_classes_i = tf.cast( tf.math.equal(dense_box_classes, i + 1), dtype=dense_heatmap.dtype, ) dense_heatmap_i = dense_heatmap * dense_box_classes_i dense_box_3d_i = ( dense_box_3d * dense_box_classes_i[..., tf.newaxis] ) # Remove z-dimension if this is 2D setup. 
if self._voxel_size[2] > INF_VOXEL_SIZE: dense_heatmap_i = tf.squeeze(dense_heatmap_i, axis=-1) dense_box_3d_i = tf.squeeze(dense_box_3d_i, axis=-2) top_k_heatmap_feature_idx_i = None if self._top_k_heatmap[i] > 0: top_k_heatmap_feature_idx_i = compute_top_k_heatmap_idx( dense_heatmap_i, self._top_k_heatmap[i] ) centernet_targets[f"class_{i+1}"] = { "heatmap": dense_heatmap_i, "boxes": dense_box_3d_i, "top_k_index": top_k_heatmap_feature_idx_i, } inputs.update(centernet_targets) return inputs
keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder.py/0
{ "file_path": "keras-cv/keras_cv/layers/object_detection_3d/centernet_label_encoder.py", "repo_id": "keras-cv", "token_count": 8152 }
49
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 VectorizedBaseImageAugmentationLayer, ) @keras_cv_export("keras_cv.layers.ChannelShuffle") class ChannelShuffle(VectorizedBaseImageAugmentationLayer): """Shuffle channels of an input image. Input shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Output shape: 3D (unbatched) or 4D (batched) tensor with shape: `(..., height, width, channels)`, in `"channels_last"` format Args: groups: Number of groups to divide the input channels, defaults to 3. seed: Integer. Used to create a random seed. Usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() channel_shuffle = ChannelShuffle(groups=3) augmented_images = channel_shuffle(images) ``` """ def __init__(self, groups=3, seed=None, **kwargs): super().__init__(seed=seed, **kwargs) self.groups = groups self.seed = seed def get_random_transformation_batch(self, batch_size, **kwargs): # get batched shuffled indices # for example: batch_size=2; self.group=5 # indices = [ # [0, 2, 3, 4, 1], # [4, 1, 0, 2, 3] # ] indices_distribution = self._random_generator.uniform( (batch_size, self.groups) ) indices = tf.argsort(indices_distribution, axis=-1) return indices def augment_ragged_image(self, image, transformation, **kwargs): # self.augment_images must have # 4D images (batch_size, height, width, channel) # 2D transformations (batch_size, groups) image = tf.expand_dims(image, axis=0) transformation = tf.expand_dims(transformation, axis=0) image = self.augment_images( images=image, transformations=transformation, **kwargs ) return tf.squeeze(image, axis=0) def augment_images(self, images, transformations, **kwargs): batch_size = tf.shape(images)[0] height, width = images.shape[1], images.shape[2] num_channels = images.shape[3] indices = transformations # append batch indexes next to shuffled indices batch_indexs = tf.repeat(tf.range(batch_size), self.groups) batch_indexs = tf.reshape(batch_indexs, (batch_size, self.groups)) indices = tf.stack([batch_indexs, indices], axis=-1) if not num_channels % self.groups == 0: raise ValueError( "The number of input channels should be " "divisible by the number of groups." 
f"Received: channels={num_channels}, groups={self.groups}" ) channels_per_group = num_channels // self.groups images = tf.reshape( images, [batch_size, height, width, self.groups, channels_per_group] ) images = tf.transpose(images, perm=[0, 3, 1, 2, 4]) images = tf.gather_nd(images, indices=indices) images = tf.transpose(images, perm=[0, 2, 3, 4, 1]) images = tf.reshape(images, [batch_size, height, width, num_channels]) return images def augment_labels(self, labels, transformations, **kwargs): return labels def augment_segmentation_masks( self, segmentation_masks, transformations, **kwargs ): return segmentation_masks def augment_bounding_boxes(self, bounding_boxes, transformations, **kwargs): return bounding_boxes def get_config(self): config = { "groups": self.groups, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
keras-cv/keras_cv/layers/preprocessing/channel_shuffle.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/channel_shuffle.py", "repo_id": "keras-cv", "token_count": 1829 }
50
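Beyond the docstring example, a small hedged sketch on random data, highlighting the constraint enforced in `augment_images`: the channel count must be divisible by `groups`.

```python
import tensorflow as tf

from keras_cv.layers import ChannelShuffle

images = tf.random.uniform((4, 64, 64, 3), maxval=255.0)

# groups=3 divides the 3 RGB channels evenly, so whole channels get shuffled.
layer = ChannelShuffle(groups=3, seed=42)
shuffled = layer(images)
print(shuffled.shape)  # (4, 64, 64, 3)

# groups=2 would not divide 3 channels and raises a ValueError at call time.
```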
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv import bounding_box from keras_cv.api_export import keras_cv_export from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 BATCHED, ) from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 BOUNDING_BOXES, ) from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 IMAGES, ) from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 LABELS, ) from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 SEGMENTATION_MASKS, ) from keras_cv.layers.preprocessing.vectorized_base_image_augmentation_layer import ( # noqa: E501 VectorizedBaseImageAugmentationLayer, ) from keras_cv.utils import preprocessing as preprocessing_utils @keras_cv_export("keras_cv.layers.Mosaic") class Mosaic(VectorizedBaseImageAugmentationLayer): """Mosaic implements the mosaic data augmentation technique. Mosaic data augmentation first takes 4 images from the batch and makes a grid. After that based on the offset, a crop is taken to form the mosaic image. Labels are in the same ratio as the area of their images in the output image. Bounding boxes are translated according to the position of the 4 images. Args: offset: A tuple of two floats, a single float or `keras_cv.FactorSampler`. `offset` is used to determine the offset of the mosaic center from the top-left corner of the mosaic. If a tuple is used, the x and y coordinates of the mosaic center are sampled between the two values for every image augmented. If a single float is used, a value between `0.0` and the passed float is sampled. In order to ensure the value is always the same, please pass a tuple with two identical floats: `(0.5, 0.5)`. Defaults to (0.25, 0.75). bounding_box_format: a case-insensitive string (for example, "xyxy") to be passed if bounding boxes are being augmented by this layer. Each bounding box is defined by at least these 4 values. The inputs may contain additional information such as classes and confidence after these 4 values but these values will be ignored and returned as is. For detailed information on the supported formats, see the [KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/). Defaults to None. seed: integer, used to create a random seed. References: - [Yolov4 paper](https://arxiv.org/pdf/2004.10934). - [Yolov5 implementation](https://github.com/ultralytics/yolov5). 
- [YoloX implementation](https://github.com/Megvii-BaseDetection/YOLOX) Sample usage: ```python (images, labels), _ = keras.datasets.cifar10.load_data() labels = tf.one_hot(labels,10) labels = tf.cast(tf.squeeze(labels), tf.float32) mosaic = keras_cv.layers.preprocessing.Mosaic() output = mosaic({'images': images, 'labels': labels}) # output == {'images': updated_images, 'labels': updated_labels} ``` """ # noqa: E501 def __init__( self, offset=(0.25, 0.75), bounding_box_format=None, seed=None, **kwargs ): super().__init__(seed=seed, **kwargs) self.offset = offset self.bounding_box_format = bounding_box_format self.center_sampler = preprocessing_utils.parse_factor( offset, param_name="offset", seed=seed ) self.seed = seed def get_random_transformation_batch(self, batch_size, **kwargs): # pick 3 indices for every batch to create the mosaic output with. permutation_order = self._random_generator.uniform( (batch_size, 3), minval=0, maxval=batch_size, dtype=tf.int32, ) # concatenate the batches with permutation order to get all 4 images of # the mosaic permutation_order = tf.concat( [tf.expand_dims(tf.range(batch_size), axis=-1), permutation_order], axis=-1, ) mosaic_centers_x = self.center_sampler( shape=(batch_size,), dtype=self.compute_dtype ) mosaic_centers_y = self.center_sampler( shape=(batch_size,), dtype=self.compute_dtype ) mosaic_centers = tf.stack((mosaic_centers_x, mosaic_centers_y), axis=-1) return { "permutation_order": permutation_order, "mosaic_centers": mosaic_centers, } def augment_ragged_image(self, image, transformation, **kwargs): raise ValueError( "Mosaic received ragged images to `call`. The layer relies on " "combining multiple examples with same size, and as such will not " "behave as expected. Please call the layer with dense images with " "same size. This is an implementation constraint, not an algorithm " "constraint. If you find this method helpful, please open an issue " "on KerasCV." 
) def augment_images( self, images, transformations, resize_method="bilinear", **kwargs ): batch_size = tf.shape(images)[0] input_height, input_width, _ = images.shape[1:] # forms mosaic for one image from the batch permutation_order = transformations["permutation_order"] mosaic_images = tf.gather(images, permutation_order) tops = tf.concat([mosaic_images[:, 0], mosaic_images[:, 1]], axis=2) bottoms = tf.concat([mosaic_images[:, 2], mosaic_images[:, 3]], axis=2) outputs = tf.concat([tops, bottoms], axis=1) # cropping coordinates for the mosaic mosaic_centers = transformations["mosaic_centers"] mosaic_centers_x = mosaic_centers[..., 0] * input_width mosaic_centers_y = mosaic_centers[..., 1] * input_height x1s = (input_width - mosaic_centers_x) / (input_width * 2 - 1) y1s = (input_height - mosaic_centers_y) / (input_height * 2 - 1) x2s = x1s + (input_width) / (input_width * 2 - 1) y2s = y1s + (input_height) / (input_height * 2 - 1) cropping_boxes = tf.stack([y1s, x1s, y2s, x2s], axis=-1) # helps avoid retracing caused by slicing, inspired by RRC # implementation # boxes must be type tf.float32 outputs = tf.image.crop_and_resize( outputs, tf.cast(cropping_boxes, tf.float32), tf.range(batch_size), [input_height, input_width], method=resize_method, ) # tf.image.crop_and_resize will always output float32, so we need to # recast tf.image.crop_and_resize outputs # [num_boxes, crop_height, crop_width, depth] since num_boxes is always # one we squeeze axis 0 outputs = tf.cast(outputs, self.compute_dtype) return outputs def augment_segmentation_masks( self, segmentation_masks, transformations, **kwargs ): return self.augment_images( segmentation_masks, transformations, resize_method="nearest" ) def augment_labels(self, labels, transformations, images=None, **kwargs): input_height, input_width, _ = images.shape[1:] # updates labels for one output mosaic permutation_order = transformations["permutation_order"] labels_for_mosaic = tf.gather(labels, permutation_order) mosaic_centers = transformations["mosaic_centers"] center_x = mosaic_centers[..., 0] * input_width center_y = mosaic_centers[..., 1] * input_height area = input_height * input_width # labels are in the same ratio as the area of the images top_left_ratio = (center_x * center_y) / area top_right_ratio = ((input_width - center_x) * center_y) / area bottom_left_ratio = (center_x * (input_height - center_y)) / area bottom_right_ratio = ( (input_width - center_x) * (input_height - center_y) ) / area labels = ( labels_for_mosaic[:, 0] * top_left_ratio[:, tf.newaxis] + labels_for_mosaic[:, 1] * top_right_ratio[:, tf.newaxis] + labels_for_mosaic[:, 2] * bottom_left_ratio[:, tf.newaxis] + labels_for_mosaic[:, 3] * bottom_right_ratio[:, tf.newaxis] ) return labels def augment_bounding_boxes( self, bounding_boxes, transformations, images=None, **kwargs ): batch_size = tf.shape(images)[0] input_height, input_width, _ = images.shape[1:] bounding_boxes = bounding_box.to_dense(bounding_boxes) bounding_boxes = bounding_box.convert_format( bounding_boxes, source=self.bounding_box_format, target="xyxy", images=images, dtype=self.compute_dtype, ) boxes, classes = bounding_boxes["boxes"], bounding_boxes["classes"] # values to translate the boxes by in the mosaic image mosaic_centers = transformations["mosaic_centers"] mosaic_centers_x = mosaic_centers[..., 0] * input_width mosaic_centers_y = mosaic_centers[..., 1] * input_height translate_x = tf.stack( [ mosaic_centers_x - input_width, mosaic_centers_x, mosaic_centers_x - input_width, mosaic_centers_x, ], 
axis=-1, ) translate_y = tf.stack( [ mosaic_centers_y - input_height, mosaic_centers_y - input_height, mosaic_centers_y, mosaic_centers_y, ], axis=-1, ) # updates bounding_boxes for one output mosaic permutation_order = transformations["permutation_order"] classes_for_mosaic = tf.gather(classes, permutation_order) boxes_for_mosaic = tf.gather(boxes, permutation_order) # stacking translate values such that the shape is (B, 4, 1, 4) or # (batch_size, num_images, broadcast dim, coordinates) translate_values = tf.stack( [translate_x, translate_y, translate_x, translate_y], axis=-1 ) translate_values = tf.expand_dims(translate_values, axis=2) # translating boxes boxes_for_mosaic = boxes_for_mosaic + translate_values boxes_for_mosaic = tf.reshape(boxes_for_mosaic, [batch_size, -1, 4]) classes_for_mosaic = tf.reshape(classes_for_mosaic, [batch_size, -1]) boxes_for_mosaic = { "boxes": boxes_for_mosaic, "classes": classes_for_mosaic, } boxes_for_mosaic = bounding_box.clip_to_image( boxes_for_mosaic, bounding_box_format="xyxy", images=images, ) boxes_for_mosaic = bounding_box.convert_format( boxes_for_mosaic, source="xyxy", target=self.bounding_box_format, images=images, dtype=self.compute_dtype, ) return boxes_for_mosaic def _batch_augment(self, inputs): self._validate_inputs(inputs) return super()._batch_augment(inputs) def call(self, inputs): _, metadata = self._format_inputs(inputs) if metadata[BATCHED] is not True: raise ValueError( "Mosaic received a single image to `call`. The " "layer relies on combining multiple examples, and as such " "will not behave as expected. Please call the layer with 4 " "or more samples." ) return super().call(inputs=inputs) def _validate_inputs(self, inputs): images = inputs.get(IMAGES, None) labels = inputs.get(LABELS, None) bounding_boxes = inputs.get(BOUNDING_BOXES, None) segmentation_masks = inputs.get(SEGMENTATION_MASKS, None) if images is None or ( labels is None and bounding_boxes is None and segmentation_masks is None ): raise ValueError( "Mosaic expects inputs in a dictionary with format " '{"images": images, "labels": labels} or' '{"images": images, "bounding_boxes": bounding_boxes} or' '{"images": images, "segmentation_masks": masks}.' f"Got: inputs = {inputs}" ) if labels is not None and not labels.dtype.is_floating: raise ValueError( f"Mosaic received labels with type {labels.dtype}. " "Labels must be of type float." ) if bounding_boxes is not None and self.bounding_box_format is None: raise ValueError( "Mosaic received bounding boxes but no bounding_box_format. " "Please pass a bounding_box_format from the supported list." ) def get_config(self): config = { "offset": self.offset, "bounding_box_format": self.bounding_box_format, "seed": self.seed, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config): return cls(**config)
keras-cv/keras_cv/layers/preprocessing/mosaic.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/mosaic.py", "repo_id": "keras-cv", "token_count": 6082 }
51
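A minimal usage sketch for the mosaic layer above. The export name `keras_cv.layers.Mosaic` and the default constructor arguments are assumptions, since only the method bodies appear in this chunk; the dictionary call format and the float-label requirement come from `_validate_inputs` above.

import tensorflow as tf
import keras_cv

# Batch of at least four images; each output mosaic combines four inputs.
images = tf.random.uniform((8, 256, 256, 3), maxval=255.0)
# Labels must be float (one-hot here) so they can be mixed by area ratio.
labels = tf.one_hot(tf.random.uniform((8,), maxval=10, dtype=tf.int32), 10)

layer = keras_cv.layers.Mosaic(seed=1337)  # export name assumed
outputs = layer({"images": images, "labels": labels}, training=True)
print(outputs["images"].shape)  # expected: (8, 256, 256, 3)
print(outputs["labels"].shape)  # expected: (8, 10), area-weighted label mix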
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pytest import tensorflow as tf from keras_cv.backend import ops from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomChannelShiftTest(TestCase): def test_return_shapes(self): xs = np.ones((2, 512, 512, 3)) layer = preprocessing.RandomChannelShift( factor=1.0, value_range=(0, 255) ) xs = layer(xs, training=True) self.assertEqual(xs.shape, (2, 512, 512, 3)) def test_non_square_image(self): xs = tf.cast( tf.stack( [2 * np.ones((1024, 512, 3)), np.ones((1024, 512, 3))], axis=0, ), dtype=tf.float32, ) layer = preprocessing.RandomChannelShift( factor=[0.1, 0.3], value_range=(0, 255) ) xs = layer(xs, training=True) self.assertFalse(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertFalse(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) @pytest.mark.tf_only def test_in_tf_function(self): xs = tf.cast( tf.stack( [2 * np.ones((100, 100, 3)), np.ones((100, 100, 3))], axis=0 ), dtype=tf.float32, ) layer = preprocessing.RandomChannelShift( factor=0.3, value_range=(0, 255) ) @tf.function def augment(x): return layer(x, training=True) xs = augment(xs) self.assertFalse(np.any(ops.convert_to_numpy(xs[0]) == 2.0)) self.assertFalse(np.any(ops.convert_to_numpy(xs[1]) == 1.0)) def test_5_channels(self): xs = tf.cast( np.ones((512, 512, 5)), dtype="float32", ) layer = preprocessing.RandomChannelShift( factor=0.4, channels=5, value_range=(0, 255) ) xs = layer(xs, training=True) self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0)) def test_1_channel(self): xs = tf.cast( np.ones((512, 512, 1)), dtype="float32", ) layer = preprocessing.RandomChannelShift( factor=0.4, channels=1, value_range=(0, 255) ) xs = layer(xs, training=True) self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0)) def test_in_single_image(self): xs = tf.cast( np.ones((512, 512, 3)), dtype="float32", ) layer = preprocessing.RandomChannelShift( factor=0.4, value_range=(0, 255) ) xs = layer(xs, training=True) self.assertFalse(np.any(ops.convert_to_numpy(xs) == 1.0)) def test_config(self): layer = preprocessing.RandomChannelShift( factor=[0.1, 0.5], value_range=(0, 255), seed=101 ) config = layer.get_config() self.assertEqual(config["factor"].get_config()["lower"], 0.1) self.assertEqual(config["factor"].get_config()["upper"], 0.5) self.assertEqual(config["value_range"], (0, 255)) self.assertEqual(config["channels"], 3) self.assertEqual(config["seed"], 101) reconstructed_layer = preprocessing.RandomChannelShift.from_config( config ) self.assertEqual(reconstructed_layer.factor, layer.factor) self.assertEqual(reconstructed_layer.value_range, layer.value_range) self.assertEqual(reconstructed_layer.seed, layer.seed) self.assertEqual(reconstructed_layer.channels, layer.channels)
keras-cv/keras_cv/layers/preprocessing/random_channel_shift_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_channel_shift_test.py", "repo_id": "keras-cv", "token_count": 1902 }
52
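The tests above exercise `RandomChannelShift` directly; the following sketch shows the same layer inside a `tf.data` pipeline. The pipeline wiring is illustrative and not taken from the test file.

import numpy as np
import tensorflow as tf
from keras_cv.layers import preprocessing

layer = preprocessing.RandomChannelShift(factor=0.3, value_range=(0, 255))

images = np.random.uniform(0, 255, size=(8, 224, 224, 3)).astype("float32")
ds = tf.data.Dataset.from_tensor_slices(images).batch(4)
# training=True so the shift is applied, mirroring the tests above.
ds = ds.map(lambda x: layer(x, training=True), num_parallel_calls=tf.data.AUTOTUNE)

for batch in ds.take(1):
    print(batch.shape)  # (4, 224, 224, 3)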
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import tensorflow as tf from keras_cv import bounding_box from keras_cv.backend import ops from keras_cv.layers.preprocessing.random_flip import HORIZONTAL_AND_VERTICAL from keras_cv.layers.preprocessing.random_flip import RandomFlip from keras_cv.tests.test_case import TestCase class RandomFlipTest(TestCase): def test_horizontal_flip(self): np.random.seed(1337) mock_random = tf.convert_to_tensor([[0.6], [0.6]]) inp = np.random.random((2, 5, 8, 3)) expected_output = np.flip(inp, axis=2) layer = RandomFlip("horizontal") with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(inp) self.assertAllClose(expected_output, actual_output) def test_flip_ragged(self): images = tf.ragged.stack( [tf.ones((512, 512, 3)), tf.ones((1002, 512, 3))] ) bounding_boxes = { "boxes": tf.ragged.stack([tf.ones((5, 4)), tf.ones((3, 4))]), "classes": tf.ragged.stack([tf.ones((5,)), tf.ones((3,))]), } inputs = {"images": images, "bounding_boxes": bounding_boxes} layer = RandomFlip(mode="horizontal", bounding_box_format="xywh") _ = layer(inputs) def test_vertical_flip(self): np.random.seed(1337) mock_random = tf.convert_to_tensor([[0.6], [0.6]]) inp = np.random.random((2, 5, 8, 3)) expected_output = np.flip(inp, axis=1) layer = RandomFlip("vertical") with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(inp) self.assertAllClose(expected_output, actual_output) def test_flip_both(self): np.random.seed(1337) mock_random = tf.convert_to_tensor([[0.6], [0.6]]) inp = np.random.random((2, 5, 8, 3)) expected_output = np.flip(inp, axis=2) expected_output = np.flip(expected_output, axis=1) layer = RandomFlip("horizontal_and_vertical") with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(inp) self.assertAllClose(expected_output, actual_output) def test_random_flip_default(self): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = np.flip(input_images, axis=2) mock_random = tf.convert_to_tensor([[0.6], [0.6]]) layer = RandomFlip() with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(input_images) self.assertAllClose(expected_output, actual_output) def test_random_flip_low_rate(self): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = input_images # mock_random > 0.5 but no flipping occurs due to low rate mock_random = tf.convert_to_tensor([[0.6], [0.6]]) layer = RandomFlip(rate=0.1) with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(input_images) self.assertAllClose(expected_output, actual_output) def test_random_flip_high_rate(self): input_images = np.random.random((2, 5, 8, 3)).astype(np.float32) expected_output = np.flip(input_images, 
axis=2) # mock_random is small (0.2) but flipping still occurs due to high rate mock_random = tf.convert_to_tensor([[0.2], [0.2]]) layer = RandomFlip(rate=0.9) with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(input_images) self.assertAllClose(expected_output, actual_output) def test_config_with_custom_name(self): layer = RandomFlip(name="image_preproc") config = layer.get_config() layer_1 = RandomFlip.from_config(config) self.assertEqual(layer_1.name, layer.name) def test_random_flip_unbatched_image(self): input_image = np.random.random((4, 4, 1)).astype(np.float32) expected_output = np.flip(input_image, axis=0) mock_random = tf.convert_to_tensor([[0.6]]) layer = RandomFlip("vertical") with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): actual_output = layer(input_image) self.assertAllClose(expected_output, actual_output) def test_output_dtypes(self): inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64") layer = RandomFlip() self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype, "float32" ) layer = RandomFlip(dtype="uint8") self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8") def test_augment_bounding_box_batched_input(self): image = tf.zeros([20, 20, 3]) bounding_boxes = { "boxes": tf.convert_to_tensor( [ [[0, 0, 10, 10], [4, 4, 12, 12]], [[4, 4, 12, 12], [0, 0, 10, 10]], ], dtype=tf.float32, ), "classes": tf.convert_to_tensor( [ [0, 0], [0, 0], ] ), } input = {"images": [image, image], "bounding_boxes": bounding_boxes} mock_random = tf.convert_to_tensor([[0.6], [0.6]]) layer = RandomFlip( "horizontal_and_vertical", bounding_box_format="xyxy" ) with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): output = layer(input) expected_output = { "boxes": tf.convert_to_tensor( [ [[10, 10, 20, 20], [8, 8, 16, 16]], [[8, 8, 16, 16], [10, 10, 20, 20]], ] ), "classes": tf.convert_to_tensor( [ [0, 0], [0, 0], ] ), } output["bounding_boxes"] = bounding_box.to_dense( output["bounding_boxes"] ) self.assertAllClose( expected_output["boxes"], output["bounding_boxes"]["boxes"] ) self.assertAllClose( expected_output["classes"], output["bounding_boxes"]["classes"] ) def test_augment_boxes_ragged(self): image = tf.zeros([2, 20, 20, 3]) bounding_boxes = { "boxes": tf.ragged.constant( [[[0, 0, 10, 10], [4, 4, 12, 12]], [[0, 0, 10, 10]]], dtype=tf.float32, ), "classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32), } input = {"images": image, "bounding_boxes": bounding_boxes} mock_random = tf.convert_to_tensor([[0.6], [0.6]]) layer = RandomFlip( "horizontal_and_vertical", bounding_box_format="xyxy" ) with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): output = layer(input) expected_output = { "boxes": tf.ragged.constant( [[[10, 10, 20, 20], [8, 8, 16, 16]], [[10, 10, 20, 20]]], dtype=tf.float32, ), "classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32), } output["bounding_boxes"] = bounding_box.to_dense( output["bounding_boxes"] ) expected_output = bounding_box.to_dense(expected_output) self.assertAllClose( expected_output["boxes"], output["bounding_boxes"]["boxes"] ) self.assertAllClose( expected_output["classes"], output["bounding_boxes"]["classes"] ) def test_augment_segmentation_mask(self): np.random.seed(1337) image = np.random.random((1, 20, 20, 3)).astype(np.float32) mask = np.random.randint(2, size=(1, 20, 20, 1)).astype(np.float32) input = {"images": 
image, "segmentation_masks": mask} # Flip both vertically and horizontally mock_random = tf.convert_to_tensor([[0.6]]) layer = RandomFlip("horizontal_and_vertical") with unittest.mock.patch.object( layer._random_generator, "uniform", return_value=mock_random, ): output = layer(input) expected_mask = np.flip(np.flip(mask, axis=1), axis=2) self.assertAllClose(expected_mask, output["segmentation_masks"]) def test_ragged_bounding_boxes(self): input_image = tf.random.uniform((2, 512, 512, 3)) bounding_boxes = { "boxes": tf.ragged.constant( [ [[200, 200, 400, 400], [100, 100, 300, 300]], [[200, 200, 400, 400]], ], dtype=tf.float32, ), "classes": tf.ragged.constant([[0, 0], [0]], dtype=tf.float32), } input = {"images": input_image, "bounding_boxes": bounding_boxes} layer = RandomFlip(bounding_box_format="xyxy") _ = layer(input) def test_independence_of_random_flip_on_batched_images(self): image = tf.random.uniform((100, 100, 3)) batched_images = tf.stack((image, image), axis=0) seed = 2023 layer = RandomFlip(mode=HORIZONTAL_AND_VERTICAL, seed=seed) results = layer(batched_images) self.assertNotAllClose(results[0], results[1]) def test_config(self): layer = RandomFlip( mode=HORIZONTAL_AND_VERTICAL, bounding_box_format="xyxy" ) config = layer.get_config() self.assertEqual(config["mode"], HORIZONTAL_AND_VERTICAL) self.assertEqual(config["bounding_box_format"], "xyxy")
keras-cv/keras_cv/layers/preprocessing/random_flip_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_flip_test.py", "repo_id": "keras-cv", "token_count": 5471 }
53
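A condensed sketch of the image-plus-bounding-box path covered by the tests above, using the same `RandomFlip` constructor arguments and input dictionary format.

import tensorflow as tf
from keras_cv.layers.preprocessing.random_flip import RandomFlip

images = tf.zeros([2, 20, 20, 3])
bounding_boxes = {
    "boxes": tf.constant(
        [[[0.0, 0.0, 10.0, 10.0]], [[4.0, 4.0, 12.0, 12.0]]], dtype=tf.float32
    ),
    "classes": tf.constant([[0.0], [0.0]], dtype=tf.float32),
}
layer = RandomFlip("horizontal_and_vertical", bounding_box_format="xyxy")
outputs = layer({"images": images, "bounding_boxes": bounding_boxes})
# Boxes are flipped with the images, clipped to the image, and returned
# in the same "xyxy" format.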
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import tensorflow as tf from keras_cv.backend import ops from keras_cv.layers import preprocessing from keras_cv.tests.test_case import TestCase class RandomTranslationTest(TestCase): def test_random_translation_up_numeric_reflect(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by -.2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=(-0.2, -0.2), width_factor=0.0 ) output_image = layer(input_image) expected_output = np.asarray( [ [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [20, 21, 22, 23, 24], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_up_numeric_constant(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by -.2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=(-0.2, -0.2), width_factor=0.0, fill_mode="constant", ) output_image = layer(input_image) expected_output = np.asarray( [ [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [0, 0, 0, 0, 0], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_down_numeric_reflect(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by .2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=(0.2, 0.2), width_factor=0.0 ) output_image = layer(input_image) expected_output = np.asarray( [ [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_asymmetric_size_numeric_reflect(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype( dtype ) # Shifting by .5 * 8 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=(0.5, 0.5), width_factor=0.0 ) output_image = layer(input_image) # pyformat: disable expected_output = np.asarray( [ [6, 7], [4, 5], [2, 3], [0, 1], [0, 1], [2, 3], [4, 5], [6, 7], ] ).astype(dtype) # pyformat: enable expected_output = np.reshape(expected_output, (1, 8, 2, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_down_numeric_constant(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by -.2 * 5 = 1 pixel. 
layer = preprocessing.RandomTranslation( height_factor=(0.2, 0.2), width_factor=0.0, fill_mode="constant", ) output_image = layer(input_image) expected_output = np.asarray( [ [0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_left_numeric_reflect(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by .2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=0.0, width_factor=(-0.2, -0.2) ) output_image = layer(input_image) expected_output = np.asarray( [ [1, 2, 3, 4, 4], [6, 7, 8, 9, 9], [11, 12, 13, 14, 14], [16, 17, 18, 19, 19], [21, 22, 23, 24, 24], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_left_numeric_constant(self): for dtype in (np.int64, np.float32): input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype( dtype ) # Shifting by -.2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=0.0, width_factor=(-0.2, -0.2), fill_mode="constant", ) output_image = layer(input_image) expected_output = np.asarray( [ [1, 2, 3, 4, 0], [6, 7, 8, 9, 0], [11, 12, 13, 14, 0], [16, 17, 18, 19, 0], [21, 22, 23, 24, 0], ] ).astype(dtype) expected_output = np.reshape(expected_output, (1, 5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_random_translation_on_batched_images_independently(self): image = tf.random.uniform(shape=(100, 100, 3)) input_images = tf.stack([image, image], axis=0) layer = preprocessing.RandomTranslation( height_factor=0.5, width_factor=0.5 ) results = layer(input_images) self.assertNotAllClose(results[0], results[1]) def test_config_with_custom_name(self): layer = preprocessing.RandomTranslation(0.5, 0.6, name="image_preproc") config = layer.get_config() layer_1 = preprocessing.RandomTranslation.from_config(config) self.assertEqual(layer_1.name, layer.name) def test_unbatched_image(self): input_image = np.reshape(np.arange(0, 25), (5, 5, 1)).astype(np.int64) # Shifting by -.2 * 5 = 1 pixel. layer = preprocessing.RandomTranslation( height_factor=(-0.2, -0.2), width_factor=0.0 ) output_image = layer(input_image) expected_output = np.asarray( [ [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [20, 21, 22, 23, 24], ] ).astype(np.int64) expected_output = np.reshape(expected_output, (5, 5, 1)) self.assertAllEqual(expected_output, output_image) def test_output_dtypes(self): inputs = np.array([[[1], [2]], [[3], [4]]], dtype="float64") layer = preprocessing.RandomTranslation(0.5, 0.6) self.assertAllEqual( ops.convert_to_numpy(layer(inputs)).dtype, "float32" ) layer = preprocessing.RandomTranslation(0.5, 0.6, dtype="uint8") self.assertAllEqual(ops.convert_to_numpy(layer(inputs)).dtype, "uint8")
keras-cv/keras_cv/layers/preprocessing/random_translation_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing/random_translation_test.py", "repo_id": "keras-cv", "token_count": 4946 }
54
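A short usage sketch mirroring the numeric tests above: a fixed 20% downward shift with constant (zero) fill on a 5x5 image.

import numpy as np
from keras_cv.layers import preprocessing

layer = preprocessing.RandomTranslation(
    height_factor=(0.2, 0.2), width_factor=0.0, fill_mode="constant"
)
image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype("float32")
shifted = layer(image)
# As in the tests, the first output row is zeros and every other row
# moves down by one pixel (0.2 * 5 = 1).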
# Copyright 2022 Waymo LLC. # # Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501 import tensorflow as tf from keras_cv.api_export import keras_cv_export from keras_cv.backend import random from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES @keras_cv_export("keras_cv.layers.GlobalRandomScaling") class GlobalRandomScaling(base_augmentation_layer_3d.BaseAugmentationLayer3D): """A preprocessing layer which randomly scales point clouds and bounding boxes along X, Y, and Z axes during training. This layer will randomly scale the whole scene along the X, Y, and Z axes based on a randomly sampled scaling factor between [min_scaling_factor, max_scaling_factor] following a uniform distribution. Input shape: point_clouds: 3D (multi frames) float32 Tensor with shape [num of frames, num of points, num of point features]. The first 5 features are [x, y, z, class, range]. bounding_boxes: 3D (multi frames) float32 Tensor with shape [num of frames, num of boxes, num of box features]. Boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py for more details on supported bounding box formats. Output shape: A dictionary of Tensors with the same shape as input Tensors. Arguments: x_factor: A tuple of float scalars or a float scalar sets the minimum and maximum scaling factors for the X axis. y_factor: A tuple of float scalars or a float scalar sets the minimum and maximum scaling factors for the Y axis. z_factor: A tuple of float scalars or a float scalar sets the minimum and maximum scaling factors for the Z axis. """ def __init__( self, x_factor=None, y_factor=None, z_factor=None, preserve_aspect_ratio=False, **kwargs ): super().__init__(**kwargs) if not x_factor: min_x_factor = 1.0 max_x_factor = 1.0 elif type(x_factor) is float: min_x_factor = x_factor max_x_factor = x_factor else: min_x_factor = x_factor[0] max_x_factor = x_factor[1] if not y_factor: min_y_factor = 1.0 max_y_factor = 1.0 elif type(y_factor) is float: min_y_factor = y_factor max_y_factor = y_factor else: min_y_factor = y_factor[0] max_y_factor = y_factor[1] if not z_factor: min_z_factor = 1.0 max_z_factor = 1.0 elif type(z_factor) is float: min_z_factor = z_factor max_z_factor = z_factor else: min_z_factor = z_factor[0] max_z_factor = z_factor[1] if ( min_x_factor < 0 or max_x_factor < 0 or min_y_factor < 0 or max_y_factor < 0 or min_z_factor < 0 or max_z_factor < 0 ): raise ValueError("min_factor and max_factor must be >=0.") if ( min_x_factor > max_x_factor or min_y_factor > max_y_factor or min_z_factor > max_z_factor ): raise ValueError("min_factor must be less than max_factor.") if preserve_aspect_ratio: if min_x_factor != min_y_factor or min_y_factor != min_z_factor: raise ValueError( "min_factor must be the same when preserve_aspect_ratio is " "true." ) if max_x_factor != max_y_factor or max_y_factor != max_z_factor: raise ValueError( "max_factor must be the same when preserve_aspect_ratio is " "true." 
) self._min_x_factor = min_x_factor self._max_x_factor = max_x_factor self._min_y_factor = min_y_factor self._max_y_factor = max_y_factor self._min_z_factor = min_z_factor self._max_z_factor = max_z_factor self._preserve_aspect_ratio = preserve_aspect_ratio def get_config(self): return { "x_factor": ( self._min_x_factor, self._max_x_factor, ), "y_factor": ( self._min_y_factor, self._max_y_factor, ), "z_factor": ( self._min_z_factor, self._max_z_factor, ), "preserve_aspect_ratio": self._preserve_aspect_ratio, } def get_random_transformation(self, **kwargs): random_scaling_x = random.uniform( (), minval=self._min_x_factor, maxval=self._max_x_factor, dtype=self.compute_dtype, seed=self._random_generator, ) random_scaling_y = random.uniform( (), minval=self._min_y_factor, maxval=self._max_y_factor, dtype=self.compute_dtype, seed=self._random_generator, ) random_scaling_z = random.uniform( (), minval=self._min_z_factor, maxval=self._max_z_factor, dtype=self.compute_dtype, seed=self._random_generator, ) if not self._preserve_aspect_ratio: return { "scale": tf.stack( [random_scaling_x, random_scaling_y, random_scaling_z] ) } else: return { "scale": tf.stack( [random_scaling_x, random_scaling_x, random_scaling_x] ) } def augment_point_clouds_bounding_boxes( self, point_clouds, bounding_boxes, transformation, **kwargs ): scale = transformation["scale"][tf.newaxis, tf.newaxis, :] point_clouds_xyz = point_clouds[..., :3] * scale point_clouds = tf.concat( [point_clouds_xyz, point_clouds[..., 3:]], axis=-1 ) bounding_boxes_xyzdxdydz = bounding_boxes[ ..., : CENTER_XYZ_DXDYDZ_PHI.DZ + 1 ] * tf.concat([scale] * 2, axis=-1) bounding_boxes = tf.concat( [ bounding_boxes_xyzdxdydz, bounding_boxes[..., CENTER_XYZ_DXDYDZ_PHI.PHI :], ], axis=-1, ) return (point_clouds, bounding_boxes)
keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling.py/0
{ "file_path": "keras-cv/keras_cv/layers/preprocessing_3d/waymo/global_random_scaling.py", "repo_id": "keras-cv", "token_count": 3451 }
55
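A hedged usage sketch for the layer above. The input dictionary keys (`point_clouds`, `bounding_boxes`) follow the constants imported from `base_augmentation_layer_3d`, and the tensor shapes are illustrative placeholders rather than values taken from this file.

import tensorflow as tf
import keras_cv

layer = keras_cv.layers.GlobalRandomScaling(
    x_factor=(0.9, 1.1), y_factor=(0.9, 1.1), z_factor=(0.9, 1.1)
)

# [num frames, num points, point features], first 5 features are [x, y, z, class, range]
point_clouds = tf.random.uniform((2, 50, 10))
# [num frames, num boxes, box features] in CENTER_XYZ_DXDYDZ_PHI order
bounding_boxes = tf.random.uniform((2, 5, 7))

outputs = layer(
    {"point_clouds": point_clouds, "bounding_boxes": bounding_boxes},
    training=True,
)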
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.layers.regularization.dropblock_2d import DropBlock2D from keras_cv.tests.test_case import TestCase class DropBlock2DTest(TestCase): FEATURE_SHAPE = (1, 14, 14, 256) # Shape of ResNet block group 3 rng = tf.random.Generator.from_non_deterministic_state() def test_layer_not_created_with_invalid_block_size(self): invalid_sizes = [0, -10, (5, -2), (0, 7), (1, 2, 3, 4)] for size in invalid_sizes: with self.assertRaises(ValueError): DropBlock2D(block_size=size, rate=0.1) def test_layer_not_created_with_invalid_rate(self): invalid_rates = [1.1, -0.1] for rate in invalid_rates: with self.assertRaises(ValueError): DropBlock2D(rate=rate, block_size=7) def test_input_unchanged_in_eval_mode(self): dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE) layer = DropBlock2D(rate=0.1, block_size=7) output = layer(dummy_inputs, training=False) self.assertAllClose(dummy_inputs, output) def test_input_unchanged_with_rate_equal_to_zero(self): dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE) layer = DropBlock2D(rate=0.0, block_size=7) output = layer(dummy_inputs, training=True) self.assertAllClose(dummy_inputs, output) def test_input_gets_partially_zeroed_out_in_train_mode(self): dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE) layer = DropBlock2D(rate=0.1, block_size=7) output = layer(dummy_inputs, training=True) num_input_zeros = self._count_zeros(dummy_inputs) num_output_zeros = self._count_zeros(output) self.assertGreater(num_output_zeros, num_input_zeros) def test_batched_input_gets_partially_zeroed_out_in_train_mode(self): batched_shape = (4, *self.FEATURE_SHAPE[1:]) dummy_inputs = self.rng.uniform(shape=batched_shape) layer = DropBlock2D(rate=0.1, block_size=7) output = layer(dummy_inputs, training=True) num_input_zeros = self._count_zeros(dummy_inputs) num_output_zeros = self._count_zeros(output) self.assertGreater(num_output_zeros, num_input_zeros) def test_input_gets_partially_zeroed_out_with_non_square_block_size(self): dummy_inputs = self.rng.uniform(shape=self.FEATURE_SHAPE) layer = DropBlock2D(rate=0.1, block_size=(7, 10)) output = layer(dummy_inputs, training=True) num_input_zeros = self._count_zeros(dummy_inputs) num_output_zeros = self._count_zeros(output) self.assertGreater(num_output_zeros, num_input_zeros) @staticmethod def _count_zeros(tensor: tf.Tensor) -> tf.Tensor: return tf.size(tensor) - tf.math.count_nonzero(tensor, dtype=tf.int32)
keras-cv/keras_cv/layers/regularization/dropblock_2d_test.py/0
{ "file_path": "keras-cv/keras_cv/layers/regularization/dropblock_2d_test.py", "repo_id": "keras-cv", "token_count": 1420 }
56
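An illustrative placement of the layer exercised above inside a small functional model; the surrounding convnet is an assumption, only the `DropBlock2D` import path and arguments come from the test file.

import tensorflow as tf
from tensorflow import keras
from keras_cv.layers.regularization.dropblock_2d import DropBlock2D

inputs = keras.Input(shape=(56, 56, 32))
x = keras.layers.Conv2D(64, 3, padding="same", activation="relu")(inputs)
# Zero out contiguous 7x7 blocks of the feature map during training.
x = DropBlock2D(rate=0.1, block_size=7)(x)
x = keras.layers.GlobalAveragePooling2D()(x)
outputs = keras.layers.Dense(10)(x)
model = keras.Model(inputs, outputs)

preds = model(tf.random.uniform((4, 56, 56, 32)), training=True)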
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.bounding_box_3d import CENTER_XYZ_DXDYDZ_PHI def l1(y_true, y_pred, sigma=9.0): """Computes element-wise l1 loss.""" absolute_difference = ops.abs(y_pred - y_true) loss = ops.where( absolute_difference < 1.0 / sigma, 0.5 * sigma * absolute_difference**2, absolute_difference - 0.5 / sigma, ) return ops.sum(loss, axis=-1) @keras_cv_export("keras_cv.losses.CenterNetBoxLoss") class CenterNetBoxLoss(keras.losses.Loss): """Implements a bin-based box regression loss for 3D bounding boxes. This loss is meant for use as a box loss for `keras_cv.models.MultiHeadCenterPillar`. Reference: https://arxiv.org/abs/1812.04244 CenterNetBoxLoss uses L1 loss on the individual components of boxes, with the exception of the bin-based heading component of each box, where the bin indicator outputs use cross entropy loss, and the heading residual uses L1 loss. The position (x/y/z) components of predictions are absolute, while the size components are normalized to the anchor size. Ground truth boxes are expected to follow the CENTER_XYZ_DXDYDZ_PHI format. Refer to https://github.com/keras-team/keras-cv/blob/master/keras_cv/bounding_box_3d/formats.py for more details on supported bounding box formats. Box predictions are expected to be in CenterPillar heatmap-encoded format. Args: num_heading_bins: int, number of bins used for predicting box heading. anchor_size: list of 3 ints, anchor sizes for the x, y, and z axes. """ # noqa: E501 def __init__(self, num_heading_bins, anchor_size, **kwargs): super().__init__(**kwargs) self.num_heading_bins = num_heading_bins self.anchor_size = anchor_size def heading_regression_loss(self, heading_true, heading_pred): # Set the heading to within 0 -> 2pi heading_true = ops.floor(ops.mod(heading_true, 2 * math.pi)) # Divide 2pi into bins. shifted by 0.5 * angle_per_class. 
angle_per_class = (2 * math.pi) / self.num_heading_bins shift_angle = ops.floor( ops.mod(heading_true + angle_per_class / 2, 2 * math.pi) ) heading_bin_label_float = ops.floor( ops.divide(shift_angle, angle_per_class) ) heading_bin_label = ops.cast(heading_bin_label_float, dtype="int32") heading_res_label = shift_angle - ( heading_bin_label_float * angle_per_class + angle_per_class / 2.0 ) heading_res_norm_label = heading_res_label / (angle_per_class / 2.0) heading_bin_one_hot = ops.one_hot( heading_bin_label, self.num_heading_bins, dtype=heading_pred.dtype ) loss_heading_bin = ops.categorical_crossentropy( target=heading_bin_one_hot, output=heading_pred[..., : self.num_heading_bins], from_logits=True, ) loss_heading_res = l1( ops.sum( heading_pred[..., self.num_heading_bins :] * heading_bin_one_hot, axis=-1, keepdims=True, ), ops.expand_dims(heading_res_norm_label, axis=-1), ) return loss_heading_bin + loss_heading_res def regression_loss(self, y_true, y_pred): position_loss = l1(y_true[:, :3], y_pred[:, :3]) heading_loss = self.heading_regression_loss( y_true[:, CENTER_XYZ_DXDYDZ_PHI.PHI], y_pred[:, 3:-3] ) # Size loss size_norm_label = y_true[:, 3:6] / ops.cast( self.anchor_size, y_true.dtype ) size_norm_pred = y_pred[:, -3:] + 1.0 size_loss = l1(size_norm_pred, size_norm_label) # TODO(ianstenbit): Add IoU3D Loss. return position_loss + heading_loss + size_loss def call(self, y_true, y_pred): return ops.vectorized_map( lambda y_true_and_pred: self.regression_loss( y_true_and_pred[0], y_true_and_pred[1] ), (y_true, y_pred), ) def get_config(self): config = { "num_heading_bins": self.num_heading_bins, "anchor_size": self.anchor_size, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/losses/centernet_box_loss.py/0
{ "file_path": "keras-cv/keras_cv/losses/centernet_box_loss.py", "repo_id": "keras-cv", "token_count": 2163 }
57
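A small shape-level sketch of calling the loss above. The prediction width `3 + 2 * num_heading_bins + 3` is inferred from the slicing in `regression_loss`, and the unreduced output shape is an expectation, not a value taken from this file.

import numpy as np
from keras_cv.losses import CenterNetBoxLoss

num_heading_bins = 12
loss_fn = CenterNetBoxLoss(
    num_heading_bins=num_heading_bins,
    anchor_size=[1.0, 1.0, 1.0],
    reduction="none",
)

batch, num_boxes = 2, 8
# Ground truth boxes: CENTER_XYZ_DXDYDZ_PHI, i.e. [x, y, z, dx, dy, dz, phi].
y_true = np.random.uniform(size=(batch, num_boxes, 7)).astype("float32")
# Predictions: 3 (position) + 2 * num_heading_bins (bin logits + residuals) + 3 (size).
y_pred = np.random.uniform(
    size=(batch, num_boxes, 3 + 2 * num_heading_bins + 3)
).astype("float32")

per_box_loss = loss_fn(y_true, y_pred)  # expected shape before reduction: (batch, num_boxes)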
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops @keras_cv_export("keras_cv.losses.SmoothL1Loss") class SmoothL1Loss(keras.losses.Loss): """Implements Smooth L1 loss. SmoothL1Loss implements the SmoothL1 function, where values less than `l1_cutoff` contribute to the overall loss based on their squared difference, and values greater than l1_cutoff contribute based on their raw difference. Args: l1_cutoff: differences between y_true and y_pred that are larger than `l1_cutoff` are treated as `L1` values """ def __init__(self, l1_cutoff=1.0, **kwargs): super().__init__(**kwargs) self.l1_cutoff = l1_cutoff def call(self, y_true, y_pred): difference = y_true - y_pred absolute_difference = ops.abs(difference) squared_difference = difference**2 loss = ops.where( absolute_difference < self.l1_cutoff, 0.5 * squared_difference, absolute_difference - 0.5, ) return ops.mean(loss, axis=-1) def get_config(self): config = { "l1_cutoff": self.l1_cutoff, } base_config = super().get_config() return dict(list(base_config.items()) + list(config.items()))
keras-cv/keras_cv/losses/smooth_l1.py/0
{ "file_path": "keras-cv/keras_cv/losses/smooth_l1.py", "repo_id": "keras-cv", "token_count": 725 }
58
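A quick numeric check of the piecewise definition above: differences below `l1_cutoff` contribute `0.5 * diff**2`, larger ones contribute `|diff| - 0.5`, and the result is averaged over the last axis.

import numpy as np
from keras_cv.losses import SmoothL1Loss

loss_fn = SmoothL1Loss(l1_cutoff=1.0)

y_true = np.array([[0.0, 0.0, 0.0]], dtype="float32")
y_pred = np.array([[0.5, 2.0, -3.0]], dtype="float32")
# Element-wise: 0.5 * 0.5**2 = 0.125, 2.0 - 0.5 = 1.5, 3.0 - 0.5 = 2.5
# Mean over the last axis: (0.125 + 1.5 + 2.5) / 3 = 1.375
print(float(loss_fn(y_true, y_pred)))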
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """CSPDarkNet model preset configurations.""" backbone_presets_no_weights = { "csp_darknet_tiny": { "metadata": { "description": ( "CSPDarkNet model with [48, 96, 192, 384] channels and " "[1, 3, 3, 1] depths where the batch normalization " "and SiLU activation are applied after the convolution layers." ), "params": 2380416, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_tiny/2", }, "csp_darknet_s": { "metadata": { "description": ( "CSPDarkNet model with [64, 128, 256, 512] channels and " "[1, 3, 3, 1] depths where the batch normalization " "and SiLU activation are applied after the convolution layers." ), "params": 4223488, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_s/2", }, "csp_darknet_m": { "metadata": { "description": ( "CSPDarkNet model with [96, 192, 384, 768] channels and " "[2, 6, 6, 2] depths where the batch normalization " "and SiLU activation are applied after the convolution layers." ), "params": 12374400, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_m/2", }, "csp_darknet_l": { "metadata": { "description": ( "CSPDarkNet model with [128, 256, 512, 1024] channels and " "[3, 9, 9, 3] depths where the batch normalization " "and SiLU activation are applied after the convolution layers." ), "params": 27111424, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_l/2", }, "csp_darknet_xl": { "metadata": { "description": ( "CSPDarkNet model with [170, 340, 680, 1360] channels and " "[4, 12, 12, 4] depths where the batch normalization " "and SiLU activation are applied after the convolution layers." ), "params": 56837970, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_xl/2", }, } backbone_presets_with_weights = { "csp_darknet_tiny_imagenet": { "metadata": { "description": ( "CSPDarkNet model with [48, 96, 192, 384] channels and " "[1, 3, 3, 1] depths where the batch normalization " "and SiLU activation are applied after the convolution layers. " "Trained on Imagenet 2012 classification task." ), "params": 2380416, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_tiny_imagenet/2", # noqa: E501 }, "csp_darknet_l_imagenet": { "metadata": { "description": ( "CSPDarkNet model with [128, 256, 512, 1024] channels and " "[3, 9, 9, 3] depths where the batch normalization " "and SiLU activation are applied after the convolution layers. " "Trained on Imagenet 2012 classification task." 
), "params": 27111424, "official_name": "CSPDarkNet", "path": "csp_darknet", }, "kaggle_handle": "kaggle://keras/cspdarknet/keras/csp_darknet_l_imagenet/2", # noqa: E501 }, } backbone_presets = { **backbone_presets_no_weights, **backbone_presets_with_weights, }
keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/csp_darknet/csp_darknet_backbone_presets.py", "repo_id": "keras-cv", "token_count": 2218 }
59
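A hedged sketch of consuming one of the presets registered above. The class name `keras_cv.models.CSPDarkNetBackbone` is an assumption; it does not appear in this presets file.

import numpy as np
import keras_cv

# Loads the architecture and ImageNet weights registered under
# "csp_darknet_tiny_imagenet" above.
backbone = keras_cv.models.CSPDarkNetBackbone.from_preset(
    "csp_darknet_tiny_imagenet"
)
features = backbone(np.ones((1, 224, 224, 3)))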
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for loading pretrained model presets.""" import numpy as np import pytest from keras_cv.backend import ops from keras_cv.models.backbones.mix_transformer.mix_transformer_aliases import ( MiTB0Backbone, ) from keras_cv.models.backbones.mix_transformer.mix_transformer_backbone import ( MiTBackbone, ) from keras_cv.tests.test_case import TestCase @pytest.mark.large class MixTransformerPresetSmokeTest(TestCase): """ A smoke test for MixTransformer presets we run continuously. This only tests the smallest weights we have available. Run with: `pytest keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py --run_large` # noqa: E501 """ def setUp(self): self.input_batch = np.ones(shape=(2, 224, 224, 3)) def test_backbone_output(self): model = MiTBackbone.from_preset("mit_b0") model(self.input_batch) def test_backbone_output_with_weights(self): model = MiTBackbone.from_preset("mit_b0_imagenet") # The forward pass from a preset should be stable! # This test should catch cases where we unintentionally change our # network code in a way that would invalidate our preset weights. # We should only update these numbers if we are updating a weights # file, or have found a discrepancy with the upstream source. outputs = model(np.ones(shape=(1, 224, 224, 3))) expected = [-0.603472, -0.180627, -1.92137, -0.004339, 2.396384] # Keep a high tolerance, so we are robust to different hardware. self.assertAllClose( ops.convert_to_numpy(outputs[0, 0, 0, :5]), expected, atol=0.01, rtol=0.01, ) def test_applications_model_output(self): model = MiTB0Backbone() model(self.input_batch) def test_applications_model_output_with_preset(self): model = MiTB0Backbone.from_preset("mit_b0_imagenet") model(self.input_batch) def test_preset_docstring(self): """Check we did our docstring formatting correctly.""" for name in MiTBackbone.presets: self.assertRegex(MiTBackbone.from_preset.__doc__, name) def test_unknown_preset_error(self): # Not a preset name with self.assertRaises(ValueError): MiTBackbone.from_preset("mit_b0_clowntown") def test_load_weights_error(self): # Try to load weights when none available with self.assertRaises(ValueError): MiTBackbone.from_preset("mit_b0", load_weights=True) @pytest.mark.extra_large class MixTransformerPresetFullTest(TestCase): """ Test the full enumeration of our preset. This tests every preset for Mix Transformer and is only run manually. Run with: `pytest keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py --run_extra_large` # noqa: E501 """ def test_load_mix_transformer(self): input_data = np.ones(shape=(2, 224, 224, 3)) for preset in MiTBackbone.presets: model = MiTBackbone.from_preset(preset) model(input_data)
keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/mix_transformer/mix_transformer_backbone_presets_test.py", "repo_id": "keras-cv", "token_count": 1423 }
60
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ResNet backbone model. Reference: - [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016) - [Based on the original keras.applications ResNet](https://github.com/keras-team/keras/blob/master/keras/applications/resnet_v2.py) """ # noqa: E501 import copy from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.models import utils from keras_cv.models.backbones.backbone import Backbone from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import ( backbone_presets, ) from keras_cv.models.backbones.resnet_v2.resnet_v2_backbone_presets import ( backbone_presets_with_weights, ) from keras_cv.utils.python_utils import classproperty BN_AXIS = 3 BN_EPSILON = 1.001e-5 @keras_cv_export("keras_cv.models.ResNetV2Backbone") class ResNetV2Backbone(Backbone): """Instantiates the ResNetV2 architecture. Reference: - [Identity Mappings in Deep Residual Networks](https://arxiv.org/abs/1603.05027) (ECCV 2016) The difference in Resnet and ResNetV2 rests in the structure of their individual building blocks. In ResNetV2, the batch normalization and ReLU activation precede the convolution layers, as opposed to ResNetV1 where the batch normalization and ReLU activation are applied after the convolution layers. For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/). Args: stackwise_filters: list of ints, number of filters for each stack in the model. stackwise_blocks: list of ints, number of blocks for each stack in the model. stackwise_strides: list of ints, stride for each stack in the model. include_rescaling: bool, whether to rescale the inputs. If set to `True`, inputs will be passed through a `Rescaling(1/255.0)` layer. stackwise_dilations: list of ints, dilation for each stack in the model. If `None` (default), dilation will not be used. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. block_type: string, one of "basic_block" or "block". The block type to stack. Use "basic_block" for smaller models like ResNet18 and ResNet34. 
Examples: ```python input_data = tf.ones(shape=(8, 224, 224, 3)) # Pretrained backbone model = keras_cv.models.ResNetV2Backbone.from_preset("resnet50_v2_imagenet") output = model(input_data) # Randomly initialized backbone with a custom config model = ResNetV2Backbone( stackwise_filters=[64, 128, 256, 512], stackwise_blocks=[2, 2, 2, 2], stackwise_strides=[1, 2, 2, 2], include_rescaling=False, ) output = model(input_data) ``` """ # noqa: E501 def __init__( self, *, stackwise_filters, stackwise_blocks, stackwise_strides, include_rescaling, stackwise_dilations=None, input_shape=(None, None, 3), input_tensor=None, block_type="block", **kwargs, ): inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = keras.layers.Rescaling(1 / 255.0)(x) x = keras.layers.Conv2D( 64, 7, strides=2, use_bias=True, padding="same", name="conv1_conv", )(x) x = keras.layers.MaxPooling2D( 3, strides=2, padding="same", name="pool1_pool" )(x) num_stacks = len(stackwise_filters) if stackwise_dilations is None: stackwise_dilations = [1] * num_stacks pyramid_level_inputs = {} for stack_index in range(num_stacks): x = apply_stack( x, filters=stackwise_filters[stack_index], blocks=stackwise_blocks[stack_index], stride=stackwise_strides[stack_index], dilations=stackwise_dilations[stack_index], block_type=block_type, first_shortcut=(block_type == "block" or stack_index > 0), name=f"v2_stack_{stack_index}", ) pyramid_level_inputs[f"P{stack_index + 2}"] = ( utils.get_tensor_input_name(x) ) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name="post_bn" )(x) x = keras.layers.Activation("relu", name="post_relu")(x) # Create model. super().__init__(inputs=inputs, outputs=x, **kwargs) # All references to `self` below this line self.pyramid_level_inputs = pyramid_level_inputs self.stackwise_filters = stackwise_filters self.stackwise_blocks = stackwise_blocks self.stackwise_strides = stackwise_strides self.include_rescaling = include_rescaling self.stackwise_dilations = stackwise_dilations self.input_tensor = input_tensor self.block_type = block_type def get_config(self): config = super().get_config() config.update( { "stackwise_filters": self.stackwise_filters, "stackwise_blocks": self.stackwise_blocks, "stackwise_strides": self.stackwise_strides, "include_rescaling": self.include_rescaling, # Remove batch dimension from `input_shape` "input_shape": self.input_shape[1:], "stackwise_dilations": self.stackwise_dilations, "input_tensor": self.input_tensor, "block_type": self.block_type, } ) return config @classproperty def presets(cls): """Dictionary of preset names and configurations.""" return copy.deepcopy(backbone_presets) @classproperty def presets_with_weights(cls): """Dictionary of preset names and configurations that include weights.""" return copy.deepcopy(backbone_presets_with_weights) def apply_basic_block( x, filters, kernel_size=3, stride=1, dilation=1, conv_shortcut=False, name=None, ): """A basic residual block (v2). Args: x: input tensor. filters: int, filters of the basic layer. kernel_size: int, kernel size of the bottleneck layer, defaults to 3. stride: int, stride of the first layer, defaults to 1. dilation: int, the dilation rate to use for dilated convolution. Defaults to 1. conv_shortcut: bool, uses convolution shortcut if `True`. If `False` (default), uses identity or pooling shortcut, based on stride. name: string, optional prefix for the layer names used in the block. Returns: Output tensor for the residual block. 
""" if name is None: name = f"v2_basic_block_{keras.backend.get_uid('v2_basic_block')}" use_preactivation = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_use_preactivation_bn" )(x) use_preactivation = keras.layers.Activation( "relu", name=name + "_use_preactivation_relu" )(use_preactivation) s = stride if dilation == 1 else 1 if conv_shortcut: shortcut = keras.layers.Conv2D( filters, 1, strides=s, name=name + "_0_conv" )(use_preactivation) else: shortcut = ( keras.layers.MaxPooling2D( 1, strides=stride, name=name + "_0_max_pooling" )(x) if s > 1 else x ) x = keras.layers.Conv2D( filters, kernel_size, padding="SAME", strides=1, use_bias=False, name=name + "_1_conv", )(use_preactivation) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn" )(x) x = keras.layers.Activation("relu", name=name + "_1_relu")(x) x = keras.layers.Conv2D( filters, kernel_size, strides=s, padding="same", dilation_rate=dilation, use_bias=False, name=name + "_2_conv", )(x) x = keras.layers.Add(name=name + "_out")([shortcut, x]) return x def apply_block( x, filters, kernel_size=3, stride=1, dilation=1, conv_shortcut=False, name=None, ): """A residual block (v2). Args: x: input tensor. filters: int, filters of the basic layer. kernel_size: int, kernel size of the bottleneck layer, defaults to 3. stride: int, stride of the first layer, defaults to 1. dilation: int, the dilation rate to use for dilated convolution. Defaults to 1. conv_shortcut: bool, uses convolution shortcut if `True`. If `False` (default), uses identity or pooling shortcut, based on stride. name: string, optional prefix for the layer names used in the block. Returns: Output tensor for the residual block. """ if name is None: name = f"v2_block_{keras.backend.get_uid('v2_block')}" use_preactivation = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_use_preactivation_bn" )(x) use_preactivation = keras.layers.Activation( "relu", name=name + "_use_preactivation_relu" )(use_preactivation) s = stride if dilation == 1 else 1 if conv_shortcut: shortcut = keras.layers.Conv2D( 4 * filters, 1, strides=s, name=name + "_0_conv", )(use_preactivation) else: shortcut = ( keras.layers.MaxPooling2D( 1, strides=stride, name=name + "_0_max_pooling" )(x) if s > 1 else x ) x = keras.layers.Conv2D( filters, 1, strides=1, use_bias=False, name=name + "_1_conv" )(use_preactivation) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_1_bn" )(x) x = keras.layers.Activation("relu", name=name + "_1_relu")(x) x = keras.layers.Conv2D( filters, kernel_size, strides=s, use_bias=False, padding="same", dilation_rate=dilation, name=name + "_2_conv", )(x) x = keras.layers.BatchNormalization( axis=BN_AXIS, epsilon=BN_EPSILON, name=name + "_2_bn" )(x) x = keras.layers.Activation("relu", name=name + "_2_relu")(x) x = keras.layers.Conv2D(4 * filters, 1, name=name + "_3_conv")(x) x = keras.layers.Add(name=name + "_out")([shortcut, x]) return x def apply_stack( x, filters, blocks, stride=2, dilations=1, name=None, block_type="block", first_shortcut=True, ): """A set of stacked blocks. Args: x: input tensor. filters: int, filters of the layer in a block. blocks: int, blocks in the stacked blocks. stride: int, stride of the first layer in the first block, defaults to 2. dilations: int, the dilation rate to use for dilated convolution. Defaults to 1. name: string, optional prefix for the layer names used in the block. 
block_type: string, one of "basic_block" or "block". The block type to stack. Use "basic_block" for ResNet18 and ResNet34. first_shortcut: bool. Use convolution shortcut if `True` (default), otherwise uses identity or pooling shortcut, based on stride. Returns: Output tensor for the stacked blocks. """ if name is None: name = "v2_stack" if block_type == "basic_block": block_fn = apply_basic_block elif block_type == "block": block_fn = apply_block else: raise ValueError( """`block_type` must be either "basic_block" or "block". """ f"Received block_type={block_type}." ) x = block_fn( x, filters, conv_shortcut=first_shortcut, name=name + "_block1" ) for i in range(2, blocks): x = block_fn( x, filters, dilation=dilations, name=name + "_block" + str(i) ) x = block_fn( x, filters, stride=stride, dilation=dilations, name=name + "_block" + str(blocks), ) return x
keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py/0
{ "file_path": "keras-cv/keras_cv/models/backbones/resnet_v2/resnet_v2_backbone.py", "repo_id": "keras-cv", "token_count": 5936 }
61
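A sketch of using the `pyramid_level_inputs` attribute from the backbone above to build a multi-level feature extractor; the extractor pattern is an assumption, only the preset name and attribute come from this file.

import numpy as np
from keras_cv.backend import keras
from keras_cv.models import ResNetV2Backbone

backbone = ResNetV2Backbone.from_preset("resnet50_v2_imagenet")
# `pyramid_level_inputs` maps "P2".."P5" to the names of the layers whose
# outputs feed a feature pyramid.
extractor = keras.Model(
    inputs=backbone.inputs,
    outputs={
        level: backbone.get_layer(name).output
        for level, name in backbone.pyramid_level_inputs.items()
    },
)
feature_maps = extractor(np.ones((1, 224, 224, 3)))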
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DarkNet models for KerasCV. Reference: - [YoloV3 Paper](https://arxiv.org/abs/1804.02767) - [YoloV3 implementation](https://github.com/ultralytics/yolov3) """ import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import ( DarknetConvBlock, ) from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import ( ResidualBlocks, ) from keras_cv.models.backbones.csp_darknet.csp_darknet_utils import ( SpatialPyramidPoolingBottleneck, ) from keras_cv.models.legacy import utils from keras_cv.models.legacy.weights import parse_weights BASE_DOCSTRING = """Represents the {name} architecture. Although the {name} architecture is commonly used for detection tasks, it is possible to extract the intermediate dark2 to dark5 layers from the model for creating a feature pyramid Network. Reference: - [YoloV3 Paper](https://arxiv.org/abs/1804.02767) - [YoloV3 implementation](https://github.com/ultralytics/yolov3) For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/). Args: include_rescaling: bool, whether to rescale the inputs. If set to True, inputs will be passed through a `Rescaling(1/255.0)` layer. include_top: bool, whether to include the fully-connected layer at the top of the network. If provided, `num_classes` must be provided. num_classes: integer, optional number of classes to classify images into. Only to be specified if `include_top` is True. weights: one of `None` (random initialization), or a pretrained weight file path. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e., output of `layers.Input()`) to use as image input for the model. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. name: string, optional name to pass to the model, defaults to "{name}". Returns: A `keras.Model` instance. """ # noqa: E501 @keras.utils.register_keras_serializable(package="keras_cv.models") class DarkNet(keras.Model): """Represents the DarkNet architecture. The DarkNet architecture is commonly used for detection tasks. It is possible to extract the intermediate dark2 to dark5 layers from the model for creating a feature pyramid Network. Reference: - [YoloV3 Paper](https://arxiv.org/abs/1804.02767) - [YoloV3 implementation](https://github.com/ultralytics/yolov3) For transfer learning use cases, make sure to read the [guide to transfer learning & fine-tuning](https://keras.io/guides/transfer_learning/). 
Args: blocks: integer, numbers of building blocks from the layer dark2 to layer dark5. include_rescaling: bool, whether to rescale the inputs. If set to True, inputs will be passed through a `Rescaling(1/255.0)` layer. include_top: bool, whether to include the fully-connected layer at the top of the network. If provided, `num_classes` must be provided. num_classes: integer, optional number of classes to classify images into. Only to be specified if `include_top` is True. weights: one of `None` (random initialization) or a pretrained weight file path. input_shape: optional shape tuple, defaults to (None, None, 3). input_tensor: optional Keras tensor (i.e., output of `layers.Input()`) to use as image input for the model. pooling: optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional block. - `avg` means that global average pooling will be applied to the output of the last convolutional block, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classifier_activation: A `str` or callable. The activation function to use on the "top" layer. Ignored unless `include_top=True`. Set `classifier_activation=None` to return the logits of the "top" layer. name: string, optional name to pass to the model, defaults to "DarkNet". Returns: A `keras.Model` instance. """ # noqa: E501 def __init__( self, blocks, include_rescaling, include_top, num_classes=None, weights=None, input_shape=(None, None, 3), input_tensor=None, pooling=None, classifier_activation="softmax", name="DarkNet", **kwargs, ): if weights and not tf.io.gfile.exists(weights): raise ValueError( "The `weights` argument should be either `None` or the path to " "the weights file to be loaded. Weights file not found at " f"location: {weights}" ) if include_top and not num_classes: raise ValueError( "If `include_top` is True, you should specify `num_classes`. 
" f"Received: num_classes={num_classes}" ) inputs = utils.parse_model_inputs(input_shape, input_tensor) x = inputs if include_rescaling: x = layers.Rescaling(1 / 255.0)(x) # stem x = DarknetConvBlock( filters=32, kernel_size=3, strides=1, activation="leaky_relu", name="stem_conv", )(x) x = ResidualBlocks( filters=64, num_blocks=1, name="stem_residual_block" )(x) # filters for the ResidualBlock outputs filters = [128, 256, 512, 1024] # layer_num is used for naming the residual blocks # (starts with dark2, hence 2) layer_num = 2 for filter, block in zip(filters, blocks): x = ResidualBlocks( filters=filter, num_blocks=block, name=f"dark{layer_num}_residual_block", )(x) layer_num += 1 # remaining dark5 layers x = DarknetConvBlock( filters=512, kernel_size=1, strides=1, activation="leaky_relu", name="dark5_conv1", )(x) x = DarknetConvBlock( filters=1024, kernel_size=3, strides=1, activation="leaky_relu", name="dark5_conv2", )(x) x = SpatialPyramidPoolingBottleneck( 512, activation="leaky_relu", name="dark5_spp" )(x) x = DarknetConvBlock( filters=1024, kernel_size=3, strides=1, activation="leaky_relu", name="dark5_conv3", )(x) x = DarknetConvBlock( filters=512, kernel_size=1, strides=1, activation="leaky_relu", name="dark5_conv4", )(x) if include_top: x = layers.GlobalAveragePooling2D(name="avg_pool")(x) x = layers.Dense( num_classes, activation=classifier_activation, name="predictions", )(x) elif pooling == "avg": x = layers.GlobalAveragePooling2D(name="avg_pool")(x) elif pooling == "max": x = layers.GlobalMaxPooling2D(name="max_pool")(x) super().__init__(inputs=inputs, outputs=x, name=name, **kwargs) if weights is not None: self.load_weights(weights) self.blocks = blocks self.include_rescaling = include_rescaling self.include_top = include_top self.num_classes = num_classes self.input_tensor = input_tensor self.pooling = pooling self.classifier_activation = classifier_activation def get_config(self): return { "blocks": self.blocks, "include_rescaling": self.include_rescaling, "include_top": self.include_top, "num_classes": self.num_classes, "input_shape": self.input_shape[1:], "input_tensor": self.input_tensor, "pooling": self.pooling, "classifier_activation": self.classifier_activation, "name": self.name, "trainable": self.trainable, } @classmethod def from_config(cls, config): return cls(**config) def DarkNet21( *, include_rescaling, include_top, num_classes=None, weights=None, input_shape=(None, None, 3), input_tensor=None, pooling=None, name="DarkNet21", **kwargs, ): return DarkNet( [1, 2, 2, 1], include_rescaling=include_rescaling, include_top=include_top, num_classes=num_classes, weights=parse_weights(weights, include_top, "darknet"), input_shape=input_shape, input_tensor=input_tensor, pooling=pooling, name=name, **kwargs, ) def DarkNet53( *, include_rescaling, include_top, num_classes=None, weights=None, input_shape=(None, None, 3), input_tensor=None, pooling=None, name="DarkNet53", **kwargs, ): return DarkNet( [2, 8, 8, 4], include_rescaling=include_rescaling, include_top=include_top, num_classes=num_classes, weights=parse_weights(weights, include_top, "darknet53"), input_shape=input_shape, input_tensor=input_tensor, pooling=pooling, name=name, **kwargs, ) setattr(DarkNet21, "__doc__", BASE_DOCSTRING.format(name="DarkNet21")) setattr(DarkNet53, "__doc__", BASE_DOCSTRING.format(name="DarkNet53"))
keras-cv/keras_cv/models/legacy/darknet.py/0
{ "file_path": "keras-cv/keras_cv/models/legacy/darknet.py", "repo_id": "keras-cv", "token_count": 4843 }
62
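A minimal usage sketch for the `DarkNet21`/`DarkNet53` constructors defined above. The import path is an assumption inferred from this record's file path (`keras_cv/models/legacy/darknet.py`), and the argument values are illustrative only.

```python
from keras_cv.models.legacy.darknet import DarkNet53  # assumed import path

# Classifier variant: `num_classes` is required because include_top=True.
model = DarkNet53(
    include_rescaling=True,   # raw [0, 255] inputs are rescaled by 1/255
    include_top=True,
    num_classes=10,
    input_shape=(224, 224, 3),
)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")

# Feature-extractor variant: drop the top and pool to a 2D feature tensor.
backbone = DarkNet53(
    include_rescaling=True,
    include_top=False,
    pooling="avg",
    input_shape=(224, 224, 3),
)
```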
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytest import tensorflow as tf from absl.testing import parameterized import keras_cv from keras_cv.backend import config from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.backbones.test_backbone_presets import ( test_backbone_presets, ) from keras_cv.models.object_detection.__test_utils__ import ( _create_bounding_box_dataset, ) from keras_cv.models.object_detection.retinanet import RetinaNetLabelEncoder from keras_cv.tests.test_case import TestCase class RetinaNetTest(TestCase): def test_retinanet_construction(self): retinanet = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.ResNet18V2Backbone(), ) retinanet.compile( classification_loss="focal", box_loss="smoothl1", optimizer="adam", ) # TODO(lukewood) uncomment when using keras_cv.models.ResNet18 # self.assertIsNotNone(retinanet.backbone.get_layer(name="rescaling")) # TODO(lukewood): test compile with the FocalLoss class def test_retinanet_recompilation_without_metrics(self): retinanet = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.ResNet18V2Backbone(), ) retinanet.compile( classification_loss="focal", box_loss="smoothl1", optimizer="adam", metrics=[ keras_cv.metrics.BoxCOCOMetrics( bounding_box_format="center_xywh", evaluate_freq=20 ) ], ) self.assertIsNotNone(retinanet._user_metrics) retinanet.compile( classification_loss="focal", box_loss="smoothl1", optimizer="adam", metrics=None, ) self.assertIsNone(retinanet._user_metrics) @pytest.mark.large # Fit is slow, so mark these large. 
def test_retinanet_call(self): retinanet = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.ResNet18V2Backbone(), ) images = np.random.uniform(size=(2, 512, 512, 3)) _ = retinanet(images) _ = retinanet.predict(images) def test_wrong_logits(self): retinanet = keras_cv.models.RetinaNet( num_classes=2, bounding_box_format="xywh", backbone=keras_cv.models.ResNet18V2Backbone(), ) with self.assertRaisesRegex( ValueError, "from_logits", ): retinanet.compile( optimizer=keras.optimizers.SGD(learning_rate=0.25), classification_loss=keras_cv.losses.FocalLoss( from_logits=False, reduction="none" ), box_loss=keras_cv.losses.SmoothL1Loss( l1_cutoff=1.0, reduction="none" ), ) def test_weights_contained_in_trainable_variables(self): bounding_box_format = "xywh" retinanet = keras_cv.models.RetinaNet( num_classes=2, bounding_box_format=bounding_box_format, backbone=keras_cv.models.ResNet18V2Backbone(), ) retinanet.backbone.trainable = False retinanet.compile( optimizer=keras.optimizers.Adam(), classification_loss=keras_cv.losses.FocalLoss( from_logits=True, reduction="none" ), box_loss=keras_cv.losses.SmoothL1Loss( l1_cutoff=1.0, reduction="none" ), ) xs, ys = _create_bounding_box_dataset(bounding_box_format) # call once _ = retinanet(xs) self.assertEqual(len(retinanet.trainable_variables), 32) @pytest.mark.large # Fit is slow, so mark these large. def test_no_nans(self): retina_net = keras_cv.models.RetinaNet( num_classes=2, bounding_box_format="xywh", backbone=keras_cv.models.CSPDarkNetTinyBackbone(), ) retina_net.compile( optimizer=keras.optimizers.Adam(), classification_loss="focal", box_loss="smoothl1", ) # only a -1 box xs = np.ones((1, 512, 512, 3), "float32") ys = { "classes": np.array([[-1]], "float32"), "boxes": np.array([[[0, 0, 0, 0]]], "float32"), } ds = tf.data.Dataset.from_tensor_slices((xs, ys)) ds = ds.repeat(2) ds = ds.batch(2, drop_remainder=True) retina_net.fit(ds, epochs=1) weights = retina_net.get_weights() for weight in weights: self.assertFalse(ops.any(ops.isnan(weight))) @pytest.mark.large # Fit is slow, so mark these large. def test_weights_change(self): bounding_box_format = "xywh" retinanet = keras_cv.models.RetinaNet( num_classes=2, bounding_box_format=bounding_box_format, backbone=keras_cv.models.CSPDarkNetTinyBackbone(), ) retinanet.compile( optimizer=keras.optimizers.Adam(), classification_loss=keras_cv.losses.FocalLoss( from_logits=True, reduction="sum" ), box_loss=keras_cv.losses.SmoothL1Loss( l1_cutoff=1.0, reduction="sum" ), ) ds = _create_bounding_box_dataset( bounding_box_format, use_dictionary_box_format=True ) # call once _ = retinanet(ops.ones((1, 512, 512, 3))) original_fpn_weights = retinanet.feature_pyramid.get_weights() original_box_head_weights = retinanet.box_head.get_weights() original_classification_head_weights = ( retinanet.classification_head.get_weights() ) retinanet.fit(ds, epochs=1) fpn_after_fit = retinanet.feature_pyramid.get_weights() box_head_after_fit_weights = retinanet.box_head.get_weights() classification_head_after_fit_weights = ( retinanet.classification_head.get_weights() ) for w1, w2 in zip( original_classification_head_weights, classification_head_after_fit_weights, ): self.assertNotAllClose(w1, w2) for w1, w2 in zip( original_box_head_weights, box_head_after_fit_weights ): self.assertNotAllClose(w1, w2) for w1, w2 in zip(original_fpn_weights, fpn_after_fit): self.assertNotAllClose(w1, w2) @pytest.mark.large # Saving is slow, so mark these large. 
def test_saved_model(self): model = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.CSPDarkNetTinyBackbone(), ) input_batch = ops.ones(shape=(2, 224, 224, 3)) model_output = model(input_batch) save_path = os.path.join(self.get_temp_dir(), "retinanet.keras") model.save(save_path) restored_model = keras.models.load_model(save_path) # Check we got the real object back. self.assertIsInstance(restored_model, keras_cv.models.RetinaNet) # Check that output matches. restored_output = restored_model(input_batch) self.assertAllClose( tf.nest.map_structure(ops.convert_to_numpy, model_output), tf.nest.map_structure(ops.convert_to_numpy, restored_output), ) def test_call_with_custom_label_encoder(self): anchor_generator = keras_cv.models.RetinaNet.default_anchor_generator( "xywh" ) model = keras_cv.models.RetinaNet( num_classes=20, bounding_box_format="xywh", backbone=keras_cv.models.ResNet18V2Backbone(), label_encoder=RetinaNetLabelEncoder( bounding_box_format="xywh", anchor_generator=anchor_generator, box_variance=[0.1, 0.1, 0.2, 0.2], ), ) model(ops.ones(shape=(2, 224, 224, 3))) def test_tf_dataset_data_generator(self): if config.backend() != "tensorflow": pytest.skip("TensorFlow required for `tf.data.Dataset` test.") def data_generator(): image = tf.ones((512, 512, 3), dtype=tf.float32) bounding_boxes = { "boxes": tf.ones((3, 4), dtype=tf.float32), "classes": tf.ones((3,), dtype=tf.float32), } yield {"images": image, "bounding_boxes": bounding_boxes} data = tf.data.Dataset.from_generator( generator=data_generator, output_signature={ "images": tf.TensorSpec(shape=(512, 512, 3), dtype=tf.float32), "bounding_boxes": { "boxes": tf.TensorSpec(shape=(None, 4), dtype=tf.float32), "classes": tf.TensorSpec(shape=(None,), dtype=tf.float32), }, }, ).batch(1) model = keras_cv.models.RetinaNet( num_classes=2, bounding_box_format="xyxy", backbone=keras_cv.models.ResNet50Backbone.from_preset( "resnet50_imagenet", load_weights=False, ), ) model.compile( classification_loss="focal", box_loss="smoothl1", optimizer="adam", jit_compile=False, ) model.fit(data, epochs=1, batch_size=1, steps_per_epoch=1) @pytest.mark.large class RetinaNetSmokeTest(TestCase): @parameterized.named_parameters( *[(preset, preset) for preset in test_backbone_presets] ) def test_backbone_preset(self, preset): model = keras_cv.models.RetinaNet.from_preset( preset, num_classes=20, bounding_box_format="xywh", ) xs, _ = _create_bounding_box_dataset(bounding_box_format="xywh") output = model(xs) # 4 represents number of parameters in a box # 49104 is the number of anchors for a 512x512 image self.assertEqual(output["box"].shape, (xs.shape[0], 49104, 4)) def test_full_preset_weight_loading(self): model = keras_cv.models.RetinaNet.from_preset( "retinanet_resnet50_pascalvoc", bounding_box_format="xywh", ) xs = ops.ones((1, 512, 512, 3)) output = model(xs) expected_box = ops.array( [-1.2427993, 0.05179548, -1.9953268, 0.32456252] ) self.assertAllClose( ops.convert_to_numpy(output["box"][0, 123, :]), expected_box, atol=1e-5, ) expected_class = ops.array( [ -8.387445, -7.891776, -8.14204, -8.117359, -7.2517176, -7.906804, -7.0910635, -8.295824, -6.5567474, -7.086027, -6.3826647, -7.960227, -7.556676, -8.28963, -6.526232, -7.071624, -6.9687414, -6.6398506, -8.598567, -6.484198, ] ) expected_class = ops.reshape(expected_class, (20,)) self.assertAllClose( ops.convert_to_numpy(output["classification"][0, 123]), expected_class, atol=1e-5, )
keras-cv/keras_cv/models/object_detection/retinanet/retinanet_test.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/retinanet/retinanet_test.py", "repo_id": "keras-cv", "token_count": 6368 }
63
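The tests above exercise the RetinaNet workflow in pieces; the sketch below condenses the construction, compilation, and inference calls they use into one place (the 512x512 input size is just the test fixture, not an API requirement).

```python
import numpy as np

import keras_cv

model = keras_cv.models.RetinaNet(
    num_classes=20,
    bounding_box_format="xywh",
    backbone=keras_cv.models.ResNet18V2Backbone(),
)
model.compile(
    classification_loss="focal",
    box_loss="smoothl1",
    optimizer="adam",
)

# Forward pass on a batch of images, as in `test_retinanet_call` above.
images = np.random.uniform(size=(2, 512, 512, 3))
predictions = model(images)
predictions = model.predict(images)
```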
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_cv.models.object_detection.yolox.layers import YoloXLabelEncoder from keras_cv.tests.test_case import TestCase class YoloXLabelEncoderTest(TestCase): def test_ragged_images_exception(self): img1 = tf.random.uniform((10, 11, 3)) img2 = tf.random.uniform((9, 14, 3)) img3 = tf.random.uniform((7, 12, 3)) images = tf.ragged.stack([img1, img2, img3]) box_labels = {} box_labels["bounding_boxes"] = tf.random.uniform((3, 4, 4)) box_labels["classes"] = tf.random.uniform( (3, 4), maxval=20, dtype=tf.int32 ) layer = YoloXLabelEncoder() with self.assertRaisesRegexp( ValueError, "method does not support RaggedTensor inputs for the `images` " "argument.", ): layer(images, box_labels) def test_ragged_labels(self): images = tf.random.uniform((3, 12, 12, 3)) box_labels = {} box1 = tf.random.uniform((11, 4)) class1 = tf.random.uniform([11], maxval=20, dtype=tf.int32) box2 = tf.random.uniform((14, 4)) class2 = tf.random.uniform([14], maxval=20, dtype=tf.int32) box3 = tf.random.uniform((12, 4)) class3 = tf.random.uniform([12], maxval=20, dtype=tf.int32) box_labels["boxes"] = tf.ragged.stack([box1, box2, box3]) box_labels["classes"] = tf.ragged.stack([class1, class2, class3]) layer = YoloXLabelEncoder() encoded_boxes, _ = layer(images, box_labels) self.assertEqual(encoded_boxes.shape, (3, 14, 4)) def test_one_hot_classes_exception(self): images = tf.random.uniform((3, 12, 12, 3)) box_labels = {} box1 = tf.random.uniform((11, 4)) class1 = tf.random.uniform([11], maxval=20, dtype=tf.int32) class1 = tf.one_hot(class1, 20) box2 = tf.random.uniform((14, 4)) class2 = tf.random.uniform([14], maxval=20, dtype=tf.int32) class2 = tf.one_hot(class2, 20) box3 = tf.random.uniform((12, 4)) class3 = tf.random.uniform([12], maxval=20, dtype=tf.int32) class3 = tf.one_hot(class3, 20) box_labels["boxes"] = tf.ragged.stack([box1, box2, box3]) box_labels["classes"] = tf.ragged.stack([class1, class2, class3]) layer = YoloXLabelEncoder() with self.assertRaises(ValueError): layer(images, box_labels)
keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder_test.py/0
{ "file_path": "keras-cv/keras_cv/models/object_detection/yolox/layers/yolox_label_encoder_test.py", "repo_id": "keras-cv", "token_count": 1321 }
64
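As a companion to the tests above, this is the calling convention `YoloXLabelEncoder` accepts: dense images plus ragged per-image `"boxes"` and sparse integer `"classes"` (ragged images and one-hot classes raise errors). The image size and box counts here are illustrative.

```python
import tensorflow as tf

from keras_cv.models.object_detection.yolox.layers import YoloXLabelEncoder

images = tf.random.uniform((2, 128, 128, 3))
boxes = tf.ragged.stack(
    [tf.random.uniform((5, 4)), tf.random.uniform((3, 4))]
)
classes = tf.ragged.stack(
    [
        tf.random.uniform([5], maxval=20, dtype=tf.int32),
        tf.random.uniform([3], maxval=20, dtype=tf.int32),
    ]
)

layer = YoloXLabelEncoder()
# Boxes are padded to the longest label count in the batch: shape (2, 5, 4).
encoded_boxes, _ = layer(images, {"boxes": boxes, "classes": classes})
```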
# Copyright 2023 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_cv.api_export import keras_cv_export from keras_cv.backend import keras from keras_cv.backend import ops from keras_cv.models.segmentation.segment_anything.sam_layers import ( MultiHeadAttentionWithDownsampling, ) from keras_cv.models.segmentation.segment_anything.sam_layers import ( TwoWayMultiHeadAttention, ) @keras_cv_export("keras_cv.models.TwoWayTransformer", package="keras_cv.models") class TwoWayTransformer(keras.layers.Layer): """A two-way cross-attention transformer decoder. A transformer decoder that attends to an input image using queries whose positional embedding is supplied. The transformer decoder design is shown in [1]_. Each decoder layer performs 4 steps: (1) self-attention on the tokens, (2) cross-attention from tokens (as queries) to the image embedding, (3) a point-wise MLP updates each token, and (4) cross-attention from the image embedding (as queries) to tokens. This last step updates the image embedding with prompt information. Each self/cross-attention and MLP has a residual connection and layer normalization. To ensure the decoder has access to critical geometric information the positional encodings are added to the image embedding whenever they participate in an attention layer. Additionally, the entire original prompt tokens (including their positional encodings) are re-added to the updated tokens whenever they participate in an attention layer. This allows for a strong dependence on both the prompt token's geometric location and type. Args: depth (int, optional): The depth of the attention blocks (the number of attention blocks to use). Defaults to `2`. embed_dim (int, optional): The number of features of the input image and point embeddings. Defaults to `256`. num_heads (int, optional): Number of heads to use in the attention layers. Defaults to `8`. mlp_dim (int, optional): The number of units in the hidden layer of the MLP block used in the attention layers. Defaults to `2048`. activation (str, optional): The activation of the MLP block's output layer used in the attention layers. Defaults to `"relu"`. attention_downsample_rate (int, optional): The downsample rate of the attention layers. Defaults to `2`. 
References: - [Segment Anything paper](https://arxiv.org/abs/2304.02643) - [Segment Anything GitHub](https://github.com/facebookresearch/segment-anything) """ # noqa: E501 def __init__( self, *, depth=2, embed_dim=256, num_heads=8, mlp_dim=2048, activation="relu", attention_downsample_rate=2, **kwargs, ): super().__init__(**kwargs) self.depth = depth self.embed_dim = embed_dim self.num_heads = num_heads self.mlp_dim = mlp_dim self.activation = activation self.attention_downsample_rate = attention_downsample_rate self.layers = [] for i in range(depth): self.layers.append( TwoWayMultiHeadAttention( num_heads=num_heads, key_dim=embed_dim // num_heads, mlp_dim=mlp_dim, skip_first_layer_pe=(i == 0), attention_downsample_rate=attention_downsample_rate, activation=activation, ) ) self.final_attention_token_to_image = ( MultiHeadAttentionWithDownsampling( num_heads=num_heads, key_dim=embed_dim // num_heads, downsample_rate=attention_downsample_rate, ) ) self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5) def build(self, input_shape=None): for layer in self.layers: layer.build() self.final_attention_token_to_image.build() self.final_layer_norm.build([None, None, self.embed_dim]) self.built = True def call(self, image_embedding, image_pe, point_embedding): shape = ops.shape(image_embedding) B, H, W, C = shape[0], shape[1], shape[2], shape[3] image_embedding = ops.reshape(image_embedding, (B, H * W, C)) shape = ops.shape(image_pe) B, H, W, C = shape[0], shape[1], shape[2], shape[3] image_pe = ops.reshape(image_pe, (B, H * W, C)) queries = point_embedding keys = image_embedding for layer in self.layers: queries, keys = layer( queries=queries, keys=keys, query_pe=point_embedding, key_pe=image_pe, ) queries_with_pe = queries + point_embedding keys_with_pe = keys + image_pe attention_map = self.final_attention_token_to_image( query=queries_with_pe, key=keys_with_pe, value=keys ) queries = queries + attention_map queries = self.final_layer_norm(queries) return queries, keys def get_config(self): config = super().get_config() config.update( { "depth": self.depth, "embed_dim": self.embed_dim, "num_heads": self.num_heads, "mlp_dim": self.mlp_dim, "activation": self.activation, "attention_downsample_rate": self.attention_downsample_rate, } ) return config
keras-cv/keras_cv/models/segmentation/segment_anything/sam_transformer.py/0
{ "file_path": "keras-cv/keras_cv/models/segmentation/segment_anything/sam_transformer.py", "repo_id": "keras-cv", "token_count": 2563 }
65
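An illustrative call into the decoder defined above, assuming it can be imported from this record's file path. The 64x64 spatial size and the six prompt tokens are arbitrary; only the channel count must match `embed_dim` (256 by default).

```python
from keras_cv.backend import ops
from keras_cv.models.segmentation.segment_anything.sam_transformer import (  # assumed path
    TwoWayTransformer,
)

decoder = TwoWayTransformer()  # depth=2, embed_dim=256, num_heads=8, mlp_dim=2048

image_embedding = ops.ones((1, 64, 64, 256))  # (B, H, W, C) image features
image_pe = ops.ones((1, 64, 64, 256))         # positional encoding, same shape
point_embedding = ops.ones((1, 6, 256))       # (B, num_prompt_tokens, C)

queries, keys = decoder(image_embedding, image_pe, point_embedding)
# queries: refined prompt tokens, shape (1, 6, 256)
# keys: updated, flattened image embedding, shape (1, 64 * 64, 256)
```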
# Copyright 2022 The KerasCV Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility functions for models""" from keras_cv.backend import keras from keras_cv.backend.config import keras_3 def get_tensor_input_name(tensor): if keras_3(): return tensor._keras_history.operation.name else: return tensor.node.layer.name def parse_model_inputs(input_shape, input_tensor, **kwargs): if input_tensor is None: return keras.layers.Input(shape=input_shape, **kwargs) else: if not keras.backend.is_keras_tensor(input_tensor): return keras.layers.Input( tensor=input_tensor, shape=input_shape, **kwargs ) else: return input_tensor def correct_pad_downsample(inputs, kernel_size): """Returns a tuple for zero-padding for 2D convolution with downsampling. Args: inputs: Input tensor. kernel_size: An integer or tuple/list of 2 integers. Returns: A tuple. """ img_dim = 1 input_size = inputs.shape[img_dim : (img_dim + 2)] if isinstance(kernel_size, int): kernel_size = (kernel_size, kernel_size) if input_size[0] is None: adjust = (1, 1) else: adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2) correct = (kernel_size[0] // 2, kernel_size[1] // 2) return ( (correct[0] - adjust[0], correct[0]), (correct[1] - adjust[1], correct[1]), )
keras-cv/keras_cv/models/utils.py/0
{ "file_path": "keras-cv/keras_cv/models/utils.py", "repo_id": "keras-cv", "token_count": 776 }
66
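A short sketch of how the two helpers above are typically combined when a backbone downsamples with explicit padding. The import path is inferred from this record's file path, and the 224x224 shape is only an example.

```python
from keras_cv.backend import keras
from keras_cv.models.utils import correct_pad_downsample, parse_model_inputs  # assumed path

inputs = parse_model_inputs((224, 224, 3), input_tensor=None)

# For an even 224x224 input and a 3x3 kernel this evaluates to
# ((0, 1), (0, 1)): pad one extra row/column on the bottom/right only.
padding = correct_pad_downsample(inputs, kernel_size=3)

x = keras.layers.ZeroPadding2D(padding=padding)(inputs)
x = keras.layers.Conv2D(32, 3, strides=2, padding="valid")(x)  # 224 -> 112
```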
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from tensorflow import keras from keras_cv.utils.train import convert_inputs_to_tf_dataset class ContrastiveTrainer(keras.Model): """Creates a self-supervised contrastive trainer for a model. Args: encoder: a `keras.Model` to be pre-trained. In most cases, this encoder should not include a top dense layer. augmenter: a preprocessing layer to randomly augment input images for contrastive learning, or a tuple of two separate augmenters for the two sides of the contrastive pipeline. projector: a projection model for contrastive training, or a tuple of two separate projectors for the two sides of the contrastive pipeline. This shrinks the feature map produced by the encoder, and is usually a 1 or 2-layer dense MLP. probe: An optional Keras layer or model which will be trained against class labels at train-time using the encoder output as input. Note that this should be specified iff training with labeled images. This predicts class labels based on the feature map produced by the encoder and is usually a 1 or 2-layer dense MLP. Returns: A `keras.Model` instance. Usage: ```python encoder = keras.Sequential( [ DenseNet121Backbone(include_rescaling=False), layers.GlobalAveragePooling2D(name="avg_pool"), ], ) augmenter = keras_cv.layers.preprocessing.RandomFlip() projector = keras.layers.Dense(64) probe = keras_cv.training.ContrastiveTrainer.linear_probe(num_classes=10) trainer = keras_cv.training.ContrastiveTrainer( encoder=encoder, augmenter=augmenter, projector=projector, probe=probe ) trainer.compile( encoder_optimizer=keras.optimizers.Adam(), encoder_loss=keras_cv.losses.SimCLRLoss(temperature=0.5), probe_optimizer=keras.optimizers.Adam(), probe_loss=keras.losses.CategoricalCrossentropy(from_logits=True), probe_metrics=[keras.metrics.CategoricalAccuracy(name="probe_accuracy")] ) (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() y_train = keras.utils.to_categorical(y_train, 10) trainer.fit(x_train, y_train) ``` """ def __init__( self, encoder, augmenter, projector, probe=None, ): super().__init__() if encoder.output.shape.rank != 2: raise ValueError( f"`encoder` must have a flattened output. Expected " f"rank(encoder.output.shape)=2, got " f"encoder.output.shape={encoder.output.shape}" ) if type(augmenter) is tuple and len(augmenter) != 2: raise ValueError( "`augmenter` must be either a single augmenter or a tuple of " "exactly 2 augmenters." ) if type(projector) is tuple and len(projector) != 2: raise ValueError( "`projector` must be either a single augmenter or a tuple of " "exactly 2 augmenters." ) self.augmenters = ( augmenter if type(augmenter) is tuple else (augmenter, augmenter) ) self.encoder = encoder # Check to see if the projector is being shared or are distinct. 
self._is_shared_projector = ( True if not isinstance(projector, tuple) else False ) self.projectors = ( projector if type(projector) is tuple else (projector, projector) ) self.probe = probe self.loss_metric = keras.metrics.Mean(name="loss") if probe is not None: self.probe_loss_metric = keras.metrics.Mean(name="probe_loss") self.probe_metrics = [] def compile( self, encoder_loss, encoder_optimizer, encoder_metrics=None, probe_optimizer=None, probe_loss=None, probe_metrics=None, **kwargs, ): super().compile( loss=encoder_loss, optimizer=encoder_optimizer, metrics=encoder_metrics, **kwargs, ) if self.probe and not probe_optimizer: raise ValueError( "`probe_optimizer` must be specified when a probe is included." ) if self.probe and not probe_loss: raise ValueError( "`probe_loss` must be specified when a probe is included." ) if "loss" in kwargs: raise ValueError( "`loss` parameter in ContrastiveTrainer.compile is ambiguous. " "Please specify `encoder_loss` or `probe_loss`." ) if "optimizer" in kwargs: raise ValueError( "`optimizer` parameter in ContrastiveTrainer.compile is " "ambiguous. Please specify `encoder_optimizer` or " "`probe_optimizer`." ) if "metrics" in kwargs: raise ValueError( "`metrics` parameter in ContrastiveTrainer.compile is " "ambiguous. Please specify `encoder_metrics` or " "`probe_metrics`." ) if self.probe: self.probe_loss = probe_loss self.probe_optimizer = probe_optimizer self.probe_metrics = probe_metrics or [] @property def metrics(self): metrics = [ self.loss_metric, ] if self.probe: metrics += [self.probe_loss_metric] metrics += self.probe_metrics return super().metrics + metrics def fit( self, x=None, y=None, sample_weight=None, batch_size=None, **kwargs, ): dataset = convert_inputs_to_tf_dataset( x=x, y=y, sample_weight=sample_weight, batch_size=batch_size ) dataset = dataset.map( self.run_augmenters, num_parallel_calls=tf.data.AUTOTUNE ) dataset = dataset.prefetch(tf.data.AUTOTUNE) return super().fit(x=dataset, **kwargs) def run_augmenters(self, x, y=None): inputs = {"images": x} if y is not None: inputs["labels"] = y inputs["augmented_images_0"] = self.augmenters[0](x, training=True) inputs["augmented_images_1"] = self.augmenters[1](x, training=True) return inputs def train_step(self, data): images = data["images"] labels = data["labels"] if "labels" in data else None augmented_images_0 = data["augmented_images_0"] augmented_images_1 = data["augmented_images_1"] with tf.GradientTape() as tape: features_0 = self.encoder(augmented_images_0, training=True) features_1 = self.encoder(augmented_images_1, training=True) projections_0 = self.projectors[0](features_0, training=True) projections_1 = self.projectors[1](features_1, training=True) loss = self.compiled_loss( projections_0, projections_1, regularization_losses=self.encoder.losses, ) # If the projector is shared, then take the trainable weights of just # one of the projectors in the tuple. If not, use both the projectors. 
projector_weights = ( self.projectors[0].trainable_weights if self._is_shared_projector else self.projectors[0].trainable_weights + self.projectors[1].trainable_weights ) gradients = tape.gradient( loss, self.encoder.trainable_weights + projector_weights, ) self.optimizer.apply_gradients( zip( gradients, self.encoder.trainable_weights + projector_weights, ) ) self.loss_metric.update_state(loss) if self.probe: if labels is None: raise ValueError( "Targets must be provided when a probe is specified" ) with tf.GradientTape() as tape: features = tf.stop_gradient( self.encoder(images, training=False) ) class_logits = self.probe(features, training=True) probe_loss = self.probe_loss(labels, class_logits) gradients = tape.gradient(probe_loss, self.probe.trainable_weights) self.probe_optimizer.apply_gradients( zip(gradients, self.probe.trainable_weights) ) self.probe_loss_metric.update_state(probe_loss) for metric in self.probe_metrics: metric.update_state(labels, class_logits) return {metric.name: metric.result() for metric in self.metrics} def call(self, inputs): raise NotImplementedError( "ContrastiveTrainer.call() is not implemented - " "please call your model directly." ) @staticmethod def linear_probe(num_classes, **kwargs): return keras.Sequential(keras.layers.Dense(num_classes), **kwargs)
keras-cv/keras_cv/training/contrastive/contrastive_trainer.py/0
{ "file_path": "keras-cv/keras_cv/training/contrastive/contrastive_trainer.py", "repo_id": "keras-cv", "token_count": 4543 }
67
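The class docstring above shows the shared-augmenter/shared-projector setup; as it also explains, both arguments accept a 2-tuple when the two contrastive branches should use distinct pipelines. A brief sketch of that variant (layer choices and sizes are arbitrary; compilation and fitting proceed exactly as in the docstring example):

```python
from tensorflow import keras

import keras_cv

encoder = keras.Sequential(
    [
        keras_cv.models.DenseNet121Backbone(include_rescaling=False),
        keras.layers.GlobalAveragePooling2D(),
    ]
)

trainer = keras_cv.training.ContrastiveTrainer(
    encoder=encoder,
    # Different augmentation on each branch.
    augmenter=(
        keras_cv.layers.RandomFlip(mode="horizontal"),
        keras_cv.layers.RandomFlip(mode="vertical"),
    ),
    # Unshared projectors: one per branch, trained jointly.
    projector=(keras.layers.Dense(64), keras.layers.Dense(64)),
)
```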
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from keras_cv.backend import ops from keras_cv.tests.test_case import TestCase from keras_cv.utils.target_gather import _target_gather class TargetGatherTest(TestCase): def test_target_gather_boxes_batched(self): target_boxes = np.array( [[0, 0, 5, 5], [0, 5, 5, 10], [5, 0, 10, 5], [5, 5, 10, 10]] ) target_boxes = ops.expand_dims(target_boxes, axis=0) indices = np.array([[0, 2]], dtype="int32") expected_boxes = np.array([[0, 0, 5, 5], [5, 0, 10, 5]]) expected_boxes = ops.expand_dims(expected_boxes, axis=0) res = _target_gather(target_boxes, indices) self.assertAllClose(expected_boxes, res) def test_target_gather_boxes_unbatched(self): target_boxes = np.array( [[0, 0, 5, 5], [0, 5, 5, 10], [5, 0, 10, 5], [5, 5, 10, 10]], "int32", ) indices = np.array([0, 2], dtype="int32") expected_boxes = np.array([[0, 0, 5, 5], [5, 0, 10, 5]]) res = _target_gather(target_boxes, indices) self.assertAllClose(expected_boxes, res) def test_target_gather_classes_batched(self): target_classes = np.array([[1, 2, 3, 4]]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([[0, 2]], dtype="int32") expected_classes = np.array([[1, 3]]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices) self.assertAllClose(expected_classes, res) def test_target_gather_classes_unbatched(self): target_classes = np.array([1, 2, 3, 4]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([0, 2], dtype="int32") expected_classes = np.array([1, 3]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices) self.assertAllClose(expected_classes, res) def test_target_gather_classes_batched_with_mask(self): target_classes = np.array([[1, 2, 3, 4]]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([[0, 2]], dtype="int32") masks = np.array(([[False, True]])) masks = ops.expand_dims(masks, axis=-1) # the second element is masked expected_classes = np.array([[1, 0]]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices, masks) self.assertAllClose(expected_classes, res) def test_target_gather_classes_batched_with_mask_val(self): target_classes = np.array([[1, 2, 3, 4]]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([[0, 2]], dtype="int32") masks = np.array(([[False, True]])) masks = ops.expand_dims(masks, axis=-1) # the second element is masked expected_classes = np.array([[1, -1]]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices, masks, -1) self.assertAllClose(expected_classes, res) def test_target_gather_classes_unbatched_with_mask(self): target_classes = np.array([1, 2, 3, 4]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([0, 2], dtype="int32") masks = np.array([False, True]) masks = ops.expand_dims(masks, 
axis=-1) expected_classes = np.array([1, 0]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices, masks) self.assertAllClose(expected_classes, res) def test_target_gather_with_empty_targets(self): target_classes = np.array([]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([0, 2], dtype="int32") # return all 0s since input is empty expected_classes = np.array([0, 0]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices) self.assertAllClose(expected_classes, res) def test_target_gather_classes_multi_batch(self): target_classes = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) target_classes = ops.expand_dims(target_classes, axis=-1) indices = np.array([[0, 2], [1, 3]], dtype="int32") expected_classes = np.array([[1, 3], [6, 8]]) expected_classes = ops.expand_dims(expected_classes, axis=-1) res = _target_gather(target_classes, indices) self.assertAllClose(expected_classes, res) def test_target_gather_invalid_rank(self): targets = np.random.normal(size=[32, 2, 2, 2]) indices = np.array([0, 1], dtype="int32") with self.assertRaisesRegex(ValueError, "larger than 3"): _ = _target_gather(targets, indices)
keras-cv/keras_cv/utils/target_gather_test.py/0
{ "file_path": "keras-cv/keras_cv/utils/target_gather_test.py", "repo_id": "keras-cv", "token_count": 2338 }
68
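The behaviour covered by the tests above, condensed into one small example (argument order follows the tests; the mask value is passed positionally):

```python
import numpy as np

from keras_cv.utils.target_gather import _target_gather

targets = np.array([[1.0], [2.0], [3.0], [4.0]], "float32")  # (num_targets, 1)
indices = np.array([0, 2], dtype="int32")
masks = np.array([[False], [True]])  # the second gathered row is masked

gathered = _target_gather(targets, indices, masks, -1.0)
# -> [[1.0], [-1.0]]: row 0 is kept, row 2 is replaced by the mask value
```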
# データセット ## CIFAR10 画像分類 10のクラスにラベル付けされた,50,000枚の32x32訓練用カラー画像,10,000枚のテスト用画像のデータセット. ### 使い方: ```python from keras.datasets import cifar10 (x_train, y_train), (x_test, y_test) = cifar10.load_data() ``` - __戻り値__: - 2つのタプル: - __x_train, x_test__: shape (num_samples, 3, 32, 32)または(num_samples, 32, 32, 3)のRGB画像データのuint8配列です.これはバックエンド設定の`image_data_format`が`channels_first`と`channels_last`のいずれなのかによって決まります. - __y_train, y_test__: shape (num_samples,) のカテゴリラベル(0-9の範囲の整数)のuint8配列. --- ## CIFAR100 画像分類 100のクラスにラベル付けされた,50,000枚の32x32訓練用カラー画像,10,000枚のテスト用画像のデータセット. ### 使い方: ```python from keras.datasets import cifar100 (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode='fine') ``` - __戻り値__: - 2つのタプル: - __x_train, x_test__: shape (num_samples, 3, 32, 32)または(num_samples, 32, 32, 3)のRGB画像データのuint8配列です.これはバックエンド設定の`image_data_format`が`channels_first`と`channels_last`のいずれなのかによって決まります. - __y_train, y_test__: shape (num_samples,) のカテゴリラベルのuint8配列. - __引数__: - __label_mode__: "fine" または "coarse". --- ## IMDB映画レビュー感情分類 感情 (肯定/否定) のラベル付けをされた,25,000のIMDB映画レビューのデータセット.レビューは前処理済みで,各レビューは単語のインデックス(整数)の[シーケンス](preprocessing/sequence.md)としてエンコードされています.便宜上,単語はデータセットにおいての出現頻度によってインデックスされています.そのため例えば,整数"3"はデータの中で3番目に頻度が多い単語にエンコードされます.これによって"上位20個の頻出語を除いた,上位10,000個の頻出語についてのみ考える"というようなフィルタリング作業を高速に行うことができます. 慣例として,"0"は特定の単語を表さずに,未知語にエンコードされます. ### 使い方: ```python from keras.datasets import imdb (x_train, y_train), (x_test, y_test) = imdb.load_data(path="imdb.npz", num_words=None, skip_top=0, maxlen=None, seed=113, start_char=1, oov_char=2, index_from=3) ``` - __戻り値__: - 2つのタプル: - __x_train, x_test__: シーケンスのリスト,リストはインデックス(整数).引数num_wordsに具体的な整数が与えられた場合,取り得るインデックスの最大値はnum_words-1となる.引数maxlenに具体的な数値が与えられた場合,シーケンスの最大長はmaxlenとなる. - __y_train, y_test__: 整数ラベル(1または0)のリスト. - __引数__: - __path__: データをローカルに持っている場合 (`'~/.keras/datasets/' + path`),cPickleフォーマットではこの位置にダウンロードされます. - __num_words__: 整数 または None. 指定された数値だけ上位の頻出語が対象となります.指定された数値より下位の頻出語はシーケンスデータにおいて`oov_char`の値で表現します. - __skip_top__: 整数.指定された数値だけ上位の頻出語が無視されます(シーケンスデータにおいて`oov_char`の値で表現します). - __maxlen__: 整数.シーケンスの最大長.最大長より長いシーケンスは切り捨てられます. - __seed__: 整数.再現可能なデータシャッフルのためのシード. - __start_char__: この文字が系列の開始記号として扱われます. 0は通常パディング用の文字であるため,1以上からセットしてください. - __oov_char__: 整数.`num_words`か`skip_top`によって削除された単語をこの値で置換します. - __index_from__: 単語のインデックスはこのインデックス以上の数値が与えられます. --- ## ロイターのニュースワイヤー トピックス分類 46のトピックにラベル付けされた,11,228個のロイターのニュースワイヤーのデータセット.IMDBデータセットと同様,各ワイヤーが一連の単語インデックスとしてエンコードされます(同じ慣例に基づく). ### 使い方: ```python from keras.datasets import reuters (x_train, y_train), (x_test, y_test) = reuters.load_data(path="reuters.npz", num_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2, index_from=3) ``` 仕様はIMDBデータセットのものに加えて,次のパラメータが追加されます: - __test_split__: 浮動小数点数.テストデータとして使用するデータセットの割合. このデータセットはシーケンスをエンコードに使われている単語インデックスを利用できます. ```python word_index = reuters.get_word_index(path="reuters_word_index.npz") ``` - __戻り値__: キーが単語(文字列),値がインデックス(整数)の辞書.例,`word_index["giraffe"]`は`1234`が返ります. - __引数__: - __path__: データをローカルに持っていない場合 (`'~/.keras/datasets/' + path`) ,この位置にダウンロードされます. --- ## MNIST 手書き数字データベース 60,000枚の28x28,10個の数字の白黒画像と10,000枚のテスト用画像データセット. ### 使い方: ```python from keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() ``` - __戻り値__: - 2つのタプル: - __x_train, x_test__: shape (num_samples, 28, 28) の白黒画像データのuint8配列. - __y_train, y_test__: shape (num_samples,) のカテゴリラベル(0-9の整数)のuint8配列. - __引数__: - __path__: データをローカルに持っていない場合 (`'~/.keras/datasets/' + path`) ,この位置にダウンロードされます. 
--- ## Fashion-MNIST ファッション記事データベース 60,000枚の28x28,10個のファッションカテゴリの白黒画像と10,000枚のテスト用画像データセット.このデータセットはMNISTの完全な互換品として使えます.クラスラベルは次の通りです: | ラベル | 説明 | | --- | --- | | 0 | Tシャツ/トップス | | 1 | ズボン | | 2 | プルオーバー | | 3 | ドレス | | 4 | コート | | 5 | サンダル | | 6 | シャツ | | 7 | スニーカー | | 8 | バッグ | | 9 | アンクルブーツ | ### 使い方: ```python from keras.datasets import fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() ``` - __戻り値__: - 2つのタプル: - __x_train, x_test__: shape (num_samples, 28, 28) の白黒画像データのuint8配列. - __y_train, y_test__: shape (num_samples,) のラベル(0-9の整数)のuint8配列. --- ## ボストンの住宅価格回帰データセット Carnegie Mellon大学のStatLib ライブラリのデータセット. サンプルは,1970年代後半におけるボストン近郊の異なる地域の住宅に関する13の属性値を含みます. 予測値は,その地域での住宅価格の中央値(単位はk$)です. ### 使い方: ```python from keras.datasets import boston_housing (x_train, y_train), (x_test, y_test) = boston_housing.load_data() ``` - __引数__: - __path__: ローカルに保存するパス (~/.keras/datasets). - __seed__: テストデータに分ける前にデータをシャッフルするためのシード. - __test_split__: テストデータとして使用するデータセットの割合. - __返り値__: Numpy 配列のタプル: `(x_train, y_train), (x_test, y_test)`.
keras-docs-ja/sources/datasets.md/0
{ "file_path": "keras-docs-ja/sources/datasets.md", "repo_id": "keras-docs-ja", "token_count": 4868 }
69
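上記のIMDBデータセットの説明にある `start_char`,`oov_char`,`index_from` の慣例を確認するための簡単な例を以下に示します(デフォルト設定では単語インデックスが3だけずれるため,復元時に補正します.出力内容は一例です).

```python
from keras.datasets import imdb

(x_train, y_train), _ = imdb.load_data(num_words=10000)

# 単語 -> インデックスの辞書を取得し,インデックス -> 単語に反転する
word_index = imdb.get_word_index()
reverse_index = {index + 3: word for word, index in word_index.items()}
# 0, 1, 2 はそれぞれパディング,開始記号,未知語として予約されている
reverse_index.update({0: "<pad>", 1: "<start>", 2: "<unk>"})

# 最初のレビューを単語列に復元する
decoded = " ".join(reverse_index.get(i, "<unk>") for i in x_train[0])
print(decoded[:200])
```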
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L212)</span> ### RNN ```python keras.layers.RNN(cell, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False) ``` Recurrentレイヤーに対する基底クラス. __引数__ - __cell__: RNN cellインスタンス.RNN cellは以下の属性を持つクラスです. - `call(input_at_t, states_at_t)`メソッド,これは`(output_at_t, states_at_t_plus_1)`を返します. cellのメソッド呼び出しはオプションの引数`constants`も使えます. 下記の「外部定数を渡す場合の注意」を参照してください. - `state_size`属性. これは1つの整数(1つの状態)でもよく,その場合はrecurrent stateのサイズになります(これはcellの出力のサイズと同じである必要があります). (1つ状態につき1つのサイズが対応するように)整数やリストやタプルもとれます. この場合は最初のエントリ(`state_size[0]`)がcellの出力のサイズと同じである必要があります. `cell`をRNN cellインスタンスのリストとすることも可能です. この場合,cellはRNNの中で他のcellの後にスタックされているいれば,効率的なstacked RNNが実現されます. - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __go_backwards__: 真理値(デフォルトはFalse).Trueなら,入力系列を逆向きから処理し,逆順の系列を返します. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプル に対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. - __unroll__: 真理値(デフォルトはFalse).Trueなら,ネットワークは展開され, そうでなければシンボリックループが使われます. 展開はよりメモリ集中傾向になりますが,RNNをスピードアップできます. 展開は短い系列にのみ適しています. - __input_dim__: 入力の次元(整数). この引数(または代わりのキーワード引数`input_shape`)は, このレイヤーをモデルの最初のレイヤーとして利用するときに必要です. - __input_length__: 入力系列の長さ. この引数はこのレイヤーの後に`Flatten`から`Dense`レイヤーへ接続する際に必要です (これがないと,denseの出力のshapeを計算できません). Recurrentレイヤーがモデルの最初のレイヤーでなければ, 最初のレイヤーのレベルで入力系列の長さを指定する必要があります (例えば`input_shape`引数を通じて). __入力のshape__ shapeが`(batch_size, timesteps, input_dim)`の3階テンソル. __出力のshape__ - `return_state`の場合:テンソルのリスト.最初のテンソルが出力になります.残りのテンソルは最終状態で,それぞれのshapeは`(batch_size, units)`です. - `return_sequences`の場合:shapeが`(batch_size, timesteps, input_dim)`の3階テンソル. - それ以外の場合:shapeが`(batch_size, input_dim)`の2階テンソル. __マスキング__ このレイヤーはタイムステップの変数を持つ入力データに対するマスキングをサポートします. あなたのデータにマスクを導入するためには, `mask_zero`パラメータに`True`を渡した[Embedding](embeddings.md)レイヤーを利用してください. __RNNで状態管理を利用するときの注意点__ RNNレイヤーを'stateful'にすることができます. これはあるバッチでサンプルに対して計算された状態が次のバッチのサンプルの初期状態として再利用されるという意味です. これは別々の連続したバッチ内のサンプルが一対一対応することを仮定します. 状態管理を可能にするためには: - レイヤーコンストラクタにおいて`stateful=True`を指定してください. - モデルに一定のバッチサイズを指定してください. もしsequentialモデルなら: `batch_input_shape=(...)`を最初のレイヤーに 1つ以上の入力層をもったfunctionalモデルなら: `batch_input_shape=(...)`をモデルのすべての最初のレイヤーに 渡すことで固定系列長のバッチサイズを指定してください. これは*バッチサイズを含む*入力の期待されるshapeです. これは整数のタプルであるべきです,例えば`(32, 10, 100)`. - `fit()`を呼ぶときは,`shuffle=False`を指定してください. モデルの状態を再設定するには,指定したレイヤーもしくはモデル全体で`.reset_states()`を呼び出してください. __RNNの初期状態を指定するときの注意点__ `initial_state`のキーワード引数を渡してRNNを呼び出すことで,内部状態の初期値を指定できます. `initial_state`の値は,RNNの初期値を表現したテンソルかテンソルのリストです. __RNNに外部定数を渡すときの注意__ (`RNN.call`のように)`RNN.__call__`メソッドの`constants`キーワード引数を使うことによって「外部」定数を渡せます. `cell.call`メソッドが`constants`と同じキーワード変数を受け入れる必要があります. そのような定数は,アテンションメカニズムで知られるような,追加の固定入力(時間変動しない)におけるcellの変化の状態として使われます. __例__ ```python # First, let's define a RNN Cell, as a layer subclass. 
class MinimalRNNCell(keras.layers.Layer): def __init__(self, units, **kwargs): self.units = units self.state_size = units super(MinimalRNNCell, self).__init__(**kwargs) def build(self, input_shape): self.kernel = self.add_weight(shape=(input_shape[-1], self.units), initializer='uniform', name='kernel') self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer='uniform', name='recurrent_kernel') self.built = True def call(self, inputs, states): prev_output = states[0] h = K.dot(inputs, self.kernel) output = h + K.dot(prev_output, self.recurrent_kernel) return output, [output] # Let's use this cell in a RNN layer: cell = MinimalRNNCell(32) x = keras.Input((None, 5)) layer = RNN(cell) y = layer(x) # Here's how to use the cell to build a stacked RNN: cells = [MinimalRNNCell(32), MinimalRNNCell(64)] x = keras.Input((None, 5)) layer = RNN(cells) y = layer(x) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L941)</span> ### SimpleRNN ```python keras.layers.SimpleRNN(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False) ``` 出力が入力にフィードバックされる全結合RNN. __引数__ - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません (例."linear" activation: `a(x) = x`). - __use_bias__: 真理値,biasベクトルを使うかどうか. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力 (そのactivation) に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __go_backwards__: 真理値(デフォルトはFalse).Trueなら,入力系列の後ろから処理し,逆順の系列を返します. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプル に対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. - __unroll__: 真理値(デフォルトはFalse).Trueなら,ネットワークは展開され, そうでなければシンボリックループが使われます. 展開はよりメモリ集中傾向になりますが,RNNをスピードアップできます. 展開は短い系列にのみ適しています. 
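上記の SimpleRNN と「RNNで状態管理を利用するときの注意点」に基づく簡単な使用例を以下に示します(ユニット数・バッチサイズ・系列長は説明用の仮の値です).

```python
import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN, Dense

# statefulなRNNでは固定のバッチサイズをbatch_input_shapeで指定する
model = Sequential()
model.add(SimpleRNN(32, stateful=True, batch_input_shape=(8, 10, 16)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')

x = np.random.random((64, 10, 16))
y = np.random.random((64, 1))

# 連続するバッチ間で状態を引き継ぐため,shuffle=False で学習する
model.fit(x, y, batch_size=8, shuffle=False, epochs=1)

# 状態をリセットする
model.reset_states()
```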
---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1465)</span> ### GRU ```python keras.layers.GRU(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False, reset_after=False) ``` ゲートのあるリカレントユニット - Cho et al. 2014. 2つの異なる変種があります.デフォルトは1406.1078v3を基にしたもので,行列の乗算の前に隠れ状態にリセットゲートを適用します.もう1つはオリジナルである1406.1078v1をベースにしているもので,処理の順番が逆です. 2つ目の変種は(GPU限定の)CuDNNGRUに互換があり,CPUでの推論も可能です.結果として`kernel`と`recurrent_kernel`に対して異なるバイアスがあります.`'reset_after'=True`と`recurrent_activation='sigmoid'`を使用してください. __引数__ - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません (例."linear" activation: `a(x) = x`). - __recurrent_activation__: 再帰計算時に使う活性化関数([activations](../activations.md)を参照). - __use_bias__: 真理値,biasベクトルを使うかどうか. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力 (そのactivation) に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. - __implementation__: 実装モードで,1か2.モード1は小さなドット積や加算処理を多数行う構造である一方,モード2は少数の大きな操作をバッチ処理します. これらのモードはハードウェアやアプリケーションによって異なるパフォーマンスプロファイルとなるでしょう. - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __go_backwards__: 真理値(デフォルトはFalse).Trueなら,入力系列の後ろから処理し,逆順の系列を返します. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプル に対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. - __unroll__: 真理値(デフォルトはFalse).Trueなら,ネットワークは展開され, そうでなければシンボリックループが使われます. 展開はよりメモリ集中傾向になりますが,RNNをスピードアップできます. 展開は短い系列にのみ適しています. - __reset_after__: GRUの慣習(行列の乗算の前後のどちらでリセットゲートの適用を行うか).False = "before" (デフォルト), True = "after" (CuDNN互換). 
__参考文献__ - [On the Properties of Neural Machine Translation: Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259) - [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](http://arxiv.org/abs/1412.3555v1) - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1996)</span> ### LSTM ```python keras.layers.LSTM(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1, return_sequences=False, return_state=False, go_backwards=False, stateful=False, unroll=False) ``` 長短期記憶ユニット - Hochreiter 1997. __引数__ - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません (例."linear" activation: `a(x) = x`). - __recurrent_activation__: 再帰計算時に使う活性化関数([activations](../activations.md)を参照). デフォルト:ハードシグモイド(`hard_sigmoid`). `None`を渡すと活性化関数は適用されません (例."linear" activation: `a(x) = x`). - __use_bias__: 真理値,biasベクトルを使うかどうか. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __unit_forget_bias__: 真理値.Trueなら,初期化時に忘却ゲートのbiasに1加算.また,trueの場合は強制的に`bias_initializer="zeros"`になります.これは[Jozefowicz et al.](http://proceedings.mlr.press/v37/jozefowicz15.pdf)で推奨されています. - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力(そのactivation)に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. - __implementation__: 実装モードで,1か2.モード1は小さなドット積や加算処理を多数行う構造である一方,モード2は少数の大きな操作をバッチ処理します. これらのモードはハードウェアやアプリケーションによって異なるパフォーマンスプロファイルとなるでしょう. - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __go_backwards__: 真理値(デフォルトはFalse).Trueなら,入力系列の後ろから処理し,逆順の系列を返します. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプル に対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. - __unroll__: 真理値(デフォルトはFalse).Trueなら,ネットワークは展開され, そうでなければシンボリックループが使われます. 展開はよりメモリ集中傾向になりますが,RNNをスピードアップできます. 展開は短い系列にのみ適しています. 
__参考文献__ - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper) - [Learning to forget: Continual prediction with LSTM](http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015) - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf) - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional_recurrent.py#L773)</span> ### ConvLSTM2D 畳み込みLSTM. LSTMレイヤーに似ていますが,入力の変換とリカレントな変換が畳み込みです. __引数__ - __filters__: 整数,出力空間の次元(つまり畳み込みにおける出力フィルタの数). - __kernel_size__: 整数かn個の整数からなるタプル/リストで,n次元の畳み込みウィンドウを指定します. - __strides__: 整数かn個の整数からなるタプル/リストで,畳み込みのストライドをそれぞれ指定できます.strides value != 1とすると`dilation_rate` value != 1と指定できません. - __padding__: `"valid"`か`"same"`のどちらかを指定します. - __data_format__: 文字列,`channels_last`(デフォルト)か`channels_first`のどちらかを指定します. これは入力における次元の順序です. `"channels_last"`の場合,入力のshapeは`(batch, time, ..., channels)`となり,`"channels_first"`の場合は`(batch, time, channels, ...)`となります. デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です. 一度も値を変更していなければ,"channels_last"になります. - __dilation_rate__: 整数かn個の整数からなるタプル/リストで,dilated convolutionで使われる膨張率を指定します. 現在,`dilation_rate` value != 1 とすると,`strides` value != 1を指定することはできません. - __activation__: 使用する活性化関数の名前([activations](../activations.md)を参照), 何も指定しなければ,活性化は一切適用されません(つまり"線形"活性`a(x) = x`). - __recurrent_activation__: recurrentステップで適用される活性化関数([activations](../activations.md)を参照). - __use_bias__: 真理値,レイヤーがバイアスベクトルを使うかどうか. - __kernel_initializer__: `kernel`の重み行列の初期値を指定します.入力の線形変換に使われます.([initializers](../initializers.md)を参照). - __recurrent_initializer__: `recurrent_kernel`の重み行列の初期値を指定します. recurrent stateの線形変換に使われます.([initializers](../initializers.md)を参照). - __bias_initializer__: バイアスベクトルの初期値を指定します.([initializers](../initializers.md)を参照) - __unit_forget_bias__: 真理値.Trueなら,初期化時に忘却ゲートのバイアスに1を加えます. `bias_initializer="zeros"`とともに用いられます. これは[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)により推奨されています. - __kernel_regularizer__: `kernel`の重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照) - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照). - __bias_regularizer__: バイアスベクトルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力テンソルに適用させるRegularizerを指定します.([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用させるConstraintを指定します.([constraint](../constraints.md)を参照). - __recurrent_constraint__: recurrent_kernelの重み行列に適用させるConstraintを指定します.([constraint](../constraints.md)を参照). - __bias_constraint__: バイアスベクトルに適用させるConstraintを指定します.([constraint](../constraints.md)を参照). - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __go_backwards__: 真理値(デフォルトはFalse).Trueなら,入力系列の後ろから処理し,逆順の系列を返します. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプル に対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. 
__入力のshape__ - data_format='channels_first'の場合は次のshapeの5階テンソル:`(samples,time, channels, rows, cols)` - data_format='channels_last'の場合は次のshapeの5階テンソル:`(samples,time, rows, cols, channels)` __出力のshape__ - `return_sequences`の場合 - data_format='channels_first'なら次のshapeの5階テンソル: `(samples, time, filters, output_row, output_col)` - data_format='channels_last'なら次のshapeの5階テンソル: `(samples, time, output_row, output_col, filters)` - それ以外の場合 - data_format='channels_first'なら次のshapeの4階テンソル: `(samples, filters, output_row, output_col)` - data_format='channels_last'なら次のshapeの4階テンソル: `(samples, output_row, output_col, filters)` o_rowsとo_colsはフィルタのshapeやパディングに依存します. __Raise__ - __ValueError__: 無効なコンストラクタ引数の場合 __参考文献__ - [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1) 現状の実装ではcell出力におけるフィードバックループを含んでいません ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L782)</span> ### SimpleRNNCell ```python keras.layers.SimpleRNNCell(units, activation='tanh', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0) ``` SimpleRNNのCellクラス. __引数__ - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません(つまり"線形"活性: `a(x) = x`). - __use_bias__: 真理値,レイヤーがバイアスベクトルを使うかどうか. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: バイアスベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1154)</span> ### GRUCell ```python keras.layers.GRUCell(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1) ``` GRUレイヤーのためのCellクラス. - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません(つまり"線形"活性: `a(x) = x`). - __recurrent_activation__: 再帰計算時に使う活性化関数([activations](../activations.md)を参照). - __use_bias__: 真理値,レイヤーがバイアスベクトルを使うかどうか. 
- __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. - __implementation__: 実装モードで,1か2.モード1は小さなドット積や加算処理を多数行う構造である一方,モード2は少数の大きな操作をバッチ処理します. これらのモードはハードウェアやアプリケーションによって異なるパフォーマンスプロファイルとなるでしょう. - __reset_after__: GRUの慣習(行列の乗算の前後のどちらでリセットゲートの適用を行うか).False = "before" (デフォルト), True = "after" (CuDNN互換). ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L1728)</span> ### LSTMCell ```python keras.layers.LSTMCell(units, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, implementation=1) ``` LSTMレイヤーのためのcellクラス. __引数__ - __units__: 正の整数値,出力の次元数. - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません(つまり"線形"活性: `a(x) = x`). - __activation__: 活性化関数([activations](../activations.md)を参照). デフォルト:ハイパボリックタンジェント(`tanh`). `None`を渡すと活性化関数は適用されません(つまり"線形"活性: `a(x) = x`). - __use_bias__: 真理値,biasベクトルを使うかどうか. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __unit_forget_bias__: 真理値.Trueなら,初期化時に忘却ゲートのバイアスに1を加えます. `bias_initializer="zeros"`とともに用いられます. これは[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)により推奨されています. - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __dropout__: 0から1の間の浮動小数点数.入力の線形変換においてdropするユニットの割合. - __recurrent_dropout__: 0から1の間の浮動小数点数.再帰の線形変換においてdropするユニットの割合. - __implementation__: 実装モードで,1か2.モード1は小さなドット積や加算処理を多数行う構造である一方,モード2は少数の大きな操作をバッチ処理します. 
これらのモードはハードウェアやアプリケーションによって異なるパフォーマンスプロファイルとなるでしょう. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/recurrent.py#L25)</span> ### StackedRNNCells ```python keras.layers.StackedRNNCells(cells) ``` RNN cellのスタックの振る舞いを単一のcellのようにするためのラッパー. 効率的なstacked RNNを実装するために使われます. __引数__ - __cells__: RNN cellインスタンスのリスト. __例__ ```python cells = [ keras.layers.LSTMCell(output_dim), keras.layers.LSTMCell(output_dim), keras.layers.LSTMCell(output_dim), ] inputs = keras.Input((timesteps, input_dim)) x = keras.layers.RNN(cells)(inputs) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L135)</span> ### CuDNNGRU ```python keras.layers.CuDNNGRU(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False) ``` [CuDNN](https://developer.nvidia.com/cudnn)を利用した高速なGRU実装. TensorFlowバックエンドでGPU上でのみ動作します. __引数__ - __units__: 正の整数値,出力の次元数. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). - __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力 (そのactivation) に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプルに対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/cudnn_recurrent.py#L324)</span> ### CuDNNLSTM ```python keras.layers.CuDNNLSTM(units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, return_sequences=False, return_state=False, stateful=False) ``` [CuDNN](https://developer.nvidia.com/cudnn)を利用した高速なLSTM実装. TensorFlowバックエンドでGPU上でのみ動作します. __引数__ - __units__: 正の整数値,出力の次元数. - __kernel_initializer__: 入力の線形変換に使われる`kernel`の重み行列のためのInitializer([initializers](../initializers.md)を参照). - __unit_forget_bias__: 真理値.Trueなら,初期化時に忘却ゲートのバイアスに1を加えます. `bias_initializer="zeros"`とともに用いられます. これは[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)により推奨されています. - __recurrent_initializer__: 再帰の線形変換に使われる`recurrent_kernel`の重み行列のInitializer([initializers](../initializers.md)を参照). 
- __bias_initializer__: biasベクトルのInitializer([initializers](../initializers.md)を参照). - __kernel_regularizer__: `kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __recurrent_regularizer__: `recurrent_kernel`の重み行列に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __bias_regularizer__: biasベクトルに適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __activity_regularizer__: 出力 (そのactivation) に適用するRegularizer関数([regularizer](../regularizers.md)を参照). - __kernel_constraint__: `kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __recurrent_constraint__: `recurrent_kernel`の重み行列に適用するConstraint関数([constraints](../constraints.md)を参照). - __bias_constraint__: biasベクトルに適用するConstraint関数([constraints](../constraints.md)を参照). - __return_sequences__: 真理値.出力系列の最後の出力を返すか,完全な系列を返すか. - __return_state__: 真理値.出力とともに,最後の状態を返すかどうか. - __stateful__: 真理値(デフォルトはFalse).Trueなら,バッチ内のインデックスiの各サンプルに対する最後の状態が次のバッチ内のインデックスiのサンプルに対する初期状態として使われます.
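A hedged usage sketch for the layer above (it requires the TensorFlow backend and an available GPU; the sequence length, feature size and number of units are illustrative assumptions):

```python
# Minimal sketch: CuDNNLSTM used as a GPU-only, faster drop-in for LSTM.
# Runs only on GPU with the TensorFlow backend; sizes below are illustrative.
from keras.models import Sequential
from keras.layers import CuDNNLSTM, Dense

model = Sequential()
model.add(CuDNNLSTM(64, input_shape=(100, 32)))   # (timesteps, features)
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
```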
keras-docs-ja/sources/layers/recurrent.md/0
{ "file_path": "keras-docs-ja/sources/layers/recurrent.md", "repo_id": "keras-docs-ja", "token_count": 19666 }
70
# なぜKerasを使うか? 今日,数え切れない数の深層学習フレームワークが存在します.なぜ他のライブラリではなくて,Kerasを使うのでしょうか?ここでは,Kerasが既存の選択肢に引けを取らない理由のいくつかを紹介します. --- ## Kerasはシンプルかつ柔軟に使用できます - Kerasは,機械ではなく,人間のために設計されたAPIです.[Kerasは認知的負荷を軽減するためのベストプラクティスに従っています](https://blog.keras.io/user-experience-design-for-apis.html): 一貫性のあるシンプルなAPIを提供し,一般的なユースケースで必要なユーザーの操作を最小限に抑え,エラー時には明確で実用的なフィードバックを提供します. - これにより,Kerasは簡単に学ぶことが出来て,簡単に使う事が出来ます.Kerasユーザーは,生産性が高く,競争相手よりも,より多くのアイデアを試す事が出来ます. -- これにより,[機械学習のコンテストで勝つのに役立ちます](https://www.quora.com/Why-has-Keras-been-so-successful-lately-at-Kaggle-competitions). - 手軽さがあっても,柔軟性がなければいけません: Kerasは低レベルな深層学習言語(特にTensorFlow)と統合しているので,基本の深層学習言語で構築されたものを実装する事が出来ます.特に,`tf.keras`として,Keras APIはTensorFlowワークフローとシームレスに統合されています. --- ## Kerasは事業と研究コミュニティの両方で幅広く使用されています <a href='https://towardsdatascience.com/deep-learning-framework-power-scores-2018-23607ddf297a'> <img style='width: 80%; margin-left: 10%;' src='https://s3.amazonaws.com/keras.io/img/dl_frameworks_power_scores.png'/> </a> <p style='font-style: italic; font-size: 10pt; text-align: center;'> Deep learning frameworks ranking computed by Jeff Hale, based on 11 data sources across 7 categories </i> 2018年中旬の時点で,Kerasは25万以上の個人ユーザーがおり,TensorFlow自体を除いて,他の深層学習フレームワークよりも事業と研究コミュニティの両方で多く採用されています(そしてKeras APIは`tf.keras`を経由することでTensorFlowの公式なフロントエンドとなっています). あなたはすでにKerasで構築された機能を日常で使用しています -- Netflix,Uber,Yelp,Instacart,Zocdoc,Squareなど多くの企業がKerasを使用しています.特に,自社製品の核となる部分で深層学習を用いているようなスタートアップ企業で人気があります. また,Kerasは,深層学習研究者の間でも人気があり,プレプリント・サーバ[arXiv.org](https://arxiv.org/archive/cs)にアップロードされた,科学技術論文で言及されているフレームワークの中で二番目に使用されています.また,Kerasは,大規模な科学機関,例えば,CERNやNASAの研究者によって採用されています. --- ## Kerasは簡単にモデルを製品化できます Kerasのモデルは,他の深層学習フレームワークよりも多くのプラットフォームで,簡単にデプロイできます. - iOS([Apple’s CoreML](https://developer.apple.com/documentation/coreml)経由,Kerasのサポートは正式にAppleから提供されています).こちらが[チュートリアル](https://www.pyimagesearch.com/2018/04/23/running-keras-models-on-ios-with-coreml/)です. - Android(TensorFlow Androidランタイム経由)例: [Not Hotdog app](https://medium.com/@timanglade/how-hbos-silicon-valley-built-not-hotdog-with-mobile-tensorflow-keras-react-native-ef03260747f3). - ブラウザ([Keras.js](https://transcranial.github.io/keras-js/#/)や,[WebDNN](https://mil-tokyo.github.io/webdnn/)などのGPU利用が可能なJavaScriptランタイム経由) - Google Cloud([TensorFlow-Serving](https://www.tensorflow.org/serving/)経由) - [(Flaskアプリのような)Pythonのウェブアプリのバックエンド](https://blog.keras.io/building-a-simple-keras-deep-learning-rest-api.html). - JVM([SkyMindによって提供されたDL4J モデル](https://deeplearning4j.org/model-import-keras)経由) - ラズベリーパイ --- ## Kerasは複数のバックエンドをサポートし,一つのエコシステムに縛られません Kerasは複数の[バックエンドエンジン](https://keras.io/ja/backend/)をサポートしています.重要な事に,組み込みレイヤーのみで構成されるKerasモデルは,全てのバックエンド間で移植可能です.: 一つのバックエンドを使用して学習したモデルを用いて,別のバックエンドを使用してモデルをロードする事が出来ます.(例えば,デプロイなどで用いる事が出来ます) 使用可能なバックエンドは以下のとおりです. - TensorFlow バックエンド (from Google) - CNTK バックエンド (from Microsoft) - Theano バックエンド Amazonも現在,KerasのMXNetバックエンドの開発にも取り組んでいます. また,KerasモデルはCPU以外の様々なハードウェアプラットフォームで学習する事が出来ます. - [NVIDIA GPUs](https://developer.nvidia.com/deep-learning) - [Google TPUs](https://cloud.google.com/tpu/)(TensorFlowバックエンドかつ,Google Cloud経由) - OpenGLが使用出来るAMDのようなGPU([the PlaidML Kerasバックエンド](https://github.com/plaidml/plaidml)経由) --- ## Kerasは複数のGPU,分散学習のサポートが強力です - Kerasは[複数GPU並列処理のための組み込みサポート](/utils/#multi_gpu_model)もあります. - Uberの[Horovod](https://github.com/uber/horovod)は,Kerasモデルを最もサポートしています. 
- Kerasモデルは[TensorFlow Estimatorsに変換する事](https://www.tensorflow.org/versions/master/api_docs/python/tf/keras/estimator/model_to_estimator)が出来ます.また,[Google CloudのGPUクラスターを用いて](https://cloud.google.com/solutions/running-distributed-tensorflow-on-compute-engine)学習が出来ます. - Kerasは[Dist-Keras](https://github.com/cerndb/dist-keras) (from CERN)と [Elephas](https://github.com/maxpumperla/elephas)経由でSpark上で走らせる事が出来ます. --- ## Kerasの開発は深層学習の主要企業によってサポートされています Kerasの開発は主にGoogleによってサポートされ,Keras APIはTensorFlowに`tf.keras`としてパッケージ化されています. 加えて,MicrosoftはCNTK Kerasバックエンドを管理しています. Amazon AWSはMXNetサポートを開発中です. その他,NVIDIA,Uber,Apple(CoreML)によって,サポートされています. <img src='https://keras.io/img/google-logo.png' style='width:200px; margin-right:15px;'/> <img src='https://keras.io/img/microsoft-logo.png' style='width:200px; margin-right:15px;'/> <img src='https://keras.io/img/nvidia-logo.png' style='width:200px; margin-right:15px;'/> <img src='https://keras.io/img/aws-logo.png' style='width:110px; margin-right:15px;'/>
keras-docs-ja/sources/why-use-keras.md/0
{ "file_path": "keras-docs-ja/sources/why-use-keras.md", "repo_id": "keras-docs-ja", "token_count": 3410 }
71
# Keras 백엔드 ## "백앤드"는 무엇인가요? Keras는 딥러닝 모델을 개발하기 위한 고수준의 구성요성 요소를 제공하는 모델 레벨의 라이브러리입니다. Keras는 텐서 곱셈, 합성곱 등의 저수준의 연산을 제공하지 않습니다. 대신 Keras의 "백엔드 엔진" 역할을 하는 특수하고 잘 최적화 된 텐서 라이브러리에 의존합니다. 하나의 단일 텐서 라이브러리를 선택하고 Keras 구현을 해당 라이브러리에 묶는 대신, Keras는 모듈 방식으로 문제를 처리하여 여러 다른 백엔드 엔진들을 Keras에 매끄럽게 연결할 수 있게 합니다. 현재 Keras는 **TensorFlow**, **Theano**, 그리고 **CNTK**의 세 가지 백엔드를 지원합니다. - [TensorFlow](http://www.tensorflow.org/) is an open-source symbolic tensor manipulation framework developed by Google. - [Theano](http://deeplearning.net/software/theano/) is an open-source symbolic tensor manipulation framework developed by LISA Lab at Université de Montréal. - [CNTK](https://www.microsoft.com/en-us/cognitive-toolkit/) is an open-source toolkit for deep learning developed by Microsoft. - [TensorFlow](http://www.tensorflow.org/)는 구글에서 만든 기호텐서 프레임워크 오픈소스입니다. - [Theano](http://deeplearning.net/software/theano/)은 Université de Montréal LISA Lab에서 개발한 기호텐서 프레임워크 오픈소스입니다. - [CNTK](https://www.microsoft.com/en-us/cognitive-toolkit/)마이크로소프트에서 만든 딥러닝 개발을 위한 오픈소스 툴킷입니다. 앞으로 더 많은 백엔드 옵션을 지원할 예정입니다. ---- ## 한 백엔드에서 다른 백엔드로의 전환 Keras를 한 번이라도 실행한 적이 있다면, 아래의 위치에서 Keras 구성 파일을 찾을 수 있습니다. `$HOME/.keras/keras.json` 만약 파일이 없다면, 해당 위치에 구성 파일을 만들 수 있습니다. **Windows(윈도우) 사용자를 위한 노트:** `$HOME`을 `%USERPROFILE%`로 바꾸십시오. 기본 구성 파일의 내용은 다음과 같습니다. ``` { "image_data_format": "channels_last", "epsilon": 1e-07, "floatx": "float32", "backend": "tensorflow" } ``` 단순히 `backend` 필드의 값을 `"theano"`, `"tensorflow"` 또는 `"cntk"`로 바꿔주는 것 만으로 새로운 백엔드를 사용해 Keras 코드를 실행할 수 있습니다. 또는 아래와 같이 환경 변수 `KERAS_BACKEND`를 정의해 설정 파일에 정의된 것을 대체할 수도 있습니다. ```bash KERAS_BACKEND=tensorflow python -c "from keras import backend" Using TensorFlow backend. ``` Keras에서는 `"tensorflow"`, `"theano"` 그리고 `"cntk"`외에도 사용자가 지정한 임의의 백엔드를 로드하는 것이 가능합니다. 만약 `my_module`이라는 이름의 Python 모듈을 백엔드로 사용하고자 한다면, `keras.json` 파일의 `"backend"` 변수 값을 아래와 같이 바꿔주어야 합니다. ``` { "image_data_format": "channels_last", "epsilon": 1e-07, "floatx": "float32", "backend": "my_package.my_module" } ``` 사용하고자 하는 외부 백엔드는 반드시 검증된 것이어야 하며, `placeholder`, `variable` 그리고 `function` 세 함수들을 지원해야 합니다. 만약, 외부 백엔드가 필수 항목이 누락되어 유효하지 않은 경우라면, 누락된 항목/항목들에 대한 오류가 기록됩니다. ---- ## keras.json 상세 `keras.json` 구성 파일은 아래의 설정들을 포함합니다. ``` { "image_data_format": "channels_last", "epsilon": 1e-07, "floatx": "float32", "backend": "tensorflow" } ``` `$HOME/.keras/keras.json` 파일을 편집하여 설정을 변경할 수 있습니다. * `image_data_format`: String, either `"channels_last"` or `"channels_first"`. It specifies which data format convention Keras will follow. (`keras.backend.image_data_format()` returns it.) - For 2D data (e.g. image), `"channels_last"` assumes `(rows, cols, channels)` while `"channels_first"` assumes `(channels, rows, cols)`. - For 3D data, `"channels_last"` assumes `(conv_dim1, conv_dim2, conv_dim3, channels)` while `"channels_first"` assumes `(channels, conv_dim1, conv_dim2, conv_dim3)`. * `epsilon`: Float, a numeric fuzzing constant used to avoid dividing by zero in some operations. * `floatx`: String, `"float16"`, `"float32"`, or `"float64"`. Default float precision. * `backend`: String, `"tensorflow"`, `"theano"`, or `"cntk"`. ---- ## 추상화된 Keras 백엔드를 사용하여 새로운 코드 작성하기 만약 Theano(`th`)와 Tensorflow(`tf`) 모두와 호환이 되는 Keras 모듈을 작성하고자 한다면, 아래와 같이 추상화된 Keras 백엔드 API를 사용해야 합니다. 다음과 같이 백엔드 모듈을 사용할 수 있습니다. ```python from keras import backend as K ``` 아래는 입력 `placeholder`를 인스턴스화하는 코드입니다. 이는 `tf.placeholder()`, `th.tensor.matrix()` 또는 `th.tensor.tensor()` 등을 실행하는 것과 같습니다. 
```python inputs = K.placeholder(shape=(2, 4, 5)) # also works: inputs = K.placeholder(shape=(None, 4, 5)) # also works: inputs = K.placeholder(ndim=3) ``` 아래의 코드는 변수를 인스턴스화합니다. `tf.Variable()` 또는 `th.shared()`를 실행하는 것과 같습니다. ```python import numpy as np val = np.random.random((3, 4, 5)) var = K.variable(value=val) # all-zeros variable: var = K.zeros(shape=(3, 4, 5)) # all-ones: var = K.ones(shape=(3, 4, 5)) ``` 구현에 필요한 대부분의 텐서 연산들은 사용법이 TensorFlow나 Theano와 크게 다르지 않습니다. ```python # Initializing Tensors with Random Numbers b = K.random_uniform_variable(shape=(3, 4), low=0, high=1) # Uniform distribution c = K.random_normal_variable(shape=(3, 4), mean=0, scale=1) # Gaussian distribution d = K.random_normal_variable(shape=(3, 4), mean=0, scale=1) # Tensor Arithmetic a = b + c * K.abs(d) c = K.dot(a, K.transpose(b)) a = K.sum(b, axis=1) a = K.softmax(b) a = K.concatenate([b, c], axis=-1) # etc... ``` ---- ## 백엔드 함수들 ### epsilon ```python keras.backend.epsilon() ``` 수치 식에 사용되는 fuzz factor(엡실론의<sag>float</sag>값)을 반환합니다. __Returns__ A float. __Example__ ```python >>> keras.backend.epsilon() 1e-07 ``` ---- ### set_epsilon ```python keras.backend.set_epsilon(e) ``` 수치 식에 사용되는 fuzz factor의 값을 설정합니다. __Arguments__ - __e__: <sag>float</sag>, 엡실론의 새로운 값. __Example__ ```python >>> from keras import backend as K >>> K.epsilon() 1e-07 >>> K.set_epsilon(1e-05) >>> K.epsilon() 1e-05 ``` ---- ### floatx ```python keras.backend.floatx() ``` Returns the default float type, as a string. (e.g. 'float16', 'float32', 'float64'). __Returns__ String, the current default float type. __Example__ ```python >>> keras.backend.floatx() 'float32' ``` ---- ### set_floatx ```python keras.backend.set_floatx(floatx) ``` 기본 실수형 타입을 설정합니다. __Arguments__ - __floatx__: <sag>String</sag>, 'float16', 'float32', or 'float64'. __Example__ ```python >>> from keras import backend as K >>> K.floatx() 'float32' >>> K.set_floatx('float16') >>> K.floatx() 'float16' ``` ---- ### cast_to_floatx ```python keras.backend.cast_to_floatx(x) ``` NumPy 배열을 Keras의 기본 실수형 타입으로 변환합니다. __Arguments__ - __x__: NumPy 배열. __Returns__ 변환된 NumPy 배열 __Example__ ```python >>> from keras import backend as K >>> K.floatx() 'float32' >>> arr = numpy.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = K.cast_to_floatx(arr) >>> new_arr array([ 1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32') ``` ---- ### image_data_format ```python keras.backend.image_data_format() ``` Returns the default image data format convention. __Returns__ A string, either `'channels_first'` or `'channels_last'` __Example__ ```python >>> keras.backend.image_data_format() 'channels_first' ``` ---- ### set_image_data_format ```python keras.backend.set_image_data_format(data_format) ``` Sets the value of the data format convention. __Arguments__ - __data_format__: string. `'channels_first'` 또는 `'channels_last'`. __Example__ ```python >>> from keras import backend as K >>> K.image_data_format() 'channels_first' >>> K.set_image_data_format('channels_last') >>> K.image_data_format() 'channels_last' ``` ---- ### get_uid ```python keras.backend.get_uid(prefix='') ``` 디폴트 그래프의 uid 값을 가져옵니다. __Arguments__ - __prefix__: An optional prefix of the graph. __Returns__ 그래프의 고유 식별자(uid) ---- ### reset_uids ```python keras.backend.reset_uids() ``` 그래프의 식별자를 재설정합니다. ---- ### clear_session ```python keras.backend.clear_session() ``` 현재 TF 그래프를 없애고, 새로운 TF 그래프를 만듭니다. 오래된 모델 혹은 층과의 혼란을 피할 때 유용합니다. 
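A hedged sketch of a typical use of `clear_session()`, e.g. freeing the old graph when building many models in a loop (the model architecture here is an arbitrary illustration):

```python
# Minimal sketch: reset the TF graph between repeated model builds
# so stale layers/models do not accumulate. Layer sizes are illustrative.
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

for _ in range(5):
    K.clear_session()   # start each iteration from a fresh graph
    model = Sequential()
    model.add(Dense(10, input_shape=(20,), activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='sgd')
```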
---- ### manual_variable_initialization ```python keras.backend.manual_variable_initialization(value) ``` 수동 변수 초기화 플래그를 설정합니다. 이 boolean 플래그는 변수가 인스턴스화 될 때 초기화 되어야 하는지(기본값), 혹은 사용자가 직접 초기화를 처리해야 하는지 여부를 결정합니다. (e.g. via `tf.initialize_all_variables()`). __Arguments__ - __value__: Python boolean. ---- ### learning_phase ```python keras.backend.learning_phase() ``` 학습 단계를 나타내는 플래그를 반환합니다. 해당 플래그 변수는 학습과 테스트시에 다른 행동을 취하는 Keras 함수에 입력으로 전달되는 bool형 텐서 (0 = 테스트, 1 = 학습)입니다. __Returns__ 학습 단계 ( 스칼라 정수 텐서 또는 파이썬 정수형 ). ---- ### set_learning_phase ```python keras.backend.set_learning_phase(value) ``` 학습 단계 변수를 주어진 값으로 고정합니다. __Arguments__ - __value__: 학습 단계 값, 0 또는 1(정수). __Raises__ - __ValueError__: `value` 가 `0` 또는 `1`이 아닌 경우. ---- ### is_sparse ```python keras.backend.is_sparse(tensor) ``` 희소 텐서인지 아닌지를 반환합니다. __Arguments__ - __tensor__: 한 개의 텐서 인스턴스. __Returns__ A boolean. __Example__ ```python >>> from keras import backend as K >>> a = K.placeholder((2, 2), sparse=False) >>> print(K.is_sparse(a)) False >>> b = K.placeholder((2, 2), sparse=True) >>> print(K.is_sparse(b)) True ``` ---- ### to_dense ```python keras.backend.to_dense(tensor) ``` <sag>sparse</sag> 텐서에서 <sag>dense</sag>텐서로 바꿔준다. __Arguments__ - __tensor__: <sag>sparse</sag> 텐서일 수도 있는 인스턴스. __Returns__ 한 개의 <sag>dense</sag>텐서. __Examples__ ```python >>> from keras import backend as K >>> b = K.placeholder((2, 2), sparse=True) >>> print(K.is_sparse(b)) True >>> c = K.to_dense(b) >>> print(K.is_sparse(c)) False ``` ---- ### variable ```python keras.backend.variable(value, dtype=None, name=None, constraint=None) ``` 변수를 인스턴스화한 후 반환합니다. __Arguments__ - __value__: NumPy 배열, 텐서의 초기 값. - __dtype__: 텐서 타입. - __name__: 텐서의 이름(선택사항). - __constraint__: 옵티마이저 업데이트 후 변수에 적용되는 투영 함수입니다(선택사항). __Returns__ 변수 인스턴스(Keras 메타 데이터 포함). __Examples__ ```python >>> from keras import backend as K >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val, dtype='float64', name='example_var') >>> K.dtype(kvar) 'float64' >>> print(kvar) example_var >>> K.eval(kvar) array([[ 1., 2.], [ 3., 4.]]) ``` ---- ### constant ```python keras.backend.constant(value, dtype=None, shape=None, name=None) ``` 상수 텐서를 만듭니다. __Arguments__ - __value__: 상수 값(또는 리스트) - __dtype__: 결과의 텐서의 요소의 형태. - __shape__: 결과 텐서의 크기(선택사항). - __name__: 텐서의 이름(선택사항). __Returns__ 상수 텐서 ---- ### is_keras_tensor ```python keras.backend.is_keras_tensor(x) ``` `x`가 Keras 텐서인지 아닌지를 반환합니다. "Keras 텐서"란 Keras 층(`Layer` 클래스) 또는 `Input`에 의해 반환된 텐서입니다. __Arguments__ - __x__: 후보 텐서. __Returns__ A boolean: 주어진 인자가 Keras 텐서인지의 여부. __Raises__ - __ValueError__: `x`가 심볼릭 텐서가 아닌 경우. __Examples__ ```python >>> from keras import backend as K >>> from keras.layers import Input, Dense >>> np_var = numpy.array([1, 2]) >>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor. ValueError >>> k_var = tf.placeholder('float32', shape=(1,1)) >>> # A variable indirectly created outside of keras is not a Keras tensor. >>> K.is_keras_tensor(k_var) False >>> keras_var = K.variable(np_var) >>> # A variable created with the keras backend is not a Keras tensor. >>> K.is_keras_tensor(keras_var) False >>> keras_placeholder = K.placeholder(shape=(2, 4, 5)) >>> # A placeholder is not a Keras tensor. >>> K.is_keras_tensor(keras_placeholder) False >>> keras_input = Input([10]) >>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor. True >>> keras_layer_output = Dense(10)(keras_input) >>> # Any Keras layer output is a Keras tensor. 
>>> K.is_keras_tensor(keras_layer_output) True ``` ---- ### is_tensor ```python keras.backend.is_tensor(x) ``` ---- ### placeholder ```python keras.backend.placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None) ``` 플레이스홀더 텐서를 인스턴스화 한 후 반환합니다. __Arguments__ - __shape__: 플레이스홀더의 형식 (<sag>integer</sag> 튜플은 <sag>None</sag>요소가 없을수도 있습니다.) - __ndim__: 텐서 축의 갯수. 적어도 {'shape`, `ndim`} 중 하나는 반드시 명시되어야 합니다. 만약 두 요소 모두 명시되었다면, <sag>shape</sag>가 사용됩니다. - __dtype__: 플레이스홀더 타입. - __sparse__: 불리언 타입<sag>Boolean</sag>,플레이스홀더가 <sag>sparse</sag>타입이어야 하는지에 대한 진리값. - __name__: 문자열 플레이스홀더에 대한 선택적인 이름. __Returns__ 케라스의 메타데이터가 포함된 텐서 인스턴스. __Examples__ ```python >>> from keras import backend as K >>> input_ph = K.placeholder(shape=(2, 4, 5)) >>> input_ph._keras_shape (2, 4, 5) >>> input_ph <tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32> ``` ---- ### is_placeholder ```python keras.backend.is_placeholder(x) ``` 'x'가 플레이스홀더인지 아닌지를 반환한다. __Arguments__ - __x__: 한 개의 후보 플레이스홀더. __Returns__ 불리언 값. ---- ### shape ```python keras.backend.shape(x) ``` 텐서 또는 변수의 기호 형식을 반환합니다. __Arguments__ - __x__: 한 개의 텐서 또는 변수. __Returns__ 텐서 그 자체의 기호형식. __Examples__ ```python # TensorFlow example >>> from keras import backend as K >>> tf_session = K.get_session() >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> inputs = keras.backend.placeholder(shape=(2, 4, 5)) >>> K.shape(kvar) <tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32> >>> K.shape(inputs) <tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32> # To get integer shape (Instead, you can use K.int_shape(x)) >>> K.shape(kvar).eval(session=tf_session) array([2, 2], dtype=int32) >>> K.shape(inputs).eval(session=tf_session) array([2, 4, 5], dtype=int32) ``` ---- ### int_shape ```python keras.backend.int_shape(x) ``` <sag>int</sag> 또는 <sag>None</sag>요소의 튜플로서 변수 또는 텐서의 형식을 반환합니다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ <sag>integers</sag>(또는 <sag>None</sag>)의 튜플. __Examples__ ```python >>> from keras import backend as K >>> inputs = K.placeholder(shape=(2, 4, 5)) >>> K.int_shape(inputs) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> K.int_shape(kvar) (2, 2) ``` __Numpy implementation__ ```python def int_shape(x): return x.shape ``` ---- ### ndim ```python keras.backend.ndim(x) ``` <sag>integer</sag>타입으로, 텐서의 축의 갯수를 반환합니다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 축의 갯 수, 정수형(스칼라값)으로 반환합니다. __Examples__ ```python >>> from keras import backend as K >>> inputs = K.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val) >>> K.ndim(inputs) 3 >>> K.ndim(kvar) 2 ``` __Numpy implementation__ ```python def ndim(x): return x.ndim ``` ---- ### dtype ```python keras.backend.dtype(x) ``` <sag>string</sag>타입으로 케라스 변수 또는 텐서의 <sag>dtype</sag>을 반환한다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 'x'의 dtype<sag>string</sag> __Examples__ ```python >>> from keras import backend as K >>> K.dtype(K.placeholder(shape=(2,4,5))) 'float32' >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32')) 'float32' >>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64')) 'float64' # Keras variable >>> kvar = K.variable(np.array([[1, 2], [3, 4]])) >>> K.dtype(kvar) 'float32_ref' >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') >>> K.dtype(kvar) 'float32_ref' ``` __Numpy implementation__ ```python def dtype(x): return x.dtype.name ``` ---- ### eval ```python keras.backend.eval(x) ``` 변수의 값을 평가한다. __Arguments__ - __x__: 한 개의 변수. __Returns__ 넘파이 배열. 
__Examples__ ```python >>> from keras import backend as K >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32') >>> K.eval(kvar) array([[ 1., 2.], [ 3., 4.]], dtype=float32) ``` __Numpy implementation__ ```python def eval(x): return x ``` ---- ### zeros ```python keras.backend.zeros(shape, dtype=None, name=None) ``` 모두 0인 변수로 인스턴스화 하고 반환한다. __Arguments__ - __shape__: <sag>integers</sag>의 튜플, 반환된 케라스 변수의 형식 - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 데이터 타입 - __name__: <sag>string</sag>, 반환된 케라스 변수의 이름 __Returns__ Keras 메타 데이터를 포함한 `0.0`으로 채워진 변수. `shape`가 기호 인 경우 변수를 반환 할 수 없습니다. 대신 동적 모양의 텐서를 반환합니다. __Example__ ```python >>> from keras import backend as K >>> kvar = K.zeros((3,4)) >>> K.eval(kvar) array([[ 0., 0., 0., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]], dtype=float32) ``` __Numpy implementation__ ```python def zeros(shape, dtype=floatx(), name=None): return np.zeros(shape, dtype=dtype) ``` ---- ### ones ```python keras.backend.ones(shape, dtype=None, name=None) ``` 모든 변수를 인스턴스화하고 반환합니다. __Arguments__ - __shape__: <sag>integers</sag>의 튜플, 반환된 케라스 변수 형식. - __dtype__: <sag>string</sag>, 반환된 케라스 데이터 타입. - __name__: <sag>string</sag>, 반환된 케라스 변수 이름. __Returns__ `1.0`으로 채워진 Keras 변수. `shape`가 기호 인 경우 변수를 반환 할 수 없습니다. 대신 동적 모양의 텐서를 반환합니다. __Example__ ```python >>> from keras import backend as K >>> kvar = K.ones((3,4)) >>> K.eval(kvar) array([[ 1., 1., 1., 1.], [ 1., 1., 1., 1.], [ 1., 1., 1., 1.]], dtype=float32) ``` __Numpy implementation__ ```python def ones(shape, dtype=floatx(), name=None): return np.ones(shape, dtype=dtype) ``` ---- ### eye ```python keras.backend.eye(size, dtype=None, name=None) ``` 단위행렬을 인스턴스화 하고 반환합니다. __Arguments__ - __size__: <sag>integer</sag>, 행과 열의 수. - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 데이터 타입. - __name__: <sag>string</sag>, 반환된 케라스 변수의 이름. __Returns__ 단위행렬, 케라스 변수. __Example__ ```python >>> from keras import backend as K >>> kvar = K.eye(3) >>> K.eval(kvar) array([[ 1., 0., 0.], [ 0., 1., 0.], [ 0., 0., 1.]], dtype=float32) ``` __Numpy implementation__ ```python def eye(size, dtype=None, name=None): return np.eye(size, dtype=dtype) ``` ---- ### zeros_like ```python keras.backend.zeros_like(x, dtype=None, name=None) ``` 또 다른 텐서이면서 같은 형식의 모두 0값인 변수가 인스턴스화 됩니다. __Arguments__ - __x__: 케라스 변수 또는 케라스 텐서. - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 dtype. x의 dtype을 사용하지 않습니다. - __name__: <sag>string</sag>, 생성할 변수의 이름. __Returns__ 0으로 채워진 x 형식의 케라스 변수. __Example__ ```python >>> from keras import backend as K >>> kvar = K.variable(np.random.random((2,3))) >>> kvar_zeros = K.zeros_like(kvar) >>> K.eval(kvar_zeros) array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) ``` __Numpy implementation__ ```python def zeros_like(x, dtype=floatx(), name=None): return np.zeros_like(x, dtype=dtype) ``` ---- ### ones_like ```python keras.backend.ones_like(x, dtype=None, name=None) ``` 또 다른 텐서와 동일한 모양의 <sag>all-ones</sag> 변수를 인스턴스화 합니다. __Arguments__ - __x__: 케라스 변수 또는 케라스 텐서. - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 dtype. x의 dtype을 사용하지 않습니다. - __name__: <sag>string</sag>, 생성할 변수의 이름. __Returns__ ones로 전달된 형식에 대한 케라스 변수. __Example__ ```python >>> from keras import backend as K >>> kvar = K.variable(np.random.random((2,3))) >>> kvar_ones = K.ones_like(kvar) >>> K.eval(kvar_ones) array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) ``` __Numpy implementation__ ```python def ones_like(x, dtype=floatx(), name=None): return np.ones_like(x, dtype=dtype) ``` ---- ### identity ```python keras.backend.identity(x, name=None) ``` 입력 텐서와 내용이 같은 텐서를 반환합니다. 
__Arguments__ - __x__: 입력텐서. - __name__: <sag>string</sag>, 생성 할 변수의 이름. __Returns__ 형식 및 내용이 같은 텐서. ---- ### random_uniform_variable ```python keras.backend.random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None) ``` 균등 분포에서 가져온 값의 변수를 인스턴스화 합니다. __Arguments__ - __shape__: <sag>integers</sag>의 튜플, 반환된 케라스 변수의 형식. - __low__: <sag>float</sag>, 출력 범위의 하한. - __high__: <sag>float</sag>, 출력 번위의 상한. - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 dtype. - __name__: <sag>string</sag>, 반환된 케라스 변수의 이름. - __seed__: <sag>integer</sag>, 난수생성. __Returns__ 샘플에서 가져온 케라스 변수. __Example__ ```python # TensorFlow example >>> kvar = K.random_uniform_variable((2,3), 0, 1) >>> kvar <tensorflow.python.ops.variables.Variable object at 0x10ab40b10> >>> K.eval(kvar) array([[ 0.10940075, 0.10047495, 0.476143 ], [ 0.66137183, 0.00869417, 0.89220798]], dtype=float32) ``` __Numpy implementation__ ```python def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): return (high - low) * np.random.random(shape).astype(dtype) + low ``` ---- ### random_normal_variable ```python keras.backend.random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None) ``` 정규 분포에서 가져온 값의 변수를 인스턴스화 합니다. __Arguments__ - __shape__: <sag>integers</sag>의 튜플, 반환된 케라스 변수의 형식. - __mean__: <sag>float</sag>, 정규분포의 평균. - __scale__: <sag>float</sag>, 정규분포의 표준편차. - __dtype__: <sag>string</sag>, 반환된 케라스 변수의 dtype. - __name__: <sag>string</sag>, 반환된 케라스 변수의 이름. - __seed__: <sag>integer</sag>, 난수생성. __Returns__ 샘플에서 가져온 케라스 변수. __Example__ ```python # TensorFlow example >>> kvar = K.random_normal_variable((2,3), 0, 1) >>> kvar <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0> >>> K.eval(kvar) array([[ 1.19591331, 0.68685907, -0.63814116], [ 0.92629528, 0.28055015, 1.70484698]], dtype=float32) ``` __Numpy implementation__ ```python def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None): return scale * np.random.randn(*shape).astype(dtype) + mean ``` ---- ### count_params ```python keras.backend.count_params(x) ``` 케라스 변수 또는 텐서에서 요소들의 <sag>static</sag> 숫자를 반환합니다. __Arguments__ - __x__: 케라스 텐서 또는 변수. __Returns__ <sag>integer</sag>,`x`요소의 갯수, 즉, 배열의 정적차원<sag>static dimensions</sag>의 곱 연산. __Example__ ```python >>> kvar = K.zeros((2,3)) >>> K.count_params(kvar) 6 >>> K.eval(kvar) array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32) ``` __Numpy implementation__ ```python def count_params(x): return x.size ``` ---- ### cast ```python keras.backend.cast(x, dtype) ``` 텐서를 다른 dtype으로 타입을 바꿔주고 반환합니다. 케라스 변수 타입을 바꿔줄 수 있으나 여전히 텐서를 반환합니다. __Arguments__ - __x__: 케라스 텐서 또는 변수. - __dtype__: <sag>string</sag>, 'float16', 'float32', 또는 'float64' __Returns__ <sag>dtype</sag>의 케라스 텐서. __Example__ ```python >>> from keras import backend as K >>> input = K.placeholder((2, 3), dtype='float32') >>> input <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32> # It doesn't work in-place as below. >>> K.cast(input, dtype='float16') <tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16> >>> input <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32> # you need to assign it. >>> input = K.cast(input, dtype='float16') >>> input <tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16> ``` ---- ### update ```python keras.backend.update(x, new_x) ``` x값을 new_x로 갱신합니다. __Arguments__ - __x__: 한개의 변수. - __new_x__: x의 같은 형식의 텐서. __Returns__ x변수를 갱신합니다. ---- ### update_add ```python keras.backend.update_add(x, increment) ``` <sag>increment</sag>를 x에 더한 값을 갱신합니다. __Arguments__ - __x__: 변수. 
- __increment__: x와 같은 형식의 텐서. __Returns__ 변수 x 갱신. ---- ### update_sub ```python keras.backend.update_sub(x, decrement) ``` <sag>decrement</sag>를 뺀 후 x의 값 갱신. __Arguments__ - __x__: A `Variable`. - __decrement__: x와 같은 형식의 텐서. __Returns__ 변수 x 갱신. ---- ### moving_average_update ```python keras.backend.moving_average_update(x, value, momentum) ``` 변수의 이동평균을 계산합니다. __Arguments__ - __x__: `Variable`. - __value__:같은`x`형식의 텐서. - __momentum__: 이동 평균 운동량. __Returns__ 변수를 업데이트하는 연산. ---- ### dot ```python keras.backend.dot(x, y) ``` 2 텐서(또는 변수)를 곱하고 텐서를 반환합니다. N차원의 텐서를 곱하려고 시도할 때, N차원의 텐서가 Theano의 방식으로 다시 생성합니다. (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`) __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ `x` 과 `y`의 내적을 텐서로 반환. __Examples__ ```python # dot product between tensors >>> x = K.placeholder(shape=(2, 3)) >>> y = K.placeholder(shape=(3, 4)) >>> xy = K.dot(x, y) >>> xy <tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32> ``` ```python # dot product between tensors >>> x = K.placeholder(shape=(32, 28, 3)) >>> y = K.placeholder(shape=(3, 4)) >>> xy = K.dot(x, y) >>> xy <tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32> ``` ```python # Theano-like behavior example >>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1) >>> y = K.ones((4, 3, 5)) >>> xy = K.dot(x, y) >>> K.int_shape(xy) (2, 4, 5) ``` __Numpy implementation__ ```python def dot(x, y): return np.dot(x, y) ``` ---- ### batch_dot ```python keras.backend.batch_dot(x, y, axes=None) ``` 배치방식의 내적. x와 y가 배치 데이터일 때, x와 y의 내적을 계산하여 batch_dot을 사용한다. 즉, (batch_size, :)형식. batch_dot은 입력값보다 차수가 작은 텐서 또는 변수를 반환합니다. 차원의 수가 1로 줄어들면 적어도 2차원이상인지 확인하기 위해 expand_dims를 사용합니다. __Arguments__ - __x__: `ndim >= 2` 조건의 케라스 텐서 또는 변수. - __y__: `ndim >= 2` 조건의 케라스 텐서 또는 변수. - __axes__: 목적 차원이 감소된 (int,int)튜플 또는 <sag>int</sag> __Returns__ x 형식의 연쇄와 같은 형식의 텐서와 y형식. y형식은 배치차원과 합산된 차원보다 더 적습니다. rank가 1이면, (batch_size,1)로 재설정합니다. __Examples__ `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`일 때, `batch_dot(x, y, axes=1) = [[17], [53]]` 비대각선을 계산할 필요가 없을 때도, `x.dot(y.T)` 주대각선 계산. Pseudocode: ``` inner_products = [] for xi, yi in zip(x, y): inner_products.append(xi.dot(yi)) result = stack(inner_products) ``` 형식 추론하기: `x`의 모양은`(100, 20)`이되고`y`의 모양은`(100, 30, 20)`이됩니다. '축'이 (1, 2) 인 경우, 결과 텐서의 출력값 형식을 찾으려면, `x` 형식과`y`형식으로 각 차원을 반복합니다. *`x.shape [0]`: 100 : 출력 형식에 추가 *`x.shape [1]`: 20 : 출력 형식에 추가하지 않습니다, `x`의 차원 1이 됩니다. (`dot_axes [0]`= 1) *`y.shape [0]`: 100 : 출력 형태에 추가하지 않습니다, 항상 y의 첫 번째 차원을 배제합니다. *`y.shape [1]`: 30 : 출력 형식에 추가 *`y.shape [2]`: 20 : 출력 형식에 추가하지 않습니다, `y`의 차원 2이 됩니다. (`dot_axes [1]`= 2) `output_shape` =`(100, 30)` ```python >>> x_batch = K.ones(shape=(32, 20, 1)) >>> y_batch = K.ones(shape=(32, 30, 20)) >>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=(1, 2)) >>> K.int_shape(xy_batch_dot) (32, 1, 30) ``` __Numpy implementation__ <details> <summary>Show the Numpy implementation</summary> ```python def batch_dot(x, y, axes=None): if x.ndim < 2 or y.ndim < 2: raise ValueError('Batch dot requires inputs of rank 2 or more.') if isinstance(axes, int): axes = [axes, axes] elif isinstance(axes, tuple): axes = list(axes) if axes is None: if y.ndim == 2: axes = [x.ndim - 1, y.ndim - 1] else: axes = [x.ndim - 1, y.ndim - 2] if any([isinstance(a, (list, tuple)) for a in axes]): raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) # Handle negative axes if axes[0] < 0: axes[0] += x.ndim if axes[1] < 0: axes[1] += y.ndim if 0 in axes: raise ValueError('Can not perform batch dot over axis 0.') if x.shape[0] != y.shape[0]: raise ValueError('Can not perform batch dot on inputs' ' with different batch sizes.') d1 = x.shape[axes[0]] d2 = y.shape[axes[1]] if d1 != d2: raise ValueError('Can not do batch_dot on inputs with shapes ' + str(x.shape) + ' and ' + str(y.shape) + ' with axes=' + str(axes) + '. x.shape[%d] != ' 'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2)) result = [] axes = [axes[0] - 1, axes[1] - 1] # ignore batch dimension for xi, yi in zip(x, y): result.append(np.tensordot(xi, yi, axes)) result = np.array(result) if result.ndim == 1: result = np.expand_dims(result, -1) return result ``` </details> ---- ### transpose ```python keras.backend.transpose(x) ``` Transposes a tensor and returns it. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. __Examples__ ```python >>> var = K.variable([[1, 2, 3], [4, 5, 6]]) >>> K.eval(var) array([[ 1., 2., 3.], [ 4., 5., 6.]], dtype=float32) >>> var_transposed = K.transpose(var) >>> K.eval(var_transposed) array([[ 1., 4.], [ 2., 5.], [ 3., 6.]], dtype=float32) ``` ```python >>> inputs = K.placeholder((2, 3)) >>> inputs <tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32> >>> input_transposed = K.transpose(inputs) >>> input_transposed <tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32> ``` __Numpy implementation__ ```python def transpose(x): return np.transpose(x) ``` ---- ### gather ```python keras.backend.gather(reference, indices) ``` 텐서 `reference`에서 `indices`의 인덱스 요소를 검색합니다. __Arguments__ - __reference__: 텐서. - __indices__: 인덱스의 <sag>integer</sag>텐서. __Returns__ <sag>reference</sag>와 같은 타입의 텐서. __Numpy implementation__ ```python def gather(reference, indices): return reference[indices] ``` ---- ### max ```python keras.backend.max(x, axis=None, keepdims=False) ``` 텐서에 대한 최댓값. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integers</sag>의 튜플 또는 <sag>integer</sag> 최댓값을 찾기위한 축. 만약 <sag>None</sag>이라면 모든 차원에 대한 최댓값을 찾습니다. - __keepdims__: <sag>boolean</sag>, 차원이 유지되고 있는지에 대한 여부. `keepdims`가`False` 인 경우 텐서의 rank가 1만큼 감소합니다 `keepdims`가`True`이면 축소 된 치수는 길이 1로 유지됩니다. __Returns__ x의 최대값에 대한 텐서. __Numpy implementation__ ```python def max(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.max(x, axis=axis, keepdims=keepdims) ``` ---- ### min ```python keras.backend.min(x, axis=None, keepdims=False) ``` 텐서에 대한 최솟값. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integers</sag>의 튜플 또는 <sag>integer</sag> 최솟값을 찾기위한 축. 만약 <sag>None</sag>이라면 모든 차원에 대한 최솟값을 찾습니다. - __keepdims__: <sag>boolean</sag>, 차원이 유지되고 있는지에 대한 여부. `keepdims`가`False` 인 경우 텐서의 rank가 1만큼 감소합니다 `keepdims`가`True`이면 축소 된 치수는 길이 1로 유지됩니다. __Returns__ x의 최솟값에 대한 텐서. __Numpy implementation__ ```python def min(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.min(x, axis=axis, keepdims=keepdims) ``` ---- ### sum ```python keras.backend.sum(x, axis=None, keepdims=False) ``` 지정된 축에따른 텐서의 값들의 합. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integers</sag>의 튜플 또는 <sag>integer</sag>를 합산 하기위한 축. 만약 <sag>None</sag>이라면 모든 차원에 대한 합의 값을 찾습니다. - __keepdims__: <sag>boolean</sag>, 차원이 유지되고 있는지에 대한 여부. `keepdims`가`False` 인 경우 텐서의 rank가 1만큼 감소합니다 `keepdims`가`True`이면 축소 된 치수는 길이 1로 유지됩니다. __Returns__ 'x'의 합을 가진 텐서. 
__Numpy implementation__ ```python def sum(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.sum(x, axis=axis, keepdims=keepdims) ``` ---- ### prod ```python keras.backend.prod(x, axis=None, keepdims=False) ``` 지정된 축을 따라, 텐서의 값을 곱합니다. __Arguments__ - __x__: A tensor or variable. - __x__: 텐서 또는 변수. - __axis__: An integer or list of integers in [-rank(x), rank(x)) 범위 내 <sag>integers</sag>의 리스트 또는 <sag>integers</sag>로서, 곱을 계산한 축. 만약 <sag>None</sag>이라면 모든 차원에 대해 곱을 계산합니다. - __keepdims__: <sag>boolean</sag>, 차원이 유지되고 있는지 아닌지에 대한 진리값. 만약 `keepdims` 가 <sag>False</sag>라면, 텐서의 랭크가 1만큼 감소합니다. 만약 `keepdims` 가 <sag>True</sag>라면, 줄어든 차원이 길이 1만큼 유지됩니다. __Returns__ 'x'의 요소들의 곱에대한 텐서. __Numpy implementation__ ```python def prod(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.prod(x, axis=axis, keepdims=keepdims) ``` ---- ### cumsum ```python keras.backend.cumsum(x, axis=0) ``` 지정된 축에 따라, 텐서 값의 누적된 합계. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: An integer, 합계를 계산하는 축. __Returns__ x의 값에 따른 축의 누적된 합의 텐서. __Numpy implementation__ ```python def cumsum(x, axis=0): return np.cumsum(x, axis=axis) ``` ---- ### cumprod ```python keras.backend.cumprod(x, axis=0) ``` 지정된 축에 따라, 텐서 값의 누적된 곱. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: An integer, 곱 계산에 대한 축. __Returns__ x의 값에 따른 축의 누적된 곱의 텐서. __Numpy implementation__ ```python def cumprod(x, axis=0): return np.cumprod(x, axis=axis) ``` ---- ### var ```python keras.backend.var(x, axis=None, keepdims=False) ``` 지정된 축에 따라, 텐서의 분산. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integer</sag>타입 리스트 또는 <sag>integer</sag>으로, 분산을 계산 할 축. <sag>None</sag> (default)이면 계산. 모든 차원에 대한 분산을 계산합니다.. - __keepdims__: <sag>boolean</sag>, 차원을 유지 하였는지에 대한 진리값. `keepdims` 가 <sag>False</sag>인 경우, 텐서의 랭크가 1씩 감소합니다. `keepdims` 가 <sag>True</sag>인 경우, 줄어든 차원의 길이는 1로 유지됩니다. __Returns__ `x`의 요소의 분산을 갖는 텐서. __Numpy implementation__ ```python def var(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.var(x, axis=axis, keepdims=keepdims) ``` ---- ### std ```python keras.backend.std(x, axis=None, keepdims=False) ``` 지정된 축과 함께 텐서의 표준 편차를 반환한다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integer</sag>타입 리스트 또는 <sag>integer</sag>으로, 표준편차를 계산하는 축. <sag>None</sag> (default)이면 계산. 모든 차원에 대한 표준편차를 계산합니다. - __keepdims__: <sag>boolean</sag>, 차원을 유지 하였는지에 대한 진리값. `keepdims` 가 <sag>False</sag>인 경우, 텐서의 랭크가 1씩 감소합니다. `keepdims` 가 <sag>True</sag>인 경우, 줄어든 차원의 길이는 1로 유지됩니다. __Returns__ x의 요소의 표준편차에 대한 텐서. __Numpy implementation__ ```python def std(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.std(x, axis=axis, keepdims=keepdims) ``` ---- ### mean ```python keras.backend.mean(x, axis=None, keepdims=False) ``` 지정된 축에 따른 텐서의 평균. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integer</sag>타입 리스트 또는 <sag>integer</sag>으로, 평균을 계산하는 축. <sag>None</sag> (default)이면 계산. 모든 차원에 대한 평균을 계산합니다. - __keepdims__: <sag>boolean</sag>, 차원을 유지 하였는지에 대한 진리값. `keepdims` 가 <sag>False</sag>인 경우, 축의 각 항목에 대해 텐서의 랭크가 1씩 감소합니다. `keepdims` 가 <sag>True</sag>인 경우, 줄어든 차원의 길이는 1로 유지됩니다. __Returns__ `x`의 요소의 평균을 가진 텐서. __Numpy implementation__ ```python def mean(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.mean(x, axis=axis, keepdims=keepdims) ``` ---- ### any ```python keras.backend.any(x, axis=None, keepdims=False) ``` 비트단위 감소(logical OR). 
__Arguments__ - __x__: Tensor or variable. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integer</sag>타입 리스트 또는 <sag>integer</sag> <sag>None</sag> (default)이면 계산. 모든 차원에 대한 평균을 계산합니다. - __keepdims__: 감소한 축을 브로드캐스트 하는지 드롭하는지에 대한 여부. __Returns__ uint8텐서 (0s and 1s). __Numpy implementation__ ```python def any(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.any(x, axis=axis, keepdims=keepdims) ``` ---- ### all ```python keras.backend.all(x, axis=None, keepdims=False) ``` 비트단위 감소 (logical AND). __Arguments__ - __x__: 텐서 또는 변수. - __axis__: [-rank(x), rank(x)) 범위의 <sag>integer</sag>타입 리스트 또는 <sag>integer</sag> <sag>None</sag> (default)이면 계산. 모든 차원에 대한 평균을 계산합니다. - __keepdims__: 감소한 축을 브로드캐스트 하는지 드롭하는지에 대한 여부. __Returns__ uint8텐서 (0s and 1s). __Numpy implementation__ ```python def all(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return np.all(x, axis=axis, keepdims=keepdims) ``` ---- ### argmax ```python keras.backend.argmax(x, axis=-1) ``` 축에 따른 최댓값의 인덱스를 반환합니다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: 감소 수행에 따른 축. __Returns__ 텐서. __Numpy implementation__ ```python def argmax(x, axis=-1): return np.argmax(x, axis=axis) ``` ---- ### argmin ```python keras.backend.argmin(x, axis=-1) ``` 축에 따른 최솟값의 인덱스를 반환합니다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: 축소를 수행에 따른 축. __Returns__ 텐서 __Numpy implementation__ ```python def argmin(x, axis=-1): return np.argmin(x, axis=axis) ``` ---- ### square ```python keras.backend.square(x) ``` 요소별로 제곱계산. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### abs ```python keras.backend.abs(x) ``` 절대값 계산. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### sqrt ```python keras.backend.sqrt(x) ``` 요소별 제곱근 계산. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서 __Numpy implementation__ ```python def sqrt(x): y = np.sqrt(x) y[np.isnan(y)] = 0. return y ``` ---- ### exp ```python keras.backend.exp(x) ``` Element-wise exponential. __Arguments__ - __x__: Tensor or variable. __Returns__ A tensor. ---- ### log ```python keras.backend.log(x) ``` log 취하기. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### logsumexp ```python keras.backend.logsumexp(x, axis=None, keepdims=False) ``` log(sum(exp(elements across dimensions of a tensor)))를 계산합니다. log(sum(exp(x))) 보다 수치적으로 안정된 함수입니다. 큰 입력값의 exp를 취해서 오버플로가 발생하고 작은 입력값의 log를 가져와서 언더플로가 발생하는 것을 방지합니다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: An integer or list of integers in [-rank(x), rank(x)) 범위 내 <sag>integers</sag>의 리스트 또는 <sag>integers</sag>로서, <sag>logsunexp</sag>을 계산한 축. 만약 <sag>None</sag>이라면 모든 차원에 대해 <sag>logsunexp</sag>을 계산합니다. - __keepdims__: <sag>boolean</sag>, 차원이 유지되고 있는지 아닌지에 대한 진리값. 만약 `keepdims` 가 <sag>False</sag>라면, 텐서의 랭크가 1만큼 감소합니다. 만약 `keepdims` 가 <sag>True</sag>라면, 줄어든 차원이 길이 1만큼 유지됩니다. __Returns__ 감소된 텐서. __Numpy implementation__ ```python def logsumexp(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) return sp.misc.logsumexp(x, axis=axis, keepdims=keepdims) ``` ---- ### round ```python keras.backend.round(x) ``` 요소별로 가장 가까운 수로 반올림. 0.5.의 경우, 가장 가까운 짝수로 반올림 보내는 방식을 사용합니다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### sign ```python keras.backend.sign(x) ``` 요소별로 sign 취하기. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### pow ```python keras.backend.pow(x, a) ``` 요소별로 지수화. __Arguments__ - __x__: 텐서 또는 변수. - __a__: <sag>integer</sag> __Returns__ 텐서. 
__Numpy implementation__ ```python def pow(x, a=1.): return np.power(x, a) ``` ---- ### clip ```python keras.backend.clip(x, min_value, max_value) ``` 간격이 주어지면 간격 가장자리에서 값이 잘립니다. (클리핑) __Arguments__ - __x__: 텐서 또는 변수. - __min_value__: <sag>float</sag>, <sag>integer</sag> or tensor. - __max_value__: <sag>float</sag>, <sag>integer</sag> or tensor. __Returns__ 텐서 __Numpy implementation__ ```python def clip(x, min_value, max_value): return np.clip(x, min_value, max_value) ``` ---- ### equal ```python keras.backend.equal(x, y) ``` 두 텐서 사이의 대등함을 비교. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def equal(x, y): return x == y ``` ---- ### not_equal ```python keras.backend.not_equal(x, y) ``` 두 텐서사이 동등하지 않음을 판정. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def not_equal(x, y): return x != y ``` ---- ### greater ```python keras.backend.greater(x, y) ``` (x > y)의 진리값. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def greater(x, y): return x > y ``` ---- ### greater_equal ```python keras.backend.greater_equal(x, y) ``` (x >= y)의 진리값. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def greater_equal(x, y): return x >= y ``` ---- ### less ```python keras.backend.less(x, y) ``` (x < y)의 진리값. __Arguments__ - __x__: Tensor or variable. - __y__: Tensor or variable. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def less(x, y): return x < y ``` ---- ### less_equal ```python keras.backend.less_equal(x, y) ``` (x <= y)의 진리값. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 불리언 텐서. __Numpy implementation__ ```python def less_equal(x, y): return x <= y ``` ---- ### maximum ```python keras.backend.maximum(x, y) ``` 두 텐서사이 최댓값. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 한 개의 텐서. __Numpy implementation__ ```python def maximum(x, y): return np.maximum(x, y) ``` ---- ### minimum ```python keras.backend.minimum(x, y) ``` 두 텐서 사이 최솟값. __Arguments__ - __x__: 텐서 또는 변수. - __y__: 텐서 또는 변수. __Returns__ 텐서. __Numpy implementation__ ```python def minimum(x, y): return np.minimum(x, y) ``` ---- ### sin ```python keras.backend.sin(x) ``` x의 sin 계산. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 한 개의 텐서. ---- ### cos ```python keras.backend.cos(x) ``` x의 cos 계산. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### normalize_batch_in_training ```python keras.backend.normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001) ``` 배치에 대한 평균과 표준을 계산 한 다음 배치에 배치 정규화를 적용합니다. __Arguments__ - __x__: Input 텐서 또는 변수. - __gamma__: 입력 스케일링에 사용되는 텐서. - __beta__: 입력을 중앙에 위치시키는 텐서. - __reduction_axes__: 정수 반복가능, 정규화 할 축. - __epsilon__: 퍼지 상수. __Returns__ `(normalized_tensor, mean, variance)` 인자의 튜플 길이. ---- ### batch_normalization ```python keras.backend.batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001) ``` 평균, 분산, 베타와 감마가 주어진 x에 대해 배치 정규화를 수행합니다. I.e. returns: `output = (x - mean) / sqrt(var + epsilon) * gamma + beta` __Arguments__ - __x__: 입력 텐서 또는 변수. - __mean__: 배치의 평균 - __var__: 배치의 분산 - __beta__: 입력을 중앙에 위치시키는 텐서. - __gamma__: 입력 스케일링에 의한 텐서. - __axis__: Integer, 정규화 시켜야 하는 축. (typically the features axis). - __epsilon__: 퍼지 상수. __Returns__ 텐서. ---- ### concatenate ```python keras.backend.concatenate(tensors, axis=-1) ``` 지정된 축에따른 텐서의 리스트 연결. __Arguments__ - __tensors__: 연결 할 텐서의 목록. - __axis__: 연결 축. __Returns__ 텐서. 
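A small worked example of `concatenate`, in the same doctest style as the entries above (the input values are arbitrary):

```python
>>> from keras import backend as K
>>> a = K.constant([[1, 2], [3, 4]])
>>> b = K.constant([[5, 6], [7, 8]])
>>> K.eval(K.concatenate([a, b], axis=-1))
array([[1., 2., 5., 6.],
       [3., 4., 7., 8.]], dtype=float32)
```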
---- ### reshape ```python keras.backend.reshape(x, shape) ``` 텐서를 지정한 형식으로 다시 재정의 합니다. __Arguments__ - __x__: 텐서 또는 변수. - __shape__: 대상이 되는 형식튜플. __Returns__ 텐서. ---- ### permute_dimensions ```python keras.backend.permute_dimensions(x, pattern) ``` 텐서의 축을 치환합니다. __Arguments__ - __x__: 텐서 또는 변수. - __pattern__: `(0, 2, 1)`처럼 인덱스들의 차원의 튜플. __Returns__ 텐서. ---- ### resize_images ```python keras.backend.resize_images(x, height_factor, width_factor, data_format, interpolation='nearest') ``` 4차원 텐서에 포함된 이미지들을 재조정합니다. __Arguments__ - __x__: 텐서 또는 변수. to resize. - __height_factor__: 양의 정수. - __width_factor__: 양의 정수. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. - __interpolation__: <sag>string</sag>, `nearest` 또는 `bilinear` 중 하나. __Returns__ 텐서. __Raises__ - __ValueError__: `data_format`이면 'channels_last' 또는 'channels_first' 모두 아니다. ---- ### resize_volumes ```python keras.backend.resize_volumes(x, depth_factor, height_factor, width_factor, data_format) ``` 5차원 텐서에 포함된 볼륨 크기 조정. __Arguments__ - __x__: 텐서 또는 변수. to resize. - __depth_factor__: 양의 정수. - __height_factor__: 양의 정수. - __width_factor__: 양의 정수. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. __Returns__ 한 개의 텐서. __Raises__ - __ValueError__: `data_format`이면 'channels_last' 또는 'channels_first' 모두 아니다. ---- ### repeat_elements ```python keras.backend.repeat_elements(x, rep, axis) ``` `np.repeat`처럼 축을따라 텐서의 요소를 반복. `x` 형식이 `(s1, s2, s3)` 이고, `axis` 이 `1`이면, 출력형식은 (s1, s2 * rep, s3)`입니다. __Arguments__ - __x__: 텐서 또는 변수. - __rep__: <sag>integer</sag>, 반복횟수. - __axis__: 반복 할 축 __Returns__ 한 개의 텐서. ---- ### repeat ```python keras.backend.repeat(x, n) ``` 2차원 텐서를 반복합니다. 만약 x가 (samples, dim)형식이고 'n'이 2라면, 출력값은 형식이 (samples, 2, dim)가 됩니다. __Arguments__ - __x__: 텐서 또는 변수. - __n__: <sag>integer</sag>, 반복횟수. __Returns__ 텐서. ---- ### arange ```python keras.backend.arange(start, stop=None, step=1, dtype='int32') ``` 정수 시퀀스를 포함하는 1D 텐서를 생성합니다. 함수 인자는 "Theano 's arange (단 하나의 인수 만 제공되면 실제로 "stop"인수이고 "start"는 0입니다.)"와 같은 규칙을 사용합니다. 반환 된 텐서의 기본 타입은` 'int32'`입니다. TensorFlow의 기본값과 일치합니다. __Arguments__ - __start__: 시작 값. - __stop__: 정지 값. - __step__: 두 개의 연속적인 값의 차이. - __dtype__: <sag>Integer</sag> dtype __Returns__ 정수형 텐서. ---- ### tile ```python keras.backend.tile(x, n) ``` x를 n으로 나열하여 생성합니다. __Arguments__ - __x__: 텐서 또는 배열. - __n__: <sag>integer</sag>의 리스트. x의 차원의 갯수와 그 길이가 같다. __Returns__ 나열된 텐서. ---- ### flatten ```python keras.backend.flatten(x) ``` 텐서를 합쳐서 나열합니다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서를 1차원으로 형식을 재구성하여 나열합니다. ---- ### batch_flatten ```python keras.backend.batch_flatten(x) ``` n차원 텐서를 같은 0차원의 2차원 텐서로 변형합니다. 즉, 배치의 각 데이터 샘플을 위 차원의 변형에 맞게 변환합니다. __Arguments__ - __x__: 텐서 또는 변수. __Returns__ 텐서. ---- ### expand_dims ```python keras.backend.expand_dims(x, axis=-1) ``` 축의 인덱스값에 1만큼의 차원을 더한다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: 새로운 축을 추가한 위치. __Returns__ 확장한 차원들의 텐서. ---- ### squeeze ```python keras.backend.squeeze(x, axis) ``` 축의 인덱스 값에 해당하는 텐서를 1차원의 크기만큼 제거합니다. __Arguments__ - __x__: 텐서 또는 변수. - __axis__: 없앨 축. __Returns__ 줄어든 차원의 x와 동일한 데이터를 가지는 텐서. ---- ### temporal_padding ```python keras.backend.temporal_padding(x, padding=(1, 1)) ``` 3차원 텐서의 중간차원을 채웁니다. __Arguments__ - __x__: 텐서 또는 변수. - __padding__: 2 <sag>integers</sag>의 튜플, 차원 1의 시작과 끝에 얼마나 많은 0을 추가할 지에 대한 수치. __Returns__ 3차원 텐서를 채워 넣습니다. ---- ### spatial_2d_padding ```python keras.backend.spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None) ``` 4차원 텐서에서 2차원과 3차원을 채워 넣습니다. 
__Arguments__ - __x__: 텐서 또는 변수. - __padding__: 2 튜플들의 튜플, 채워진 패턴. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. __Returns__ 채워진 4차원 텐서. __Raises__ - __ValueError__: `data_format`이면 'channels_last' 또는 'channels_first' 모두 아니다. ---- ### spatial_3d_padding ```python keras.backend.spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None) ``` 깊이, 높이, 너비 치수를 따라 0으로 채워진 5차원 텐서를 채웁니다. "padding [0]", "padding [1]"및 "padding [2]"는 왼쪽과 오른쪽으로 0인 치수를 각각 채 웁니다. 'channels_last'data_format의 경우 2 차원, 3 차원 및 4 차원이 채워집니다. 'channels_first'data_format의 경우 3 차원, 4 차원 및 5 차원이 채워집니다. __Arguments__ - __x__: 텐서 또는 변수. - __padding__: 3 튜플들의 튜플, 채워진 패턴. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. __Returns__ 채워진 5차원 텐서. __Raises__ - __ValueError__: `data_format`이면 'channels_last' 또는 'channels_first' 모두 아니다. ---- ### stack ```python keras.backend.stack(x, axis=0) ``` 랭크`R` 텐서의 <sag>list</sag>를 랭크`R + 1` 텐서에 쌓습니다. __Arguments__ - __x__: 텐서들의 <sag>list</sag> - __axis__: 텐서를 쌓을 축. __Returns__ 텐서. __Numpy implementation__ ```python def stack(x, axis=0): return np.stack(x, axis=axis) ``` ---- ### one_hot ```python keras.backend.one_hot(indices, num_classes) ``` 정수형 텐서의 원핫 표기를 계산합니다. __Arguments__ - __indices__: `(batch_size, dim1, dim2, ... dim(n-1))`형식의 n차원 정수형 텐서. - __num_classes__: <sag>integer</sag>, 클래스들의 갯수. __Returns__ `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`형식의 입력값의 (n+1)차원의 원핫 표현형식. ---- ### reverse ```python keras.backend.reverse(x, axes) ``` 지정된 축을 따라 텐서를 반전시킵니다. __Arguments__ - __x__: 텐서를 반전시킨다. - __axes__: 축이 반전된, 정수형 또는 반복 가능한 정수. __Returns__ 텐서. __Numpy implementation__ ```python def reverse(x, axes): if isinstance(axes, list): axes = tuple(axes) return np.flip(x, axes) ``` ---- ### slice ```python keras.backend.slice(x, start, size) ``` 텐서에서 슬라이스를 추출합니다. __Arguments__ - __x__: 입력 텐서. - __start__: 각 축에 따라 슬라이스의 시작 인덱스를 나타내는 텐서 또는 <sag>integer</sag>리스트/튜플 자료형. - __size__: 각 축을 따라 슬라이스 할 차원의 수를 나타내는 텐서 또는 <sag>integer</sag>리스트/튜플 자료형. __Returns__ A sliced tensor: ```python new_x = x[start[0]: start[0] + size[0], ..., start[-1]: start[-1] + size[-1]] ``` __Numpy implementation__ ```python def slice(x, start, size): slices = [py_slice(i, i + j) for i, j in zip(start, size)] return x[tuple(slices)] ``` ---- ### get_value ```python keras.backend.get_value(x) ``` Returns the value of a variable. __Arguments__ - __x__: 입력 변수. __Returns__ 넘파이 배열. ---- ### batch_get_value ```python keras.backend.batch_get_value(ops) ``` 한 가지 이상의 텐서 변수의 값을 반환합니다. __Arguments__ - __ops__: 실행할 ops 목록. __Returns__ 넘파이 배열 리스트. ---- ### set_value ```python keras.backend.set_value(x, value) ``` 넘파이 배열에서 변수의 값을 설정합니다. __Arguments__ - __x__: 새로운 값으로 설정하는 텐서. - __value__: 넘파이 배열로 텐서를 설정하는 값. ---- ### batch_set_value ```python keras.backend.batch_set_value(tuples) ``` 한번에 밚은 텐서 변수들의 값을 설정합니다. __Arguments__ - __tuples__: `(tensor, value)` 튜플 리스트, <sag>value</sag>인자는 넘파이 배열이어야 합니다. ---- ### print_tensor ```python keras.backend.print_tensor(x, message='') ``` 평가시 <sag>message</sag>와 텐서 값을 출력합니다. `print_tensor`는 `x`와 동일한 새로운 텐서를 반환합니다. 이 코드는 반드시 다음코드에 사용해야 합니다. 그렇지 않으면 평가 중 프린트 연산이 고려되지 않습니다. __Example__ ```python >>> x = K.print_tensor(x, message="x is: ") ``` __Arguments__ - __x__: 출력 할 텐서. - __message__: 텐서와 함께 출력 할 메시지. __Returns__ 변경되지 않은 같은 텐서 `x`. ---- ### function ```python keras.backend.function(inputs, outputs, updates=None) ``` 케라스 함수 인스턴스화하기. __Arguments__ - __inputs__: 플레이스홀더 텐서의 리스트. - __outputs__: 출력 텐서의 리스트. - __updates__: 업데이트 연산의 리스트. 
- __**kwargs__: `tf.Session.run`에 전달되는 값. __Returns__ 넘파이 배열의 값 출력. __Raises__ - __ValueError__: 유효하지 않은 kwargs 가 전달된 경우. ---- ### gradients ```python keras.backend.gradients(loss, variables) ``` 변수에 대한 손실의 그라디언트를 반환합니다. __Arguments__ - __loss__: 최소화시킨 스칼라값 텐서. - __variables__: 변수들의 리스트. __Returns__ 그라디언트 텐서. ---- ### stop_gradient ```python keras.backend.stop_gradient(variables) ``` 모든 다른 변수에 대한 0 그라디언트 'variables'를 반환합니다. __Arguments__ - __variables__: 또 다른 변수에 대한 상수를 고려한 텐서 또는 텐서의 리스트. __Returns__ 전달받은 인자에 따른 또 다른 변수에 대한 상수 그라디언트를 가진 텐서 또는 텐서의 리스트. ---- ### rnn ```python keras.backend.rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None) ``` 텐서의 시간 차원에 대한 반복. __Arguments__ - __step_function__: 매개변수: inputs: 시간 차원이 없고 형식이 있는 텐서. 어떤 시간 단계의 배치에 관한 입력값을 나타냅니다. state: 텐서의 리스트. 반환값: outputs: 시간 차원이 없고 형식이 있는 텐서. new_states: 'states'의 형식과 같은 길이의 텐서 리스트. - __inputs__: 적어도 3차원인 형식의 일시적인 데이터의 텐서 (samples, time, ...) - __initial_states__: 단계함수에서 사용된 상태의 초기 값을 포함한 시간 차원이 없고 형식이 있는 텐서. - __go_backwards__: <sag>boolean</sag> 만약 True라면 그 시간동안 반복한다. 뒤집힌 순서를 반환하며 뒤집힌 순서의 차원이다. - __mask__: (samples, time)형식을 가진 이진 텐서. 마스크의 모든 요소에 0 포함. - __constants__: 각 단계에 전달된 상수 값 리스트. - __unroll__: RNN을 사용하거나 기호 루프를 사용할지에 대한 여부. (백엔드에 따라 `while_loop` 또는 `scan`) - __input_length__: 입력 시, 시간단계의 <sag>static</sag>숫자. __Returns__ A tuple, `(last_output, outputs, new_states)`. last_output: `(samples, ...)` 형식의, rnn의 최근 출력값. outputs: `(samples, time, ...)` 형식이 있는 텐서 의 각 `outputs[s, t]`요소는 's'샘플에 대한 't'시간에 대한 단계 함수의 출력요소 입니다. new_states: `(samples, ...)`형식의 단계함수로 반환된 최근 상태의 텐서 리스트. __Raises__ - __ValueError__: 입력 차원이 3보다 작은 경우. - __ValueError__: `unroll`이 `True`인 경우. 입력 시간 단계는 고정이 아님. - __ValueError__: `mask` 가 존재하면 (not `None`) 상태는 (`len(states)` == 0). __Numpy implementation__ <details> <summary>Show the Numpy implementation</summary> ```python def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): if constants is None: constants = [] output_sample, _ = step_function(inputs[:, 0], initial_states + constants) if mask is not None: if mask.dtype != np.bool: mask = mask.astype(np.bool) if mask.shape != inputs.shape[:2]: raise ValueError( 'mask should have `shape=(samples, time)`, ' 'got {}'.format(mask.shape)) def expand_mask(mask_, x): # expand mask so that `mask[:, t].ndim == x.ndim` while mask_.ndim < x.ndim + 1: mask_ = np.expand_dims(mask_, axis=-1) return mask_ output_mask = expand_mask(mask, output_sample) states_masks = [expand_mask(mask, state) for state in initial_states] if input_length is None: input_length = inputs.shape[1] assert input_length == inputs.shape[1] time_index = range(input_length) if go_backwards: time_index = time_index[::-1] outputs = [] states_tm1 = initial_states # tm1 means "t minus one" as in "previous timestep" output_tm1 = np.zeros(output_sample.shape) for t in time_index: output_t, states_t = step_function(inputs[:, t], states_tm1 + constants) if mask is not None: output_t = np.where(output_mask[:, t], output_t, output_tm1) states_t = [np.where(state_mask[:, t], state_t, state_tm1) for state_mask, state_t, state_tm1 in zip(states_masks, states_t, states_tm1)] outputs.append(output_t) states_tm1 = states_t output_tm1 = output_t return outputs[-1], np.stack(outputs, axis=1), states_tm1 ``` </details> ---- ### switch ```python keras.backend.switch(condition, then_expression, else_expression) ``` 스칼라 값에 따라 두 연산사이를 전환합니다. 
`then_expression`과 `else_expression`은 모두 동일한 형식의 심볼릭 텐서여야 합니다.

__Arguments__

- __condition__: 텐서 (<sag>int</sag> 또는 <sag>bool</sag>).
- __then_expression__: 텐서 또는 텐서를 반환하는 호출가능한 값.
- __else_expression__: 텐서 또는 텐서를 반환하는 호출가능한 값.

__Returns__

선택된 텐서.

__Raises__

- __ValueError__: `condition`의 랭크가 두 표현식의 랭크보다 큰 경우.

__Numpy implementation__

```python
def switch(condition, then_expression, else_expression):
    cond_float = condition.astype(floatx())
    while cond_float.ndim < then_expression.ndim:
        cond_float = cond_float[..., np.newaxis]
    return cond_float * then_expression + (1 - cond_float) * else_expression
```

----

### in_train_phase

```python
keras.backend.in_train_phase(x, alt, training=None)
```

학습 단계에서는 `x`를 선택하고, 그렇지 않으면 `alt`를 선택합니다.

`alt`는 `x`와 *동일한 형식*을 가져야 합니다.

__Arguments__

- __x__: 학습 단계에서 반환할 값 (텐서 또는 텐서를 반환하는 호출가능한 값).
- __alt__: 그 외의 경우 반환할 값 (텐서 또는 텐서를 반환하는 호출가능한 값).
- __training__: 학습 단계를 지정한 선택적 스칼라 텐서 (<sag>Python boolean</sag> 또는 <sag>Python integer</sag>).

__Returns__

플래그에 기반한 `x` 또는 `alt`. `training` 플래그는 기본적으로 `K.learning_phase()`입니다.

----

### in_test_phase

```python
keras.backend.in_test_phase(x, alt, training=None)
```

테스트 단계에서는 `x`를 선택하고, 그렇지 않으면 `alt`를 선택합니다.

`alt`는 `x`와 *동일한 형식*을 가져야 합니다.

__Arguments__

- __x__: 테스트 단계에서 반환할 값 (텐서 또는 텐서를 반환하는 호출가능한 값).
- __alt__: 그 외의 경우 반환할 값 (텐서 또는 텐서를 반환하는 호출가능한 값).
- __training__: 학습 단계를 지정한 선택적 스칼라 텐서 (<sag>Python boolean</sag> 또는 <sag>Python integer</sag>).

__Returns__

`learning_phase()`에 기반한 `x` 또는 `alt`.

----

### relu

```python
keras.backend.relu(x, alpha=0.0, max_value=None, threshold=0.0)
```

정류 선형 유닛(Rectified Linear Unit)입니다.

기본값으로, 요소별 `max(x, 0)`를 반환합니다.

그 외에는 다음과 같습니다:
`x >= max_value`이면 `f(x) = max_value`,
`threshold <= x < max_value`이면 `f(x) = x`,
그 외의 경우 `f(x) = alpha * (x - threshold)`.

__Arguments__

- __x__: 텐서 또는 변수.
- __alpha__: 음수 구간의 기울기를 나타내는 스칼라 (기본값=`0.`).
- __max_value__: <sag>float</sag>, 포화 상태의 임계값.
- __threshold__: <sag>float</sag>, 활성화 임계값.

__Returns__

텐서.

__Numpy implementation__

```python
def relu(x, alpha=0., max_value=None, threshold=0.):
    if max_value is None:
        max_value = np.inf
    above_threshold = x * (x >= threshold)
    above_threshold = np.clip(above_threshold, 0.0, max_value)
    below_threshold = alpha * (x - threshold) * (x < threshold)
    return below_threshold + above_threshold
```

----

### elu

```python
keras.backend.elu(x, alpha=1.0)
```

지수 선형 유닛(Exponential Linear Unit)입니다.

__Arguments__

- __x__: 활성화 함수를 계산할 텐서 또는 변수입니다.
- __alpha__: 음수 구간의 기울기를 나타내는 스칼라.

__Returns__

텐서.

__Numpy implementation__

```python
def elu(x, alpha=1.):
    return x * (x > 0) + alpha * (np.exp(x) - 1.) * (x < 0)
```

----

### softmax

```python
keras.backend.softmax(x, axis=-1)
```

텐서의 Softmax.

__Arguments__

- __x__: 텐서 또는 변수.
- __axis__: softmax를 수행할 차원. 기본값은 -1이며 마지막 차원을 나타냅니다.

__Returns__

텐서.

__Numpy implementation__

```python
def softmax(x, axis=-1):
    y = np.exp(x - np.max(x, axis, keepdims=True))
    return y / np.sum(y, axis, keepdims=True)
```

----

### softplus

```python
keras.backend.softplus(x)
```

텐서의 Softplus.

__Arguments__

- __x__: 텐서 또는 변수.

__Returns__

텐서.

__Numpy implementation__

```python
def softplus(x):
    return np.log(1. + np.exp(x))
```

----

### softsign

```python
keras.backend.softsign(x)
```

텐서의 Softsign.

__Arguments__

- __x__: 텐서 또는 변수.

__Returns__

텐서.

__Numpy implementation__

```python
def softsign(x):
    return x / (1 + np.abs(x))
```

----

### categorical_crossentropy

```python
keras.backend.categorical_crossentropy(target, output, from_logits=False, axis=-1)
```

출력 텐서와 목표 텐서 사이의 범주형 크로스엔트로피.

__Arguments__

- __target__: `output`과 같은 모양의 텐서.
- __output__: softmax의 결과 텐서 (단, `from_logits`가 True이면 `output`은 로짓(logits)이어야 합니다).
- __from_logits__: <sag>boolean</sag>, `output`이 로짓 텐서인지 softmax의 결과인지를 나타냅니다.
- __axis__: 채널 축을 지정합니다. `axis=-1`은 `channels_last` 데이터 형식에, `axis=1`은 `channels_first` 데이터 형식에 해당합니다.

__Returns__

출력 텐서.

__Raises__

- __ValueError__: `axis`가 -1도 아니고 `output`의 축 중 하나도 아닌 경우.

----

### sparse_categorical_crossentropy

```python
keras.backend.sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1)
```

정수 목표를 가진 범주형 크로스엔트로피.

__Arguments__

- __target__: 정수형 텐서.
- __output__: softmax의 결과로 나온 텐서 (단, `from_logits`가 True이면 `output`은 로짓(logits)이어야 합니다).
- __from_logits__: <sag>boolean</sag>, `output`이 로짓 텐서인지 softmax의 결과인지를 나타냅니다.
- __axis__: 채널 축을 지정하는 정수. `axis=-1`은 `channels_last` 데이터 형식에, `axis=1`은 `channels_first` 데이터 형식에 해당합니다.

__Returns__

텐서.

__Raises__

- __ValueError__: `axis`가 -1도 아니고 `output`의 축 중 하나도 아닌 경우.

----

### binary_crossentropy

```python
keras.backend.binary_crossentropy(target, output, from_logits=False)
```

출력 텐서와 목표 텐서 사이의 이진 크로스엔트로피.

__Arguments__

- __target__: `output`과 같은 형식의 텐서.
- __output__: 텐서.
- __from_logits__: `output`이 로짓 텐서인지에 대한 여부. 기본적으로 `output`은 확률 분포를 나타냅니다.

__Returns__

텐서.

----

### sigmoid

```python
keras.backend.sigmoid(x)
```

요소별 sigmoid.

__Arguments__

- __x__: 텐서 또는 변수.

__Returns__

텐서.

__Numpy implementation__

```python
def sigmoid(x):
    return 1. / (1. + np.exp(-x))
```

----

### hard_sigmoid

```python
keras.backend.hard_sigmoid(x)
```

구간별 선형 함수로 sigmoid를 근사합니다. sigmoid보다 더 빠릅니다.

`x < -2.5`이면 `0.`을, `x > 2.5`이면 `1.`을 반환합니다.
`-2.5 <= x <= 2.5`에서는 `0.2 * x + 0.5`를 반환합니다.

__Arguments__

- __x__: 텐서 또는 변수.

__Returns__

텐서.

__Numpy implementation__

```python
def hard_sigmoid(x):
    y = 0.2 * x + 0.5
    return np.clip(y, 0, 1)
```

----

### tanh

```python
keras.backend.tanh(x)
```

요소별 tanh.

__Arguments__

- __x__: 텐서 또는 변수.

__Returns__

텐서.

__Numpy implementation__

```python
def tanh(x):
    return np.tanh(x)
```

----

### dropout

```python
keras.backend.dropout(x, level, noise_shape=None, seed=None)
```

전체 텐서를 스케일링하면서 `x`의 항목을 무작위로 0으로 설정합니다.

__Arguments__

- __x__: 텐서.
- __level__: 0으로 설정될 텐서 항목의 비율.
- __noise_shape__: 무작위로 생성되는 유지/삭제 플래그의 형식. `x`의 형식으로 브로드캐스트 가능해야 합니다.
- __seed__: 결정성을 보장하기 위한 난수 시드.

__Returns__

텐서.

__Numpy implementation__

<details>
<summary>Show the Numpy implementation</summary>

```python
def dropout(x, level, noise_shape=None, seed=None):
    if noise_shape is None:
        noise_shape = x.shape
    if learning_phase():
        noise = np.random.choice([0, 1],
                                 noise_shape,
                                 replace=True,
                                 p=[level, 1 - level])
        return x * noise / (1 - level)
    else:
        return x
```

</details>

----

### l2_normalize

```python
keras.backend.l2_normalize(x, axis=None)
```

지정된 축을 따라 L2 norm으로 텐서를 정규화합니다.

__Arguments__

- __x__: 텐서 또는 변수.
- __axis__: 정규화를 수행할 축.

__Returns__

텐서.

__Numpy implementation__

```python
def l2_normalize(x, axis=-1):
    y = np.max(np.sum(x ** 2, axis, keepdims=True), axis, keepdims=True)
    return x / np.sqrt(y)
```

----

### in_top_k

```python
keras.backend.in_top_k(predictions, targets, k)
```

`targets`이 최상위 `k`개의 `predictions`에 있는지를 반환합니다.

__Arguments__

- __predictions__: `float32` 타입과 `(batch_size, classes)` 형식의 텐서.
- __targets__: 길이가 `batch_size`이고 타입이 `int32` 또는 `int64`인 1차원 텐서.
- __k__: <sag>int</sag>, 고려할 최상위 요소의 수.

__Returns__

길이가 `batch_size`이고 타입이 `bool`인 1차원 텐서.
`predictions[i, targets[i]]`이 상위 `k`개 안에 있으면 `output[i]`은 `True`입니다. 비교 대상은
`predictions[i]'의 값. ---- ### conv1d ```python keras.backend.conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1) ``` 1D convolution. __Arguments__ - __x__: 텐서 또는 변수. - __kernel__: 커널 텐서. - __strides__: 정수형 스트라이드. - __padding__: <sag>string</sag>, `"same"`, `"causal"` or `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` or `"channels_first"`. - __dilation_rate__: 정수 확장 비율. __Returns__ 1차원 컨볼루션 연산 결과, 텐서 값. __Raises__ - __ValueError__:`data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### conv2d ```python keras.backend.conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)) ``` 2차원 컨볼루션. __Arguments__ - __x__: 텐서 또는 변수. - __kernel__: 커널 텐서. - __strides__: 스트라이드 튜플. - __padding__: <sag>string</sag>, `"same"` or `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` or `"channels_first"`. inputs/kernels/outputs에 대한 Theano 또는 TensorFlow/CNTK데이터 형식을 사용할 여부. - __dilation_rate__: 2 integers의 튜플. __Returns__ 텐서, 2차원 컨볼루션 연산 결과. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### conv2d_transpose ```python keras.backend.conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)) ``` 2차원의 트렌스포즈된 컨볼루션 연산을 수행합니다. __Arguments__ - __x__: 텐서 또는 변수. - __kernel__: 커널 텐서. - __output_shape__: 1D int tensor 출력 형식에 대해 1차원 <sag>int</sag>텐서 - __strides__: 스트라이드 튜플. - __padding__: <sag>string</sag>, `"same"` 또는 `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. inputs/kernels/outputs에 대한 Theano 또는 TensorFlow/CNTK 데이터 형태 - __dilation_rate__: 2 <sag>integers</sag>의 튜플. __Returns__ 2차원의 트렌스포즈된 컨볼루션 결과, 텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### separable_conv1d ```python keras.backend.separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1) ``` 분리가능한 필터와 1차원 컨볼루션 연산. __Arguments__ - __x__: input tensor - __depthwise_kernel__: 깊이 컨볼루션을 위한 컨볼루션 커널. - __pointwise_kernel__: 1x1 컨볼루션에 대한 커널. - __strides__: 스트라이드 정수형. - __padding__: <sag>string</sag>, `"same"` or `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` or `"channels_first"`. - __dilation_rate__: integer dilation rate. __Returns__ 출력 텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### separable_conv2d ```python keras.backend.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)) ``` 분리가능한 필터와 2차원 컨볼루션 연산. __Arguments__ - __x__: input tensor - __depthwise_kernel__: 깊이 컨볼루션을 위한 컨볼루션 커널. - __pointwise_kernel__: 1x1 컨볼루션에 대한 커널. - __strides__: strides tuple (length 2). - __padding__: <sag>string</sag>, `"same"` or `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` or `"channels_first"`. - __dilation_rate__: integers의 튜플, 분리가능한 컨볼루션의 팽창률. __Returns__ 출력 텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### depthwise_conv2d ```python keras.backend.depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)) ``` 분리가능한 필터로 2차원 컨볼루션 연산. __Arguments__ - __x__: input tensor - __depthwise_kernel__: 깊이 별 컨볼루션 연산을 위한 컨볼루션 커널. - __strides__: strides tuple (length 2). - __padding__: string, `"same"` or `"valid"`. 
- __data_format__: string, `"channels_last"` or `"channels_first"`. - __dilation_rate__: integers의 튜플, 분리가능한 컨볼루션의 팽창률. __Returns__ 출력텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### conv3d ```python keras.backend.conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)) ``` 3차원 컨볼루션 연산. __Arguments__ - __x__: 텐서 또는 변수. - __kernel__: 커널 텐서. - __strides__: 스트라이드 튜플. - __padding__: <sag>string</sag>, `"same"` 또는 `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. inputs/kernels/outputs에 대한 Theano 또는 TensorFlow/CNTK 데이터 형태 - __dilation_rate__: 2 <sag>integers</sag>의 튜플. __Returns__ 텐서, 3차원 컨볼루션 연산 결과. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### conv3d_transpose ```python keras.backend.conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None) ``` 3차원 트렌스포즈 컨볼루션. __Arguments__ - __x__: 텐서 또는 변수. - __kernel__: 커널 텐서. - __output_shape__: 결과값 형식에 대한 1차원 정수형 텐서. - __strides__: 스트라이드 튜플. - __padding__: <sag>string</sag>, `"same"` 또는 `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. inputs/kernels/outputs에 대한 Theano 또는 TensorFlow/CNTK 데이터 형태 - __dilation_rate__: 2 <sag>integers</sag>의 튜플. __Returns__ 트렌스포즈된 3차원 컨볼루션 연산결과 텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. ---- ### pool2d ```python keras.backend.pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max') ``` 2차원 풀링연산. __Arguments__ - __x__: 텐서 또는 변수. - __pool_size__: 2 <sag>integers</sag>의 튜플. - __strides__: 2 <sag>integers</sag>의 튜플. - __padding__: <sag>string</sag>, `"same"` 또는 `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. - __pool_mode__: <sag>string</sag>, `"max"` `"avg"`. __Returns__ 2차원 풀링 연산 결과값의 텐서. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. - __ValueError__: 만약 `pool_mode` 라면 `"max"` 또는 `"avg"` 둘 다 아니다. ---- ### pool3d ```python keras.backend.pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max') ``` 3D Pooling. __Arguments__ <sag> - __x__: 텐서 또는 변수. - __pool_size__: 3 <sag>integers</sag>의 튜플. - __strides__: 3 <sag>integers</sag>의 튜플. - __padding__: <sag>string</sag>, `"same"` 또는 `"valid"`. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. - __pool_mode__: <sag>string</sag>, `"max"` 또는 `"avg"`. __Returns__ 텐서, 3차원 풀링 결과. __Raises__ - __ValueError__: `data_format`이 모두 `"channels_last"` ,`"channels_first"`이 아닐 때. - __ValueError__: 만약 `pool_mode` 라면 `"max"` 또는 `"avg"` 둘 다 아니다. ---- ### bias_add ```python keras.backend.bias_add(x, bias, data_format=None) ``` 텐서에 대한 바이어스 벡터 추가. __Arguments__ - __x__: 텐서 또는 변수. - __bias__: 추가 할 바이어스 텐서. - __data_format__: <sag>string</sag>, `"channels_last"` 또는 `"channels_first"`. __Returns__ 결과 텐서. __Raises__ ValueError : 아래 두 경우 중 하나에서 : 1. 유효하지 않은`data_format` 인수. 2. 잘못된 편향 모양. 편향은 벡터이거나 ndim (x)-1 차원의 텐서. 
__Numpy implementation__

<details>
<summary>Show the Numpy implementation</summary>

```python
def bias_add(x, y, data_format):
    if data_format == 'channels_first':
        if y.ndim > 1:
            y = np.reshape(y, y.shape[::-1])
        for _ in range(x.ndim - y.ndim - 1):
            y = np.expand_dims(y, -1)
    else:
        for _ in range(x.ndim - y.ndim - 1):
            y = np.expand_dims(y, 0)
    return x + y
```

</details>

----

### random_normal

```python
keras.backend.random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```

정규분포를 따르는 값의 텐서를 반환합니다.

__Arguments__

- __shape__: <sag>integers</sag>의 튜플, 생성할 텐서의 형식.
- __mean__: <sag>float</sag>, 샘플을 추출할 정규 분포의 평균.
- __stddev__: <sag>float</sag>, 샘플을 추출할 정규 분포의 표준편차.
- __dtype__: <sag>string</sag>, 반환된 텐서의 dtype.
- __seed__: <sag>Integer</sag>, 난수 시드.

__Returns__

텐서.

----

### random_uniform

```python
keras.backend.random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None)
```

균등분포를 따르는 값의 텐서를 반환합니다.

__Arguments__

- __shape__: <sag>integers</sag>의 튜플, 생성할 텐서의 형식.
- __minval__: <sag>float</sag>, 샘플을 추출할 균등 분포의 하한.
- __maxval__: <sag>float</sag>, 샘플을 추출할 균등 분포의 상한.
- __dtype__: <sag>string</sag>, 반환된 텐서의 dtype.
- __seed__: <sag>Integer</sag>, 난수 시드.

__Returns__

텐서.

----

### random_binomial

```python
keras.backend.random_binomial(shape, p=0.0, dtype=None, seed=None)
```

임의의 이항 분포를 따르는 값의 텐서를 반환합니다.

__Arguments__

- __shape__: <sag>integers</sag>의 튜플, 생성할 텐서의 형식.
- __p__: <sag>float</sag>, `0. <= p <= 1` 범위의 이항 분포의 확률.
- __dtype__: <sag>string</sag>, 반환된 텐서의 dtype.
- __seed__: <sag>Integer</sag>, 난수 시드.

__Returns__

텐서.

----

### truncated_normal

```python
keras.backend.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```

잘린 정규분포<sag>truncated normal</sag>를 따르는 임의의 값의 텐서를 반환합니다.

생성된 값은 지정된 평균과 표준편차를 가진 정규분포를 따르지만, 평균으로부터 두 표준편차 이상 벗어난 값은 제거되고 다시 샘플링됩니다.

__Arguments__

- __shape__: <sag>integers</sag>의 튜플, 생성할 텐서의 형식.
- __mean__: 값들의 평균.
- __stddev__: 값들의 표준편차.
- __dtype__: <sag>string</sag>, 반환된 텐서의 dtype.
- __seed__: <sag>Integer</sag>, 난수 시드.

__Returns__

텐서.

----

### ctc_label_dense_to_sparse

```python
keras.backend.ctc_label_dense_to_sparse(labels, label_lengths)
```

CTC 레이블을 <sag>dense</sag>에서 <sag>sparse</sag>로 변환합니다.

__Arguments__

- __labels__: <sag>dense</sag> CTC 레이블.
- __label_lengths__: 레이블의 길이.

__Returns__

레이블의 희소 텐서 표현.

----

### ctc_batch_cost

```python
keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
```

각 배치 요소에 대해 CTC 손실 알고리즘을 수행합니다.

__Arguments__

- __y_true__: truth 레이블을 포함한 `(samples, max_string_length)` 텐서.
- __y_pred__: softmax의 출력 또는 예측값을 포함한 `(samples, time_steps, num_categories)` 텐서.
- __input_length__: `y_pred`의 각 배치 항목의 시퀀스 길이를 포함하는 `(samples, 1)` 텐서.
- __label_length__: `y_true`의 각 배치 항목의 시퀀스 길이를 포함하는 `(samples, 1)` 텐서.

__Returns__

각 요소의 CTC 손실값을 포함하는 `(samples, 1)` 형식의 텐서.

----

### ctc_decode

```python
keras.backend.ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1)
```

소프트맥스의 출력을 디코딩합니다.

그리디 탐색(최적 경로 탐색이라고도 함)이나 제한된 딕셔너리 탐색을 사용할 수 있습니다.

__Arguments__

- __y_pred__: 예측을 포함한 `(samples, time_steps, num_categories)` 텐서 또는 소프트맥스의 출력.
- __input_length__: `y_pred`의 각 배치 항목에 대한 시퀀스 길이를 포함한 `(samples, )` 텐서.
- __greedy__: `true`이면 훨씬 더 빠른 최적 경로 탐색을 수행합니다. 딕셔너리를 사용하지 않습니다.
- __beam_width__: `greedy`가 `false`일 때, 이 너비의 beam을 사용하는 beam 탐색 디코더가 사용됩니다.
- __top_paths__: `greedy`가 `false`일 때, 가장 가능성 높은 경로를 몇 개 반환할지 지정합니다.

__Returns__

- __Tuple__:
    List: `greedy`가 `true`이면 디코딩된 시퀀스를 담은 원소 하나짜리 리스트를 반환합니다.
        `false`이면 가능성이 높은 `top_paths`개의 디코딩된 시퀀스를 반환합니다.
        중요: 비어 있는 레이블은 `-1`로 반환됩니다.
        그리고 디코딩된 각 시퀀스의 로그 확률을 포함한 `(top_paths, )` 텐서를 반환합니다.
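다음은 `ctc_decode`의 간단한 사용 예시입니다. 무작위 값을 소프트맥스 출력으로 가정한 스케치이며, 실제 모델의 출력에도 같은 방식으로 적용할 수 있습니다.

```python
import numpy as np
from keras import backend as K

# 가정: (samples=2, time_steps=8, num_categories=5) 형식의 소프트맥스 출력
y_pred = K.variable(np.random.random((2, 8, 5)))
input_length = np.array([8, 8])

decoded, log_prob = K.ctc_decode(y_pred, input_length, greedy=True)
print(K.eval(decoded[0]))  # 디코딩된 레이블 시퀀스 (빈 레이블은 -1)
print(K.eval(log_prob))    # 각 시퀀스의 로그 확률
```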
---- ### map_fn ```python keras.backend.map_fn(fn, elems, name=None, dtype=None) ``` fn 함수를 요소 위에 맵핑하고 출력을 반환합니다. __Arguments__ - __fn__: <sag>elems</sag>에 있는 각 요소에 대해 호출가능. - __elems__: 텐서 - __name__: 그래프에서 맵 노드에 대한 문자열 이름. - __dtype__: 출력 데이터 타입. __Returns__ `dtype`의 텐서. ---- ### foldl ```python keras.backend.foldl(fn, elems, initializer=None, name=None) ``` 왼쪽에서 오른쪽으로 결합하기위해 <sag>fn</sag>을 사용해 요소를 감소시킵니다. __Arguments__ - __fn__: <sag>elems</sag>에서 각 요소에 호출 될 연산기, 예를 들어, `lambda acc, x: acc + x` - __elems__: 텐서 - __initializer__: 사용된 첫 번째 값. (`elems[0]` in case of None) - __name__: 그래프 fodl 노드에 대한 문자열 이름. __Returns__ `initializer` 모양과 같은 타입의 텐서. ---- ### foldr ```python keras.backend.foldr(fn, elems, initializer=None, name=None) ``` <sag>fn</sag>인자를 사용하여 오른쪽에서 왼쪽으로 텐서 요소들을 줄인다. __Arguments__ - __fn__: <sag>elems</sag>에서 호출가능한 각 요소와 누산기. 예를들어, `lambda acc, x: acc + x` - __elems__: 텐서 - __initializer__: 사용된 첫번 째 값 (`elems[-1]` in case of None) - __name__: 그래프에서 <sag>foldr node</sag>의 문자열 이름 __Returns__ `initializer` 모양과 같은 타입의 텐서. ---- ### local_conv1d ```python keras.backend.local_conv1d(inputs, kernel, kernel_size, strides, data_format=None) ``` 공유되지 않은 가중치를 1D 컨볼루션에 적용합니다. __Arguments__ - __inputs__: 3D 텐서의 형식: (batch_size, steps, input_dim) - __kernel__: (output_length, feature_dim, filters)형식의 컨볼루션의 공유되지 않은 가중치. - __kernel_size__: 1d 컨볼루션 윈도우의 길이를 지정한 단일 <sag>integer</sag> 튜플. - __strides__: 컨볼루션의 스타라이드 길이를 지정한 단일 <sag>integer</sag> 튜플. - __data_format__: 데이터 형식, channels_first 또는 channels_last __Returns__ (batch_size, output_length, filters)형식: 공유되지 않은 가중치로 1d 컨볼루션 연산 후의 텐서. __Raises__ - __ValueError__: If `data_format`가 <sag>channels_last</sag> 또는 <sag>channels_first"`이 아닐 때, 오류. ---- ### local_conv2d ```python keras.backend.local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None) ``` 2D 컨볼루션에 공유되지 않은 가중치를 적용합니다. __Arguments__ - __inputs__: data_format='channels_first'일 때, 4D 텐서 형식: (batch_size, filters, new_rows, new_cols) data_format='channels_last'일 때, 4D 텐서 형식: (batch_size, new_rows, new_cols, filters) - __kernel__: (output_items, feature_dim, filters) 형식의 컨볼루션 연산을 위한 공유되지 않은 가중치 - __kernel_size__: 2차원 컨볼루션 윈도우의 너비와 높이를 지정한 2<sag>integers</sag>의 튜플. - __strides__: 2<sag>integers</sag>인 튜플, 너비와 높이에 따른 컨볼루션의 스트라이드를 지정합니다. - __output_shape__: (output_row, output_col)형태의 튜플 - __data_format__: 데이터 형식, 'channels_first' 또는 'channels_last'. __Returns__ 4d 텐서의 형식: data_format='channels_first'일 때, (batch_size, filters, new_rows, new_cols) 4d 텐서의 형식: data_format='channels_last'일 때, (batch_size, new_rows, new_cols, filters) __Raises__ - __ValueError__: <sag>data_format</sag>가 <sag>channels_last</sag> 또는 <sag>channels_first</sag>이 아니었을 때, 오류. ---- ### backend ```python keras.backend.backend() ``` 백엔드를 결정하기 위한 공개접근방식. __Returns__ <sag>string</sag>, 현재 사용 중인 케라스 백엔드 이름. __Example__ ```python >>> keras.backend.backend() 'tensorflow' ```
keras-docs-ko/sources/backend.md/0
{ "file_path": "keras-docs-ko/sources/backend.md", "repo_id": "keras-docs-ko", "token_count": 55571 }
72
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L200)</span> ### Add ```python keras.layers.Add() ``` 입력 리스트에 대해 덧셈을 수행하는 층<sub>layer</sub>입니다. 동일한 형태<sub>shape</sub>의 텐서 리스트를 입력으로 받아, 동일한 형태의 하나의 텐서를 반환합니다. __예시__ ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) # equivalent to added = keras.layers.add([x1, x2]) added = keras.layers.Add()([x1, x2]) out = keras.layers.Dense(4)(added) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L231)</span> ### Subtract ```python keras.layers.Subtract() ``` 입력 리스트에 대해 뺄셈을 수행하는 층입니다. 두 개의 동일한 형태의 텐서로 이루어진 리스트를 입력으로 받아 동일한 형태의 텐서 (inputs[0] - inputs[1]) 한 개를 반환합니다. __예시__ ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) # Equivalent to subtracted = keras.layers.subtract([x1, x2]) subtracted = keras.layers.Subtract()([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L268)</span> ### Multiply ```python keras.layers.Multiply() ``` 입력 리스트에 대해 원소별<sub>element-wise</sub> 곱셈을 수행하는 층입니다. 동일한 형태의 텐서 리스트를 입력으로 받아, 동일한 형태의 하나의 텐서를 반환합니다. ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L283)</span> ### Average ```python keras.layers.Average() ``` 입력 리스트의 평균을 계산하는 층입니다 동일한 형태의 텐서 리스트를 입력으로 받아, 동일한 형태의 하나의 텐서를 반환합니다. ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L298)</span> ### Maximum ```python keras.layers.Maximum() ``` 입력 리스트의 원소별 최댓값을 계산하는 층입니다. 동일한 형태의 텐서 리스트를 입력으로 받아, 동일한 형태의 하나의 텐서를 반환합니다. ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L313)</span> ### Minimum ```python keras.layers.Minimum() ``` 입력 리스트의 원소별 최솟값을 계산하는 층입니다. 동일한 형태의 텐서 리스트를 입력으로 받아, 동일한 형태의 하나의 텐서를 반환합니다. ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L328)</span> ### Concatenate ```python keras.layers.Concatenate(axis=-1) ``` 입력 리스트를 이어 붙이는 층입니다. 동일한 형태(이어 붙이는 축을 제외한)의 텐서 리스트를 입력으로 받아, 모든 입력이 이어 붙여진 하나의 텐서를 반환합니다. __인자__ - __axis__: 이어 붙이기를 수행할 축입니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. ------ <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L416)</span> ### Dot ```python keras.layers.Dot(axes, normalize=False) ``` 두 텐서의 샘플들 간에 내적 연산을 수행하는 층입니다. 만약 형태가 `(batch_size, n)`인 두 텐서 `a`와 `b`를 갖는 리스트에 대해 내적 연산을 수행할 경우, 출력 텐서의 형태는 `(batch_size, 1)`가 됩니다. 이때 출력 텐서의 `i`번째 원소는 `a[i]`와 `b[i]`간 내적 연산의 결과값입니다. __인자__ - __axes__: `int` 또는 `int`로 이루어진 튜플. 내적 연산을 수행할 축을 의미합니다. - __normalize__: `bool`. 내적 연산을 수행하기 전에 샘플들에 L2 정규화<sub>normalization</sub>를 적용할지 결정합니다. `True`로 설정하면, 내적 연산의 결과는 두 샘플들 간의 코사인 유사도와 같습니다. 기본값은 `True`입니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. ------ ### add ```python keras.layers.add(inputs) ``` `Add` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 
최소 2개 이상의 텐서를 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력값의 합을 텐서로 반환합니다. __예시__ ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) added = keras.layers.add([x1, x2]) out = keras.layers.Dense(4)(added) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` ------ ### subtract ```python keras.layers.subtract(inputs) ``` `Subtract` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 정확히 2개의 텐서만을 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 두 입력값의 차를 텐서로 반환합니다. __예시__ ```python import keras input1 = keras.layers.Input(shape=(16,)) x1 = keras.layers.Dense(8, activation='relu')(input1) input2 = keras.layers.Input(shape=(32,)) x2 = keras.layers.Dense(8, activation='relu')(input2) subtracted = keras.layers.subtract([x1, x2]) out = keras.layers.Dense(4)(subtracted) model = keras.models.Model(inputs=[input1, input2], outputs=out) ``` ------ ### multiply ```python keras.layers.multiply(inputs) ``` `Multiply` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력값의 원소별 곱셈의 결과를 텐서로 반환합니다. ------ ### average ```python keras.layers.average(inputs) ``` `Average` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력값의 평균을 텐서로 반환합니다. ------ ### maximum ```python keras.layers.maximum(inputs) ``` `Maximum` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력값의 원소별 최댓값을 텐서로 반환합니다. ------ ### minimum ```python keras.layers.minimum(inputs) ``` `Minimum` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력값의 원소별 최솟값을 텐서로 반환합니다. ------ ### concatenate ```python keras.layers.concatenate(inputs, axis=-1) ``` `Concatenate` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __axis__: 이어 붙이기를 수행할 축입니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ `axis`를 따라 입력값을 이어 붙인 텐서를 반환합니다. ------ ### dot ```python keras.layers.dot(inputs, axes, normalize=False) ``` `Dot` 층의 함수형 인터페이스입니다. __인자__ - __inputs__: 입력 텐서의 리스트입니다. 최소 2개 이상의 텐서를 포함해야 합니다. - __axes__: `int` 또는 `int`로 이루어진 튜플. 내적 연산을 수행할 축을 의미합니다. - __normalize__: `bool`. 내적 연산을 수행하기 전에 샘플들에 L2 정규화를 적용할지 결정합니다. `True`로 설정하면, 내적 연산의 결과는 두 샘플들 간의 코사인 유사도와 같습니다. 기본값은 `True`입니다. - __**kwargs__: 케라스 층에 표준적으로 사용되는 키워드 인자입니다. __반환값__ 입력 샘플들 간에 내적 연산을 수행한 결과를 텐서로 반환합니다.
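
__예시__

다음은 `dot`을 사용해 두 입력 벡터 사이의 코사인 유사도를 계산하는 간단한 예시 스케치입니다(`normalize=True`로 설정).

```python
import keras

input1 = keras.layers.Input(shape=(16,))
input2 = keras.layers.Input(shape=(16,))

# normalize=True 이므로 출력은 두 샘플 간의 코사인 유사도입니다.
similarity = keras.layers.dot([input1, input2], axes=1, normalize=True)

model = keras.models.Model(inputs=[input1, input2], outputs=similarity)
```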
keras-docs-ko/sources/layers/merge.md/0
{ "file_path": "keras-docs-ko/sources/layers/merge.md", "repo_id": "keras-docs-ko", "token_count": 6192 }
73
## Regularizer의 사용법 Regularizer는 최적화 과정 중에 각 층별 파라미터 또는 출력값에 대하여 페널티를 적용할 수 있게 해줍니다. 이러한 페널티는 네트워크가 최적화 하려는 손실 함수의 일부로 포함됩니다. 페널티는 층별로 다르게 적용될 수 있습니다. 정확한 API는 층마다 서로 다르지만, `Dense`, `Conv1D`, `Conv2D` 그리고 `Conv3D`는 동일한 API를 가지고 있습니다. 이 층들은 다음 세 개의 키워드 인자들을 공통적으로 가지고 있습니다. - `kernel_regularizer`: `keras.regularizers.Regularizer`의 객체입니다. - `bias_regularizer`: `keras.regularizers.Regularizer`의 객체입니다. - `activity_regularizer`: `keras.regularizers.Regularizer`의 객체입니다. ## 예제 ```python from keras import regularizers model.add(Dense(64, input_dim=64, kernel_regularizer=regularizers.l2(0.01), activity_regularizer=regularizers.l1(0.01))) ``` ## 사용 가능한 페널티 ```python keras.regularizers.l1(0.) keras.regularizers.l2(0.) keras.regularizers.l1_l2(l1=0.01, l2=0.01) ``` ## 새로운 regularizer를 개발하려면 아래와 같이 가중치 행렬을 입력으로 받고, 그에 해당하는 페널티를 계산해 반환하는 함수라면 모두 regularizer로 사용할 수 있습니다. ```python from keras import backend as K def l1_reg(weight_matrix): return 0.01 * K.sum(K.abs(weight_matrix)) model.add(Dense(64, input_dim=64, kernel_regularizer=l1_reg)) ``` 좀 더 객체 지향적인 방법을 원한다면 [여기](https://github.com/keras-team/keras/blob/master/keras/regularizers.py)에서 예시를 확인할 수 있습니다.
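
위에서 언급한 객체 지향적인 방법의 간단한 스케치는 다음과 같습니다. `Regularizer`를 상속하여 `__call__`과 `get_config`를 구현하면 됩니다(아래 클래스 이름은 설명을 위한 가정입니다).

```python
from keras import backend as K
from keras.regularizers import Regularizer

class L1Regularizer(Regularizer):
    """가중치 행렬에 L1 페널티를 적용하는 예시 regularizer입니다."""

    def __init__(self, l1=0.01):
        self.l1 = l1

    def __call__(self, weight_matrix):
        return self.l1 * K.sum(K.abs(weight_matrix))

    def get_config(self):
        return {'l1': float(self.l1)}

model.add(Dense(64, input_dim=64, kernel_regularizer=L1Regularizer(0.01)))
```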
keras-docs-ko/sources/regularizers.md/0
{ "file_path": "keras-docs-ko/sources/regularizers.md", "repo_id": "keras-docs-ko", "token_count": 1158 }
74
# Chinese (zh-cn) translation of the Keras docs 有关最新文档,请访问 Read the Docs 备份版本:[keras-zh](https://keras-zh.readthedocs.io/),每月更新。 有关官方原始文档,请访问 [Keras官方中文文档](https://keras.io/zh/)。 Translation has done! 文档已完成,欢迎进一步修订。 翻译过程中,请直接将 `sources/` 中 `.md` 文件中的英文替换为中文。 ## 排版规范 Typesetting 此文档遵循 [中文排版指南](https://github.com/sparanoid/chinese-copywriting-guidelines) 规范,并在此之上遵守以下约定: * 英文的左右保持一个空白,避免中英文字黏在一起; * 使用全角标点符号; * 严格遵循 Markdown 语法; * 原文中的双引号(" ")请代换成中文的引号(「」符号怎么打出来见 [这里](http://zhihu.com/question/19755746/answer/27233392)); * 「`加亮`」和「**加粗**」和「[链接]()」都需要在左右保持一个空格。 ## 翻译对照列表 Conventions - 该翻译用于 `zh-cn` (简体中文,中国大陆地区)。 - 当遇到以下 `专业术语` 的时候,请使用以下列表进行对照翻译。(未完待续) | English | 中文 | |:-------------------|:--------------------| | arguments         | 参数                | | boolean           | 布尔                | | data augumentation | 数据增强            | | deep learning     | 深度学习            | | float             | 浮点数               | | Functional API     | 函数式 API     | | Fuzz factor       | 模糊因子             | | input shape       | 输入尺寸             | | index             | 索引                 | | int               | 整数                 | | layer             | 层                  | | loss function     | 损失函数             | | metrics | 评估标准 | | nD tensor         | nD 张量             | | Numpy Array | Numpy 矩阵 | | objective | 目标 | | optimizer | 优化器 | | output shape | 输出尺寸 | | regularizer       | 正则化器             | | return | 返回 | | recurrent | 循环 | | Sequential Model | 顺序模型 | | shape | 尺寸 | | target | 目标 | | testing | 测试 | | training | 训练 | | wrapper           | 封装器               | Welcome to contribute!!!
keras-docs-zh/README.md/0
{ "file_path": "keras-docs-zh/README.md", "repo_id": "keras-docs-zh", "token_count": 1500 }
75
# 用 Keras 实现字符级序列到序列模型。 该脚本演示了如何实现基本的字符级 CNN 序列到序列模型。 我们将其用于将英文短句逐个字符翻译成法语短句。 请注意,进行字符级机器翻译是非比寻常的,因为在此领域中词级模型更为常见。 本示例仅用于演示目的。 **算法总结** - 我们从一个领域的输入序列(例如英语句子)和另一个领域的对应目标序列(例如法语句子)开始。 - 编码器 CNN 对输入字符序列进行编码。 - 对解码器 CNN 进行训练,以将目标序列转换为相同序列,但以后将偏移一个时间步,在这种情况下,该训练过程称为 "教师强制"。它使用编码器的输出。实际上,解码器会根据输入序列,根据给定的 `targets[...t]` 来学习生成 `target[t+1...]`。 - 在推理模式下,当我们想解码未知的输入序列时,我们: - 对输入序列进行编码; - 从大小为1的目标序列开始(仅是序列开始字符); - 将输入序列和 1 个字符的目标序列馈送到解码器,以生成下一个字符的预测; - 使用这些预测来采样下一个字符(我们仅使用 argmax); - 将采样的字符附加到目标序列; - 重复直到我们达到字符数限制。 **数据下载** [English to French sentence pairs.](http://www.manythings.org/anki/fra-eng.zip) [Lots of neat sentence pairs datasets.](http://www.manythings.org/anki/) **参考** - lstm_seq2seq.py - https://wanasit.github.io/attention-based-sequence-to-sequence-in-keras.html ```python from __future__ import print_function import numpy as np from keras.layers import Input, Convolution1D, Dot, Dense, Activation, Concatenate from keras.models import Model batch_size = 64 # 训练批次大小。 epochs = 100 # 训练迭代轮次。 num_samples = 10000 # 训练样本数。 # 磁盘数据文件路径 data_path = 'fra-eng/fra.txt' # 向量化数据。 input_texts = [] target_texts = [] input_characters = set() target_characters = set() with open(data_path, 'r', encoding='utf-8') as f: lines = f.read().split('\n') for line in lines[: min(num_samples, len(lines) - 1)]: input_text, target_text = line.split('\t') # 我们使用 "tab" 作为 "起始序列" 字符, # 对于目标,使用 "\n" 作为 "终止序列" 字符。 target_text = '\t' + target_text + '\n' input_texts.append(input_text) target_texts.append(target_text) for char in input_text: if char not in input_characters: input_characters.add(char) for char in target_text: if char not in target_characters: target_characters.add(char) input_characters = sorted(list(input_characters)) target_characters = sorted(list(target_characters)) num_encoder_tokens = len(input_characters) num_decoder_tokens = len(target_characters) max_encoder_seq_length = max([len(txt) for txt in input_texts]) max_decoder_seq_length = max([len(txt) for txt in target_texts]) print('Number of samples:', len(input_texts)) print('Number of unique input tokens:', num_encoder_tokens) print('Number of unique output tokens:', num_decoder_tokens) print('Max sequence length for inputs:', max_encoder_seq_length) print('Max sequence length for outputs:', max_decoder_seq_length) input_token_index = dict( [(char, i) for i, char in enumerate(input_characters)]) target_token_index = dict( [(char, i) for i, char in enumerate(target_characters)]) encoder_input_data = np.zeros( (len(input_texts), max_encoder_seq_length, num_encoder_tokens), dtype='float32') decoder_input_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32') decoder_target_data = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32') for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)): for t, char in enumerate(input_text): encoder_input_data[i, t, input_token_index[char]] = 1. for t, char in enumerate(target_text): # decoder_target_data is ahead of decoder_input_data by one timestep decoder_input_data[i, t, target_token_index[char]] = 1. if t > 0: # decoder_target_data will be ahead by one timestep # and will not include the start character. decoder_target_data[i, t - 1, target_token_index[char]] = 1. 
# 定义输入序列并处理它。 encoder_inputs = Input(shape=(None, num_encoder_tokens)) # Encoder x_encoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal')(encoder_inputs) x_encoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal', dilation_rate=2)(x_encoder) x_encoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal', dilation_rate=4)(x_encoder) decoder_inputs = Input(shape=(None, num_decoder_tokens)) # Decoder x_decoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal')(decoder_inputs) x_decoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal', dilation_rate=2)(x_decoder) x_decoder = Convolution1D(256, kernel_size=3, activation='relu', padding='causal', dilation_rate=4)(x_decoder) # Attention attention = Dot(axes=[2, 2])([x_decoder, x_encoder]) attention = Activation('softmax')(attention) context = Dot(axes=[2, 1])([attention, x_encoder]) decoder_combined_context = Concatenate(axis=-1)([context, x_decoder]) decoder_outputs = Convolution1D(64, kernel_size=3, activation='relu', padding='causal')(decoder_combined_context) decoder_outputs = Convolution1D(64, kernel_size=3, activation='relu', padding='causal')(decoder_outputs) # 输出 decoder_dense = Dense(num_decoder_tokens, activation='softmax') decoder_outputs = decoder_dense(decoder_outputs) # 定义将 `encoder_input_data` & `decoder_input_data` # 转化为 `decoder_target_data`的模型。 model = Model([encoder_inputs, decoder_inputs], decoder_outputs) model.summary() # 执行训练 model.compile(optimizer='adam', loss='categorical_crossentropy') model.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=batch_size, epochs=epochs, validation_split=0.2) # 保存模型 model.save('cnn_s2s.h5') # 接下来: 推理模式 (采样)。 # 定义采样模型 reverse_input_char_index = dict( (i, char) for char, i in input_token_index.items()) reverse_target_char_index = dict( (i, char) for char, i in target_token_index.items()) nb_examples = 100 in_encoder = encoder_input_data[:nb_examples] in_decoder = np.zeros( (len(input_texts), max_decoder_seq_length, num_decoder_tokens), dtype='float32') in_decoder[:, 0, target_token_index["\t"]] = 1 predict = np.zeros( (len(input_texts), max_decoder_seq_length), dtype='float32') for i in range(max_decoder_seq_length - 1): predict = model.predict([in_encoder, in_decoder]) predict = predict.argmax(axis=-1) predict_ = predict[:, i].ravel().tolist() for j, x in enumerate(predict_): in_decoder[j, i + 1, x] = 1 for seq_index in range(nb_examples): # 抽取一个序列(训练集的一部分)进行解码。 output_seq = predict[seq_index, :].ravel().tolist() decoded = [] for x in output_seq: if reverse_target_char_index[x] == "\n": break else: decoded.append(reverse_target_char_index[x]) decoded_sentence = "".join(decoded) print('-') print('Input sentence:', input_texts[seq_index]) print('Decoded sentence:', decoded_sentence) ```
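
训练结束后,上面的脚本会把模型保存为 `cnn_s2s.h5`。下面是一个简单的示例草稿(假设上面的脚本已经运行完毕),展示如何重新加载该模型用于后续推理:

```python
from keras.models import load_model

# 重新加载之前保存的模型(包含结构、权重和优化器状态)
model = load_model('cnn_s2s.h5')
```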
keras-docs-zh/sources/examples/cnn_seq2seq.md/0
{ "file_path": "keras-docs-zh/sources/examples/cnn_seq2seq.md", "repo_id": "keras-docs-zh", "token_count": 3857 }
76
# 在 MNIST 数据集上训练去噪自动编码器。 去噪是自动编码器的经典应用之一。 去噪过程去除了破坏真实信号的有害噪声。 噪声 + 数据 ---> 去噪自动编码器 ---> 数据 给定训练数据集的损坏数据作为输入,输出真实信号作为输出, 去噪自动编码器可以恢复隐藏的结构以生成干净的数据。 此示例具有模块化设计。编码器、解码器和自动编码器是 3 种共享权重的模型。 例如,在训练自动编码器之后,编码器可用于生成输入数据的潜在矢量,以实现低维可视化(如 PCA 或 TSNE)。 ```python from __future__ import absolute_import from __future__ import division from __future__ import print_function import keras from keras.layers import Activation, Dense, Input from keras.layers import Conv2D, Flatten from keras.layers import Reshape, Conv2DTranspose from keras.models import Model from keras import backend as K from keras.datasets import mnist import numpy as np import matplotlib.pyplot as plt from PIL import Image np.random.seed(1337) # MNIST 数据集 (x_train, _), (x_test, _) = mnist.load_data() image_size = x_train.shape[1] x_train = np.reshape(x_train, [-1, image_size, image_size, 1]) x_test = np.reshape(x_test, [-1, image_size, image_size, 1]) x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # 通过添加法线距离为 0.5 且 std=0.5 的噪声来生成损坏的 MNIST 图像 noise = np.random.normal(loc=0.5, scale=0.5, size=x_train.shape) x_train_noisy = x_train + noise noise = np.random.normal(loc=0.5, scale=0.5, size=x_test.shape) x_test_noisy = x_test + noise x_train_noisy = np.clip(x_train_noisy, 0., 1.) x_test_noisy = np.clip(x_test_noisy, 0., 1.) # 网络参数 input_shape = (image_size, image_size, 1) batch_size = 128 kernel_size = 3 latent_dim = 16 # CNN 层和每层过滤器的编码器/解码器数量 layer_filters = [32, 64] # 建立自动编码器模型 # 首先建立编码器模型 inputs = Input(shape=input_shape, name='encoder_input') x = inputs # Conv2D块的堆栈 # 注意: # 1) 在深度网络上的 ReLU 之前使用批处理规范化 # 2) 使用 MaxPooling2D 替代 strides>1 # - 更快但不如 strides>1 好 for filters in layer_filters: x = Conv2D(filters=filters, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x) # 构建解码器模型所需的形状信息 shape = K.int_shape(x) # 生成潜在向量 x = Flatten()(x) latent = Dense(latent_dim, name='latent_vector')(x) # 实例化编码器模型 encoder = Model(inputs, latent, name='encoder') encoder.summary() # 建立解码器模型 latent_inputs = Input(shape=(latent_dim,), name='decoder_input') x = Dense(shape[1] * shape[2] * shape[3])(latent_inputs) x = Reshape((shape[1], shape[2], shape[3]))(x) # 转置 Conv2D 块的堆栈 # 注意: # 1) 在深度网络上的 ReLU 之前使用批处理规范化 # 2) 使用 UpSampling2D 替代 strides>1 # - 更快但不如 strides>1 好 for filters in layer_filters[::-1]: x = Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=2, activation='relu', padding='same')(x) x = Conv2DTranspose(filters=1, kernel_size=kernel_size, padding='same')(x) outputs = Activation('sigmoid', name='decoder_output')(x) # 实例化解码器模型 decoder = Model(latent_inputs, outputs, name='decoder') decoder.summary() # Autoencoder = Encoder + Decoder # 实例化自动编码器模型 autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder') autoencoder.summary() autoencoder.compile(loss='mse', optimizer='adam') # 训练自动编码器 autoencoder.fit(x_train_noisy, x_train, validation_data=(x_test_noisy, x_test), epochs=30, batch_size=batch_size) # 根据损坏的测试图像预测自动编码器的输出 x_decoded = autoencoder.predict(x_test_noisy) # 显示第 1 个 8 张损坏和去噪的图像 rows, cols = 10, 30 num = rows * cols imgs = np.concatenate([x_test[:num], x_test_noisy[:num], x_decoded[:num]]) imgs = imgs.reshape((rows * 3, cols, image_size, image_size)) imgs = np.vstack(np.split(imgs, rows, axis=1)) imgs = imgs.reshape((rows * 3, -1, image_size, image_size)) imgs = np.vstack([np.hstack(i) for i in imgs]) imgs = (imgs * 255).astype(np.uint8) plt.figure() plt.axis('off') plt.title('Original images: top rows, ' 'Corrupted Input: middle rows, ' 'Denoised Input: third rows') 
plt.imshow(imgs, interpolation='none', cmap='gray') Image.fromarray(imgs).save('corrupted_and_denoised.png') plt.show() ```
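
文档开头提到,训练好的编码器可以用来生成输入数据的潜在向量,以实现低维可视化。下面是一个简单的草稿(假设上面的脚本已运行完毕,并且安装了 scikit-learn):

```python
from sklearn.decomposition import PCA

# 用训练好的编码器生成测试图像的潜在向量,形状为 (样本数, latent_dim)
latent = encoder.predict(x_test)

# 用 PCA 将潜在向量降到 2 维并绘制散点图
latent_2d = PCA(n_components=2).fit_transform(latent)
plt.figure()
plt.scatter(latent_2d[:, 0], latent_2d[:, 1], s=1)
plt.title('Latent vectors (PCA projection)')
plt.show()
```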
keras-docs-zh/sources/examples/mnist_denoising_autoencoder.md/0
{ "file_path": "keras-docs-zh/sources/examples/mnist_denoising_autoencoder.md", "repo_id": "keras-docs-zh", "token_count": 2637 }
77
# Keras FAQ: 常见问题解答 - [如何引用 Keras?](#how-should-i-cite-keras) - [如何在 GPU 上运行 Keras?](#how-can-i-run-keras-on-gpu) - [如何在多 GPU 上运行 Keras 模型?](#how-can-i-run-a-keras-model-on-multiple-gpus) - ["sample", "batch", "epoch" 分别是什么?](#what-does-sample-batch-epoch-mean) - [如何保存 Keras 模型?](#how-can-i-save-a-keras-model) - [为什么训练集误差比测试集的误差高很多?](#why-is-the-training-loss-much-higher-than-the-testing-loss) - [如何获取中间层的输出?](#how-can-i-obtain-the-output-of-an-intermediate-layer) - [如何用 Keras 处理超过内存的数据集?](#how-can-i-use-keras-with-datasets-that-dont-fit-in-memory) - [在验证集的误差不再下降时,如何中断训练?](#how-can-i-interrupt-training-when-the-validation-loss-isnt-decreasing-anymore) - [验证集划分是如何计算的?](#how-is-the-validation-split-computed) - [在训练过程中数据是否会混洗?](#is-the-data-shuffled-during-training) - [如何在每个 epoch 后记录训练集和验证集的误差和准确率?](#how-can-i-record-the-training-validation-loss-accuracy-at-each-epoch) - [如何「冻结」网络层?](#how-can-i-freeze-keras-layers) - [如何使用状态 RNNs (stateful RNNs)?](#how-can-i-use-stateful-rnns) - [如何从 Sequential 模型中移除一个层?](#how-can-i-remove-a-layer-from-a-sequential-model) - [如何在 Keras 中使用预训练的模型?](#how-can-i-use-pre-trained-models-in-keras) - [如何在 Keras 中使用 HDF5 输入?](#how-can-i-use-hdf5-inputs-with-keras) - [Keras 配置文件保存在哪里?](#where-is-the-keras-configuration-file-stored) - [如何在 Keras 开发过程中获取可复现的结果?](#how-can-i-obtain-reproducible-results-using-keras-during-development) - [如何在 Keras 中安装 HDF5 或 h5py 来保存我的模型?](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras) --- <span id="how-should-i-cite-keras"></span> ### 如何引用 Keras? 如果 Keras 有助于您的研究,请在你的出版物中引用它。以下是 BibTeX 条目引用的示例: ``` @misc{chollet2015keras, title={Keras}, author={Chollet, Fran\c{c}ois and others}, year={2015}, publisher={GitHub}, howpublished={\url{https://github.com/keras-team/keras}}, } ``` --- <span id="how-can-i-run-keras-on-gpu"></span> ### 如何在 GPU 上运行 Keras? 如果你以 **TensorFlow** 或 **CNTK** 后端运行,只要检测到任何可用的 GPU,那么代码将自动在 GPU 上运行。 如果你以 **Theano** 后端运行,则可以使用以下方法之一: **方法 1**: 使用 Theano flags。 ```bash THEANO_FLAGS=device=gpu,floatX=float32 python my_keras_script.py ``` "gpu" 可能需要根据你的设备标识符(例如gpu0,gpu1等)进行更改。 **方法 2**: 创建 `.theanorc`: [指导教程](http://deeplearning.net/software/theano/library/config.html) **方法 3**: 在代码的开头手动设置 `theano.config.device`, `theano.config.floatX`: ```python import theano theano.config.device = 'gpu' theano.config.floatX = 'float32' ``` --- <span id="how-can-i-run-a-keras-model-on-multiple-gpus"></span> ### 如何在多 GPU 上运行 Keras 模型? 
我们建议使用 **TensorFlow** 后端来执行这项任务。有两种方法可在多个 GPU 上运行单个模型:**数据并行**和**设备并行**。 在大多数情况下,你最需要的是数据并行。 #### 数据并行 数据并行包括在每个设备上复制一次目标模型,并使用每个模型副本处理不同部分的输入数据。Keras 有一个内置的实用函数 `keras.utils.multi_gpu_model`,它可以生成任何模型的数据并行版本,在多达 8 个 GPU 上实现准线性加速。 有关更多信息,请参阅 [multi_gpu_model](/utils/#multi_gpu_model) 的文档。这里是一个快速的例子: ```python from keras.utils import multi_gpu_model # 将 `model` 复制到 8 个 GPU 上。 # 假定你的机器有 8 个可用的 GPU。 parallel_model = multi_gpu_model(model, gpus=8) parallel_model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # 这个 `fit` 调用将分布在 8 个 GPU 上。 # 由于 batch size 为 256,每个 GPU 将处理 32 个样本。 parallel_model.fit(x, y, epochs=20, batch_size=256) ``` #### 设备并行 设备并行性包括在不同设备上运行同一模型的不同部分。对于具有并行体系结构的模型,例如有两个分支的模型,这种方式很合适。 这种并行可以通过使用 TensorFlow device scopes 来实现。这里是一个简单的例子: ```python # 模型中共享的 LSTM 用于并行编码两个不同的序列 input_a = keras.Input(shape=(140, 256)) input_b = keras.Input(shape=(140, 256)) shared_lstm = keras.layers.LSTM(64) # 在一个 GPU 上处理第一个序列 with tf.device_scope('/gpu:0'): encoded_a = shared_lstm(tweet_a) # 在另一个 GPU上 处理下一个序列 with tf.device_scope('/gpu:1'): encoded_b = shared_lstm(tweet_b) # 在 CPU 上连接结果 with tf.device_scope('/cpu:0'): merged_vector = keras.layers.concatenate([encoded_a, encoded_b], axis=-1) ``` --- <span id="what-does-sample-batch-epoch-mean"></span> ### "sample", "batch", "epoch" 分别是什么? 为了正确地使用 Keras,以下是必须了解和理解的一些常见定义: - **Sample**: 样本,数据集中的一个元素,一条数据。 - *例1:* 在卷积神经网络中,一张图像是一个样本。 - *例2:* 在语音识别模型中,一段音频是一个样本。 - **Batch**: 批次,含有 *N* 个样本的集合。每一个 batch 的样本都是独立并行处理的。在训练时,一个 batch 的结果只会用来更新一次模型。 - 一个 **batch** 的样本通常比单个输入更接近于总体输入数据的分布,batch 越大就越近似。然而,每个 batch 将花费更长的时间来处理,并且仍然只更新模型一次。在推理(评估/预测)时,建议条件允许的情况下选择一个尽可能大的 batch,(因为较大的 batch 通常评估/预测的速度会更快)。 - **Epoch**: 轮次,通常被定义为「在整个数据集上的一轮迭代」,用于训练的不同的阶段,这有利于记录和定期评估。 - 当在 Keras 模型的 `fit` 方法中使用 `validation_data` 或 `validation_split` 时,评估将在每个 **epoch** 结束时运行。 - 在 Keras 中,可以添加专门的用于在 **epoch** 结束时运行的 [callbacks 回调](/callbacks/)。例如学习率变化和模型检查点(保存)。 --- <span id="how-can-i-save-a-keras-model"></span> ### 如何保存 Keras 模型? #### 保存/加载整个模型(结构 + 权重 + 优化器状态) *不建议使用 pickle 或 cPickle 来保存 Keras 模型。* 你可以使用 `model.save(filepath)` 将 Keras 模型保存到单个 HDF5 文件中,该文件将包含: - 模型的结构,允许重新创建模型 - 模型的权重 - 训练配置项(损失函数,优化器) - 优化器状态,允许准确地从你上次结束的地方继续训练。 你可以使用 `keras.models.load_model(filepath)` 重新实例化模型。`load_model` 还将负责使用保存的训练配置项来编译模型(除非模型从未编译过)。 示例: ```python from keras.models import load_model model.save('my_model.h5') # 创建 HDF5 文件 'my_model.h5' del model # 删除现有模型 # 返回一个编译好的模型 # 与之前那个相同 model = load_model('my_model.h5') ``` 另请参阅[如何安装 HDF5 或 h5py 以在 Keras 中保存我的模型](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras),查看有关如何安装 h5py 的说明。 #### 只保存/加载模型的结构 如果您只需要保存**模型的结构**,而非其权重或训练配置项,则可以执行以下操作: ```python # 保存为 JSON json_string = model.to_json() # 保存为 YAML yaml_string = model.to_yaml() ``` 生成的 JSON/YAML 文件是人类可读的,如果需要还可以手动编辑。 你可以从这些数据建立一个新的模型: ```python # 从 JSON 重建模型: from keras.models import model_from_json model = model_from_json(json_string) # 从 YAML 重建模型: from keras.models import model_from_yaml model = model_from_yaml(yaml_string) ``` #### 只保存/加载模型的权重 如果您只需要 **模型的权重**,可以使用下面的代码以 HDF5 格式进行保存。 请注意,我们首先需要安装 HDF5 和 Python 库 h5py,它们不包含在 Keras 中。 ```python model.save_weights('my_model_weights.h5') ``` 假设你有用于实例化模型的代码,则可以将保存的权重加载到具有相同结构的模型中: ```python model.load_weights('my_model_weights.h5') ``` 如果你需要将权重加载到不同的结构(有一些共同层)的模型中,例如微调或迁移学习,则可以按层的名字来加载权重: ```python model.load_weights('my_model_weights.h5', by_name=True) ``` 示例: ```python """ 假设原始模型如下所示: model = Sequential() model.add(Dense(2, input_dim=3, name='dense_1')) model.add(Dense(3, name='dense_2')) ... 
model.save_weights(fname) """ # 新模型 model = Sequential() model.add(Dense(2, input_dim=3, name='dense_1')) # 将被加载 model.add(Dense(10, name='new_dense')) # 将不被加载 # 从第一个模型加载权重;只会影响第一层,dense_1 model.load_weights(fname, by_name=True) ``` #### 处理已保存模型中的自定义层(或其他自定义对象) 如果要加载的模型包含自定义层或其他自定义类或函数,则可以通过 `custom_objects` 参数将它们传递给加载机制: ```python from keras.models import load_model # 假设你的模型包含一个 AttentionLayer 类的实例 model = load_model('my_model.h5', custom_objects={'AttentionLayer': AttentionLayer}) ``` 或者,你可以使用[自定义对象作用域](/utils/#customobjectscope): ```python from keras.utils import CustomObjectScope with CustomObjectScope({'AttentionLayer': AttentionLayer}): model = load_model('my_model.h5') ``` 自定义对象的处理与 `load_model`, `model_from_json`, `model_from_yaml` 的工作方式相同: ```python from keras.models import model_from_json model = model_from_json(json_string, custom_objects={'AttentionLayer': AttentionLayer}) ``` --- <span id="why-is-the-training-loss-much-higher-than-the-testing-loss"></span> ### 为什么训练误差比测试误差高很多? Keras 模型有两种模式:训练和测试。正则化机制,如 Dropout 和 L1/L2 权重正则化,在测试时是关闭的。 此外,训练误差是每批训练数据的平均误差。由于你的模型是随着时间而变化的,一个 epoch 中的第一批数据的误差通常比最后一批的要高。另一方面,测试误差是模型在一个 epoch 训练完后计算的,因而误差较小。 --- <span id="how-can-i-obtain-the-output-of-an-intermediate-layer"></span> ### 如何获取中间层的输出? 一个简单的方法是创建一个新的 `Model` 来输出你所感兴趣的层: ```python from keras.models import Model model = ... # 创建原始模型 layer_name = 'my_layer' intermediate_layer_model = Model(inputs=model.input, outputs=model.get_layer(layer_name).output) intermediate_output = intermediate_layer_model.predict(data) ``` 或者,你也可以构建一个 Keras 函数,该函数将在给定输入的情况下返回某个层的输出,例如: ```python from keras import backend as K # 以 Sequential 模型为例 get_3rd_layer_output = K.function([model.layers[0].input], [model.layers[3].output]) layer_output = get_3rd_layer_output([x])[0] ``` 同样,你可以直接建立一个 Theano 或 TensorFlow 函数。 注意,如果你的模型在训练和测试阶段有不同的行为(例如,使用 `Dropout`, `BatchNormalization` 等),则需要将学习阶段标志传递给你的函数: ```python get_3rd_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[3].output]) # 测试模式 = 0 时的输出 layer_output = get_3rd_layer_output([x, 0])[0] # 测试模式 = 1 时的输出 layer_output = get_3rd_layer_output([x, 1])[0] ``` --- <span id="how-can-i-use-keras-with-datasets-that-dont-fit-in-memory"></span> ### 如何用 Keras 处理超过内存的数据集? 你可以使用 `model.train_on_batch(x,y)` 和 `model.test_on_batch(x,y)` 进行批量训练与测试。请参阅[模型文档](/models/sequential)。 或者,你可以编写一个生成批处理训练数据的生成器,然后使用 `model.fit_generator(data_generator,steps_per_epoch,epochs)` 方法。 你可以在 [CIFAR10 example](https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py) 中找到实践代码。 --- <span id="how-can-i-interrupt-training-when-the-validation-loss-isnt-decreasing-anymore"></span> ### 在验证集的误差不再下降时,如何中断训练? 你可以使用 `EarlyStopping` 回调: ```python from keras.callbacks import EarlyStopping early_stopping = EarlyStopping(monitor='val_loss', patience=2) model.fit(x, y, validation_split=0.2, callbacks=[early_stopping]) ``` 更多信息请查看 [callbacks 文档](/callbacks)。 --- <span id="how-is-the-validation-split-computed"></span> ### 验证集划分是如何计算的? 如果您将 `model.fit` 中的 `validation_split` 参数设置为 0.1,那么使用的验证数据将是最后 10% 的数据。如果设置为 0.25,就是最后 25% 的数据。注意,在提取分割验证集之前,数据不会被混洗,因此验证集仅仅是传递的输入中最后一个 x% 的样本。 所有 epoch 都使用相同的验证集(在同一个 `fit` 中调用)。 --- <span id="is-the-data-shuffled-during-training"></span> ### 在训练过程中数据是否会混洗? 是的,如果 `model.fit`中的 `shuffle`参数设置为 True(默认值),则训练数据将在每个 epoch 混洗。 验证集永远不会混洗。 --- <span id="how-can-i-record-the-training-validation-loss-accuracy-at-each-epoch"></span> ### 如何在每个 epoch 后记录训练集和验证集的误差和准确率? 
`model.fit` 方法返回一个 `History` 回调,它具有包含连续误差的列表和其他度量的 `history` 属性。 ```python hist = model.fit(x, y, validation_split=0.2) print(hist.history) ``` --- <span id="how-can-i-freeze-keras-layers"></span> ### 如何「冻结」网络层? 「冻结」一个层意味着将其排除在训练之外,即其权重将永远不会更新。这在微调模型或使用固定的词向量进行文本输入中很有用。 您可以将 `trainable` 参数(布尔值)传递给一个层的构造器,以将该层设置为不可训练的: ```python frozen_layer = Dense(32, trainable=False) ``` 另外,可以在实例化之后将网络层的 `trainable` 属性设置为 True 或 False。为了使之生效,在修改 `trainable` 属性之后,需要在模型上调用 `compile()`。这是一个例子: ```python x = Input(shape=(32,)) layer = Dense(32) layer.trainable = False y = layer(x) frozen_model = Model(x, y) # 在下面的模型中,训练期间不会更新层的权重 frozen_model.compile(optimizer='rmsprop', loss='mse') layer.trainable = True trainable_model = Model(x, y) # 使用这个模型,训练期间 `layer` 的权重将被更新 # (这也会影响上面的模型,因为它使用了同一个网络层实例) trainable_model.compile(optimizer='rmsprop', loss='mse') frozen_model.fit(data, labels) # 这不会更新 `layer` 的权重 trainable_model.fit(data, labels) # 这会更新 `layer` 的权重 ``` --- <span id="how-can-i-use-stateful-rnns"></span> ### 如何使用有状态 RNN (stateful RNNs)? 使 RNN 具有状态意味着每批样品的状态将被重新用作下一批样品的初始状态。 当使用有状态 RNN 时,假定: - 所有的批次都有相同数量的样本 - 如果 `x1` 和 `x2` 是连续批次的样本,则 `x2[i]` 是 `x1[i]` 的后续序列,对于每个 `i`。 要在 RNN 中使用状态,你需要: - 通过将 `batch_size` 参数传递给模型的第一层来显式指定你正在使用的批大小。例如,对于 10 个时间步长的 32 样本的 batch,每个时间步长具有 16 个特征,`batch_size = 32`。 - 在 RNN 层中设置 `stateful = True`。 - 在调用 `fit()` 时指定 `shuffle = False`。 重置累积状态: - 使用 `model.reset_states()` 来重置模型中所有层的状态 - 使用 `layer.reset_states()` 来重置指定有状态 RNN 层的状态 示例: ```python x # 输入数据,尺寸为 (32, 21, 16) # 将步长为 10 的序列输送到模型中 model = Sequential() model.add(LSTM(32, input_shape=(10, 16), batch_size=32, stateful=True)) model.add(Dense(16, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy') # 训练网络,根据给定的前 10 个时间步,来预测第 11 个时间步: model.train_on_batch(x[:, :10, :], np.reshape(x[:, 10, :], (32, 16))) # 网络的状态已经改变。我们可以提供后续序列: model.train_on_batch(x[:, 10:20, :], np.reshape(x[:, 20, :], (32, 16))) # 重置 LSTM 层的状态: model.reset_states() # 另一种重置方法: model.layers[0].reset_states() ``` 请注意,`predict`, `fit`, `train_on_batch`, `predict_classes` 等方法*全部*都会更新模型中有状态层的状态。这使你不仅可以进行有状态的训练,还可以进行有状态的预测。 --- <span id="how-can-i-remove-a-layer-from-a-sequential-model"></span> ### 如何从 Sequential 模型中移除一个层? 你可以通过调用 `.pop()` 来删除 `Sequential` 模型中最后添加的层: ```python model = Sequential() model.add(Dense(32, activation='relu', input_dim=784)) model.add(Dense(32, activation='relu')) print(len(model.layers)) # "2" model.pop() print(len(model.layers)) # "1" ``` --- <span id="how-can-i-use-pre-trained-models-in-keras"></span> ### 如何在 Keras 中使用预训练的模型? 
我们提供了以下图像分类模型的代码和预训练的权重: - Xception - VGG16 - VGG19 - ResNet50 - ResNet v2 - ResNeXt - Inception v3 - Inception-ResNet v2 - MobileNet v1 - MobileNet v2 - DenseNet - NASNet 它们可以使用 `keras.applications` 模块进行导入: ```python from keras.applications.xception import Xception from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras.applications.resnet50 import ResNet50 from keras.applications.inception_v3 import InceptionV3 from keras.applications.inception_resnet_v2 import InceptionResNetV2 from keras.applications.mobilenet import MobileNet from keras.applications.densenet import DenseNet121 from keras.applications.densenet import DenseNet169 from keras.applications.densenet import DenseNet201 from keras.applications.nasnet import NASNetLarge from keras.applications.nasnet import NASNetMobile from keras.applications.mobilenet_v2 import MobileNetV2 model = VGG16(weights='imagenet', include_top=True) ``` 有关一些简单的用法示例,请参阅 [Applications 模块的文档](/applications)。 有关如何使用此类预训练的模型进行特征提取或微调的详细示例,请参阅[此博客文章](http://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html)。 VGG16 模型也是以下几个 Keras 示例脚本的基础: - [Style transfer](https://github.com/keras-team/keras/blob/master/examples/neural_style_transfer.py) - [Feature visualization](https://github.com/keras-team/keras/blob/master/examples/conv_filter_visualization.py) - [Deep dream](https://github.com/keras-team/keras/blob/master/examples/deep_dream.py) --- <span id="how-can-i-use-hdf5-inputs-with-keras"></span> ### 如何在 Keras 中使用 HDF5 输入? 你可以使用 `keras.utils` 中的 `HDF5Matrix` 类。有关详细信息,请参阅 [HDF5Matrix 文档](/utils/#hdf5matrix)。 你也可以直接使用 HDF5 数据集: ```python import h5py with h5py.File('input/file.hdf5', 'r') as f: x_data = f['x_data'] model.predict(x_data) ``` 请查看[如何在 Keras 中安装 HDF5 或 h5py 来保存模型](#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)找到 h5py 安装指引。 --- <span id="where-is-the-keras-configuration-file-stored"></span> ### Keras 配置文件保存在哪里? 所有 Keras 数据存储的默认目录是: ```bash $HOME/.keras/ ``` 注意,Windows 用户应该将 `$HOME` 替换为 `%USERPROFILE%`。如果 Keras 无法创建上述目录(例如,由于权限问题),则使用 `/tmp/.keras/` 作为备份。 Keras 配置文件是存储在 `$HOME/.keras/keras.json` 中的 JSON 文件。默认的配置文件如下所示: ``` { "image_data_format": "channels_last", "epsilon": 1e-07, "floatx": "float32", "backend": "tensorflow" } ``` 它包含以下字段: - 图像处理层和实用程序所使用的默认值图像数据格式(`channels_last` 或 `channels_first`)。 - 用于防止在某些操作中被零除的 `epsilon` 模糊因子。 - 默认浮点数据类型。 - 默认后端。详见 [backend 文档](/backend)。 同样,缓存的数据集文件(如使用 `get_file()` 下载的文件)默认存储在 `$HOME/.keras/datasets/` 中。 --- <span id="how-can-i-obtain-reproducible-results-using-keras-during-development"></span> ### 如何在 Keras 开发过程中获取可复现的结果? 
在模型的开发过程中,能够在一次次的运行中获得可复现的结果,以确定性能的变化是来自模型还是数据集的变化,或者仅仅是一些新的随机样本点带来的结果,有时候是很有用处的。 首先,你需要在程序启动之前将 `PYTHONHASHSEED` 环境变量设置为 0(不在程序本身内)。对于 Python 3.2.3 以上版本,它对于某些基于散列的操作具有可重现的行为是必要的(例如,集合和字典的 item 顺序,请参阅 [Python 文档](https://docs.python.org/3.7/using/cmdline.html#envvar-PYTHONHASHSEED)和 [issue #2280](https://github.com/keras-team/keras/issues/2280#issuecomment-306959926) 获取更多详细信息)。设置环境变量的一种方法是,在这样启动 python 时: ```bash $ cat test_hash.py print(hash("keras")) $ python3 test_hash.py # 无法复现的 hash (Python 3.2.3+) -8127205062320133199 $ python3 test_hash.py # 无法复现的 hash (Python 3.2.3+) 3204480642156461591 $ PYTHONHASHSEED=0 python3 test_hash.py # 可复现的 hash 4883664951434749476 $ PYTHONHASHSEED=0 python3 test_hash.py # 可复现的 hash 4883664951434749476 ``` 此外,当使用 TensorFlow 后端并在 GPU 上运行时,某些操作具有非确定性输出,特别是 `tf.reduce_sum()`。这是因为 GPU 并行运行许多操作,因此并不总能保证执行顺序。由于浮点数的精度有限,即使添加几个数字,也可能会产生略有不同的结果,具体取决于添加它们的顺序。你可以尝试避免某些非确定性操作,但有些操作可能是由 TensorFlow 在计算梯度时自动创建的,因此在 CPU 上运行代码要简单得多。为此,你可以将 `CUDA_VISIBLE_DEVICES` 环境变量设置为空字符串,例如: ```bash $ CUDA_VISIBLE_DEVICES="" PYTHONHASHSEED=0 python your_program.py ``` 下面的代码片段提供了一个如何获得可复现结果的例子 - 针对 Python 3 环境的 TensorFlow 后端。 ```python import numpy as np import tensorflow as tf import random as rn # 以下是 Numpy 在一个明确的初始状态生成固定随机数字所必需的。 np.random.seed(42) # 以下是 Python 在一个明确的初始状态生成固定随机数字所必需的。 rn.seed(12345) # 强制 TensorFlow 使用单线程。 # 多线程是结果不可复现的一个潜在因素。 # 更多详情,见: https://stackoverflow.com/questions/42022950/ session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) from keras import backend as K # `tf.set_random_seed()` 将会以 TensorFlow 为后端, # 在一个明确的初始状态下生成固定随机数字。 # 更多详情,见: https://www.tensorflow.org/api_docs/python/tf/set_random_seed tf.set_random_seed(1234) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) # 剩余代码 ... ``` --- <span id="how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras"></span> ### 如何在 Keras 中安装 HDF5 或 h5py 来保存我的模型? 为了将你的 Keras 模型保存为 HDF5 文件,例如通过 `keras.callbacks.ModelCheckpoint`,Keras 使用了 h5py Python 包。h5py 是 Keras 的依赖项,应默认被安装。在基于 Debian 的发行版本上,你需要再额外安装 `libhdf5`: ``` sudo apt-get install libhdf5-serial-dev ``` 如果你不确定是否安装了 h5py,则可以打开 Python shell 并通过下面的命令加载模块 ``` import h5py ``` 如果模块导入没有错误,那么说明模块已经安装成功,否则你可以在 [http://docs.h5py.org/en/latest/build.html](http://docs.h5py.org/en/latest/build.html) 中找到详细的安装说明。
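下面是一个简单的验证示例(仅作演示;其中的模型结构和文件名 `my_test_model.h5` 均为任意假设),用于确认 h5py 能够正常配合 Keras 保存并重新加载模型:

```python
import h5py  # 若导入没有报错,说明 h5py 已正确安装

from keras.models import Sequential, load_model
from keras.layers import Dense

# 构建一个很小的模型,并将其保存为 HDF5 文件(依赖 h5py)
model = Sequential()
model.add(Dense(8, activation='relu', input_dim=4))
model.add(Dense(1))
model.compile(optimizer='rmsprop', loss='mse')
model.save('my_test_model.h5')

# 重新加载模型,确认保存/加载流程可用
restored_model = load_model('my_test_model.h5')
restored_model.summary()
```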
keras-docs-zh/sources/getting-started/faq.md/0
{ "file_path": "keras-docs-zh/sources/getting-started/faq.md", "repo_id": "keras-docs-zh", "token_count": 14781 }
78
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L796)</span> ### Dense ```python keras.layers.Dense(units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None) ``` 就是你常用的的全连接层。 `Dense` 实现以下操作:`output = activation(dot(input, kernel) + bias)` 其中 `activation` 是按逐个元素计算的激活函数,`kernel` 是由网络层创建的权值矩阵,以及 `bias` 是其创建的偏置向量 (只在 `use_bias` 为 `True` 时才有用)。 - __注意__: 如果该层的输入的秩大于 2,那么它首先被展平然后 再计算与 `kernel` 的点乘。 __示例__ ```python # 作为 Sequential 模型的第一层 model = Sequential() model.add(Dense(32, input_shape=(16,))) # 现在模型就会以尺寸为 (*, 16) 的数组作为输入, # 其输出数组的尺寸为 (*, 32) # 在第一层之后,你就不再需要指定输入的尺寸了: model.add(Dense(32)) ``` __参数__ - __units__: 正整数,输出空间维度。 - __activation__: 激活函数 (详见 [activations](../activations.md))。 若不指定,则不使用激活函数 (即,线性激活: `a(x) = x`)。 - __use_bias__: 布尔值,该层是否使用偏置向量。 - __kernel_initializer__: `kernel` 权值矩阵的初始化器 (详见 [initializers](../initializers.md))。 - __bias_initializer__: 偏置向量的初始化器 (详见 [initializers](../initializers.md))。 - __kernel_regularizer__: 运用到 `kernel` 权值矩阵的正则化函数 (详见 [regularizer](../regularizers.md))。 - __bias_regularizer__: 运用到偏置向量的的正则化函数 (详见 [regularizer](../regularizers.md))。 - __activity_regularizer__: 运用到层的输出的正则化函数 (它的 "activation")。 (详见 [regularizer](../regularizers.md))。 - __kernel_constraint__: 运用到 `kernel` 权值矩阵的约束函数 (详见 [constraints](../constraints.md))。 - __bias_constraint__: 运用到偏置向量的约束函数 (详见 [constraints](../constraints.md))。 __输入尺寸__ nD 张量,尺寸: `(batch_size, ..., input_dim)`。 最常见的情况是一个尺寸为 `(batch_size, input_dim)` 的 2D 输入。 __输出尺寸__ nD 张量,尺寸: `(batch_size, ..., units)`。 例如,对于尺寸为 `(batch_size, input_dim)` 的 2D 输入, 输出的尺寸为 `(batch_size, units)`。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L277)</span> ### Activation ```python keras.layers.Activation(activation) ``` 将激活函数应用于输出。 __参数__ - __activation__: 要使用的激活函数的名称 (详见: [activations](../activations.md)), 或者选择一个 Theano 或 TensorFlow 操作。 __输入尺寸__ 任意尺寸。 当使用此层作为模型中的第一层时, 使用参数 `input_shape` (整数元组,不包括样本数的轴)。 __输出尺寸__ 与输入相同。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L81)</span> ### Dropout ```python keras.layers.Dropout(rate, noise_shape=None, seed=None) ``` 将 Dropout 应用于输入。 Dropout 包括在训练中每次更新时, 将输入单元的按比率随机设置为 0, 这有助于防止过拟合。 __参数__ - __rate__: 在 0 和 1 之间浮动。需要丢弃的输入比例。 - __noise_shape__: 1D 整数张量, 表示将与输入相乘的二进制 dropout 掩层的形状。 例如,如果你的输入尺寸为 `(batch_size, timesteps, features)`,然后 你希望 dropout 掩层在所有时间步都是一样的, 你可以使用 `noise_shape=(batch_size, 1, features)`。 - __seed__: 一个作为随机种子的 Python 整数。 __参考文献__ - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L462)</span> ### Flatten ```python keras.layers.Flatten(data_format=None) ``` 将输入展平。不影响批量大小。 __参数__ - __data_format__:一个字符串,其值为 `channels_last`(默认值)或者 `channels_first`。它表明输入的维度的顺序。此参数的目的是当模型从一种数据格式切换到另一种数据格式时保留权重顺序。`channels_last` 对应着尺寸为 `(batch, ..., channels)` 的输入,而 `channels_first` 对应着尺寸为 `(batch, channels, ...)` 的输入。默认为 `image_data_format` 的值,你可以在 Keras 的配置文件 `~/.keras/keras.json` 中找到它。如果你从未设置过它,那么它将是 `channels_last` __示例__ ```python model = Sequential() model.add(Conv2D(64, (3, 3), input_shape=(3, 32, 32), padding='same',)) # 现在:model.output_shape == (None, 64, 
32, 32) model.add(Flatten()) # 现在:model.output_shape == (None, 65536) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/engine/input_layer.py#L114)</span> ### Input ```python keras.engine.input_layer.Input() ``` `Input()` 用于实例化 Keras 张量。 Keras 张量是底层后端(Theano, TensorFlow 或 CNTK) 的张量对象,我们增加了一些特性,使得能够通过了解模型的输入 和输出来构建 Keras 模型。 例如,如果 a, b 和 c 都是 Keras 张量, 那么以下操作是可行的: `model = Model(input=[a, b], output=c)` 添加的 Keras 属性是: - __`_keras_shape`__: 通过 Keras端的尺寸推理 进行传播的整数尺寸元组。 - __`_keras_history`__: 应用于张量的最后一层。 整个网络层计算图可以递归地从该层中检索。 __参数__ - __shape__: 一个尺寸元组(整数),不包含批量大小。 例如,`shape=(32,)` 表明期望的输入是按批次的 32 维向量。 - __batch_shape__: 一个尺寸元组(整数),包含批量大小。 例如,`batch_shape=(10, 32)` 表明期望的输入是 10 个 32 维向量。 `batch_shape=(None, 32)` 表明任意批次大小的 32 维向量。 - __name__: 一个可选的层的名称的字符串。 在一个模型中应该是唯一的(不可以重用一个名字两次)。 如未提供,将自动生成。 - __dtype__: 输入所期望的数据类型,字符串表示 (`float32`, `float64`, `int32`...) - __sparse__: 一个布尔值,指明需要创建的占位符是否是稀疏的。 - __tensor__: 可选的可封装到 `Input` 层的现有张量。 如果设定了,那么这个层将不会创建占位符张量。 __返回__ 一个张量。 __示例__ ```python # 这是 Keras 中的一个逻辑回归 x = Input(shape=(32,)) y = Dense(16, activation='softmax')(x) model = Model(x, y) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L311)</span> ### Reshape ```python keras.layers.Reshape(target_shape) ``` 将输入重新调整为特定的尺寸。 __参数__ - __target_shape__: 目标尺寸。整数元组。 不包含表示批量的轴。 __输入尺寸__ 任意,尽管输入尺寸中的所有维度必须是固定的。 当使用此层作为模型中的第一层时, 使用参数 `input_shape` (整数元组,不包括样本数的轴)。 __输出尺寸__ `(batch_size,) + target_shape` __示例__ ```python # 作为 Sequential 模型的第一层 model = Sequential() model.add(Reshape((3, 4), input_shape=(12,))) # 现在:model.output_shape == (None, 3, 4) # 注意: `None` 是批表示的维度 # 作为 Sequential 模型的中间层 model.add(Reshape((6, 2))) # 现在: model.output_shape == (None, 6, 2) # 还支持使用 `-1` 表示维度的尺寸推断 model.add(Reshape((-1, 2, 2))) # 现在: model.output_shape == (None, 3, 2, 2) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L410)</span> ### Permute ```python keras.layers.Permute(dims) ``` 根据给定的模式置换输入的维度。 在某些场景下很有用,例如将 RNN 和 CNN 连接在一起。 __示例__ ```python model = Sequential() model.add(Permute((2, 1), input_shape=(10, 64))) # 现在: model.output_shape == (None, 64, 10) # 注意: `None` 是批表示的维度 ``` __参数__ - __dims__: 整数元组。置换模式,不包含样本维度。 索引从 1 开始。 例如, `(2, 1)` 置换输入的第一和第二个维度。 __输入尺寸__ 任意。当使用此层作为模型中的第一层时, 使用参数 `input_shape` (整数元组,不包括样本数的轴)。 __输出尺寸__ 与输入尺寸相同,但是维度根据指定的模式重新排列。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L524)</span> ### RepeatVector ```python keras.layers.RepeatVector(n) ``` 将输入重复 n 次。 __示例__ ```python model = Sequential() model.add(Dense(32, input_dim=32)) # 现在: model.output_shape == (None, 32) # 注意: `None` 是批表示的维度 model.add(RepeatVector(3)) # 现在: model.output_shape == (None, 3, 32) ``` __参数__ - __n__: 整数,重复次数。 __输入尺寸__ 2D 张量,尺寸为 `(num_samples, features)`。 __输出尺寸__ 3D 张量,尺寸为 `(num_samples, n, features)`。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L566)</span> ### Lambda ```python keras.layers.Lambda(function, output_shape=None, mask=None, arguments=None) ``` 将任意表达式封装为 `Layer` 对象。 __示例__ ```python # 添加一个 x -> x^2 层 model.add(Lambda(lambda x: x ** 2)) ``` ```python # 添加一个网络层,返回输入的正数部分 # 与负数部分的反面的连接 def antirectifier(x): x -= K.mean(x, axis=1, keepdims=True) x = K.l2_normalize(x, axis=1) pos = K.relu(x) neg = K.relu(-x) return K.concatenate([pos, neg], axis=1) def antirectifier_output_shape(input_shape): shape = 
list(input_shape) assert len(shape) == 2 # only valid for 2D tensors shape[-1] *= 2 return tuple(shape) model.add(Lambda(antirectifier, output_shape=antirectifier_output_shape)) ``` ```python # 添加一个返回 hadamard 乘积和两个输入张量之和的层 def hadamard_product_sum(tensors): out1 = tensors[0] * tensors[1] out2 = K.sum(out1, axis=-1) return [out1, out2] def hadamard_product_sum_output_shape(input_shapes): shape1 = list(input_shapes[0]) shape2 = list(input_shapes[1]) assert shape1 == shape2 # 否则无法得到 hadamard 乘积 return [tuple(shape1), tuple(shape2[:-1])] x1 = Dense(32)(input_1) x2 = Dense(32)(input_2) layer = Lambda(hadamard_product_sum, hadamard_product_sum_output_shape) x_hadamard, x_sum = layer([x1, x2]) ``` __参数__ - __function__: 需要封装的函数。 将输入张量或张量序列作为第一个参数。 - __output_shape__: 预期的函数输出尺寸。 只在使用 Theano 时有意义。 可以是元组或者函数。 如果是元组,它只指定第一个维度; 样本维度假设与输入相同: `output_shape = (input_shape[0], ) + output_shape` 或者,输入是 `None` 且样本维度也是 `None`: `output_shape = (None, ) + output_shape` 如果是函数,它指定整个尺寸为输入尺寸的一个函数: `output_shape = f(input_shape)` - __mask__: 要么是 None (表示无 masking),要么是一个张量表示用于 Embedding 的输入 mask。 - __arguments__: 可选的需要传递给函数的关键字参数。 __输入尺寸__ 任意。当使用此层作为模型中的第一层时, 使用参数 `input_shape` (整数元组,不包括样本数的轴)。 __输出尺寸__ 由 `output_shape` 参数指定 (或者在使用 TensorFlow 时,自动推理得到)。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L940)</span> ### ActivityRegularization ```python keras.layers.ActivityRegularization(l1=0.0, l2=0.0) ``` 网络层,对基于代价函数的输入活动应用一个更新。 __参数__ - __l1__: L1 正则化因子 (正数浮点型)。 - __l2__: L2 正则化因子 (正数浮点型)。 __输入尺寸__ 任意。当使用此层作为模型中的第一层时, 使用参数 `input_shape` (整数元组,不包括样本数的轴)。 __输出尺寸__ 与输入相同。 ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L28)</span> ### Masking ```python keras.layers.Masking(mask_value=0.0) ``` 使用覆盖值覆盖序列,以跳过时间步。 如果一个给定的样本时间步的所有特征都等于 `mask_value`, 那么这个时间步将在所有下游层被覆盖 (跳过) (只要它们支持覆盖)。 如果任何下游层不支持覆盖但仍然收到此类输入覆盖信息,会引发异常。 __示例__ 考虑将要喂入一个 LSTM 层的 Numpy 矩阵 `x`, 尺寸为 `(samples, timesteps, features)`。 你想要覆盖时间步 #3 的样本 #0,以及时间步 #5 的样本 #2, 由于你缺乏这几个时间步的特征。你可以: - 设置 `x[0, 3, :] = 0.` 以及 `x[2, 5, :] = 0.` - 在 LSTM 层之前,插入一个 `mask_value=0` 的 `Masking` 层: ```python model = Sequential() model.add(Masking(mask_value=0., input_shape=(timesteps, features))) model.add(LSTM(32)) ``` __参数__ - __mask_value__: 要么是 None,要么是一个要跳过的 mask 值。 --- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L141)</span> ### SpatialDropout1D ```python keras.layers.SpatialDropout1D(rate) ``` Dropout 的 Spatial 1D 版本 此版本的功能与 Dropout 相同,但它会丢弃整个 1D 的特征图而不是丢弃单个元素。如果特征图中相邻的帧是强相关的(通常是靠前的卷积层中的情况),那么常规的 dropout 将无法使激活正则化,且导致有效的学习速率降低。在这种情况下,SpatialDropout1D 将有助于提高特征图之间的独立性,应该使用它来代替 Dropout。 __参数__ - __rate__: 0 到 1 之间的浮点数。需要丢弃的输入比例。 __输入尺寸__ 3D 张量,尺寸为:`(samples, timesteps, channels)` __输出尺寸__ 与输入相同。 __参考文献__ - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) --- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L178)</span> ### SpatialDropout2D ```python keras.layers.SpatialDropout2D(rate, data_format=None) ``` Dropout 的 Spatial 2D 版本 此版本的功能与 Dropout 相同,但它会丢弃整个 2D 的特征图而不是丢弃单个元素。如果特征图中相邻的像素是强相关的(通常是靠前的卷积层中的情况),那么常规的 dropout 将无法使激活正则化,且导致有效的学习速率降低。在这种情况下,SpatialDropout2D 将有助于提高特征图之间的独立性,应该使用它来代替 dropout。 __参数__ - __rate__: 0 到 1 之间的浮点数。需要丢弃的输入比例。 - __data_format__:`channels_first` 或者 `channels_last`。在 `channels_first` 模式中,通道维度(即深度)位于索引 1,在 `channels_last` 模式中,通道维度位于索引 3。默认为 `image_data_format` 
的值,你可以在 Keras 的配置文件 `~/.keras/keras.json` 中找到它。如果你从未设置过它,那么它将是 `channels_last`。 __输入尺寸__ 4D 张量,如果 data_format=`channels_first`,尺寸为 `(samples, channels, rows, cols)`,如果 data_format=`channels_last`,尺寸为 `(samples, rows, cols, channels)`。 __输出尺寸__ 与输入相同。 __参考文献__ - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) --- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L228)</span> ### SpatialDropout3D ```python keras.layers.SpatialDropout3D(rate, data_format=None) ``` Dropout 的 Spatial 3D 版本 此版本的功能与 Dropout 相同,但它会丢弃整个 3D 的特征图而不是丢弃单个元素。如果特征图中相邻的体素是强相关的(通常是靠前的卷积层中的情况),那么常规的 dropout 将无法使激活正则化,且导致有效的学习速率降低。在这种情况下,SpatialDropout3D 将有助于提高特征图之间的独立性,应该使用它来代替 dropout。 __参数__ - __rate__: 0 到 1 之间的浮点数。需要丢弃的输入比例。 - __data_format__:`channels_first` 或者 `channels_last`。在 `channels_first` 模式中,通道维度(即深度)位于索引 1,在 `channels_last` 模式中,通道维度位于索引 4。默认为 `image_data_format` 的值,你可以在 Keras 的配置文件 `~/.keras/keras.json` 中找到它。如果你从未设置过它,那么它将是 `channels_last`。 __输入尺寸__ 5D 张量,如果 data_format=`channels_first`,尺寸为 `(samples, channels, dim1, dim2, dim3)`,如果 data_format=`channels_last`,尺寸为 `(samples, dim1, dim2, dim3, channels)`。 __输出尺寸__ 与输入相同。 __参考文献__ - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)
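下面给出一个简单的用法示意(模型结构仅为假设的示例),展示如何按照上文的建议,在卷积层之后使用 `SpatialDropout2D` 来代替普通的 `Dropout`:

```python
from keras.models import Sequential
from keras.layers import Conv2D, SpatialDropout2D, Flatten, Dense

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(64, 64, 3)))
# 整张特征图一起丢弃,而不是丢弃单个元素
model.add(SpatialDropout2D(0.3))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(SpatialDropout2D(0.3))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
```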
keras-docs-zh/sources/layers/core.md/0
{ "file_path": "keras-docs-zh/sources/layers/core.md", "repo_id": "keras-docs-zh", "token_count": 10018 }
79
# 图像预处理 <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/preprocessing/image.py#L238)</span> ## ImageDataGenerator 类 ```python keras.preprocessing.image.ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0, width_shift_range=0.0, height_shift_range=0.0, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format='channels_last', validation_split=0.0, interpolation_order=1, dtype='float32') ``` 通过实时数据增强生成张量图像数据批次。数据将不断循环(按批次)。 __参数__ - __featurewise_center__: 布尔值。将输入数据的均值设置为 0,逐特征进行。 - __samplewise_center__: 布尔值。将每个样本的均值设置为 0。 - __featurewise_std_normalization__: Boolean. 布尔值。将输入除以数据标准差,逐特征进行。 - __samplewise_std_normalization__: 布尔值。将每个输入除以其标准差。 - __zca_epsilon__: ZCA 白化的 epsilon 值,默认为 1e-6。 - __zca_whitening__: 布尔值。是否应用 ZCA 白化。 - __rotation_range__: 整数。随机旋转的度数范围。 - __width_shift_range__: 浮点数、一维数组或整数 - float: 如果 <1,则是除以总宽度的值,或者如果 >=1,则为像素值。 - 1-D 数组: 数组中的随机元素。 - int: 来自间隔 `(-width_shift_range, +width_shift_range)` 之间的整数个像素。 - `width_shift_range=2` 时,可能值是整数 `[-1, 0, +1]`,与 `width_shift_range=[-1, 0, +1]` 相同;而 `width_shift_range=1.0` 时,可能值是 `[-1.0, +1.0)` 之间的浮点数。 - __height_shift_range__: 浮点数、一维数组或整数 - float: 如果 <1,则是除以总宽度的值,或者如果 >=1,则为像素值。 - 1-D array-like: 数组中的随机元素。 - int: 来自间隔 `(-height_shift_range, +height_shift_range)` 之间的整数个像素。 - `height_shift_range=2` 时,可能值是整数 `[-1, 0, +1]`,与 `height_shift_range=[-1, 0, +1]` 相同;而 `height_shift_range=1.0` 时,可能值是 `[-1.0, +1.0)` 之间的浮点数。 - __brightness_range__: 两个浮点数的元组或列表。从中选择亮度偏移值的范围。 - __shear_range__: 浮点数。剪切强度(以弧度逆时针方向剪切角度)。 - __zoom_range__: 浮点数 或 `[lower, upper]`。随机缩放范围。如果是浮点数,`[lower, upper] = [1-zoom_range, 1+zoom_range]`。 - __channel_shift_range__: 浮点数。随机通道转换的范围。 - __fill_mode__: {"constant", "nearest", "reflect" or "wrap"} 之一。默认为 'nearest'。输入边界以外的点根据给定的模式填充: - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k) - 'nearest': aaaaaaaa|abcd|dddddddd - 'reflect': abcddcba|abcd|dcbaabcd - 'wrap': abcdabcd|abcd|abcdabcd - __cval__: 浮点数或整数。用于边界之外的点的值,当 `fill_mode = "constant"` 时。 - __horizontal_flip__: 布尔值。随机水平翻转。 - __vertical_flip__: 布尔值。随机垂直翻转。 - __rescale__: 重缩放因子。默认为 None。如果是 None 或 0,不进行缩放,否则将数据乘以所提供的值(在应用任何其他转换之前)。 - __preprocessing_function__: 应用于每个输入的函数。这个函数会在任何其他改变之前运行。这个函数需要一个参数:一张图像(秩为 3 的 Numpy 张量),并且应该输出一个同尺寸的 Numpy 张量。 - __data_format__: 图像数据格式,{"channels_first", "channels_last"} 之一。"channels_last" 模式表示图像输入尺寸应该为 `(samples, height, width, channels)`,"channels_first" 模式表示输入尺寸应该为 `(samples, channels, height, width)`。默认为 在 Keras 配置文件 `~/.keras/keras.json` 中的 `image_data_format` 值。如果你从未设置它,那它就是 "channels_last"。 - __validation_split__: 浮点数。Float. 
保留用于验证的图像的比例(严格在0和1之间)。 - __dtype__: 生成数组使用的数据类型。 __示例__ 使用 `.flow(x, y)` 的例子: ```python (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) datagen = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True) # 计算特征归一化所需的数量 # (如果应用 ZCA 白化,将计算标准差,均值,主成分) datagen.fit(x_train) # 使用实时数据增益的批数据对模型进行拟合: model.fit_generator(datagen.flow(x_train, y_train, batch_size=32), steps_per_epoch=len(x_train) / 32, epochs=epochs) # 这里有一个更 「手动」的例子 for e in range(epochs): print('Epoch', e) batches = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32): model.fit(x_batch, y_batch) batches += 1 if batches >= len(x_train) / 32:           # 我们需要手动打破循环,           # 因为生成器会无限循环 break ``` 使用 `.flow_from_directory(directory)` 的例子: ```python train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( 'data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( 'data/validation', target_size=(150, 150), batch_size=32, class_mode='binary') model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` 同时转换图像和蒙版 (mask) 的例子。 ```python # 创建两个相同参数的实例 data_gen_args = dict(featurewise_center=True, featurewise_std_normalization=True, rotation_range=90, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) # 为 fit 和 flow 函数提供相同的种子和关键字参数 seed = 1 image_datagen.fit(images, augment=True, seed=seed) mask_datagen.fit(masks, augment=True, seed=seed) image_generator = image_datagen.flow_from_directory( 'data/images', class_mode=None, seed=seed) mask_generator = mask_datagen.flow_from_directory( 'data/masks', class_mode=None, seed=seed) # 将生成器组合成一个产生图像和蒙版(mask)的生成器 train_generator = zip(image_generator, mask_generator) model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50) ``` 使用 `.flow_from_dataframe(dataframe, directory` 的例子: ```python train_df = pandas.read_csv("./train.csv") valid_df = pandas.read_csv("./valid.csv") train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_dataframe( dataframe=train_df, directory='data/train', x_col="filename", y_col="class", target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_dataframe( dataframe=valid_df, directory='data/validation', x_col="filename", y_col="class", target_size=(150, 150), batch_size=32, class_mode='binary') model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` --- ### ImageDataGenerator 类方法 ### apply_transform ```python apply_transform(x, transform_parameters) ``` 根据给定的参数将变换应用于图像。 __参数__ - __x__: 3D 张量,单张图像。 - __transform_parameters__: 字符串 - 参数 对表示的字典,用于描述转换。目前,使用字典中的以下参数: - 'theta': 浮点数。旋转角度(度)。 - 'tx': 浮点数。在 x 方向上移动。 - 'ty': 浮点数。在 y 方向上移动。 - shear': 浮点数。剪切角度(度)。 - 'zx': 浮点数。放大 x 方向。 - 'zy': 浮点数。放大 y 方向。 - 'flip_horizontal': 布尔 值。水平翻转。 - 'flip_vertical': 
布尔值。垂直翻转。 - 'channel_shift_intencity': 浮点数。频道转换强度。 - 'brightness': 浮点数。亮度转换强度。 __返回__ 输入的转换后版本(相同尺寸)。 --- ### fit ```python fit(x, augment=False, rounds=1, seed=None) ``` 将数据生成器用于某些示例数据。 它基于一组样本数据,计算与数据转换相关的内部数据统计。 当且仅当 `featurewise_center` 或 `featurewise_std_normalization` 或 `zca_whitening` 设置为 True 时才需要。 __参数__ - __x__: 样本数据。秩应该为 4。对于灰度数据,通道轴的值应该为 1;对于 RGB 数据,值应该为 3;对于 RGBA 数据,值应该为 4。 - __augment__: 布尔值(默认为 False)。是否使用随机样本扩张。 - __rounds__: 整数(默认为 1)。如果数据数据增强(`augment=True`),表明在数据上进行多少次增强。 - __seed__: 整数(默认 None)。随机种子。 --- ### flow ```python flow(x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None) ``` 采集数据和标签数组,生成批量增强数据。 __参数__ - __x__: 输入数据。秩为 4 的 Numpy 矩阵或元组。如果是元组,第一个元素应该包含图像,第二个元素是另一个 Numpy 数组或一列 Numpy 数组,它们不经过任何修改就传递给输出。可用于将模型杂项数据与图像一起输入。对于灰度数据,图像数组的通道轴的值应该为 1;对于 RGB 数据,其值应该为 3;对于 RGBA 数据,值应该为 4。 - __y__: 标签。 - __batch_size__: 整数 (默认为 32)。 - __shuffle__: 布尔值 (默认为 True)。 - __sample_weight__: 样本权重。 - __seed__: 整数(默认为 None)。 - __save_to_dir__: None 或 字符串(默认为 None)。这使您可以选择指定要保存的正在生成的增强图片的目录(用于可视化您正在执行的操作)。 - __save_prefix__: 字符串(默认 `''`)。保存图片的文件名前缀(仅当 `save_to_dir` 设置时可用)。 - __save_format__: "png", "jpeg" 之一(仅当 `save_to_dir` 设置时可用)。默认:"png"。 - __subset__: 数据子集 ("training" 或 "validation"),如果 在 `ImageDataGenerator` 中设置了 `validation_split`。 __返回__ 一个生成元组 `(x, y)` 的 `Iterator`,其中 `x` 是图像数据的 Numpy 数组(在单张图像输入时),或 Numpy 数组列表(在额外多个输入时),`y` 是对应的标签的 Numpy 数组。如果 'sample_weight' 不是 None,生成的元组形式为 `(x, y, sample_weight)`。如果 `y` 是 None, 只有 Numpy 数组 `x` 被返回。 --- ### flow_from_dataframe ```python flow_from_dataframe(dataframe, directory=None, x_col='filename', y_col='class', weight_col=None, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', validate_filenames=True) ``` 输入 dataframe 和目录的路径,并生成批量的增强/标准化的数据。 这里有一个简单的教程: [http://bit.ly/keras_flow_from_dataframe](http://bit.ly/keras_flow_from_dataframe) __参数__ - __dataframe__: Pandas dataframe,其中一列字符串包含对应目录(或绝对路径,如果 `directory` 为 None)的图片文件路径。 它应该根据 `class_mode` 来包含其他列: - 如果 `class_mode` 是 `"categorical"` (默认值),它必须包含 `y_col` 列表示每张图片的类别。 这一列的值可以是字符串/列表/元组,如果是一个单独的类,或者是列表/元组,如果是多个类。 - 如果 `class_mode` 是 `"binary"` 或 `"sparse"`,它必须包含给定的 `y_col` 列表示每张图片的字符串类别。 - 如果 `class_mode` 是 `"raw"` 或 `"multi_output"`,它必须包含 `y_col` 中指定的列。 - 如果 `class_mode` 是 `"input"` 或 `None`,则不需要额外的列。 - __directory__: 字符串,读取图片的目录的路径,如果是 `None`, `x_col` 列中的数据必须是绝对路径。 - __x_col__: 字符串,`dataframe` 中包含文件名列(或者绝对路径,如果 `directory` 是 `None`)。 - __y_col__: 字符串或字符串列表,`dataframe` 中将作为目标数据的列。 - __weight_col__: 字符串,`dataframe` 中包含样本权重的列。默认为 `None`。 - __target_size__: 整数元组 `(height, width)`,默认为 `(256, 256)`。所有找到的图都会调整到这个维度。 - __color_mode__: "grayscale", "rbg", "rgba" 之一。默认:"rgb"。 图像是否转换为 1 个或 3 个颜色通道。 - __classes__: 可选的类别列表 (例如, `['dogs', 'cats']`)。默认:None。 如未提供,类比列表将自动从 `y_col` 中推理出来,`y_col` 将会被映射为类别索引)。 包含从类名到类索引的映射的字典可以通过属性 `class_indices` 获得。 - __class_mode__: "binary", "categorical", "input", "multi_output", "raw", sparse" 或 None 之一。默认:"categorical"。 决定返回标签数组的类型: - `"binary"`: 1D numpy 数组二进制标签; - `"categorical"`: 2D numpy 数组 one-hot 编码标签,支持多标签输出; - `"input"`: 与输入图像相同的图像(主要用于与自动编码器一起使用); - `"multi_output"`: 不同列的值的列表; - `"raw"`: `y_col` 列中值的 numpy 数组; - `"sparse"`: 1D numpy 数组整数标签; - `"other"` 将是 y_col 数据的 numpy 数组; - `None`: 不返回任何标签(生成器只会产生批量的图像数据,这对使用 `model.predict_generator()`, `model.evaluate_generator()` 等很有用)。 - __batch_size__: 批量数据的尺寸(默认:32)。 - 
__shuffle__: 是否混洗数据(默认:True) - __seed__: 可选的混洗和转换的随即种子。 - __save_to_dir__: None 或 str (默认: None). 这允许你可选地指定要保存正在生成的增强图片的目录(用于可视化您正在执行的操作)。 - __save_prefix__: 字符串。保存图片的文件名前缀(仅当 `save_to_dir` 设置时可用)。 - __save_format__: "png", "jpeg" 之一(仅当 `save_to_dir` 设置时可用)。默认:"png"。 - __follow_links__: 是否跟随类子目录中的符号链接(默认:False)。 - __subset__: 数据子集 (`"training"` 或 `"validation"`),如果在 `ImageDataGenerator` 中设置了 `validation_split`。 - __interpolation__: 在目标大小与加载图像的大小不同时,用于重新采样图像的插值方法。 支持的方法有 `"nearest"`, `"bilinear"`, and `"bicubic"`。 如果安装了 1.1.3 以上版本的 PIL 的话,同样支持 `"lanczos"`。 如果安装了 3.4.0 以上版本的 PIL 的话,同样支持 `"box"` 和 `"hamming"`。 默认情况下,使用 `"nearest"`。 - __validate_filenames__: 布尔值,是否验证 `x_col` 中的图片路径。 如果 `True`,将忽略无效的图片。禁用这一选项会加速这一函数的执行。 默认:`True`。 __Returns__ 一个生成 `(x, y)` 元组的 DataFrameIterator, 其中 `x` 是一个包含一批尺寸为 `(batch_size, *target_size, channels)` 的图像样本的 numpy 数组,`y` 是对应的标签的 numpy 数组。 --- ### flow_from_directory ```python flow_from_directory(directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest') ``` __参数__ - __directory__: 字符串,目标目录的路径。每个类应该包含一个子目录。任何在子目录树下的 PNG, JPG, BMP, PPM 或 TIF 图像,都将被包含在生成器中。更多细节,详见[此脚本](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)。 - __target_size__: 整数元组 `(height, width)`,默认:`(256, 256)`。所有的图像将被调整到的尺寸。 - __color_mode__: "grayscale", "rbg", "rgba" 之一。默认:"rgb"。图像是否被转换成 1,3 或 4 个颜色通道。 - __classes__: 可选的类的子目录列表(例如 `['dogs', 'cats']`)。默认:None。如果未提供,类的列表将自动从 `directory` 下的 子目录名称/结构 中推断出来,其中每个子目录都将被作为不同的类(类名将按字典序映射到标签的索引)。包含从类名到类索引的映射的字典可以通过 `class_indices` 属性获得。 - __class_mode__: "categorical", "binary", "sparse", "input" 或 None 之一。默认:"categorical"。决定返回的标签数组的类型: - "categorical" 将是 2D one-hot 编码标签, - "binary" 将是 1D 二进制标签,"sparse" 将是 1D 整数标签, - "input" 将是与输入图像相同的图像(主要用于自动编码器)。 - 如果为 None,不返回标签(生成器将只产生批量的图像数据,对于 `model.predict_generator()`, `model.evaluate_generator()` 等很有用)。请注意,如果 `class_mode` 为 None,那么数据仍然需要驻留在 `directory` 的子目录中才能正常工作。 - __batch_size__: 一批数据的大小(默认 32)。 - __shuffle__: 是否混洗数据(默认 True)。 - __seed__: 可选随机种子,用于混洗和转换。 - __save_to_dir__: None 或 字符串(默认 None)。这使你可以最佳地指定正在生成的增强图片要保存的目录(用于可视化你在做什么)。 - __save_prefix__: 字符串。 保存图片的文件名前缀(仅当 `save_to_dir` 设置时可用)。 - __save_format__: "png", "jpeg" 之一(仅当 `save_to_dir` 设置时可用)。默认:"png"。 - __follow_links__: 是否跟踪类子目录中的符号链接(默认为 False)。 - __subset__: 数据子集 ("training" 或 "validation"),如果 在 `ImageDataGenerator` 中设置了 `validation_split`。 - __interpolation__: 在目标大小与加载图像的大小不同时,用于重新采样图像的插值方法。 支持的方法有 `"nearest"`, `"bilinear"`, and `"bicubic"`。 如果安装了 1.1.3 以上版本的 PIL 的话,同样支持 `"lanczos"`。 如果安装了 3.4.0 以上版本的 PIL 的话,同样支持 `"box"` 和 `"hamming"`。 默认情况下,使用 `"nearest"`。 __返回__ 一个生成 `(x, y)` 元组的 `DirectoryIterator`,其中 `x` 是一个包含一批尺寸为 `(batch_size, *target_size, channels)`的图像的 Numpy 数组,`y` 是对应标签的 Numpy 数组。 --- ### get_random_transform ```python get_random_transform(img_shape, seed=None) ``` 为转换生成随机参数。 __参数__ - __seed__: 随机种子 - __img_shape__: 整数元组。被转换的图像的尺寸。 __返回__ 包含随机选择的描述变换的参数的字典。 --- ### random_transform ```python random_transform(x, seed=None) ``` 将随机变换应用于图像。 __参数__ - __x__: 3D 张量,单张图像。 - __seed__: 随机种子。 __返回__ 输入的随机转换版本(相同形状)。 --- #### standardize ```python standardize(x) ``` 将标准化配置应用于一批输入。 由于该函数主要在内部用于对图像进行标准化处理并将其馈送到网络,所以 `x` 会就地更改。 如果要创建 `x` 的副本,则会带来很大的性能成本。 如果要应用此方法而不更改就地输入,则可以在这之前调用创建副本的方法: standarize(np.copy(x)) __参数__ - __x__: 需要标准化的一批输入。 __返回__ 标准化后的输入。
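下面是一个组合使用上述方法的简单示意(其中 `x` 是随机生成的假设图像,仅用于演示,实际使用时请替换为真实的图片数组):

```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=20,
                             horizontal_flip=True,
                             rescale=1./255)

x = np.random.random((64, 64, 3))  # 假设的 3D 单张图像

# 随机采样一组变换参数,并将该变换应用到这张图像上
params = datagen.get_random_transform(x.shape)
x_aug = datagen.apply_transform(x, params)

# 应用标准化配置;standardize 会就地修改输入,因此这里先复制
x_std = datagen.standardize(np.copy(x_aug))
```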
keras-docs-zh/sources/preprocessing/image.md/0
{ "file_path": "keras-docs-zh/sources/preprocessing/image.md", "repo_id": "keras-docs-zh", "token_count": 13085 }
80
<jupyter_start><jupyter_text>Audio Classification with Hugging Face Transformers**Author:** Sreyan Ghosh**Date created:** 2022/07/01**Last modified:** 2022/08/27**Description:** Training Wav2Vec 2.0 using Hugging Face Transformers for Audio Classification. IntroductionIdentification of speech commands, also known as *keyword spotting* (KWS),is important from an engineering perspective for a wide range of applications,from indexing audio databases and indexing keywords, to running speech models locallyon microcontrollers. Currently, many human-computer interfaces (HCI) like GoogleAssistant, Microsoft Cortana, Amazon Alexa, Apple Siri and others rely on keywordspotting. There is a significant amount of research in the field by all major companies,notably Google and Baidu.In the past decade, deep learning has led to significant performancegains on this task. Though low-level audio features extracted from raw audio like MFCC ormel-filterbanks have been used for decades, the design of these low-level featuresare [flawed by biases](https://arxiv.org/abs/2101.08596). Moreover, deep learning modelstrained on these low-level features can easily overfit to noise or signals irrelevant to thetask. This makes it is essential for any system to learn speech representations that makehigh-level information, such as acoustic and linguistic content, including phonemes,words, semantic meanings, tone, speaker characteristics from speech signals available tosolve the downstream task. [Wav2Vec 2.0](https://arxiv.org/abs/2006.11477), which solves aself-supervised contrastive learning task to learn high-level speech representations,provides a great alternative to traditional low-level features for training deep learningmodels for KWS.In this notebook, we train the Wav2Vec 2.0 (base) model, built on theHugging Face Transformers library, in an end-to-end fashion on the keyword spotting task andachieve state-of-the-art results on the Google Speech Commands Dataset. Setup Installing the requirements<jupyter_code>pip install git+https://github.com/huggingface/transformers.git pip install datasets pip install huggingface-hub pip install joblib pip install librosa<jupyter_output><empty_output><jupyter_text>Importing the necessary libraries<jupyter_code>import random import logging import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers # Only log error messages tf.get_logger().setLevel(logging.ERROR) # Set random seed tf.keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Define certain variables<jupyter_code># Maximum duration of the input audio file we feed to our Wav2Vec 2.0 model. MAX_DURATION = 1 # Sampling rate is the number of samples of audio recorded every second SAMPLING_RATE = 16000 BATCH_SIZE = 32 # Batch-size for training and evaluating our model. NUM_CLASSES = 10 # Number of classes our dataset will have (11 in our case). HIDDEN_DIM = 768 # Dimension of our model output (768 in case of Wav2Vec 2.0 - Base). MAX_SEQ_LENGTH = MAX_DURATION * SAMPLING_RATE # Maximum length of the input audio file. # Wav2Vec 2.0 results in an output frequency with a stride of about 20ms. MAX_FRAMES = 49 MAX_EPOCHS = 2 # Maximum number of training epochs. 
MODEL_CHECKPOINT = "facebook/wav2vec2-base" # Name of pretrained model from Hugging Face Model Hub<jupyter_output><empty_output><jupyter_text>Load the Google Speech Commands Dataset We now download the [Google Speech Commands V1 Dataset](https://arxiv.org/abs/1804.03209),a popular benchmark for training and evaluating deep learning models built for solving the KWS task.The dataset consists of a total of 60,973 audio files, each of 1 second duration,divided into ten classes of keywords ("Yes", "No", "Up", "Down", "Left", "Right", "On","Off", "Stop", and "Go"), a class for silence, and an unknown class to include the falsepositive. We load the dataset from [Hugging Face Datasets](https://github.com/huggingface/datasets).This can be easily done with the `load_dataset` function.<jupyter_code>from datasets import load_dataset speech_commands_v1 = load_dataset("superb", "ks")<jupyter_output><empty_output><jupyter_text>The dataset has the following fields:- **file**: the path to the raw .wav file of the audio- **audio**: the audio file sampled at 16kHz- **label**: label ID of the audio utterance<jupyter_code>print(speech_commands_v1)<jupyter_output><empty_output><jupyter_text>Data Pre-processing For the sake of demonstrating the workflow, in this notebook we only takesmall stratified balanced splits (50%) of the train as our training and test sets.We can easily split the dataset using the `train_test_split` method which expectsthe split size and the name of the column relative to which you want to stratify.Post splitting the dataset, we remove the `unknown` and `silence` classes and onlyfocus on the ten main classes. The `filter` method does that easily for you.Next we sample our train and test splits to a multiple of the `BATCH_SIZE` tofacilitate smooth training and inference. You can achieve that using the `select`method which expects the indices of the samples you want to keep. Rest all arediscarded.<jupyter_code>speech_commands_v1 = speech_commands_v1["train"].train_test_split( train_size=0.5, test_size=0.5, stratify_by_column="label" ) speech_commands_v1 = speech_commands_v1.filter( lambda x: x["label"] != ( speech_commands_v1["train"].features["label"].names.index("_unknown_") and speech_commands_v1["train"].features["label"].names.index("_silence_") ) ) speech_commands_v1["train"] = speech_commands_v1["train"].select( [i for i in range((len(speech_commands_v1["train"]) // BATCH_SIZE) * BATCH_SIZE)] ) speech_commands_v1["test"] = speech_commands_v1["test"].select( [i for i in range((len(speech_commands_v1["test"]) // BATCH_SIZE) * BATCH_SIZE)] ) print(speech_commands_v1)<jupyter_output><empty_output><jupyter_text>Additionally, you can check the actual labels corresponding to each label ID.<jupyter_code>labels = speech_commands_v1["train"].features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label print(id2label)<jupyter_output><empty_output><jupyter_text>Before we can feed the audio utterance samples to our model, we need topre-process them. 
This is done by a Hugging Face Transformers "Feature Extractor"which will (as the name indicates) re-sample your inputs to the sampling ratethe model expects (in-case they exist with a different sampling rate), as wellas generate the other inputs that model requires.To do all of this, we instantiate our `Feature Extractor` with the`AutoFeatureExtractor.from_pretrained`, which will ensure:We get a `Feature Extractor` that corresponds to the model architecture we want to use.We download the config that was used when pretraining this specific checkpoint.This will be cached so that it's not downloaded again the next time we run the cell.The `from_pretrained()` method expects the name of a model from the Hugging Face Hub. This isexactly similar to `MODEL_CHECKPOINT` and we just pass that.We write a simple function that helps us in the pre-processing that is compatiblewith Hugging Face Datasets. To summarize, our pre-processing function should:- Call the audio column to load and if necessary resample the audio file.- Check the sampling rate of the audio file matches the sampling rate of the audio data amodel was pretrained with. You can find this information on the Wav2Vec 2.0 model card.- Set a maximum input length so longer inputs are batched without being truncated.<jupyter_code>from transformers import AutoFeatureExtractor feature_extractor = AutoFeatureExtractor.from_pretrained( MODEL_CHECKPOINT, return_attention_mask=True ) def preprocess_function(examples): audio_arrays = [x["array"] for x in examples["audio"]] inputs = feature_extractor( audio_arrays, sampling_rate=feature_extractor.sampling_rate, max_length=MAX_SEQ_LENGTH, truncation=True, padding=True, ) return inputs # This line with pre-process our speech_commands_v1 dataset. We also remove the "audio" # and "file" columns as they will be of no use to us while training. processed_speech_commands_v1 = speech_commands_v1.map( preprocess_function, remove_columns=["audio", "file"], batched=True ) # Load the whole dataset splits as a dict of numpy arrays train = processed_speech_commands_v1["train"].shuffle(seed=42).with_format("numpy")[:] test = processed_speech_commands_v1["test"].shuffle(seed=42).with_format("numpy")[:]<jupyter_output><empty_output><jupyter_text>Defining the Wav2Vec 2.0 with Classification-Head We now define our model. To be precise, we define a Wav2Vec 2.0 model and add aClassification-Head on top to output a probability distribution of all classes for eachinput audio sample. Since the model might get complex we first define the Wav2Vec2.0 model with Classification-Head as a Keras layer and then build the model using that.We instantiate our main Wav2Vec 2.0 model using the `TFWav2Vec2Model` class. This willinstantiate a model which will output 768 or 1024 dimensional embeddings according tothe config you choose (BASE or LARGE). The `from_pretrained()` additionally helps youload pre-trained weights from the Hugging Face Model Hub. It will download the pre-trained weightstogether with the config corresponding to the name of the model you have mentioned whencalling the method. 
For our task, we choose the BASE variant of the model that hasjust been pre-trained, since we fine-tune over it.<jupyter_code>from transformers import TFWav2Vec2Model def mean_pool(hidden_states, feature_lengths): attenion_mask = tf.sequence_mask( feature_lengths, maxlen=MAX_FRAMES, dtype=tf.dtypes.int64 ) padding_mask = tf.cast( tf.reverse(tf.cumsum(tf.reverse(attenion_mask, [-1]), -1), [-1]), dtype=tf.dtypes.bool, ) hidden_states = tf.where( tf.broadcast_to( tf.expand_dims(~padding_mask, -1), (BATCH_SIZE, MAX_FRAMES, HIDDEN_DIM) ), 0.0, hidden_states, ) pooled_state = tf.math.reduce_sum(hidden_states, axis=1) / tf.reshape( tf.math.reduce_sum(tf.cast(padding_mask, dtype=tf.dtypes.float32), axis=1), [-1, 1], ) return pooled_state class TFWav2Vec2ForAudioClassification(layers.Layer): """Combines the encoder and decoder into an end-to-end model for training.""" def __init__(self, model_checkpoint, num_classes): super().__init__() # Instantiate the Wav2Vec 2.0 model without the Classification-Head self.wav2vec2 = TFWav2Vec2Model.from_pretrained( model_checkpoint, apply_spec_augment=False, from_pt=True ) self.pooling = layers.GlobalAveragePooling1D() # Drop-out layer before the final Classification-Head self.intermediate_layer_dropout = layers.Dropout(0.5) # Classification-Head self.final_layer = layers.Dense(num_classes, activation="softmax") def call(self, inputs): # We take only the first output in the returned dictionary corresponding to the # output of the last layer of Wav2vec 2.0 hidden_states = self.wav2vec2(inputs["input_values"])[0] # If attention mask does exist then mean-pool only un-masked output frames if tf.is_tensor(inputs["attention_mask"]): # Get the length of each audio input by summing up the attention_mask # (attention_mask = (BATCH_SIZE x MAX_SEQ_LENGTH) ∈ {1,0}) audio_lengths = tf.cumsum(inputs["attention_mask"], -1)[:, -1] # Get the number of Wav2Vec 2.0 output frames for each corresponding audio input # length feature_lengths = self.wav2vec2.wav2vec2._get_feat_extract_output_lengths( audio_lengths ) pooled_state = mean_pool(hidden_states, feature_lengths) # If attention mask does not exist then mean-pool only all output frames else: pooled_state = self.pooling(hidden_states) intermediate_state = self.intermediate_layer_dropout(pooled_state) final_state = self.final_layer(intermediate_state) return final_state<jupyter_output><empty_output><jupyter_text>Building and Compiling the model We now build and compile our model. We use the `SparseCategoricalCrossentropy`to train our model since it is a classification task. 
Following much of literaturewe evaluate our model on the `accuracy` metric.<jupyter_code>def build_model(): # Model's input inputs = { "input_values": tf.keras.Input(shape=(MAX_SEQ_LENGTH,), dtype="float32"), "attention_mask": tf.keras.Input(shape=(MAX_SEQ_LENGTH,), dtype="int32"), } # Instantiate the Wav2Vec 2.0 model with Classification-Head using the desired # pre-trained checkpoint wav2vec2_model = TFWav2Vec2ForAudioClassification(MODEL_CHECKPOINT, NUM_CLASSES)( inputs ) # Model model = tf.keras.Model(inputs, wav2vec2_model) # Loss loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Optimizer optimizer = keras.optimizers.Adam(learning_rate=1e-5) # Compile and return model.compile(loss=loss, optimizer=optimizer, metrics=["accuracy"]) return model model = build_model()<jupyter_output><empty_output><jupyter_text>Training the modelBefore we start training our model, we divide the inputs into itsdependent and independent variables.<jupyter_code># Remove targets from training dictionaries train_x = {x: y for x, y in train.items() if x != "label"} test_x = {x: y for x, y in test.items() if x != "label"}<jupyter_output><empty_output><jupyter_text>And now we can finally start training our model.<jupyter_code>model.fit( train_x, train["label"], validation_data=(test_x, test["label"]), batch_size=BATCH_SIZE, epochs=MAX_EPOCHS, )<jupyter_output><empty_output><jupyter_text>Great! Now that we have trained our model, we predict the classesfor audio samples in the test set using the `model.predict()` method! We seethe model predictions are not that great as it has been trained on a very smallnumber of samples for just 1 epoch. For best results, we recommend training onthe complete dataset for at least 5 epochs!<jupyter_code>preds = model.predict(test_x)<jupyter_output><empty_output><jupyter_text>Now we try to infer the model we trained on a randomly sampled audio file.We hear the audio file and then also see how well our model was able to predict!<jupyter_code>import IPython.display as ipd rand_int = random.randint(0, len(test_x)) ipd.Audio(data=np.asarray(test_x["input_values"][rand_int]), autoplay=True, rate=16000) print("Original Label is ", id2label[str(test["label"][rand_int])]) print("Predicted Label is ", id2label[str(np.argmax((preds[rand_int])))])<jupyter_output><empty_output>
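<jupyter_text>As a final sanity check (this cell is a small added sketch, not part of the original example), we can also compute the overall test accuracy directly from the `preds` and `test` objects defined above.<jupyter_code># Convert per-class probabilities into predicted label IDs
predicted_ids = np.argmax(preds, axis=-1)
# Compare against the ground-truth labels of the test split
test_accuracy = np.mean(predicted_ids == test["label"])
print("Test accuracy: {:.4f}".format(test_accuracy))<jupyter_output><empty_output>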
keras-io/examples/audio/ipynb/wav2vec2_audiocls.ipynb/0
{ "file_path": "keras-io/examples/audio/ipynb/wav2vec2_audiocls.ipynb", "repo_id": "keras-io", "token_count": 4850 }
81
<jupyter_start><jupyter_text>DCGAN to generate face images**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2019/04/29**Last modified:** 2023/12/21**Description:** A simple DCGAN trained using `fit()` by overriding `train_step` on CelebA images. Setup<jupyter_code>import keras import tensorflow as tf from keras import layers from keras import ops import matplotlib.pyplot as plt import os import gdown from zipfile import ZipFile<jupyter_output><empty_output><jupyter_text>Prepare CelebA dataWe'll use face images from the CelebA dataset, resized to 64x64.<jupyter_code>os.makedirs("celeba_gan") url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684" output = "celeba_gan/data.zip" gdown.download(url, output, quiet=True) with ZipFile("celeba_gan/data.zip", "r") as zipobj: zipobj.extractall("celeba_gan")<jupyter_output><empty_output><jupyter_text>Create a dataset from our folder, and rescale the images to the [0-1] range:<jupyter_code>dataset = keras.utils.image_dataset_from_directory( "celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32 ) dataset = dataset.map(lambda x: x / 255.0)<jupyter_output><empty_output><jupyter_text>Let's display a sample image:<jupyter_code>for x in dataset: plt.axis("off") plt.imshow((x.numpy() * 255).astype("int32")[0]) break<jupyter_output><empty_output><jupyter_text>Create the discriminatorIt maps a 64x64 image to a binary classification score.<jupyter_code>discriminator = keras.Sequential( [ keras.Input(shape=(64, 64, 3)), layers.Conv2D(64, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(128, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(128, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Flatten(), layers.Dropout(0.2), layers.Dense(1, activation="sigmoid"), ], name="discriminator", ) discriminator.summary()<jupyter_output><empty_output><jupyter_text>Create the generatorIt mirrors the discriminator, replacing `Conv2D` layers with `Conv2DTranspose` layers.<jupyter_code>latent_dim = 128 generator = keras.Sequential( [ keras.Input(shape=(latent_dim,)), layers.Dense(8 * 8 * 128), layers.Reshape((8, 8, 128)), layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"), ], name="generator", ) generator.summary()<jupyter_output><empty_output><jupyter_text>Override `train_step`<jupyter_code>class GAN(keras.Model): def __init__(self, discriminator, generator, latent_dim): super().__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim self.seed_generator = keras.random.SeedGenerator(1337) def compile(self, d_optimizer, g_optimizer, loss_fn): super().compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn self.d_loss_metric = keras.metrics.Mean(name="d_loss") self.g_loss_metric = keras.metrics.Mean(name="g_loss") @property def metrics(self): return [self.d_loss_metric, self.g_loss_metric] def train_step(self, real_images): # Sample random points in the latent space batch_size = ops.shape(real_images)[0] random_latent_vectors = keras.random.normal( shape=(batch_size, 
self.latent_dim), seed=self.seed_generator ) # Decode them to fake images generated_images = self.generator(random_latent_vectors) # Combine them with real images combined_images = ops.concatenate([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = ops.concatenate( [ops.ones((batch_size, 1)), ops.zeros((batch_size, 1))], axis=0 ) # Add random noise to the labels - important trick! labels += 0.05 * tf.random.uniform(tf.shape(labels)) # Train the discriminator with tf.GradientTape() as tape: predictions = self.discriminator(combined_images) d_loss = self.loss_fn(labels, predictions) grads = tape.gradient(d_loss, self.discriminator.trainable_weights) self.d_optimizer.apply_gradients( zip(grads, self.discriminator.trainable_weights) ) # Sample random points in the latent space random_latent_vectors = keras.random.normal( shape=(batch_size, self.latent_dim), seed=self.seed_generator ) # Assemble labels that say "all real images" misleading_labels = ops.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! with tf.GradientTape() as tape: predictions = self.discriminator(self.generator(random_latent_vectors)) g_loss = self.loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, self.generator.trainable_weights) self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights)) # Update metrics self.d_loss_metric.update_state(d_loss) self.g_loss_metric.update_state(g_loss) return { "d_loss": self.d_loss_metric.result(), "g_loss": self.g_loss_metric.result(), }<jupyter_output><empty_output><jupyter_text>Create a callback that periodically saves generated images<jupyter_code>class GANMonitor(keras.callbacks.Callback): def __init__(self, num_img=3, latent_dim=128): self.num_img = num_img self.latent_dim = latent_dim self.seed_generator = keras.random.SeedGenerator(42) def on_epoch_end(self, epoch, logs=None): random_latent_vectors = keras.random.normal( shape=(self.num_img, self.latent_dim), seed=self.seed_generator ) generated_images = self.model.generator(random_latent_vectors) generated_images *= 255 generated_images.numpy() for i in range(self.num_img): img = keras.utils.array_to_img(generated_images[i]) img.save("generated_img_%03d_%d.png" % (epoch, i))<jupyter_output><empty_output><jupyter_text>Train the end-to-end model<jupyter_code>epochs = 1 # In practice, use ~100 epochs gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim) gan.compile( d_optimizer=keras.optimizers.Adam(learning_rate=0.0001), g_optimizer=keras.optimizers.Adam(learning_rate=0.0001), loss_fn=keras.losses.BinaryCrossentropy(), ) gan.fit( dataset, epochs=epochs, callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)] )<jupyter_output><empty_output>
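<jupyter_text>As an optional last step (a minimal sketch added for illustration, not part of the original example), we can sample a few faces directly from the trained generator by drawing random latent vectors and decoding them, mirroring what the `GANMonitor` callback does at the end of each epoch. After only one epoch of training the samples will still look rough.<jupyter_code># Draw random latent vectors and decode them into images
random_latent_vectors = keras.random.normal(shape=(9, latent_dim))
generated_images = gan.generator(random_latent_vectors)
generated_images = generated_images.numpy() * 255

plt.figure(figsize=(9, 9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(generated_images[i].astype("int32"))
    plt.axis("off")
plt.show()<jupyter_output><empty_output>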
keras-io/examples/generative/ipynb/dcgan_overriding_train_step.ipynb/0
{ "file_path": "keras-io/examples/generative/ipynb/dcgan_overriding_train_step.ipynb", "repo_id": "keras-io", "token_count": 3064 }
82
<jupyter_start><jupyter_text>Face image generation with StyleGAN**Author:** [Soon-Yau Cheong](https://www.linkedin.com/in/soonyau/)**Date created:** 2021/07/01**Last modified:** 2021/12/20**Description:** Implementation of StyleGAN for image generation. IntroductionThe key idea of StyleGAN is to progressively increase the resolution of the generatedimages and to incorporate style features in the generative process.This[StyleGAN](https://arxiv.org/abs/1812.04948) implementation is based on the book[Hands-on Image Generation with TensorFlow](https://www.amazon.com/dp/1838826785).The code from the book's[GitHub repository](https://github.com/PacktPublishing/Hands-On-Image-Generation-with-TensorFlow-2.0/tree/master/Chapter07)was refactored to leverage a custom `train_step()` to enablefaster training time via compilation and distribution. Setup<jupyter_code>!pip install tensorflow_addons import os import numpy as np import matplotlib.pyplot as plt from functools import partial import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from tensorflow_addons.layers import InstanceNormalization import gdown from zipfile import ZipFile<jupyter_output><empty_output><jupyter_text>Prepare the datasetIn this example, we will train using the CelebA from TensorFlow Datasets.<jupyter_code>def log2(x): return int(np.log2(x)) # we use different batch size for different resolution, so larger image size # could fit into GPU memory. The keys is image resolution in log2 batch_sizes = {2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 8, 8: 4, 9: 2, 10: 1} # We adjust the train step accordingly train_step_ratio = {k: batch_sizes[2] / v for k, v in batch_sizes.items()} os.makedirs("celeba_gan") url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684" output = "celeba_gan/data.zip" gdown.download(url, output, quiet=True) with ZipFile("celeba_gan/data.zip", "r") as zipobj: zipobj.extractall("celeba_gan") # Create a dataset from our folder, and rescale the images to the [0-1] range: ds_train = keras.utils.image_dataset_from_directory( "celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32 ) def resize_image(res, image): # only downsampling, so use nearest neighbor that is faster to run image = tf.image.resize( image, (res, res), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR ) image = tf.cast(image, tf.float32) / 127.5 - 1.0 return image def create_dataloader(res): batch_size = batch_sizes[log2(res)] dl = ds_train.map(partial(resize_image, res), num_parallel_calls=tf.data.AUTOTUNE).unbatch() dl = dl.shuffle(200).batch(batch_size, drop_remainder=True).prefetch(1).repeat() return dl<jupyter_output>Found 202599 files belonging to 1 classes.<jupyter_text>Utility function to display images after each epoch<jupyter_code>def plot_images(images, log2_res, fname=""): scales = {2: 0.5, 3: 1, 4: 2, 5: 3, 6: 4, 7: 5, 8: 6, 9: 7, 10: 8} scale = scales[log2_res] grid_col = min(images.shape[0], int(32 // scale)) grid_row = 1 f, axarr = plt.subplots( grid_row, grid_col, figsize=(grid_col * scale, grid_row * scale) ) for row in range(grid_row): ax = axarr if grid_row == 1 else axarr[row] for col in range(grid_col): ax[col].imshow(images[row * grid_col + col]) ax[col].axis("off") plt.show() if fname: f.savefig(fname)<jupyter_output><empty_output><jupyter_text>Custom LayersThe following are building blocks that will be used to construct the generators anddiscriminators of the StyleGAN model.<jupyter_code>def fade_in(alpha, a, b): return alpha * 
a + (1.0 - alpha) * b def wasserstein_loss(y_true, y_pred): return -tf.reduce_mean(y_true * y_pred) def pixel_norm(x, epsilon=1e-8): return x / tf.math.sqrt(tf.reduce_mean(x ** 2, axis=-1, keepdims=True) + epsilon) def minibatch_std(input_tensor, epsilon=1e-8): n, h, w, c = tf.shape(input_tensor) group_size = tf.minimum(4, n) x = tf.reshape(input_tensor, [group_size, -1, h, w, c]) group_mean, group_var = tf.nn.moments(x, axes=(0), keepdims=False) group_std = tf.sqrt(group_var + epsilon) avg_std = tf.reduce_mean(group_std, axis=[1, 2, 3], keepdims=True) x = tf.tile(avg_std, [group_size, h, w, 1]) return tf.concat([input_tensor, x], axis=-1) class EqualizedConv(layers.Layer): def __init__(self, out_channels, kernel=3, gain=2, **kwargs): super().__init__(**kwargs) self.kernel = kernel self.out_channels = out_channels self.gain = gain self.pad = kernel != 1 def build(self, input_shape): self.in_channels = input_shape[-1] initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0) self.w = self.add_weight( shape=[self.kernel, self.kernel, self.in_channels, self.out_channels], initializer=initializer, trainable=True, name="kernel", ) self.b = self.add_weight( shape=(self.out_channels,), initializer="zeros", trainable=True, name="bias" ) fan_in = self.kernel * self.kernel * self.in_channels self.scale = tf.sqrt(self.gain / fan_in) def call(self, inputs): if self.pad: x = tf.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT") else: x = inputs output = ( tf.nn.conv2d(x, self.scale * self.w, strides=1, padding="VALID") + self.b ) return output class EqualizedDense(layers.Layer): def __init__(self, units, gain=2, learning_rate_multiplier=1, **kwargs): super().__init__(**kwargs) self.units = units self.gain = gain self.learning_rate_multiplier = learning_rate_multiplier def build(self, input_shape): self.in_channels = input_shape[-1] initializer = keras.initializers.RandomNormal( mean=0.0, stddev=1.0 / self.learning_rate_multiplier ) self.w = self.add_weight( shape=[self.in_channels, self.units], initializer=initializer, trainable=True, name="kernel", ) self.b = self.add_weight( shape=(self.units,), initializer="zeros", trainable=True, name="bias" ) fan_in = self.in_channels self.scale = tf.sqrt(self.gain / fan_in) def call(self, inputs): output = tf.add(tf.matmul(inputs, self.scale * self.w), self.b) return output * self.learning_rate_multiplier class AddNoise(layers.Layer): def build(self, input_shape): n, h, w, c = input_shape[0] initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0) self.b = self.add_weight( shape=[1, 1, 1, c], initializer=initializer, trainable=True, name="kernel" ) def call(self, inputs): x, noise = inputs output = x + self.b * noise return output class AdaIN(layers.Layer): def __init__(self, gain=1, **kwargs): super().__init__(**kwargs) self.gain = gain def build(self, input_shapes): x_shape = input_shapes[0] w_shape = input_shapes[1] self.w_channels = w_shape[-1] self.x_channels = x_shape[-1] self.dense_1 = EqualizedDense(self.x_channels, gain=1) self.dense_2 = EqualizedDense(self.x_channels, gain=1) def call(self, inputs): x, w = inputs ys = tf.reshape(self.dense_1(w), (-1, 1, 1, self.x_channels)) yb = tf.reshape(self.dense_2(w), (-1, 1, 1, self.x_channels)) return ys * x + yb<jupyter_output><empty_output><jupyter_text>Next we build the following:- A model mapping to map the random noise into style code- The generator- The discriminatorFor the generator, we build generator blocks at multiple resolutions,e.g. 4x4, 8x8, ...up to 1024x1024. 
We only use 4x4 in the beginningand we use progressively larger-resolution blocks as the training proceeds.Same for the discriminator.<jupyter_code>def Mapping(num_stages, input_shape=512): z = layers.Input(shape=(input_shape)) w = pixel_norm(z) for i in range(8): w = EqualizedDense(512, learning_rate_multiplier=0.01)(w) w = layers.LeakyReLU(0.2)(w) w = tf.tile(tf.expand_dims(w, 1), (1, num_stages, 1)) return keras.Model(z, w, name="mapping") class Generator: def __init__(self, start_res_log2, target_res_log2): self.start_res_log2 = start_res_log2 self.target_res_log2 = target_res_log2 self.num_stages = target_res_log2 - start_res_log2 + 1 # list of generator blocks at increasing resolution self.g_blocks = [] # list of layers to convert g_block activation to RGB self.to_rgb = [] # list of noise input of different resolutions into g_blocks self.noise_inputs = [] # filter size to use at each stage, keys are log2(resolution) self.filter_nums = { 0: 512, 1: 512, 2: 512, # 4x4 3: 512, # 8x8 4: 512, # 16x16 5: 512, # 32x32 6: 256, # 64x64 7: 128, # 128x128 8: 64, # 256x256 9: 32, # 512x512 10: 16, } # 1024x1024 start_res = 2 ** start_res_log2 self.input_shape = (start_res, start_res, self.filter_nums[start_res_log2]) self.g_input = layers.Input(self.input_shape, name="generator_input") for i in range(start_res_log2, target_res_log2 + 1): filter_num = self.filter_nums[i] res = 2 ** i self.noise_inputs.append( layers.Input(shape=(res, res, 1), name=f"noise_{res}x{res}") ) to_rgb = Sequential( [ layers.InputLayer(input_shape=(res, res, filter_num)), EqualizedConv(3, 1, gain=1), ], name=f"to_rgb_{res}x{res}", ) self.to_rgb.append(to_rgb) is_base = i == self.start_res_log2 if is_base: input_shape = (res, res, self.filter_nums[i - 1]) else: input_shape = (2 ** (i - 1), 2 ** (i - 1), self.filter_nums[i - 1]) g_block = self.build_block( filter_num, res=res, input_shape=input_shape, is_base=is_base ) self.g_blocks.append(g_block) def build_block(self, filter_num, res, input_shape, is_base): input_tensor = layers.Input(shape=input_shape, name=f"g_{res}") noise = layers.Input(shape=(res, res, 1), name=f"noise_{res}") w = layers.Input(shape=512) x = input_tensor if not is_base: x = layers.UpSampling2D((2, 2))(x) x = EqualizedConv(filter_num, 3)(x) x = AddNoise()([x, noise]) x = layers.LeakyReLU(0.2)(x) x = InstanceNormalization()(x) x = AdaIN()([x, w]) x = EqualizedConv(filter_num, 3)(x) x = AddNoise()([x, noise]) x = layers.LeakyReLU(0.2)(x) x = InstanceNormalization()(x) x = AdaIN()([x, w]) return keras.Model([input_tensor, w, noise], x, name=f"genblock_{res}x{res}") def grow(self, res_log2): res = 2 ** res_log2 num_stages = res_log2 - self.start_res_log2 + 1 w = layers.Input(shape=(self.num_stages, 512), name="w") alpha = layers.Input(shape=(1), name="g_alpha") x = self.g_blocks[0]([self.g_input, w[:, 0], self.noise_inputs[0]]) if num_stages == 1: rgb = self.to_rgb[0](x) else: for i in range(1, num_stages - 1): x = self.g_blocks[i]([x, w[:, i], self.noise_inputs[i]]) old_rgb = self.to_rgb[num_stages - 2](x) old_rgb = layers.UpSampling2D((2, 2))(old_rgb) i = num_stages - 1 x = self.g_blocks[i]([x, w[:, i], self.noise_inputs[i]]) new_rgb = self.to_rgb[i](x) rgb = fade_in(alpha[0], new_rgb, old_rgb) return keras.Model( [self.g_input, w, self.noise_inputs, alpha], rgb, name=f"generator_{res}_x_{res}", ) class Discriminator: def __init__(self, start_res_log2, target_res_log2): self.start_res_log2 = start_res_log2 self.target_res_log2 = target_res_log2 self.num_stages = target_res_log2 - start_res_log2 + 1 # 
filter size to use at each stage, keys are log2(resolution) self.filter_nums = { 0: 512, 1: 512, 2: 512, # 4x4 3: 512, # 8x8 4: 512, # 16x16 5: 512, # 32x32 6: 256, # 64x64 7: 128, # 128x128 8: 64, # 256x256 9: 32, # 512x512 10: 16, } # 1024x1024 # list of discriminator blocks at increasing resolution self.d_blocks = [] # list of layers to convert RGB into activation for d_blocks inputs self.from_rgb = [] for res_log2 in range(self.start_res_log2, self.target_res_log2 + 1): res = 2 ** res_log2 filter_num = self.filter_nums[res_log2] from_rgb = Sequential( [ layers.InputLayer( input_shape=(res, res, 3), name=f"from_rgb_input_{res}" ), EqualizedConv(filter_num, 1), layers.LeakyReLU(0.2), ], name=f"from_rgb_{res}", ) self.from_rgb.append(from_rgb) input_shape = (res, res, filter_num) if len(self.d_blocks) == 0: d_block = self.build_base(filter_num, res) else: d_block = self.build_block( filter_num, self.filter_nums[res_log2 - 1], res ) self.d_blocks.append(d_block) def build_base(self, filter_num, res): input_tensor = layers.Input(shape=(res, res, filter_num), name=f"d_{res}") x = minibatch_std(input_tensor) x = EqualizedConv(filter_num, 3)(x) x = layers.LeakyReLU(0.2)(x) x = layers.Flatten()(x) x = EqualizedDense(filter_num)(x) x = layers.LeakyReLU(0.2)(x) x = EqualizedDense(1)(x) return keras.Model(input_tensor, x, name=f"d_{res}") def build_block(self, filter_num_1, filter_num_2, res): input_tensor = layers.Input(shape=(res, res, filter_num_1), name=f"d_{res}") x = EqualizedConv(filter_num_1, 3)(input_tensor) x = layers.LeakyReLU(0.2)(x) x = EqualizedConv(filter_num_2)(x) x = layers.LeakyReLU(0.2)(x) x = layers.AveragePooling2D((2, 2))(x) return keras.Model(input_tensor, x, name=f"d_{res}") def grow(self, res_log2): res = 2 ** res_log2 idx = res_log2 - self.start_res_log2 alpha = layers.Input(shape=(1), name="d_alpha") input_image = layers.Input(shape=(res, res, 3), name="input_image") x = self.from_rgb[idx](input_image) x = self.d_blocks[idx](x) if idx > 0: idx -= 1 downsized_image = layers.AveragePooling2D((2, 2))(input_image) y = self.from_rgb[idx](downsized_image) x = fade_in(alpha[0], x, y) for i in range(idx, -1, -1): x = self.d_blocks[i](x) return keras.Model([input_image, alpha], x, name=f"discriminator_{res}_x_{res}")<jupyter_output><empty_output><jupyter_text>Build StyleGAN with custom train step<jupyter_code>class StyleGAN(tf.keras.Model): def __init__(self, z_dim=512, target_res=64, start_res=4): super().__init__() self.z_dim = z_dim self.target_res_log2 = log2(target_res) self.start_res_log2 = log2(start_res) self.current_res_log2 = self.target_res_log2 self.num_stages = self.target_res_log2 - self.start_res_log2 + 1 self.alpha = tf.Variable(1.0, dtype=tf.float32, trainable=False, name="alpha") self.mapping = Mapping(num_stages=self.num_stages) self.d_builder = Discriminator(self.start_res_log2, self.target_res_log2) self.g_builder = Generator(self.start_res_log2, self.target_res_log2) self.g_input_shape = self.g_builder.input_shape self.phase = None self.train_step_counter = tf.Variable(0, dtype=tf.int32, trainable=False) self.loss_weights = {"gradient_penalty": 10, "drift": 0.001} def grow_model(self, res): tf.keras.backend.clear_session() res_log2 = log2(res) self.generator = self.g_builder.grow(res_log2) self.discriminator = self.d_builder.grow(res_log2) self.current_res_log2 = res_log2 print(f"\nModel resolution:{res}x{res}") def compile( self, steps_per_epoch, phase, res, d_optimizer, g_optimizer, *args, **kwargs ): self.loss_weights = kwargs.pop("loss_weights", 
self.loss_weights) self.steps_per_epoch = steps_per_epoch if res != 2 ** self.current_res_log2: self.grow_model(res) self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.train_step_counter.assign(0) self.phase = phase self.d_loss_metric = keras.metrics.Mean(name="d_loss") self.g_loss_metric = keras.metrics.Mean(name="g_loss") super().compile(*args, **kwargs) @property def metrics(self): return [self.d_loss_metric, self.g_loss_metric] def generate_noise(self, batch_size): noise = [ tf.random.normal((batch_size, 2 ** res, 2 ** res, 1)) for res in range(self.start_res_log2, self.target_res_log2 + 1) ] return noise def gradient_loss(self, grad): loss = tf.square(grad) loss = tf.reduce_sum(loss, axis=tf.range(1, tf.size(tf.shape(loss)))) loss = tf.sqrt(loss) loss = tf.reduce_mean(tf.square(loss - 1)) return loss def train_step(self, real_images): self.train_step_counter.assign_add(1) if self.phase == "TRANSITION": self.alpha.assign( tf.cast(self.train_step_counter / self.steps_per_epoch, tf.float32) ) elif self.phase == "STABLE": self.alpha.assign(1.0) else: raise NotImplementedError alpha = tf.expand_dims(self.alpha, 0) batch_size = tf.shape(real_images)[0] real_labels = tf.ones(batch_size) fake_labels = -tf.ones(batch_size) z = tf.random.normal((batch_size, self.z_dim)) const_input = tf.ones(tuple([batch_size] + list(self.g_input_shape))) noise = self.generate_noise(batch_size) # generator with tf.GradientTape() as g_tape: w = self.mapping(z) fake_images = self.generator([const_input, w, noise, alpha]) pred_fake = self.discriminator([fake_images, alpha]) g_loss = wasserstein_loss(real_labels, pred_fake) trainable_weights = ( self.mapping.trainable_weights + self.generator.trainable_weights ) gradients = g_tape.gradient(g_loss, trainable_weights) self.g_optimizer.apply_gradients(zip(gradients, trainable_weights)) # discriminator with tf.GradientTape() as gradient_tape, tf.GradientTape() as total_tape: # forward pass pred_fake = self.discriminator([fake_images, alpha]) pred_real = self.discriminator([real_images, alpha]) epsilon = tf.random.uniform((batch_size, 1, 1, 1)) interpolates = epsilon * real_images + (1 - epsilon) * fake_images gradient_tape.watch(interpolates) pred_fake_grad = self.discriminator([interpolates, alpha]) # calculate losses loss_fake = wasserstein_loss(fake_labels, pred_fake) loss_real = wasserstein_loss(real_labels, pred_real) loss_fake_grad = wasserstein_loss(fake_labels, pred_fake_grad) # gradient penalty gradients_fake = gradient_tape.gradient(loss_fake_grad, [interpolates]) gradient_penalty = self.loss_weights[ "gradient_penalty" ] * self.gradient_loss(gradients_fake) # drift loss all_pred = tf.concat([pred_fake, pred_real], axis=0) drift_loss = self.loss_weights["drift"] * tf.reduce_mean(all_pred ** 2) d_loss = loss_fake + loss_real + gradient_penalty + drift_loss gradients = total_tape.gradient( d_loss, self.discriminator.trainable_weights ) self.d_optimizer.apply_gradients( zip(gradients, self.discriminator.trainable_weights) ) # Update metrics self.d_loss_metric.update_state(d_loss) self.g_loss_metric.update_state(g_loss) return { "d_loss": self.d_loss_metric.result(), "g_loss": self.g_loss_metric.result(), } def call(self, inputs: dict()): style_code = inputs.get("style_code", None) z = inputs.get("z", None) noise = inputs.get("noise", None) batch_size = inputs.get("batch_size", 1) alpha = inputs.get("alpha", 1.0) alpha = tf.expand_dims(alpha, 0) if style_code is None: if z is None: z = tf.random.normal((batch_size, self.z_dim)) style_code = 
self.mapping(z) if noise is None: noise = self.generate_noise(batch_size) # self.alpha.assign(alpha) const_input = tf.ones(tuple([batch_size] + list(self.g_input_shape))) images = self.generator([const_input, style_code, noise, alpha]) images = np.clip((images * 0.5 + 0.5) * 255, 0, 255).astype(np.uint8) return images<jupyter_output><empty_output><jupyter_text>TrainingWe first build the StyleGAN at smallest resolution, such as 4x4 or 8x8. Then weprogressively grow the model to higher resolution by appending new generator anddiscriminator blocks.<jupyter_code>START_RES = 4 TARGET_RES = 128 style_gan = StyleGAN(start_res=START_RES, target_res=TARGET_RES)<jupyter_output><empty_output><jupyter_text>The training for each new resolution happens in two phases - "transition" and "stable".In the transition phase, the features from the previous resolution are mixed with thecurrent resolution. This allows for a smoother transition when scaling up. We use eachepoch in `model.fit()` as a phase.<jupyter_code>def train( start_res=START_RES, target_res=TARGET_RES, steps_per_epoch=5000, display_images=True, ): opt_cfg = {"learning_rate": 1e-3, "beta_1": 0.0, "beta_2": 0.99, "epsilon": 1e-8} val_batch_size = 16 val_z = tf.random.normal((val_batch_size, style_gan.z_dim)) val_noise = style_gan.generate_noise(val_batch_size) start_res_log2 = int(np.log2(start_res)) target_res_log2 = int(np.log2(target_res)) for res_log2 in range(start_res_log2, target_res_log2 + 1): res = 2 ** res_log2 for phase in ["TRANSITION", "STABLE"]: if res == start_res and phase == "TRANSITION": continue train_dl = create_dataloader(res) steps = int(train_step_ratio[res_log2] * steps_per_epoch) style_gan.compile( d_optimizer=tf.keras.optimizers.legacy.Adam(**opt_cfg), g_optimizer=tf.keras.optimizers.legacy.Adam(**opt_cfg), loss_weights={"gradient_penalty": 10, "drift": 0.001}, steps_per_epoch=steps, res=res, phase=phase, run_eagerly=False, ) prefix = f"res_{res}x{res}_{style_gan.phase}" ckpt_cb = keras.callbacks.ModelCheckpoint( f"checkpoints/stylegan_{res}x{res}.ckpt", save_weights_only=True, verbose=0, ) print(phase) style_gan.fit( train_dl, epochs=1, steps_per_epoch=steps, callbacks=[ckpt_cb] ) if display_images: images = style_gan({"z": val_z, "noise": val_noise, "alpha": 1.0}) plot_images(images, res_log2)<jupyter_output><empty_output><jupyter_text>StyleGAN can take a long time to train, in the code below, a small `steps_per_epoch`value of 1 is used to sanity-check the code is working alright. In practice, a larger`steps_per_epoch` value (over 10000)is required to get decent results.<jupyter_code>train(start_res=4, target_res=16, steps_per_epoch=1, display_images=True)<jupyter_output>Model resolution:4x4 STABLE 1/1 [==============================] - 6s 6s/step - d_loss: 2.7647 - g_loss: -1.6941<jupyter_text>ResultsWe can now run some inference using pre-trained 64x64 checkpoints. In general, the imagefidelity increases with the resolution. 
You can try to train this StyleGAN to resolutionsabove 128x128 with the CelebA HQ dataset.<jupyter_code>url = "https://github.com/soon-yau/stylegan_keras/releases/download/keras_example_v1.0/stylegan_128x128.ckpt.zip" weights_path = keras.utils.get_file( "stylegan_128x128.ckpt.zip", url, extract=True, cache_dir=os.path.abspath("."), cache_subdir="pretrained", ) style_gan.grow_model(128) style_gan.load_weights(os.path.join("pretrained/stylegan_128x128.ckpt")) tf.random.set_seed(196) batch_size = 2 z = tf.random.normal((batch_size, style_gan.z_dim)) w = style_gan.mapping(z) noise = style_gan.generate_noise(batch_size=batch_size) images = style_gan({"style_code": w, "noise": noise, "alpha": 1.0}) plot_images(images, 5)<jupyter_output>Downloading data from https://github.com/soon-yau/stylegan_keras/releases/download/keras_example_v1.0/stylegan_128x128.ckpt.zip 540540928/540534982 [==============================] - 11s 0us/step 540549120/540534982 [==============================] - 11s 0us/step Model resolution:128x128 WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program. Two checkpoint references resolved to different objects (<__main__.EqualizedConv object at 0x7f4211b33a50> and <keras.layers.core.flatten.Flatten object at 0x7f42125e0190>). WARNING:tensorflow:Inconsistent references when loading the checkpoint into this object graph. Either the Trackable object references in the Python program have changed in an incompatible way, or the checkpoint was generated in an incompatible program. Two checkpoint references re[...]<jupyter_text>Style MixingWe can also mix styles from two images to create a new image.<jupyter_code>alpha = 0.4 w_mix = np.expand_dims(alpha * w[0] + (1 - alpha) * w[1], 0) noise_a = [np.expand_dims(n[0], 0) for n in noise] mix_images = style_gan({"style_code": w_mix, "noise": noise_a}) image_row = np.hstack([images[0], images[1], mix_images[0]]) plt.figure(figsize=(9, 3)) plt.imshow(image_row) plt.axis("off")<jupyter_output><empty_output>
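<jupyter_text>Style mixing above blends the two style codes with a single ratio. As a minimal, illustrative sketch (reusing the `style_gan` model, the style codes `w`, the `noise` tensors, and the pre-trained 128x128 weights loaded in the cells above), the same idea extends to a smooth interpolation between the two codes. The helper name `interpolate_styles` is just for illustration.<jupyter_code>def interpolate_styles(model, w_a, w_b, noise, steps=5):
    # Decode a sequence of blended style codes, going from pure w_a to pure w_b.
    frames = []
    for blend in np.linspace(0.0, 1.0, steps):
        w_mix = np.expand_dims((1 - blend) * w_a + blend * w_b, 0)
        frames.append(model({"style_code": w_mix, "noise": noise, "alpha": 1.0})[0])
    return frames


# Reuse the per-resolution noise of the first sample, as in the style-mixing cell.
noise_single = [np.expand_dims(n[0], 0) for n in noise]
frames = interpolate_styles(style_gan, w[0], w[1], noise_single, steps=5)
plt.figure(figsize=(15, 3))
plt.imshow(np.hstack(frames))
plt.axis("off")<jupyter_output><empty_output>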
keras-io/examples/generative/ipynb/stylegan.ipynb/0
{ "file_path": "keras-io/examples/generative/ipynb/stylegan.ipynb", "repo_id": "keras-io", "token_count": 12686 }
83
# DreamBooth **Author:** [Sayak Paul](https://twitter.com/RisingSayak), [Chansung Park](https://twitter.com/algo_diver)<br> **Date created:** 2023/02/01<br> **Last modified:** 2023/02/05<br> **Description:** Implementing DreamBooth. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/dreambooth.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/dreambooth.py) --- ## Introduction In this example, we implement DreamBooth, a fine-tuning technique to teach new visual concepts to text-conditioned Diffusion models with just 3 - 5 images. DreamBooth was proposed in [DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation](https://arxiv.org/abs/2208.12242) by Ruiz et al. DreamBooth, in a sense, is similar to the [traditional way of fine-tuning a text-conditioned Diffusion model except](https://keras.io/examples/generative/finetune_stable_diffusion/) for a few gotchas. This example assumes that you have basic familiarity with Diffusion models and how to fine-tune them. Here are some reference examples that might help you to get familiarized quickly: * [High-performance image generation using Stable Diffusion in KerasCV](https://keras.io/guides/keras_cv/generate_images_with_stable_diffusion/) * [Teach StableDiffusion new concepts via Textual Inversion](https://keras.io/examples/generative/fine_tune_via_textual_inversion/) * [Fine-tuning Stable Diffusion](https://keras.io/examples/generative/finetune_stable_diffusion/) First, let's install the latest versions of KerasCV and TensorFlow. ```python !pip install -q -U keras_cv==0.6.0 !pip install -q -U tensorflow ``` If you're running the code, please ensure you're using a GPU with at least 24 GBs of VRAM. --- ## Initial imports ```python import math import keras_cv import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from imutils import paths from tensorflow import keras ``` --- ## Usage of DreamBooth ... is very versatile. By teaching Stable Diffusion about your favorite visual concepts, you can * Recontextualize objects in interesting ways: ![](https://i.imgur.com/4Da9ozw.png) * Generate artistic renderings of the underlying visual concept: ![](https://i.imgur.com/nI2N8bI.png) And many other applications. We welcome you to check out the original [DreamBooth paper](https://arxiv.org/abs/2208.12242) in this regard. --- ## Download the instance and class images DreamBooth uses a technique called "prior preservation" to meaningfully guide the training procedure such that the fine-tuned models can still preserve some of the prior semantics of the visual concept you're introducing. To know more about the idea of "prior preservation" refer to [this document](https://dreambooth.github.io/). Here, we need to introduce a few key terms specific to DreamBooth: * **Unique class**: Examples include "dog", "person", etc. In this example, we use "dog". * **Unique identifier**: A unique identifier that is prepended to the unique class while forming the "instance prompts". In this example, we use "sks" as this unique identifier. * **Instance prompt**: Denotes a prompt that best describes the "instance images". An example prompt could be - "f"a photo of {unique_id} {unique_class}". 
So, for our example, this becomes - "a photo of sks dog". * **Class prompt**: Denotes a prompt without the unique identifier. This prompt is used for generating "class images" for prior preservation. For our example, this prompt is - "a photo of dog". * **Instance images**: Denote the images that represent the visual concept you're trying to teach aka the "instance prompt". This number is typically just 3 - 5. We typically gather these images ourselves. * **Class images**: Denote the images generated using the "class prompt" for using prior preservation in DreamBooth training. We leverage the pre-trained model before fine-tuning it to generate these class images. Typically, 200 - 300 class images are enough. In code, this generation process looks quite simply: ```py from tqdm import tqdm import numpy as np import hashlib import keras_cv import PIL import os class_images_dir = "class-images" os.makedirs(class_images_dir, exist_ok=True) model = keras_cv.models.StableDiffusion(img_width=512, img_height=512, jit_compile=True) class_prompt = "a photo of dog" num_imgs_to_generate = 200 for i in tqdm(range(num_imgs_to_generate)): images = model.text_to_image( class_prompt, batch_size=3, ) idx = np.random.choice(len(images)) selected_image = PIL.Image.fromarray(images[idx]) hash_image = hashlib.sha1(selected_image.tobytes()).hexdigest() image_filename = os.path.join(class_images_dir, f"{hash_image}.jpg") selected_image.save(image_filename) ``` To keep the runtime of this example short, the authors of this example have gone ahead and generated some class images using [this notebook](https://colab.research.google.com/gist/sayakpaul/6b5de345d29cf5860f84b6d04d958692/generate_class_priors.ipynb). **Note** that prior preservation is an optional technique used in DreamBooth, but it almost always helps in improving the quality of the generated images. ```python instance_images_root = tf.keras.utils.get_file( origin="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/instance-images.tar.gz", untar=True, ) class_images_root = tf.keras.utils.get_file( origin="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/class-images.tar.gz", untar=True, ) ``` --- ## Visualize images First, let's load the image paths. ```python instance_image_paths = list(paths.list_images(instance_images_root)) class_image_paths = list(paths.list_images(class_images_root)) ``` Then we load the images from the paths. ```python def load_images(image_paths): images = [np.array(keras.utils.load_img(path)) for path in image_paths] return images ``` And then we make use a utility function to plot the loaded images. ```python def plot_images(images, title=None): plt.figure(figsize=(20, 20)) for i in range(len(images)): ax = plt.subplot(1, len(images), i + 1) if title is not None: plt.title(title) plt.imshow(images[i]) plt.axis("off") ``` **Instance images**: ```python plot_images(load_images(instance_image_paths[:5])) ``` ![png](/img/examples/generative/dreambooth/dreambooth_16_0.png) **Class images**: ```python plot_images(load_images(class_image_paths[:5])) ``` ![png](/img/examples/generative/dreambooth/dreambooth_18_0.png) --- ## Prepare datasets Dataset preparation includes two stages: (1): preparing the captions, (2) processing the images. ### Prepare the captions ```python # Since we're using prior preservation, we need to match the number # of instance images we're using. We just repeat the instance image paths # to do so. 
new_instance_image_paths = [] for index in range(len(class_image_paths)): instance_image = instance_image_paths[index % len(instance_image_paths)] new_instance_image_paths.append(instance_image) # We just repeat the prompts / captions per images. unique_id = "sks" class_label = "dog" instance_prompt = f"a photo of {unique_id} {class_label}" instance_prompts = [instance_prompt] * len(new_instance_image_paths) class_prompt = f"a photo of {class_label}" class_prompts = [class_prompt] * len(class_image_paths) ``` Next, we embed the prompts to save some compute. ```python import itertools # The padding token and maximum prompt length are specific to the text encoder. # If you're using a different text encoder be sure to change them accordingly. padding_token = 49407 max_prompt_length = 77 # Load the tokenizer. tokenizer = keras_cv.models.stable_diffusion.SimpleTokenizer() # Method to tokenize and pad the tokens. def process_text(caption): tokens = tokenizer.encode(caption) tokens = tokens + [padding_token] * (max_prompt_length - len(tokens)) return np.array(tokens) # Collate the tokenized captions into an array. tokenized_texts = np.empty( (len(instance_prompts) + len(class_prompts), max_prompt_length) ) for i, caption in enumerate(itertools.chain(instance_prompts, class_prompts)): tokenized_texts[i] = process_text(caption) # We also pre-compute the text embeddings to save some memory during training. POS_IDS = tf.convert_to_tensor([list(range(max_prompt_length))], dtype=tf.int32) text_encoder = keras_cv.models.stable_diffusion.TextEncoder(max_prompt_length) gpus = tf.config.list_logical_devices("GPU") # Ensure the computation takes place on a GPU. # Note that it's done automatically when there's a GPU present. # This example just attempts at showing how you can do it # more explicitly. with tf.device(gpus[0].name): embedded_text = text_encoder( [tf.convert_to_tensor(tokenized_texts), POS_IDS], training=False ).numpy() # To ensure text_encoder doesn't occupy any GPU space. 
del text_encoder ``` --- ## Prepare the images ```python resolution = 512 auto = tf.data.AUTOTUNE augmenter = keras.Sequential( layers=[ keras_cv.layers.CenterCrop(resolution, resolution), keras_cv.layers.RandomFlip(), keras.layers.Rescaling(scale=1.0 / 127.5, offset=-1), ] ) def process_image(image_path, tokenized_text): image = tf.io.read_file(image_path) image = tf.io.decode_png(image, 3) image = tf.image.resize(image, (resolution, resolution)) return image, tokenized_text def apply_augmentation(image_batch, embedded_tokens): return augmenter(image_batch), embedded_tokens def prepare_dict(instance_only=True): def fn(image_batch, embedded_tokens): if instance_only: batch_dict = { "instance_images": image_batch, "instance_embedded_texts": embedded_tokens, } return batch_dict else: batch_dict = { "class_images": image_batch, "class_embedded_texts": embedded_tokens, } return batch_dict return fn def assemble_dataset(image_paths, embedded_texts, instance_only=True, batch_size=1): dataset = tf.data.Dataset.from_tensor_slices((image_paths, embedded_texts)) dataset = dataset.map(process_image, num_parallel_calls=auto) dataset = dataset.shuffle(5, reshuffle_each_iteration=True) dataset = dataset.batch(batch_size) dataset = dataset.map(apply_augmentation, num_parallel_calls=auto) prepare_dict_fn = prepare_dict(instance_only=instance_only) dataset = dataset.map(prepare_dict_fn, num_parallel_calls=auto) return dataset ``` --- ## Assemble dataset ```python instance_dataset = assemble_dataset( new_instance_image_paths, embedded_text[: len(new_instance_image_paths)], ) class_dataset = assemble_dataset( class_image_paths, embedded_text[len(new_instance_image_paths) :], instance_only=False, ) train_dataset = tf.data.Dataset.zip((instance_dataset, class_dataset)) ``` --- ## Check shapes Now that the dataset has been prepared, let's quickly check what's inside it. ```python sample_batch = next(iter(train_dataset)) print(sample_batch[0].keys(), sample_batch[1].keys()) for k in sample_batch[0]: print(k, sample_batch[0][k].shape) for k in sample_batch[1]: print(k, sample_batch[1][k].shape) ``` <div class="k-default-codeblock"> ``` dict_keys(['instance_images', 'instance_embedded_texts']) dict_keys(['class_images', 'class_embedded_texts']) instance_images (1, 512, 512, 3) instance_embedded_texts (1, 77, 768) class_images (1, 512, 512, 3) class_embedded_texts (1, 77, 768) ``` </div> During training, we make use of these keys to gather the images and text embeddings and concat them accordingly. --- ## DreamBooth training loop Our DreamBooth training loop is very much inspired by [this script](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) provided by the Diffusers team at Hugging Face. However, there is an important difference to note. We only fine-tune the UNet (the model responsible for predicting noise) and don't fine-tune the text encoder in this example. If you're looking for an implementation that also performs the additional fine-tuning of the text encoder, refer to [this repository](https://github.com/sayakpaul/dreambooth-keras/). 
```python import tensorflow.experimental.numpy as tnp class DreamBoothTrainer(tf.keras.Model): # Reference: # https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py def __init__( self, diffusion_model, vae, noise_scheduler, use_mixed_precision=False, prior_loss_weight=1.0, max_grad_norm=1.0, **kwargs, ): super().__init__(**kwargs) self.diffusion_model = diffusion_model self.vae = vae self.noise_scheduler = noise_scheduler self.prior_loss_weight = prior_loss_weight self.max_grad_norm = max_grad_norm self.use_mixed_precision = use_mixed_precision self.vae.trainable = False def train_step(self, inputs): instance_batch = inputs[0] class_batch = inputs[1] instance_images = instance_batch["instance_images"] instance_embedded_text = instance_batch["instance_embedded_texts"] class_images = class_batch["class_images"] class_embedded_text = class_batch["class_embedded_texts"] images = tf.concat([instance_images, class_images], 0) embedded_texts = tf.concat([instance_embedded_text, class_embedded_text], 0) batch_size = tf.shape(images)[0] with tf.GradientTape() as tape: # Project image into the latent space and sample from it. latents = self.sample_from_encoder_outputs(self.vae(images, training=False)) # Know more about the magic number here: # https://keras.io/examples/generative/fine_tune_via_textual_inversion/ latents = latents * 0.18215 # Sample noise that we'll add to the latents. noise = tf.random.normal(tf.shape(latents)) # Sample a random timestep for each image. timesteps = tnp.random.randint( 0, self.noise_scheduler.train_timesteps, (batch_size,) ) # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process). noisy_latents = self.noise_scheduler.add_noise( tf.cast(latents, noise.dtype), noise, timesteps ) # Get the target for loss depending on the prediction type # just the sampled noise for now. target = noise # noise_schedule.predict_epsilon == True # Predict the noise residual and compute loss. timestep_embedding = tf.map_fn( lambda t: self.get_timestep_embedding(t), timesteps, dtype=tf.float32 ) model_pred = self.diffusion_model( [noisy_latents, timestep_embedding, embedded_texts], training=True ) loss = self.compute_loss(target, model_pred) if self.use_mixed_precision: loss = self.optimizer.get_scaled_loss(loss) # Update parameters of the diffusion model. trainable_vars = self.diffusion_model.trainable_variables gradients = tape.gradient(loss, trainable_vars) if self.use_mixed_precision: gradients = self.optimizer.get_unscaled_gradients(gradients) gradients = [tf.clip_by_norm(g, self.max_grad_norm) for g in gradients] self.optimizer.apply_gradients(zip(gradients, trainable_vars)) return {m.name: m.result() for m in self.metrics} def get_timestep_embedding(self, timestep, dim=320, max_period=10000): half = dim // 2 log_max_period = tf.math.log(tf.cast(max_period, tf.float32)) freqs = tf.math.exp( -log_max_period * tf.range(0, half, dtype=tf.float32) / half ) args = tf.convert_to_tensor([timestep], dtype=tf.float32) * freqs embedding = tf.concat([tf.math.cos(args), tf.math.sin(args)], 0) return embedding def sample_from_encoder_outputs(self, outputs): mean, logvar = tf.split(outputs, 2, axis=-1) logvar = tf.clip_by_value(logvar, -30.0, 20.0) std = tf.exp(0.5 * logvar) sample = tf.random.normal(tf.shape(mean), dtype=mean.dtype) return mean + std * sample def compute_loss(self, target, model_pred): # Chunk the noise and model_pred into two parts and compute the loss # on each part separately. 
# Since the first half of the inputs has instance samples and the second half # has class samples, we do the chunking accordingly. model_pred, model_pred_prior = tf.split( model_pred, num_or_size_splits=2, axis=0 ) target, target_prior = tf.split(target, num_or_size_splits=2, axis=0) # Compute instance loss. loss = self.compiled_loss(target, model_pred) # Compute prior loss. prior_loss = self.compiled_loss(target_prior, model_pred_prior) # Add the prior loss to the instance loss. loss = loss + self.prior_loss_weight * prior_loss return loss def save_weights(self, filepath, overwrite=True, save_format=None, options=None): # Overriding this method will allow us to use the `ModelCheckpoint` # callback directly with this trainer class. In this case, it will # only checkpoint the `diffusion_model` since that's what we're training # during fine-tuning. self.diffusion_model.save_weights( filepath=filepath, overwrite=overwrite, save_format=save_format, options=options, ) def load_weights(self, filepath, by_name=False, skip_mismatch=False, options=None): # Similarly override `load_weights()` so that we can directly call it on # the trainer class object. self.diffusion_model.load_weights( filepath=filepath, by_name=by_name, skip_mismatch=skip_mismatch, options=options, ) ``` --- ## Trainer initialization ```python # Comment it if you are not using a GPU having tensor cores. tf.keras.mixed_precision.set_global_policy("mixed_float16") use_mp = True # Set it to False if you're not using a GPU with tensor cores. image_encoder = keras_cv.models.stable_diffusion.ImageEncoder() dreambooth_trainer = DreamBoothTrainer( diffusion_model=keras_cv.models.stable_diffusion.DiffusionModel( resolution, resolution, max_prompt_length ), # Remove the top layer from the encoder, which cuts off the variance and only # returns the mean. vae=tf.keras.Model( image_encoder.input, image_encoder.layers[-2].output, ), noise_scheduler=keras_cv.models.stable_diffusion.NoiseScheduler(), use_mixed_precision=use_mp, ) # These hyperparameters come from this tutorial by Hugging Face: # https://github.com/huggingface/diffusers/tree/main/examples/dreambooth learning_rate = 5e-6 beta_1, beta_2 = 0.9, 0.999 weight_decay = (1e-2,) epsilon = 1e-08 optimizer = tf.keras.optimizers.experimental.AdamW( learning_rate=learning_rate, weight_decay=weight_decay, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, ) dreambooth_trainer.compile(optimizer=optimizer, loss="mse") ``` --- ## Train! We first calculate the number of epochs, we need to train for. ```python num_update_steps_per_epoch = train_dataset.cardinality() max_train_steps = 800 epochs = math.ceil(max_train_steps / num_update_steps_per_epoch) print(f"Training for {epochs} epochs.") ``` <div class="k-default-codeblock"> ``` Training for 4 epochs. ``` </div> And then we start training! 
```python ckpt_path = "dreambooth-unet.h5" ckpt_callback = tf.keras.callbacks.ModelCheckpoint( ckpt_path, save_weights_only=True, monitor="loss", mode="min", ) dreambooth_trainer.fit(train_dataset, epochs=epochs, callbacks=[ckpt_callback]) ``` <div class="k-default-codeblock"> ``` Epoch 1/4 200/200 [==============================] - 301s 462ms/step - loss: 0.1203 Epoch 2/4 200/200 [==============================] - 94s 469ms/step - loss: 0.1139 Epoch 3/4 200/200 [==============================] - 94s 469ms/step - loss: 0.1016 Epoch 4/4 200/200 [==============================] - 94s 469ms/step - loss: 0.1231 <keras.callbacks.History at 0x7f19726600a0> ``` </div> --- ## Experiments and inference We ran various experiments with a slightly modified version of this example. Our experiments are based on [this repository](https://github.com/sayakpaul/dreambooth-keras/) and are inspired by [this blog post](https://huggingface.co/blog/dreambooth) from Hugging Face. First, let's see how we can use the fine-tuned checkpoint for running inference. ```python # Initialize a new Stable Diffusion model. dreambooth_model = keras_cv.models.StableDiffusion( img_width=resolution, img_height=resolution, jit_compile=True ) dreambooth_model.diffusion_model.load_weights(ckpt_path) # Note how the unique identifier and the class have been used in the prompt. prompt = f"A photo of {unique_id} {class_label} in a bucket" num_imgs_to_gen = 3 images_dreamboothed = dreambooth_model.text_to_image(prompt, batch_size=num_imgs_to_gen) plot_images(images_dreamboothed, prompt) ``` <div class="k-default-codeblock"> ``` By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE 50/50 [==============================] - 42s 160ms/step ``` </div> ![png](/img/examples/generative/dreambooth/dreambooth_40_1.png) Now, let's load checkpoints from a different experiment we conducted where we also fine-tuned the text encoder along with the UNet: ```python unet_weights = tf.keras.utils.get_file( origin="https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-unet.h5" ) text_encoder_weights = tf.keras.utils.get_file( origin="https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-text_encoder.h5" ) dreambooth_model.diffusion_model.load_weights(unet_weights) dreambooth_model.text_encoder.load_weights(text_encoder_weights) images_dreamboothed = dreambooth_model.text_to_image(prompt, batch_size=num_imgs_to_gen) plot_images(images_dreamboothed, prompt) ``` <div class="k-default-codeblock"> ``` Downloading data from https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-unet.h5 3439088208/3439088208 [==============================] - 67s 0us/step Downloading data from https://huggingface.co/chansung/dreambooth-dog/resolve/main/lr%409e-06-max_train_steps%40200-train_text_encoder%40True-text_encoder.h5 492466760/492466760 [==============================] - 9s 0us/step 50/50 [==============================] - 8s 159ms/step ``` </div> ![png](/img/examples/generative/dreambooth/dreambooth_42_1.png) The default number of steps for generating an image in `text_to_image()` [is 50](https://github.com/keras-team/keras-cv/blob/3575bc3b944564fe15b46b917e6555aa6a9d7be0/keras_cv/models/stable_diffusion/stable_diffusion.py#L73). 
Let's increase it to 100. ```python images_dreamboothed = dreambooth_model.text_to_image( prompt, batch_size=num_imgs_to_gen, num_steps=100 ) plot_images(images_dreamboothed, prompt) ``` <div class="k-default-codeblock"> ``` 100/100 [==============================] - 16s 159ms/step ``` </div> ![png](/img/examples/generative/dreambooth/dreambooth_44_1.png) Feel free to experiment with different prompts (don't forget to add the unique identifier and the class label!) to see how the results change; a short sketch after the acknowledgements below shows one way to loop over a few prompts. We welcome you to check out our codebase and more experimental results [here](https://github.com/sayakpaul/dreambooth-keras#results). You can also read [this blog post](https://huggingface.co/blog/dreambooth) to get more ideas. --- ## Acknowledgements * Thanks to the [DreamBooth example script](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) provided by Hugging Face which helped us a lot in getting the initial implementation ready quickly. * Getting DreamBooth to work on human faces can be challenging. We have compiled some general recommendations [here](https://github.com/sayakpaul/dreambooth-keras#notes-on-preparing-data-for-dreambooth-training-of-faces). Thanks to [Abhishek Thakur](https://no.linkedin.com/in/abhi1thakur) for helping with these.
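---

## Trying out more prompts

As a closing illustration, here is a minimal sketch of one way to loop over several prompts with the fine-tuned model. It reuses the `dreambooth_model`, `plot_images`, `unique_id`, `class_label`, and `num_imgs_to_gen` objects defined earlier in this example; the prompts themselves are arbitrary placeholders, so feel free to swap in your own.

```python
prompts_to_try = [
    f"A photo of {unique_id} {class_label} at the beach",
    f"A photo of {unique_id} {class_label} wearing a red hat",
    f"An oil painting of {unique_id} {class_label}",
]

for current_prompt in prompts_to_try:
    # Generate a small batch of images for each prompt and visualize them.
    generated_images = dreambooth_model.text_to_image(
        current_prompt, batch_size=num_imgs_to_gen, num_steps=100
    )
    plot_images(generated_images, current_prompt)
```

As in the previous section, a larger `num_steps` value takes longer but can improve image quality.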
keras-io/examples/generative/md/dreambooth.md/0
{ "file_path": "keras-io/examples/generative/md/dreambooth.md", "repo_id": "keras-io", "token_count": 9400 }
84
# Variational AutoEncoder **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/05/03<br> **Last modified:** 2023/11/22<br> **Description:** Convolutional Variational AutoEncoder (VAE) trained on MNIST digits. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/vae.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/vae.py) --- ## Setup ```python import os os.environ["KERAS_BACKEND"] = "tensorflow" import numpy as np import tensorflow as tf import keras from keras import layers ``` --- ## Create a sampling layer ```python class Sampling(layers.Layer): """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit.""" def call(self, inputs): z_mean, z_log_var = inputs batch = tf.shape(z_mean)[0] dim = tf.shape(z_mean)[1] epsilon = tf.random.normal(shape=(batch, dim)) return z_mean + tf.exp(0.5 * z_log_var) * epsilon ``` --- ## Build the encoder ```python latent_dim = 2 encoder_inputs = keras.Input(shape=(28, 28, 1)) x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs) x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x) x = layers.Flatten()(x) x = layers.Dense(16, activation="relu")(x) z_mean = layers.Dense(latent_dim, name="z_mean")(x) z_log_var = layers.Dense(latent_dim, name="z_log_var")(x) z = Sampling()([z_mean, z_log_var]) encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder") encoder.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "encoder"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩ │ input_layer │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │ input_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; 
text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,496</span> │ conv2d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3136</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ conv2d_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">50,192</span> │ flatten[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ z_mean (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">34</span> │ dense[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ z_log_var (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">34</span> │ dense[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ sampling (<span style="color: #0087ff; text-decoration-color: #0087ff">Sampling</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ z_mean[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ │ │ z_log_var[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; 
text-decoration-color: #00af00">0</span>] │ └─────────────────────┴───────────────────┴─────────┴──────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">69,076</span> (269.83 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">69,076</span> (269.83 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> --- ## Build the decoder ```python latent_inputs = keras.Input(shape=(latent_dim,)) x = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs) x = layers.Reshape((7, 7, 64))(x) x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x) x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x) decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x) decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder") decoder.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "decoder"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ input_layer_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3136</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,408</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ reshape (<span style="color: #0087ff; text-decoration-color: #0087ff">Reshape</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_transpose │ (<span style="color: #00d7ff; text-decoration-color: 
#00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,464</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_transpose_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">289</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">65,089</span> (254.25 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">65,089</span> (254.25 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> --- ## Define the VAE as a `Model` with a custom `train_step` ```python class VAE(keras.Model): def __init__(self, encoder, decoder, **kwargs): super().__init__(**kwargs) self.encoder = encoder self.decoder = decoder self.total_loss_tracker = keras.metrics.Mean(name="total_loss") self.reconstruction_loss_tracker = keras.metrics.Mean( name="reconstruction_loss" ) self.kl_loss_tracker = keras.metrics.Mean(name="kl_loss") @property def metrics(self): return [ self.total_loss_tracker, self.reconstruction_loss_tracker, self.kl_loss_tracker, ] def train_step(self, data): with tf.GradientTape() as tape: z_mean, z_log_var, z = self.encoder(data) reconstruction = self.decoder(z) reconstruction_loss = tf.reduce_mean( tf.reduce_sum( keras.losses.binary_crossentropy(data, reconstruction), axis=(1, 2), ) ) kl_loss = -0.5 * (1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)) kl_loss = tf.reduce_mean(tf.reduce_sum(kl_loss, axis=1)) total_loss = reconstruction_loss + kl_loss grads = tape.gradient(total_loss, self.trainable_weights) self.optimizer.apply_gradients(zip(grads, 
self.trainable_weights)) self.total_loss_tracker.update_state(total_loss) self.reconstruction_loss_tracker.update_state(reconstruction_loss) self.kl_loss_tracker.update_state(kl_loss) return { "loss": self.total_loss_tracker.result(), "reconstruction_loss": self.reconstruction_loss_tracker.result(), "kl_loss": self.kl_loss_tracker.result(), } ``` --- ## Train the VAE ```python (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() mnist_digits = np.concatenate([x_train, x_test], axis=0) mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255 vae = VAE(encoder, decoder) vae.compile(optimizer=keras.optimizers.Adam()) vae.fit(mnist_digits, epochs=30, batch_size=128) ``` <div class="k-default-codeblock"> ``` Epoch 1/30 41/547 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - kl_loss: 1.0488 - loss: 474.8513 - reconstruction_loss: 473.8025 WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1700704358.696643 3339857 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. W0000 00:00:1700704358.714145 3339857 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update W0000 00:00:1700704358.716080 3339857 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update 547/547 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - kl_loss: 2.9140 - loss: 262.3454 - reconstruction_loss: 259.4314 W0000 00:00:1700704363.390106 3339858 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update W0000 00:00:1700704363.392582 3339858 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update 547/547 ━━━━━━━━━━━━━━━━━━━━ 11s 9ms/step - kl_loss: 2.9145 - loss: 262.3454 - reconstruction_loss: 259.3424 - total_loss: 213.8374 Epoch 2/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 5.2591 - loss: 177.2659 - reconstruction_loss: 171.9981 - total_loss: 172.5344 Epoch 3/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.0199 - loss: 166.4822 - reconstruction_loss: 160.4603 - total_loss: 165.3463 Epoch 4/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - kl_loss: 6.1585 - loss: 163.0588 - reconstruction_loss: 156.8987 - total_loss: 162.2310 Epoch 5/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.2646 - loss: 160.6541 - reconstruction_loss: 154.3888 - total_loss: 160.2672 Epoch 6/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.3202 - loss: 159.1411 - reconstruction_loss: 152.8203 - total_loss: 158.8850 Epoch 7/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.3759 - loss: 157.8918 - reconstruction_loss: 151.5157 - total_loss: 157.8260 Epoch 8/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.3899 - loss: 157.2225 - reconstruction_loss: 150.8320 - total_loss: 156.8395 Epoch 9/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4204 - loss: 156.0726 - reconstruction_loss: 149.6520 - total_loss: 156.0463 Epoch 10/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4176 - loss: 155.6229 - reconstruction_loss: 149.2051 - total_loss: 155.4912 Epoch 11/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - kl_loss: 6.4297 - loss: 155.0198 - reconstruction_loss: 148.5899 - total_loss: 154.9487 Epoch 12/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4338 - loss: 154.1115 - reconstruction_loss: 147.6781 - total_loss: 154.3575 Epoch 13/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4356 - loss: 153.9087 - reconstruction_loss: 147.4730 - total_loss: 153.8745 Epoch 
14/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4506 - loss: 153.7804 - reconstruction_loss: 147.3295 - total_loss: 153.6391 Epoch 15/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4399 - loss: 152.7727 - reconstruction_loss: 146.3336 - total_loss: 153.2117 Epoch 16/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4661 - loss: 152.7382 - reconstruction_loss: 146.2725 - total_loss: 152.9310 Epoch 17/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4566 - loss: 152.3313 - reconstruction_loss: 145.8751 - total_loss: 152.5897 Epoch 18/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4613 - loss: 152.4331 - reconstruction_loss: 145.9715 - total_loss: 152.2775 Epoch 19/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4551 - loss: 151.9406 - reconstruction_loss: 145.4857 - total_loss: 152.0997 Epoch 20/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4332 - loss: 152.1597 - reconstruction_loss: 145.7260 - total_loss: 151.8623 Epoch 21/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4644 - loss: 151.4290 - reconstruction_loss: 144.9649 - total_loss: 151.6146 Epoch 22/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4662 - loss: 151.1586 - reconstruction_loss: 144.6929 - total_loss: 151.4525 Epoch 23/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4532 - loss: 150.9665 - reconstruction_loss: 144.5139 - total_loss: 151.2734 Epoch 24/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4520 - loss: 151.2177 - reconstruction_loss: 144.7655 - total_loss: 151.1416 Epoch 25/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4537 - loss: 150.8981 - reconstruction_loss: 144.4445 - total_loss: 151.0104 Epoch 26/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4669 - loss: 150.5807 - reconstruction_loss: 144.1143 - total_loss: 150.8807 Epoch 27/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4575 - loss: 150.3731 - reconstruction_loss: 143.9162 - total_loss: 150.7236 Epoch 28/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4644 - loss: 150.7117 - reconstruction_loss: 144.2471 - total_loss: 150.6108 Epoch 29/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4902 - loss: 150.1759 - reconstruction_loss: 143.6862 - total_loss: 150.4756 Epoch 30/30 547/547 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - kl_loss: 6.4585 - loss: 150.6554 - reconstruction_loss: 144.1964 - total_loss: 150.3988 <keras.src.callbacks.history.History at 0x7fbe44614eb0> ``` </div> --- ## Display a grid of sampled digits ```python import matplotlib.pyplot as plt def plot_latent_space(vae, n=30, figsize=15): # display a n*n 2D manifold of digits digit_size = 28 scale = 1.0 figure = np.zeros((digit_size * n, digit_size * n)) # linearly spaced coordinates corresponding to the 2D plot # of digit classes in the latent space grid_x = np.linspace(-scale, scale, n) grid_y = np.linspace(-scale, scale, n)[::-1] for i, yi in enumerate(grid_y): for j, xi in enumerate(grid_x): z_sample = np.array([[xi, yi]]) x_decoded = vae.decoder.predict(z_sample, verbose=0) digit = x_decoded[0].reshape(digit_size, digit_size) figure[ i * digit_size : (i + 1) * digit_size, j * digit_size : (j + 1) * digit_size, ] = digit plt.figure(figsize=(figsize, figsize)) start_range = digit_size // 2 end_range = n * digit_size + start_range pixel_range = np.arange(start_range, end_range, digit_size) sample_range_x = np.round(grid_x, 1) sample_range_y = np.round(grid_y, 1) plt.xticks(pixel_range, sample_range_x) plt.yticks(pixel_range, sample_range_y) 
plt.xlabel("z[0]") plt.ylabel("z[1]") plt.imshow(figure, cmap="Greys_r") plt.show() plot_latent_space(vae) ``` ![png](/img/examples/generative/vae/vae_14_0.png) --- ## Display how the latent space clusters different digit classes ```python def plot_label_clusters(vae, data, labels): # display a 2D plot of the digit classes in the latent space z_mean, _, _ = vae.encoder.predict(data, verbose=0) plt.figure(figsize=(12, 10)) plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels) plt.colorbar() plt.xlabel("z[0]") plt.ylabel("z[1]") plt.show() (x_train, y_train), _ = keras.datasets.mnist.load_data() x_train = np.expand_dims(x_train, -1).astype("float32") / 255 plot_label_clusters(vae, x_train, y_train) ``` <div class="k-default-codeblock"> ``` W0000 00:00:1700704481.358429 3339856 graph_launch.cc:671] Fallback to op-by-op mode because memset node breaks graph update ``` </div> ![png](/img/examples/generative/vae/vae_16_1.png)
keras-io/examples/generative/md/vae.md/0
{ "file_path": "keras-io/examples/generative/md/vae.md", "repo_id": "keras-io", "token_count": 10664 }
85
<jupyter_start><jupyter_text>Graph representation learning with node2vec**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2021/05/15**Last modified:** 2021/05/15**Description:** Implementing the node2vec model to generate embeddings for movies from the MovieLens dataset. IntroductionLearning useful representations from objects structured as graphs is useful fora variety of machine learning (ML) applications—such as social and communication networks analysis,biomedicine studies, and recommendation systems.[Graph representation Learning](https://www.cs.mcgill.ca/~wlh/grl_book/) aims tolearn embeddings for the graph nodes, which can be used for a variety of ML taskssuch as node label prediction (e.g. categorizing an article based on its citations)and link prediction (e.g. recommending an interest group to a user in a social network).[node2vec](https://arxiv.org/abs/1607.00653) is a simple, yet scalable and effectivetechnique for learning low-dimensional embeddings for nodes in a graph by optimizinga neighborhood-preserving objective. The aim is to learn similar embeddings forneighboring nodes, with respect to the graph structure.Given your data items structured as a graph (where the items are represented asnodes and the relationship between items are represented as edges),node2vec works as follows:1. Generate item sequences using (biased) random walk.2. Create positive and negative training examples from these sequences.3. Train a [word2vec](https://www.tensorflow.org/tutorials/text/word2vec) model(skip-gram) to learn embeddings for the items.In this example, we demonstrate the node2vec technique on the[small version of the Movielens dataset](https://files.grouplens.org/datasets/movielens/ml-latest-small-README.html)to learn movie embeddings. Such a dataset can be represented as a graph by treatingthe movies as nodes, and creating edges between movies that have similar ratingsby the users. The learnt movie embeddings can be used for tasks such as movie recommendation,or movie genres prediction.This example requires `networkx` package, which can be installed using the following command:```shellpip install networkx``` Setup<jupyter_code>import os from collections import defaultdict import math import networkx as nx import random from tqdm import tqdm from zipfile import ZipFile from urllib.request import urlretrieve import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt<jupyter_output><empty_output><jupyter_text>Download the MovieLens dataset and prepare the dataThe small version of the MovieLens dataset includes around 100k ratingsfrom 610 users on 9,742 movies.First, let's download the dataset. The downloaded folder will containthree data files: `users.csv`, `movies.csv`, and `ratings.csv`. In this example,we will only need the `movies.dat`, and `ratings.dat` data files.<jupyter_code>urlretrieve( "http://files.grouplens.org/datasets/movielens/ml-latest-small.zip", "movielens.zip" ) ZipFile("movielens.zip", "r").extractall()<jupyter_output><empty_output><jupyter_text>Then, we load the data into a Pandas DataFrame and perform some basic preprocessing.<jupyter_code># Load movies to a DataFrame. movies = pd.read_csv("ml-latest-small/movies.csv") # Create a `movieId` string. movies["movieId"] = movies["movieId"].apply(lambda x: f"movie_{x}") # Load ratings to a DataFrame. 
ratings = pd.read_csv("ml-latest-small/ratings.csv") # Convert the `ratings` to floating point ratings["rating"] = ratings["rating"].apply(lambda x: float(x)) # Create the `movie_id` string. ratings["movieId"] = ratings["movieId"].apply(lambda x: f"movie_{x}") print("Movies data shape:", movies.shape) print("Ratings data shape:", ratings.shape)<jupyter_output><empty_output><jupyter_text>Let's inspect a sample instance of the `ratings` DataFrame.<jupyter_code>ratings.head()<jupyter_output><empty_output><jupyter_text>Next, let's check a sample instance of the `movies` DataFrame.<jupyter_code>movies.head()<jupyter_output><empty_output><jupyter_text>Implement two utility functions for the `movies` DataFrame.<jupyter_code>def get_movie_title_by_id(movieId): return list(movies[movies.movieId == movieId].title)[0] def get_movie_id_by_title(title): return list(movies[movies.title == title].movieId)[0]<jupyter_output><empty_output><jupyter_text>Construct the Movies graphWe create an edge between two movie nodes in the graph if both movies are ratedby the same user >= `min_rating`. The weight of the edge will be based on the[pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information)between the two movies, which is computed as: `log(xy) - log(x) - log(y) + log(D)`, where:* `xy` is how many users rated both movie `x` and movie `y` with >= `min_rating`.* `x` is how many users rated movie `x` >= `min_rating`.* `y` is how many users rated movie `y` >= `min_rating`.* `D` total number of movie ratings >= `min_rating`. Step 1: create the weighted edges between movies.<jupyter_code>min_rating = 5 pair_frequency = defaultdict(int) item_frequency = defaultdict(int) # Filter instances where rating is greater than or equal to min_rating. rated_movies = ratings[ratings.rating >= min_rating] # Group instances by user. movies_grouped_by_users = list(rated_movies.groupby("userId")) for group in tqdm( movies_grouped_by_users, position=0, leave=True, desc="Compute movie rating frequencies", ): # Get a list of movies rated by the user. current_movies = list(group[1]["movieId"]) for i in range(len(current_movies)): item_frequency[current_movies[i]] += 1 for j in range(i + 1, len(current_movies)): x = min(current_movies[i], current_movies[j]) y = max(current_movies[i], current_movies[j]) pair_frequency[(x, y)] += 1<jupyter_output><empty_output><jupyter_text>Step 2: create the graph with the nodes and the edgesTo reduce the number of edges between nodes, we only add an edge between moviesif the weight of the edge is greater than `min_weight`.<jupyter_code>min_weight = 10 D = math.log(sum(item_frequency.values())) # Create the movies undirected graph. movies_graph = nx.Graph() # Add weighted edges between movies. # This automatically adds the movie nodes to the graph. for pair in tqdm( pair_frequency, position=0, leave=True, desc="Creating the movie graph" ): x, y = pair xy_frequency = pair_frequency[pair] x_frequency = item_frequency[x] y_frequency = item_frequency[y] pmi = math.log(xy_frequency) - math.log(x_frequency) - math.log(y_frequency) + D weight = pmi * xy_frequency # Only include edges with weight >= min_weight. 
if weight >= min_weight: movies_graph.add_edge(x, y, weight=weight)<jupyter_output><empty_output><jupyter_text>Let's display the total number of nodes and edges in the graph.Note that the number of nodes is less than the total number of movies,since only the movies that have edges to other movies are added.<jupyter_code>print("Total number of graph nodes:", movies_graph.number_of_nodes()) print("Total number of graph edges:", movies_graph.number_of_edges())<jupyter_output><empty_output><jupyter_text>Let's display the average node degree (number of neighbours) in the graph.<jupyter_code>degrees = [] for node in movies_graph.nodes: degrees.append(movies_graph.degree[node]) print("Average node degree:", round(sum(degrees) / len(degrees), 2))<jupyter_output><empty_output><jupyter_text>Step 3: Create vocabulary and a mapping from tokens to integer indicesThe vocabulary is the nodes (movie IDs) in the graph.<jupyter_code>vocabulary = ["NA"] + list(movies_graph.nodes) vocabulary_lookup = {token: idx for idx, token in enumerate(vocabulary)}<jupyter_output><empty_output><jupyter_text>Implement the biased random walkA random walk starts from a given node, and randomly picks a neighbour node to move to.If the edges are weighted, the neighbour is selected *probabilistically* withrespect to weights of the edges between the current node and its neighbours.This procedure is repeated for `num_steps` to generate a sequence of *related* nodes.The [*biased* random walk](https://en.wikipedia.org/wiki/Biased_random_walk_on_a_graph) balances between **breadth-first sampling**(where only local neighbours are visited) and **depth-first sampling**(where distant neighbours are visited) by introducing the following two parameters:1. **Return parameter** (`p`): Controls the likelihood of immediately revisitinga node in the walk. Setting it to a high value encourages moderate exploration,while setting it to a low value would keep the walk local.2. **In-out parameter** (`q`): Allows the search to differentiatebetween *inward* and *outward* nodes. Setting it to a high value biases therandom walk towards local nodes, while setting it to a low value biases the walkto visit nodes which are further away.<jupyter_code>def next_step(graph, previous, current, p, q): neighbors = list(graph.neighbors(current)) weights = [] # Adjust the weights of the edges to the neighbors with respect to p and q. for neighbor in neighbors: if neighbor == previous: # Control the probability to return to the previous node. weights.append(graph[current][neighbor]["weight"] / p) elif graph.has_edge(neighbor, previous): # The probability of visiting a local node. weights.append(graph[current][neighbor]["weight"]) else: # Control the probability to move forward. weights.append(graph[current][neighbor]["weight"] / q) # Compute the probabilities of visiting each neighbor. weight_sum = sum(weights) probabilities = [weight / weight_sum for weight in weights] # Probabilistically select a neighbor to visit. next = np.random.choice(neighbors, size=1, p=probabilities)[0] return next def random_walk(graph, num_walks, num_steps, p, q): walks = [] nodes = list(graph.nodes()) # Perform multiple iterations of the random walk. for walk_iteration in range(num_walks): random.shuffle(nodes) for node in tqdm( nodes, position=0, leave=True, desc=f"Random walks iteration {walk_iteration + 1} of {num_walks}", ): # Start the walk with a random node from the graph. walk = [node] # Randomly walk for num_steps. 
while len(walk) < num_steps: current = walk[-1] previous = walk[-2] if len(walk) > 1 else None # Compute the next node to visit. next = next_step(graph, previous, current, p, q) walk.append(next) # Replace node ids (movie ids) in the walk with token ids. walk = [vocabulary_lookup[token] for token in walk] # Add the walk to the generated sequence. walks.append(walk) return walks<jupyter_output><empty_output><jupyter_text>Generate training data using the biased random walkYou can explore different configurations of `p` and `q` to different results ofrelated movies.<jupyter_code># Random walk return parameter. p = 1 # Random walk in-out parameter. q = 1 # Number of iterations of random walks. num_walks = 5 # Number of steps of each random walk. num_steps = 10 walks = random_walk(movies_graph, num_walks, num_steps, p, q) print("Number of walks generated:", len(walks))<jupyter_output><empty_output><jupyter_text>Generate positive and negative examplesTo train a skip-gram model, we use the generated walks to create positive andnegative training examples. Each example includes the following features:1. `target`: A movie in a walk sequence.2. `context`: Another movie in a walk sequence.3. `weight`: How many times these two movies occured in walk sequences.4. `label`: The label is 1 if these two movies are samples from the walk sequences,otherwise (i.e., if randomly sampled) the label is 0. Generate examples<jupyter_code>def generate_examples(sequences, window_size, num_negative_samples, vocabulary_size): example_weights = defaultdict(int) # Iterate over all sequences (walks). for sequence in tqdm( sequences, position=0, leave=True, desc=f"Generating postive and negative examples", ): # Generate positive and negative skip-gram pairs for a sequence (walk). pairs, labels = keras.preprocessing.sequence.skipgrams( sequence, vocabulary_size=vocabulary_size, window_size=window_size, negative_samples=num_negative_samples, ) for idx in range(len(pairs)): pair = pairs[idx] label = labels[idx] target, context = min(pair[0], pair[1]), max(pair[0], pair[1]) if target == context: continue entry = (target, context, label) example_weights[entry] += 1 targets, contexts, labels, weights = [], [], [], [] for entry in example_weights: weight = example_weights[entry] target, context, label = entry targets.append(target) contexts.append(context) labels.append(label) weights.append(weight) return np.array(targets), np.array(contexts), np.array(labels), np.array(weights) num_negative_samples = 4 targets, contexts, labels, weights = generate_examples( sequences=walks, window_size=num_steps, num_negative_samples=num_negative_samples, vocabulary_size=len(vocabulary), )<jupyter_output><empty_output><jupyter_text>Let's display the shapes of the outputs<jupyter_code>print(f"Targets shape: {targets.shape}") print(f"Contexts shape: {contexts.shape}") print(f"Labels shape: {labels.shape}") print(f"Weights shape: {weights.shape}")<jupyter_output><empty_output><jupyter_text>Convert the data into `tf.data.Dataset` objects<jupyter_code>batch_size = 1024 def create_dataset(targets, contexts, labels, weights, batch_size): inputs = { "target": targets, "context": contexts, } dataset = tf.data.Dataset.from_tensor_slices((inputs, labels, weights)) dataset = dataset.shuffle(buffer_size=batch_size * 2) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.prefetch(tf.data.AUTOTUNE) return dataset dataset = create_dataset( targets=targets, contexts=contexts, labels=labels, weights=weights, batch_size=batch_size, 
)<jupyter_output><empty_output><jupyter_text>Train the skip-gram modelOur skip-gram is a simple binary classification model that works as follows:1. An embedding is looked up for the `target` movie.2. An embedding is looked up for the `context` movie.3. The dot product is computed between these two embeddings.4. The result (after a sigmoid activation) is compared to the label.5. A binary crossentropy loss is used.<jupyter_code>learning_rate = 0.001 embedding_dim = 50 num_epochs = 10<jupyter_output><empty_output><jupyter_text>Implement the model<jupyter_code>def create_model(vocabulary_size, embedding_dim): inputs = { "target": layers.Input(name="target", shape=(), dtype="int32"), "context": layers.Input(name="context", shape=(), dtype="int32"), } # Initialize item embeddings. embed_item = layers.Embedding( input_dim=vocabulary_size, output_dim=embedding_dim, embeddings_initializer="he_normal", embeddings_regularizer=keras.regularizers.l2(1e-6), name="item_embeddings", ) # Lookup embeddings for target. target_embeddings = embed_item(inputs["target"]) # Lookup embeddings for context. context_embeddings = embed_item(inputs["context"]) # Compute dot similarity between target and context embeddings. logits = layers.Dot(axes=1, normalize=False, name="dot_similarity")( [target_embeddings, context_embeddings] ) # Create the model. model = keras.Model(inputs=inputs, outputs=logits) return model<jupyter_output><empty_output><jupyter_text>Train the model We instantiate the model and compile it.<jupyter_code>model = create_model(len(vocabulary), embedding_dim) model.compile( optimizer=keras.optimizers.Adam(learning_rate), loss=keras.losses.BinaryCrossentropy(from_logits=True), )<jupyter_output><empty_output><jupyter_text>Let's plot the model.<jupyter_code>keras.utils.plot_model( model, show_shapes=True, show_dtype=True, show_layer_names=True, )<jupyter_output><empty_output><jupyter_text>Now we train the model on the `dataset`.<jupyter_code>history = model.fit(dataset, epochs=num_epochs)<jupyter_output><empty_output><jupyter_text>Finally we plot the learning history.<jupyter_code>plt.plot(history.history["loss"]) plt.ylabel("loss") plt.xlabel("epoch") plt.show()<jupyter_output><empty_output><jupyter_text>Analyze the learnt embeddings.<jupyter_code>movie_embeddings = model.get_layer("item_embeddings").get_weights()[0] print("Embeddings shape:", movie_embeddings.shape)<jupyter_output><empty_output><jupyter_text>Find related moviesDefine a list with some movies called `query_movies`.<jupyter_code>query_movies = [ "Matrix, The (1999)", "Star Wars: Episode IV - A New Hope (1977)", "Lion King, The (1994)", "Terminator 2: Judgment Day (1991)", "Godfather, The (1972)", ]<jupyter_output><empty_output><jupyter_text>Get the embeddings of the movies in `query_movies`.<jupyter_code>query_embeddings = [] for movie_title in query_movies: movieId = get_movie_id_by_title(movie_title) token_id = vocabulary_lookup[movieId] movie_embedding = movie_embeddings[token_id] query_embeddings.append(movie_embedding) query_embeddings = np.array(query_embeddings)<jupyter_output><empty_output><jupyter_text>Compute the [consine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) between the embeddings of `query_movies`and all the other movies, then pick the top k for each.<jupyter_code>similarities = tf.linalg.matmul( tf.math.l2_normalize(query_embeddings), tf.math.l2_normalize(movie_embeddings), transpose_b=True, ) _, indices = tf.math.top_k(similarities, k=5) indices = 
indices.numpy().tolist()<jupyter_output><empty_output><jupyter_text>Display the top related movies in `query_movies`.<jupyter_code>for idx, title in enumerate(query_movies): print(title) print("".rjust(len(title), "-")) similar_tokens = indices[idx] for token in similar_tokens: similar_movieId = vocabulary[token] similar_title = get_movie_title_by_id(similar_movieId) print(f"- {similar_title}") print()<jupyter_output><empty_output><jupyter_text>Visualize the embeddings using the Embedding Projector<jupyter_code>import io out_v = io.open("embeddings.tsv", "w", encoding="utf-8") out_m = io.open("metadata.tsv", "w", encoding="utf-8") for idx, movie_id in enumerate(vocabulary[1:]): movie_title = list(movies[movies.movieId == movie_id].title)[0] vector = movie_embeddings[idx] out_v.write("\t".join([str(x) for x in vector]) + "\n") out_m.write(movie_title + "\n") out_v.close() out_m.close()<jupyter_output><empty_output>
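<jupyter_text>As an optional extra (not part of the original notebook), related movies for a single title can also be retrieved directly with NumPy instead of TensorFlow ops. The sketch below assumes `movie_embeddings`, `vocabulary`, `vocabulary_lookup` and the helper functions defined earlier are still in scope; the query title is only an illustrative choice and may need to be swapped for a movie that made it into the graph.<jupyter_code># Illustrative query title (swap for any movie present in the graph vocabulary).
query_title = "Toy Story (1995)"
query_token = vocabulary_lookup[get_movie_id_by_title(query_title)]
query_vector = movie_embeddings[query_token]

# Cosine similarity between the query embedding and every movie embedding.
denom = np.linalg.norm(movie_embeddings, axis=1) * np.linalg.norm(query_vector)
scores = movie_embeddings @ query_vector / np.maximum(denom, 1e-9)

# Rank candidates, skipping the "NA" token (index 0) and the query itself.
candidates = [i for i in np.argsort(-scores) if i not in (0, query_token)][:5]
for token_id in candidates:
    print(get_movie_title_by_id(vocabulary[token_id]))<jupyter_output><empty_output>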
keras-io/examples/graph/ipynb/node2vec_movielens.ipynb/0
{ "file_path": "keras-io/examples/graph/ipynb/node2vec_movielens.ipynb", "repo_id": "keras-io", "token_count": 6697 }
86
<jupyter_start><jupyter_text>How to train a Keras model on TFRecord files**Author:** Amy MiHyun Jang**Date created:** 2020/07/29**Last modified:** 2020/08/07**Description:** Loading TFRecords for computer vision models. Introduction + Set UpTFRecords store a sequence of binary records, read linearly. They are useful format forstoring data because they can be read efficiently. Learn more about TFRecords[here](https://www.tensorflow.org/tutorials/load_data/tfrecord).We'll explore how we can easily load in TFRecords for our melanoma classifier.<jupyter_code>import tensorflow as tf from functools import partial import matplotlib.pyplot as plt try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect() print("Device:", tpu.master()) strategy = tf.distribute.TPUStrategy(tpu) except: strategy = tf.distribute.get_strategy() print("Number of replicas:", strategy.num_replicas_in_sync)<jupyter_output><empty_output><jupyter_text>We want a bigger batch size as our data is not balanced.<jupyter_code>AUTOTUNE = tf.data.AUTOTUNE GCS_PATH = "gs://kds-b38ce1b823c3ae623f5691483dbaa0f0363f04b0d6a90b63cf69946e" BATCH_SIZE = 64 IMAGE_SIZE = [1024, 1024]<jupyter_output><empty_output><jupyter_text>Load the data<jupyter_code>FILENAMES = tf.io.gfile.glob(GCS_PATH + "/tfrecords/train*.tfrec") split_ind = int(0.9 * len(FILENAMES)) TRAINING_FILENAMES, VALID_FILENAMES = FILENAMES[:split_ind], FILENAMES[split_ind:] TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/tfrecords/test*.tfrec") print("Train TFRecord Files:", len(TRAINING_FILENAMES)) print("Validation TFRecord Files:", len(VALID_FILENAMES)) print("Test TFRecord Files:", len(TEST_FILENAMES))<jupyter_output><empty_output><jupyter_text>Decoding the dataThe images have to be converted to tensors so that it will be a valid input in our model.As images utilize an RBG scale, we specify 3 channels.We also reshape our data so that all of the images will be the same shape.<jupyter_code>def decode_image(image): image = tf.image.decode_jpeg(image, channels=3) image = tf.cast(image, tf.float32) image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image<jupyter_output><empty_output><jupyter_text>As we load in our data, we need both our `X` and our `Y`. The X is our image; the modelwill find features and patterns in our image dataset. We want to predict Y, theprobability that the lesion in the image is malignant. We will to through our TFRecordsand parse out the image and the target values.<jupyter_code>def read_tfrecord(example, labeled): tfrecord_format = ( { "image": tf.io.FixedLenFeature([], tf.string), "target": tf.io.FixedLenFeature([], tf.int64), } if labeled else { "image": tf.io.FixedLenFeature([], tf.string), } ) example = tf.io.parse_single_example(example, tfrecord_format) image = decode_image(example["image"]) if labeled: label = tf.cast(example["target"], tf.int32) return image, label return image<jupyter_output><empty_output><jupyter_text>Define loading methodsOur dataset is not ordered in any meaningful way, so the order can be ignored whenloading our dataset. 
By ignoring the order and reading files as soon as they come in, itwill take a shorter time to load the data.<jupyter_code>def load_dataset(filenames, labeled=True): ignore_order = tf.data.Options() ignore_order.experimental_deterministic = False # disable order, increase speed dataset = tf.data.TFRecordDataset( filenames ) # automatically interleaves reads from multiple files dataset = dataset.with_options( ignore_order ) # uses data as soon as it streams in, rather than in its original order dataset = dataset.map( partial(read_tfrecord, labeled=labeled), num_parallel_calls=AUTOTUNE ) # returns a dataset of (image, label) pairs if labeled=True or just images if labeled=False return dataset<jupyter_output><empty_output><jupyter_text>We define the following function to get our different datasets.<jupyter_code>def get_dataset(filenames, labeled=True): dataset = load_dataset(filenames, labeled=labeled) dataset = dataset.shuffle(2048) dataset = dataset.prefetch(buffer_size=AUTOTUNE) dataset = dataset.batch(BATCH_SIZE) return dataset<jupyter_output><empty_output><jupyter_text>Visualize input images<jupyter_code>train_dataset = get_dataset(TRAINING_FILENAMES) valid_dataset = get_dataset(VALID_FILENAMES) test_dataset = get_dataset(TEST_FILENAMES, labeled=False) image_batch, label_batch = next(iter(train_dataset)) def show_batch(image_batch, label_batch): plt.figure(figsize=(10, 10)) for n in range(25): ax = plt.subplot(5, 5, n + 1) plt.imshow(image_batch[n] / 255.0) if label_batch[n]: plt.title("MALIGNANT") else: plt.title("BENIGN") plt.axis("off") show_batch(image_batch.numpy(), label_batch.numpy())<jupyter_output><empty_output><jupyter_text>Building our model Define callbacksThe following function allows for the model to change the learning rate as it runs eachepoch.We can use callbacks to stop training when there are no improvements in the model. At theend of the training process, the model will restore the weights of its best iteration.<jupyter_code>initial_learning_rate = 0.01 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate, decay_steps=20, decay_rate=0.96, staircase=True ) checkpoint_cb = tf.keras.callbacks.ModelCheckpoint( "melanoma_model.h5", save_best_only=True ) early_stopping_cb = tf.keras.callbacks.EarlyStopping( patience=10, restore_best_weights=True )<jupyter_output><empty_output><jupyter_text>Build our base modelTransfer learning is a great way to reap the benefits of a well-trained model withouthaving the train the model ourselves. For this notebook, we want to import the Xceptionmodel. A more in-depth analysis of transfer learning can be found[here](https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/).We do not want our metric to be ```accuracy``` because our data is imbalanced. 
For our example, we will be looking at the area under a ROC curve.<jupyter_code>def make_model(): base_model = tf.keras.applications.Xception( input_shape=(*IMAGE_SIZE, 3), include_top=False, weights="imagenet" ) base_model.trainable = False inputs = tf.keras.layers.Input([*IMAGE_SIZE, 3]) x = tf.keras.applications.xception.preprocess_input(inputs) x = base_model(x) x = tf.keras.layers.GlobalAveragePooling2D()(x) x = tf.keras.layers.Dense(8, activation="relu")(x) x = tf.keras.layers.Dropout(0.7)(x) outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule), loss="binary_crossentropy", metrics=tf.keras.metrics.AUC(name="auc"), ) return model<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>with strategy.scope(): model = make_model() history = model.fit( train_dataset, epochs=2, validation_data=valid_dataset, callbacks=[checkpoint_cb, early_stopping_cb], )<jupyter_output><empty_output><jupyter_text>Predict results We'll use our model to predict results for our test dataset images. Values closer to `0` are more likely to be benign and values closer to `1` are more likely to be malignant.<jupyter_code>def show_batch_predictions(image_batch): plt.figure(figsize=(10, 10)) for n in range(25): ax = plt.subplot(5, 5, n + 1) plt.imshow(image_batch[n] / 255.0) img_array = tf.expand_dims(image_batch[n], axis=0) plt.title(model.predict(img_array)[0]) plt.axis("off") image_batch = next(iter(test_dataset)) show_batch_predictions(image_batch)<jupyter_output><empty_output>
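<jupyter_text>The notebook only reads TFRecord files that were prepared in advance. As an optional aside (not part of the original example), the sketch below shows how a single labeled record matching the same feature specification could be serialized and written, assuming `image_batch` and `label_batch` from the visualization step are still in scope.<jupyter_code>def serialize_example(image_bytes, target):
    # Mirror the schema expected by `read_tfrecord`: a bytes "image" and an int64 "target".
    feature = {
        "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
        "target": tf.train.Feature(int64_list=tf.train.Int64List(value=[int(target)])),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    return example.SerializeToString()


# Hypothetical usage: re-encode one image from the training batch and write it out.
image_bytes = tf.io.encode_jpeg(tf.cast(image_batch[0], tf.uint8)).numpy()
with tf.io.TFRecordWriter("sample.tfrec") as writer:
    writer.write(serialize_example(image_bytes, label_batch[0]))<jupyter_output><empty_output>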
keras-io/examples/keras_recipes/ipynb/tfrecord.ipynb/0
{ "file_path": "keras-io/examples/keras_recipes/ipynb/tfrecord.ipynb", "repo_id": "keras-io", "token_count": 2943 }
87
<jupyter_start><jupyter_text>English-to-Spanish translation with a sequence-to-sequence Transformer**Author:** [fchollet](https://twitter.com/fchollet)**Date created:** 2021/05/26**Last modified:** 2023/02/25**Description:** Implementing a sequence-to-sequence Transformer and training it on a machine translation task. IntroductionIn this example, we'll build a sequence-to-sequence Transformer model, whichwe'll train on an English-to-Spanish machine translation task.You'll learn how to:- Vectorize text using the Keras `TextVectorization` layer.- Implement a `TransformerEncoder` layer, a `TransformerDecoder` layer,and a `PositionalEmbedding` layer.- Prepare data for training a sequence-to-sequence model.- Use the trained model to generate translations of never-seen-beforeinput sentences (sequence-to-sequence inference).The code featured here is adapted from the book[Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition)(chapter 11: Deep learning for text).The present example is fairly barebones, so for detailed explanations ofhow each building block works, as well as the theory behind Transformers,I recommend reading the book. Setup<jupyter_code># We set the backend to TensorFlow. The code works with # both `tensorflow` and `torch`. It does not work with JAX # due to the behavior of `jax.numpy.tile` in a jit scope # (used in `TransformerDecoder.get_causal_attention_mask()`: # `tile` in JAX does not support a dynamic `reps` argument. # You can make the code work in JAX by wrapping the # inside of the `get_causal_attention_mask` method in # a decorator to prevent jit compilation: # `with jax.ensure_compile_time_eval():`. import os os.environ["KERAS_BACKEND"] = "tensorflow" import pathlib import random import string import re import numpy as np import tensorflow.data as tf_data import tensorflow.strings as tf_strings import keras from keras import layers from keras import ops from keras.layers import TextVectorization<jupyter_output><empty_output><jupyter_text>Downloading the dataWe'll be working with an English-to-Spanish translation datasetprovided by [Anki](https://www.manythings.org/anki/). 
Let's download it:<jupyter_code>text_file = keras.utils.get_file( fname="spa-eng.zip", origin="http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip", extract=True, ) text_file = pathlib.Path(text_file).parent / "spa-eng" / "spa.txt"<jupyter_output><empty_output><jupyter_text>Parsing the dataEach line contains an English sentence and its corresponding Spanish sentence.The English sentence is the *source sequence* and Spanish one is the *target sequence*.We prepend the token `"[start]"` and we append the token `"[end]"` to the Spanish sentence.<jupyter_code>with open(text_file) as f: lines = f.read().split("\n")[:-1] text_pairs = [] for line in lines: eng, spa = line.split("\t") spa = "[start] " + spa + " [end]" text_pairs.append((eng, spa))<jupyter_output><empty_output><jupyter_text>Here's what our sentence pairs look like:<jupyter_code>for _ in range(5): print(random.choice(text_pairs))<jupyter_output><empty_output><jupyter_text>Now, let's split the sentence pairs into a training set, a validation set,and a test set.<jupyter_code>random.shuffle(text_pairs) num_val_samples = int(0.15 * len(text_pairs)) num_train_samples = len(text_pairs) - 2 * num_val_samples train_pairs = text_pairs[:num_train_samples] val_pairs = text_pairs[num_train_samples : num_train_samples + num_val_samples] test_pairs = text_pairs[num_train_samples + num_val_samples :] print(f"{len(text_pairs)} total pairs") print(f"{len(train_pairs)} training pairs") print(f"{len(val_pairs)} validation pairs") print(f"{len(test_pairs)} test pairs")<jupyter_output><empty_output><jupyter_text>Vectorizing the text dataWe'll use two instances of the `TextVectorization` layer to vectorize the textdata (one for English and one for Spanish),that is to say, to turn the original strings into integer sequenceswhere each integer represents the index of a word in a vocabulary.The English layer will use the default string standardization (strip punctuation characters)and splitting scheme (split on whitespace), whilethe Spanish layer will use a custom standardization, where we add the character`"¿"` to the set of punctuation characters to be stripped.Note: in a production-grade machine translation model, I would not recommendstripping the punctuation characters in either language. 
Instead, I would recommend turningeach punctuation character into its own token,which you could achieve by providing a custom `split` function to the `TextVectorization` layer.<jupyter_code>strip_chars = string.punctuation + "¿" strip_chars = strip_chars.replace("[", "") strip_chars = strip_chars.replace("]", "") vocab_size = 15000 sequence_length = 20 batch_size = 64 def custom_standardization(input_string): lowercase = tf_strings.lower(input_string) return tf_strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars), "") eng_vectorization = TextVectorization( max_tokens=vocab_size, output_mode="int", output_sequence_length=sequence_length, ) spa_vectorization = TextVectorization( max_tokens=vocab_size, output_mode="int", output_sequence_length=sequence_length + 1, standardize=custom_standardization, ) train_eng_texts = [pair[0] for pair in train_pairs] train_spa_texts = [pair[1] for pair in train_pairs] eng_vectorization.adapt(train_eng_texts) spa_vectorization.adapt(train_spa_texts)<jupyter_output><empty_output><jupyter_text>Next, we'll format our datasets.At each training step, the model will seek to predict target words N+1 (and beyond)using the source sentence and the target words 0 to N.As such, the training dataset will yield a tuple `(inputs, targets)`, where:- `inputs` is a dictionary with the keys `encoder_inputs` and `decoder_inputs`.`encoder_inputs` is the vectorized source sentence and `encoder_inputs` is the target sentence "so far",that is to say, the words 0 to N used to predict word N+1 (and beyond) in the target sentence.- `target` is the target sentence offset by one step:it provides the next words in the target sentence -- what the model will try to predict.<jupyter_code>def format_dataset(eng, spa): eng = eng_vectorization(eng) spa = spa_vectorization(spa) return ( { "encoder_inputs": eng, "decoder_inputs": spa[:, :-1], }, spa[:, 1:], ) def make_dataset(pairs): eng_texts, spa_texts = zip(*pairs) eng_texts = list(eng_texts) spa_texts = list(spa_texts) dataset = tf_data.Dataset.from_tensor_slices((eng_texts, spa_texts)) dataset = dataset.batch(batch_size) dataset = dataset.map(format_dataset) return dataset.cache().shuffle(2048).prefetch(16) train_ds = make_dataset(train_pairs) val_ds = make_dataset(val_pairs)<jupyter_output><empty_output><jupyter_text>Let's take a quick look at the sequence shapes(we have batches of 64 pairs, and all sequences are 20 steps long):<jupyter_code>for inputs, targets in train_ds.take(1): print(f'inputs["encoder_inputs"].shape: {inputs["encoder_inputs"].shape}') print(f'inputs["decoder_inputs"].shape: {inputs["decoder_inputs"].shape}') print(f"targets.shape: {targets.shape}")<jupyter_output><empty_output><jupyter_text>Building the modelOur sequence-to-sequence Transformer consists of a `TransformerEncoder`and a `TransformerDecoder` chained together. 
To make the model aware of word order,we also use a `PositionalEmbedding` layer.The source sequence will be pass to the `TransformerEncoder`,which will produce a new representation of it.This new representation will then be passedto the `TransformerDecoder`, together with the target sequence so far (target words 0 to N).The `TransformerDecoder` will then seek to predict the next words in the target sequence (N+1 and beyond).A key detail that makes this possible is causal masking(see method `get_causal_attention_mask()` on the `TransformerDecoder`).The `TransformerDecoder` sees the entire sequences at once, and thus we must makesure that it only uses information from target tokens 0 to N when predicting token N+1(otherwise, it could use information from the future, which wouldresult in a model that cannot be used at inference time).<jupyter_code>import keras.ops as ops class TransformerEncoder(layers.Layer): def __init__(self, embed_dim, dense_dim, num_heads, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.dense_dim = dense_dim self.num_heads = num_heads self.attention = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim ) self.dense_proj = keras.Sequential( [ layers.Dense(dense_dim, activation="relu"), layers.Dense(embed_dim), ] ) self.layernorm_1 = layers.LayerNormalization() self.layernorm_2 = layers.LayerNormalization() self.supports_masking = True def call(self, inputs, mask=None): if mask is not None: padding_mask = ops.cast(mask[:, None, :], dtype="int32") else: padding_mask = None attention_output = self.attention( query=inputs, value=inputs, key=inputs, attention_mask=padding_mask ) proj_input = self.layernorm_1(inputs + attention_output) proj_output = self.dense_proj(proj_input) return self.layernorm_2(proj_input + proj_output) def get_config(self): config = super().get_config() config.update( { "embed_dim": self.embed_dim, "dense_dim": self.dense_dim, "num_heads": self.num_heads, } ) return config class PositionalEmbedding(layers.Layer): def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs): super().__init__(**kwargs) self.token_embeddings = layers.Embedding( input_dim=vocab_size, output_dim=embed_dim ) self.position_embeddings = layers.Embedding( input_dim=sequence_length, output_dim=embed_dim ) self.sequence_length = sequence_length self.vocab_size = vocab_size self.embed_dim = embed_dim def call(self, inputs): length = ops.shape(inputs)[-1] positions = ops.arange(0, length, 1) embedded_tokens = self.token_embeddings(inputs) embedded_positions = self.position_embeddings(positions) return embedded_tokens + embedded_positions def compute_mask(self, inputs, mask=None): if mask is None: return None else: return ops.not_equal(inputs, 0) def get_config(self): config = super().get_config() config.update( { "sequence_length": self.sequence_length, "vocab_size": self.vocab_size, "embed_dim": self.embed_dim, } ) return config class TransformerDecoder(layers.Layer): def __init__(self, embed_dim, latent_dim, num_heads, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim self.latent_dim = latent_dim self.num_heads = num_heads self.attention_1 = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim ) self.attention_2 = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embed_dim ) self.dense_proj = keras.Sequential( [ layers.Dense(latent_dim, activation="relu"), layers.Dense(embed_dim), ] ) self.layernorm_1 = layers.LayerNormalization() self.layernorm_2 = layers.LayerNormalization() self.layernorm_3 = 
layers.LayerNormalization() self.supports_masking = True def call(self, inputs, encoder_outputs, mask=None): causal_mask = self.get_causal_attention_mask(inputs) if mask is not None: padding_mask = ops.cast(mask[:, None, :], dtype="int32") padding_mask = ops.minimum(padding_mask, causal_mask) else: padding_mask = None attention_output_1 = self.attention_1( query=inputs, value=inputs, key=inputs, attention_mask=causal_mask ) out_1 = self.layernorm_1(inputs + attention_output_1) attention_output_2 = self.attention_2( query=out_1, value=encoder_outputs, key=encoder_outputs, attention_mask=padding_mask, ) out_2 = self.layernorm_2(out_1 + attention_output_2) proj_output = self.dense_proj(out_2) return self.layernorm_3(out_2 + proj_output) def get_causal_attention_mask(self, inputs): input_shape = ops.shape(inputs) batch_size, sequence_length = input_shape[0], input_shape[1] i = ops.arange(sequence_length)[:, None] j = ops.arange(sequence_length) mask = ops.cast(i >= j, dtype="int32") mask = ops.reshape(mask, (1, input_shape[1], input_shape[1])) mult = ops.concatenate( [ops.expand_dims(batch_size, -1), ops.convert_to_tensor([1, 1])], axis=0, ) return ops.tile(mask, mult) def get_config(self): config = super().get_config() config.update( { "embed_dim": self.embed_dim, "latent_dim": self.latent_dim, "num_heads": self.num_heads, } ) return config<jupyter_output><empty_output><jupyter_text>Next, we assemble the end-to-end model.<jupyter_code>embed_dim = 256 latent_dim = 2048 num_heads = 8 encoder_inputs = keras.Input(shape=(None,), dtype="int64", name="encoder_inputs") x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs) encoder_outputs = TransformerEncoder(embed_dim, latent_dim, num_heads)(x) encoder = keras.Model(encoder_inputs, encoder_outputs) decoder_inputs = keras.Input(shape=(None,), dtype="int64", name="decoder_inputs") encoded_seq_inputs = keras.Input(shape=(None, embed_dim), name="decoder_state_inputs") x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs) x = TransformerDecoder(embed_dim, latent_dim, num_heads)(x, encoded_seq_inputs) x = layers.Dropout(0.5)(x) decoder_outputs = layers.Dense(vocab_size, activation="softmax")(x) decoder = keras.Model([decoder_inputs, encoded_seq_inputs], decoder_outputs) decoder_outputs = decoder([decoder_inputs, encoder_outputs]) transformer = keras.Model( [encoder_inputs, decoder_inputs], decoder_outputs, name="transformer" )<jupyter_output><empty_output><jupyter_text>Training our modelWe'll use accuracy as a quick way to monitor training progress on the validation data.Note that machine translation typically uses BLEU scores as well as other metrics, rather than accuracy.Here we only train for 1 epoch, but to get the model to actually convergeyou should train for at least 30 epochs.<jupyter_code>epochs = 1 # This should be at least 30 for convergence transformer.summary() transformer.compile( "rmsprop", loss="sparse_categorical_crossentropy", metrics=["accuracy"] ) transformer.fit(train_ds, epochs=epochs, validation_data=val_ds)<jupyter_output><empty_output><jupyter_text>Decoding test sentencesFinally, let's demonstrate how to translate brand new English sentences.We simply feed into the model the vectorized English sentenceas well as the target token `"[start]"`, then we repeatedly generated the next token, untilwe hit the token `"[end]"`.<jupyter_code>spa_vocab = spa_vectorization.get_vocabulary() spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab)) max_decoded_sentence_length = 20 def 
decode_sequence(input_sentence): tokenized_input_sentence = eng_vectorization([input_sentence]) decoded_sentence = "[start]" for i in range(max_decoded_sentence_length): tokenized_target_sentence = spa_vectorization([decoded_sentence])[:, :-1] predictions = transformer([tokenized_input_sentence, tokenized_target_sentence]) # ops.argmax(predictions[0, i, :]) is not a concrete value for jax here sampled_token_index = ops.convert_to_numpy( ops.argmax(predictions[0, i, :]) ).item(0) sampled_token = spa_index_lookup[sampled_token_index] decoded_sentence += " " + sampled_token if sampled_token == "[end]": break return decoded_sentence test_eng_texts = [pair[0] for pair in test_pairs] for _ in range(30): input_sentence = random.choice(test_eng_texts) translated = decode_sequence(input_sentence)<jupyter_output><empty_output>
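<jupyter_text>The loop above generates translations but never displays them. As a small addition (not part of the original example), the sketch below prints a handful of input/output pairs, assuming `decode_sequence` and `test_eng_texts` from the previous cell are in scope. With only one training epoch the translations will still be rough.<jupyter_code>for _ in range(5):
    input_sentence = random.choice(test_eng_texts)
    translated = decode_sequence(input_sentence)
    print(f"English: {input_sentence}")
    print(f"Spanish: {translated}")
    print()<jupyter_output><empty_output>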
keras-io/examples/nlp/ipynb/neural_machine_translation_with_transformer.ipynb/0
{ "file_path": "keras-io/examples/nlp/ipynb/neural_machine_translation_with_transformer.ipynb", "repo_id": "keras-io", "token_count": 6390 }
88
# Abstractive Text Summarization with BART **Author:** [Abheesht Sharma](https://github.com/abheesht17/)<br> **Date created:** 2023/07/08<br> **Last modified:** 2023/07/08<br> **Description:** Use KerasNLP to fine-tune BART on the abstractive summarization task. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/abstractive_summarization_with_bart.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/abstractive_summarization_with_bart.py) --- ## Introduction In the era of information overload, it has become crucial to extract the crux of a long document or a conversation and express it in a few sentences. Owing to the fact that summarization has widespread applications in different domains, it has become a key, well-studied NLP task in recent years. [Bidirectional Autoregressive Transformer (BART)](https://arxiv.org/abs/1910.13461) is a Transformer-based encoder-decoder model, often used for sequence-to-sequence tasks like summarization and neural machine translation. BART is pre-trained in a self-supervised fashion on a large text corpus. During pre-training, the text is corrupted and BART is trained to reconstruct the original text (hence called a "denoising autoencoder"). Some pre-training tasks include token masking, token deletion, sentence permutation (shuffle sentences and train BART to fix the order), etc. In this example, we will demonstrate how to fine-tune BART on the abstractive summarization task (on conversations!) using KerasNLP, and generate summaries using the fine-tuned model. --- ## Setup Before we start implementing the pipeline, let's install and import all the libraries we need. We'll be using the KerasNLP library. We will also need a couple of utility libraries. ```python !pip install git+https://github.com/keras-team/keras-nlp.git py7zr -q ``` <div class="k-default-codeblock"> ``` Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.toml) ... [?25l[?25hdone  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 66.4/66.4 kB 1.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.1/2.1 MB 34.8 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 412.3/412.3 kB 30.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 138.8/138.8 kB 15.1 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 49.8/49.8 kB 5.8 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 2.7/2.7 MB 61.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 93.1/93.1 kB 10.1 MB/s eta 0:00:00 [?25h Building wheel for keras-nlp (pyproject.toml) ... [?25l[?25hdone ``` </div> This examples uses [Keras Core](https://keras.io/keras_core/) to work in any of `"tensorflow"`, `"jax"` or `"torch"`. Support for Keras Core is baked into KerasNLP, simply change the `"KERAS_BACKEND"` environment variable to select the backend of your choice. We select the JAX backend below. ```python import os os.environ["KERAS_BACKEND"] = "jax" ``` Import all necessary libraries. ```python import py7zr import time import keras_nlp import tensorflow as tf import tensorflow_datasets as tfds import keras_core as keras ``` <div class="k-default-codeblock"> ``` Using JAX backend. ``` </div> Let's also define our hyperparameters. 
```python BATCH_SIZE = 8 NUM_BATCHES = 600 EPOCHS = 1 # Can be set to a higher value for better results MAX_ENCODER_SEQUENCE_LENGTH = 512 MAX_DECODER_SEQUENCE_LENGTH = 128 MAX_GENERATION_LENGTH = 40 ``` --- ## Dataset Let's load the [SAMSum dataset](https://arxiv.org/abs/1911.12237). This dataset contains around 15,000 pairs of conversations/dialogues and summaries. ```python # Download the dataset. filename = keras.utils.get_file( "corpus.7z", origin="https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z", ) # Extract the `.7z` file. with py7zr.SevenZipFile(filename, mode="r") as z: z.extractall(path="/root/tensorflow_datasets/downloads/manual") # Load data using TFDS. samsum_ds = tfds.load("samsum", split="train", as_supervised=True) ``` <div class="k-default-codeblock"> ``` Downloading data from https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z 2944100/2944100 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step Downloading and preparing dataset Unknown size (download: Unknown size, generated: 10.71 MiB, total: 10.71 MiB) to /root/tensorflow_datasets/samsum/1.0.0... Generating splits...: 0%| | 0/3 [00:00<?, ? splits/s] Generating train examples...: 0%| | 0/14732 [00:00<?, ? examples/s] Shuffling /root/tensorflow_datasets/samsum/1.0.0.incompleteYA9MAV/samsum-train.tfrecord*...: 0%| | … Generating validation examples...: 0%| | 0/818 [00:00<?, ? examples/s] Shuffling /root/tensorflow_datasets/samsum/1.0.0.incompleteYA9MAV/samsum-validation.tfrecord*...: 0%| … Generating test examples...: 0%| | 0/819 [00:00<?, ? examples/s] Shuffling /root/tensorflow_datasets/samsum/1.0.0.incompleteYA9MAV/samsum-test.tfrecord*...: 0%| | 0… Dataset samsum downloaded and prepared to /root/tensorflow_datasets/samsum/1.0.0. Subsequent calls will reuse this data. ``` </div> The dataset has two fields: `dialogue` and `summary`. Let's see a sample. ```python for dialogue, summary in samsum_ds: print(dialogue.numpy()) print(summary.numpy()) break ``` <div class="k-default-codeblock"> ``` b"Carter: Hey Alexis, I just wanted to let you know that I had a really nice time with you tonight. \r\nAlexis: Thanks Carter. Yeah, I really enjoyed myself as well. \r\nCarter: If you are up for it, I would really like to see you again soon.\r\nAlexis: Thanks Carter, I'm flattered. But I have a really busy week coming up.\r\nCarter: Yeah, no worries. I totally understand. But if you ever want to go grab dinner again, just let me know. \r\nAlexis: Yeah of course. Thanks again for tonight. \r\nCarter: Sure. Have a great night. " b'Alexis and Carter met tonight. Carter would like to meet again, but Alexis is busy.' ``` </div> We'll now batch the dataset and retain only a subset of the dataset for the purpose of this example. The dialogue is fed to the encoder, and the corresponding summary serves as input to the decoder. We will, therefore, change the format of the dataset to a dictionary having two keys: `"encoder_text"` and `"decoder_text"`.This is how `keras_nlp.models.BartSeq2SeqLMPreprocessor` expects the input format to be. ```python train_ds = ( samsum_ds.map( lambda dialogue, summary: {"encoder_text": dialogue, "decoder_text": summary} ) .batch(BATCH_SIZE) .cache() ) train_ds = train_ds.take(NUM_BATCHES) ``` --- ## Fine-tune BART Let's load the model and preprocessor first. We use sequence lengths of 512 and 128 for the encoder and decoder, respectively, instead of 1024 (which is the default sequence length). This will allow us to run this example quickly on Colab. 
If you observe carefully, the preprocessor is attached to the model. What this means is that we don't have to worry about preprocessing the text inputs; everything will be done internally. The preprocessor tokenizes the encoder text and the decoder text, adds special tokens and pads them. To generate labels for auto-regressive training, the preprocessor shifts the decoder text one position to the right. This is done because at every timestep, the model is trained to predict the next token. ```python preprocessor = keras_nlp.models.BartSeq2SeqLMPreprocessor.from_preset( "bart_base_en", encoder_sequence_length=MAX_ENCODER_SEQUENCE_LENGTH, decoder_sequence_length=MAX_DECODER_SEQUENCE_LENGTH, ) bart_lm = keras_nlp.models.BartSeq2SeqLM.from_preset( "bart_base_en", preprocessor=preprocessor ) bart_lm.summary() ``` <div class="k-default-codeblock"> ``` Downloading data from https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/vocab.json 898823/898823 ━━━━━━━━━━━━━━━━━━━━ 1s 1us/step Downloading data from https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/merges.txt 456318/456318 ━━━━━━━━━━━━━━━━━━━━ 1s 1us/step Downloading data from https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/model.h5 557969120/557969120 ━━━━━━━━━━━━━━━━━━━━ 29s 0us/step ``` </div> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Preprocessor: "bart_seq2_seq_lm_preprocessor"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Tokenizer (type) </span>┃<span style="font-weight: bold"> Vocab # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │ bart_tokenizer (<span style="color: #0087ff; text-decoration-color: #0087ff">BartTokenizer</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">50,265</span> │ └────────────────────────────────────────────────────┴─────────────────────────────────────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "bart_seq2_seq_lm"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │ decoder_padding_mask │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├───────────────────────────────┼───────────────────────────┼─────────────┼────────────────────────────────┤ │ decoder_token_ids │ (<span style="color: 
#00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├───────────────────────────────┼───────────────────────────┼─────────────┼────────────────────────────────┤ │ encoder_padding_mask │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├───────────────────────────────┼───────────────────────────┼─────────────┼────────────────────────────────┤ │ encoder_token_ids │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├───────────────────────────────┼───────────────────────────┼─────────────┼────────────────────────────────┤ │ bart_backbone (<span style="color: #0087ff; text-decoration-color: #0087ff">BartBackbone</span>) │ [(<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>), │ <span style="color: #00af00; text-decoration-color: #00af00">139,417,344</span> │ decoder_padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">768</span>)] │ │ decoder_token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ │ │ encoder_padding_mask[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ │ │ encoder_token_ids[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├───────────────────────────────┼───────────────────────────┼─────────────┼────────────────────────────────┤ │ reverse_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">50265</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">38,603,520</span> │ bart_backbone[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">ReverseEmbedding</span>) │ │ │ │ └───────────────────────────────┴───────────────────────────┴─────────────┴────────────────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span 
style="color: #00af00; text-decoration-color: #00af00">139,417,344</span> (4.15 GB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">139,417,344</span> (4.15 GB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Define the optimizer and loss. We use the Adam optimizer with a linearly decaying learning rate. Compile the model. ```python optimizer = keras.optimizers.AdamW( learning_rate=5e-5, weight_decay=0.01, epsilon=1e-6, global_clipnorm=1.0, # Gradient clipping. ) # Exclude layernorm and bias terms from weight decay. optimizer.exclude_from_weight_decay(var_names=["bias"]) optimizer.exclude_from_weight_decay(var_names=["gamma"]) optimizer.exclude_from_weight_decay(var_names=["beta"]) loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) bart_lm.compile( optimizer=optimizer, loss=loss, weighted_metrics=["accuracy"], ) ``` Let's train the model! ```python bart_lm.fit(train_ds, epochs=EPOCHS) ``` <div class="k-default-codeblock"> ``` 600/600 ━━━━━━━━━━━━━━━━━━━━ 398s 586ms/step - loss: 0.4330 <keras_core.src.callbacks.history.History at 0x7ae2faf3e110> ``` </div> --- ## Generate summaries and evaluate them! Now that the model has been trained, let's get to the fun part - actually generating summaries! Let's pick the first 100 samples from the validation set and generate summaries for them. We will use the default decoding strategy, i.e., greedy search. Generation in KerasNLP is highly optimized. It is backed by the power of XLA. Secondly, key/value tensors in the self-attention layer and cross-attention layer in the decoder are cached to avoid recomputation at every timestep. ```python def generate_text(model, input_text, max_length=200, print_time_taken=False): start = time.time() output = model.generate(input_text, max_length=max_length) end = time.time() print(f"Total Time Elapsed: {end - start:.2f}s") return output # Load the dataset. val_ds = tfds.load("samsum", split="validation", as_supervised=True) val_ds = val_ds.take(100) dialogues = [] ground_truth_summaries = [] for dialogue, summary in val_ds: dialogues.append(dialogue.numpy()) ground_truth_summaries.append(summary.numpy()) # Let's make a dummy call - the first call to XLA generally takes a bit longer. _ = generate_text(bart_lm, "sample text", max_length=MAX_GENERATION_LENGTH) # Generate summaries. generated_summaries = generate_text( bart_lm, val_ds.map(lambda dialogue, _: dialogue).batch(8), max_length=MAX_GENERATION_LENGTH, print_time_taken=True, ) ``` <div class="k-default-codeblock"> ``` Total Time Elapsed: 21.22s Total Time Elapsed: 49.00s ``` </div> Let's see some of the summaries. ```python for dialogue, generated_summary, ground_truth_summary in zip( dialogues[:5], generated_summaries[:5], ground_truth_summaries[:5] ): print("Dialogue:", dialogue) print("Generated Summary:", generated_summary) print("Ground Truth Summary:", ground_truth_summary) print("=============================") ``` <div class="k-default-codeblock"> ``` Dialogue: b'Tony: Is the boss in?\r\nClaire: Not yet.\r\nTony: Could let me know when he comes, please? 
\r\nClaire: Of course.\r\nTony: Thank you.' Generated Summary: Tony will let Claire know when her boss comes. Ground Truth Summary: b"The boss isn't in yet. Claire will let Tony know when he comes." ============================= Dialogue: b"James: What shouldl I get her?\r\nTim: who?\r\nJames: gees Mary my girlfirend\r\nTim: Am I really the person you should be asking?\r\nJames: oh come on it's her birthday on Sat\r\nTim: ask Sandy\r\nTim: I honestly am not the right person to ask this\r\nJames: ugh fine!" Generated Summary: Mary's girlfriend is birthday. James and Tim are going to ask Sandy to buy her. Ground Truth Summary: b"Mary's birthday is on Saturday. Her boyfriend, James, is looking for gift ideas. Tim suggests that he ask Sandy." ============================= Dialogue: b"Mary: So, how's Israel? Have you been on the beach?\r\nKate: It's so expensive! But they say, it's Tel Aviv... Tomorrow we are going to Jerusalem.\r\nMary: I've heard Israel is expensive, Monica was there on vacation last year, she complained about how pricey it is. Are you going to the Dead Sea before it dies? ahahahha\r\nKate: ahahhaha yup, in few days." Generated Summary: Kate is on vacation in Tel Aviv. Mary will visit the Dead Sea in a few days. Ground Truth Summary: b'Mary and Kate discuss how expensive Israel is. Kate is in Tel Aviv now, planning to travel to Jerusalem tomorrow, and to the Dead Sea few days later.' ============================= Dialogue: b"Giny: do we have rice?\r\nRiley: nope, it's finished\r\nGiny: fuck!\r\nGiny: ok, I'll buy" Generated Summary: Giny wants to buy rice from Riley. Ground Truth Summary: b"Giny and Riley don't have any rice left. Giny will buy some." ============================= Dialogue: b"Jude: i'll be in warsaw at the beginning of december so we could meet again\r\nLeon: !!!\r\nLeon: at the beginning means...?\r\nLeon: cuz I won't be here during the first weekend\r\nJude: 10\r\nJude: but i think it's a monday, so never mind i guess :D\r\nLeon: yeah monday doesn't really work for me :D\r\nLeon: :<\r\nJude: oh well next time :d\r\nLeon: yeah...!" Generated Summary: Jude and Leon will meet again this weekend at 10 am. Ground Truth Summary: b'Jude is coming to Warsaw on the 10th of December and wants to see Leon. Leon has no time.' ============================= ``` </div> The generated summaries look awesome! Not bad for a model trained only for 1 epoch and on 5000 examples :)
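As a rough quantitative check (a sketch that is not part of the original walkthrough), you can also score the generated summaries against the references with ROUGE-L. This assumes `keras_nlp.metrics.RougeL` is available in your KerasNLP version; the exact keys returned by `result()` may differ across releases.

```python
rouge_l = keras_nlp.metrics.RougeL()

for generated, reference in zip(generated_summaries, ground_truth_summaries):
    # The ground-truth summaries were collected above as byte strings.
    rouge_l.update_state(reference.decode("utf-8"), generated)

# `result()` returns a dict of precision / recall / F1 scores.
print("ROUGE-L:", rouge_l.result())
```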
keras-io/examples/nlp/md/abstractive_summarization_with_bart.md/0
{ "file_path": "keras-io/examples/nlp/md/abstractive_summarization_with_bart.md", "repo_id": "keras-io", "token_count": 9048 }
89
# Using pre-trained word embeddings **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/05/05<br> **Last modified:** 2020/05/05<br> **Description:** Text classification on the Newsgroup20 dataset using pre-trained GloVe word embeddings. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/pretrained_word_embeddings.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/pretrained_word_embeddings.py) --- ## Setup ```python import os # Only the TensorFlow backend supports string inputs. os.environ["KERAS_BACKEND"] = "tensorflow" import pathlib import numpy as np import tensorflow.data as tf_data import keras from keras import layers ``` --- ## Introduction In this example, we show how to train a text classification model that uses pre-trained word embeddings. We'll work with the Newsgroup20 dataset, a set of 20,000 message board messages belonging to 20 different topic categories. For the pre-trained word embeddings, we'll use [GloVe embeddings](http://nlp.stanford.edu/projects/glove/). --- ## Download the Newsgroup20 data ```python data_path = keras.utils.get_file( "news20.tar.gz", "http://www.cs.cmu.edu/afs/cs.cmu.edu/project/theo-20/www/data/news20.tar.gz", untar=True, ) ``` --- ## Let's take a look at the data ```python data_dir = pathlib.Path(data_path).parent / "20_newsgroup" dirnames = os.listdir(data_dir) print("Number of directories:", len(dirnames)) print("Directory names:", dirnames) fnames = os.listdir(data_dir / "comp.graphics") print("Number of files in comp.graphics:", len(fnames)) print("Some example filenames:", fnames[:5]) ``` <div class="k-default-codeblock"> ``` Number of directories: 20 Directory names: ['comp.sys.ibm.pc.hardware', 'comp.os.ms-windows.misc', 'comp.windows.x', 'sci.space', 'sci.crypt', 'sci.med', 'alt.atheism', 'rec.autos', 'rec.sport.hockey', 'talk.politics.misc', 'talk.politics.mideast', 'rec.motorcycles', 'talk.politics.guns', 'misc.forsale', 'sci.electronics', 'talk.religion.misc', 'comp.graphics', 'soc.religion.christian', 'comp.sys.mac.hardware', 'rec.sport.baseball'] Number of files in comp.graphics: 1000 Some example filenames: ['39638', '38747', '38242', '39057', '39031'] ``` </div> Here's a example of what one file contains: ```python print(open(data_dir / "comp.graphics" / "38987").read()) ``` <div class="k-default-codeblock"> ``` Newsgroups: comp.graphics Path: cantaloupe.srv.cs.cmu.edu!das-news.harvard.edu!noc.near.net!howland.reston.ans.net!agate!dog.ee.lbl.gov!network.ucsd.edu!usc!rpi!nason110.its.rpi.edu!mabusj From: [email protected] (Jasen M. Mabus) Subject: Looking for Brain in CAD Message-ID: <[email protected]> Nntp-Posting-Host: nason110.its.rpi.edu Reply-To: [email protected] Organization: Rensselaer Polytechnic Institute, Troy, NY. Date: Thu, 29 Apr 1993 23:27:20 GMT Lines: 7 ``` </div> <div class="k-default-codeblock"> ``` Jasen Mabus RPI student ``` </div> <div class="k-default-codeblock"> ``` I am looking for a hman brain in any CAD (.dxf,.cad,.iges,.cgm,etc.) or picture (.gif,.jpg,.ras,etc.) format for an animation demonstration. If any has or knows of a location please reply by e-mail to [email protected]. 
``` </div> <div class="k-default-codeblock"> ``` Thank you in advance, Jasen Mabus ``` </div> As you can see, there are header lines that are leaking the file's category, either explicitly (the first line is literally the category name), or implicitly, e.g. via the `Organization` filed. Let's get rid of the headers: ```python samples = [] labels = [] class_names = [] class_index = 0 for dirname in sorted(os.listdir(data_dir)): class_names.append(dirname) dirpath = data_dir / dirname fnames = os.listdir(dirpath) print("Processing %s, %d files found" % (dirname, len(fnames))) for fname in fnames: fpath = dirpath / fname f = open(fpath, encoding="latin-1") content = f.read() lines = content.split("\n") lines = lines[10:] content = "\n".join(lines) samples.append(content) labels.append(class_index) class_index += 1 print("Classes:", class_names) print("Number of samples:", len(samples)) ``` <div class="k-default-codeblock"> ``` Processing alt.atheism, 1000 files found Processing comp.graphics, 1000 files found Processing comp.os.ms-windows.misc, 1000 files found Processing comp.sys.ibm.pc.hardware, 1000 files found Processing comp.sys.mac.hardware, 1000 files found Processing comp.windows.x, 1000 files found Processing misc.forsale, 1000 files found Processing rec.autos, 1000 files found Processing rec.motorcycles, 1000 files found Processing rec.sport.baseball, 1000 files found Processing rec.sport.hockey, 1000 files found Processing sci.crypt, 1000 files found Processing sci.electronics, 1000 files found Processing sci.med, 1000 files found Processing sci.space, 1000 files found Processing soc.religion.christian, 997 files found Processing talk.politics.guns, 1000 files found Processing talk.politics.mideast, 1000 files found Processing talk.politics.misc, 1000 files found Processing talk.religion.misc, 1000 files found Classes: ['alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball', 'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med', 'sci.space', 'soc.religion.christian', 'talk.politics.guns', 'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc'] Number of samples: 19997 ``` </div> There's actually one category that doesn't have the expected number of files, but the difference is small enough that the problem remains a balanced classification problem. --- ## Shuffle and split the data into training & validation sets ```python # Shuffle the data seed = 1337 rng = np.random.RandomState(seed) rng.shuffle(samples) rng = np.random.RandomState(seed) rng.shuffle(labels) # Extract a training & validation split validation_split = 0.2 num_validation_samples = int(validation_split * len(samples)) train_samples = samples[:-num_validation_samples] val_samples = samples[-num_validation_samples:] train_labels = labels[:-num_validation_samples] val_labels = labels[-num_validation_samples:] ``` --- ## Create a vocabulary index Let's use the `TextVectorization` to index the vocabulary found in the dataset. Later, we'll use the same layer instance to vectorize the samples. Our layer will only consider the top 20,000 words, and will truncate or pad sequences to be actually 200 tokens long. 
```python vectorizer = layers.TextVectorization(max_tokens=20000, output_sequence_length=200) text_ds = tf_data.Dataset.from_tensor_slices(train_samples).batch(128) vectorizer.adapt(text_ds) ``` You can retrieve the computed vocabulary used via `vectorizer.get_vocabulary()`. Let's print the top 5 words: ```python vectorizer.get_vocabulary()[:5] ``` <div class="k-default-codeblock"> ``` ['', '[UNK]', 'the', 'to', 'of'] ``` </div> Let's vectorize a test sentence: ```python output = vectorizer([["the cat sat on the mat"]]) output.numpy()[0, :6] ``` <div class="k-default-codeblock"> ``` array([ 2, 3480, 1818, 15, 2, 5830]) ``` </div> As you can see, "the" gets represented as "2". Why not 0, given that "the" was the first word in the vocabulary? That's because index 0 is reserved for padding and index 1 is reserved for "out of vocabulary" tokens. Here's a dict mapping words to their indices: ```python voc = vectorizer.get_vocabulary() word_index = dict(zip(voc, range(len(voc)))) ``` As you can see, we obtain the same encoding as above for our test sentence: ```python test = ["the", "cat", "sat", "on", "the", "mat"] [word_index[w] for w in test] ``` <div class="k-default-codeblock"> ``` [2, 3480, 1818, 15, 2, 5830] ``` </div> --- ## Load pre-trained word embeddings Let's download pre-trained GloVe embeddings (a 822M zip file). You'll need to run the following commands: ```python !wget https://downloads.cs.stanford.edu/nlp/data/glove.6B.zip !unzip -q glove.6B.zip ``` <div class="k-default-codeblock"> ``` --2023-11-19 22:45:27-- https://downloads.cs.stanford.edu/nlp/data/glove.6B.zip Resolving downloads.cs.stanford.edu (downloads.cs.stanford.edu)... 171.64.64.22 Connecting to downloads.cs.stanford.edu (downloads.cs.stanford.edu)|171.64.64.22|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 862182613 (822M) [application/zip] Saving to: ‘glove.6B.zip’ ``` </div> <div class="k-default-codeblock"> ``` glove.6B.zip 100%[===================>] 822.24M 5.05MB/s in 2m 39s ``` </div> <div class="k-default-codeblock"> ``` 2023-11-19 22:48:06 (5.19 MB/s) - ‘glove.6B.zip’ saved [862182613/862182613] ``` </div> The archive contains text-encoded vectors of various sizes: 50-dimensional, 100-dimensional, 200-dimensional, 300-dimensional. We'll use the 100D ones. Let's make a dict mapping words (strings) to their NumPy vector representation: ```python path_to_glove_file = "glove.6B.100d.txt" embeddings_index = {} with open(path_to_glove_file) as f: for line in f: word, coefs = line.split(maxsplit=1) coefs = np.fromstring(coefs, "f", sep=" ") embeddings_index[word] = coefs print("Found %s word vectors." % len(embeddings_index)) ``` <div class="k-default-codeblock"> ``` Found 400000 word vectors. ``` </div> Now, let's prepare a corresponding embedding matrix that we can use in a Keras `Embedding` layer. It's a simple NumPy matrix where entry at index `i` is the pre-trained vector for the word of index `i` in our `vectorizer`'s vocabulary. ```python num_tokens = len(voc) + 2 embedding_dim = 100 hits = 0 misses = 0 # Prepare embedding matrix embedding_matrix = np.zeros((num_tokens, embedding_dim)) for word, i in word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # Words not found in embedding index will be all-zeros. 
# This includes the representation for "padding" and "OOV" embedding_matrix[i] = embedding_vector hits += 1 else: misses += 1 print("Converted %d words (%d misses)" % (hits, misses)) ``` <div class="k-default-codeblock"> ``` Converted 18021 words (1979 misses) ``` </div> Next, we load the pre-trained word embeddings matrix into an `Embedding` layer. Note that we set `trainable=False` so as to keep the embeddings fixed (we don't want to update them during training). ```python from keras.layers import Embedding embedding_layer = Embedding( num_tokens, embedding_dim, trainable=False, ) embedding_layer.build((1,)) embedding_layer.set_weights([embedding_matrix]) ``` --- ## Build the model A simple 1D convnet with global max pooling and a classifier at the end. ```python int_sequences_input = keras.Input(shape=(None,), dtype="int32") embedded_sequences = embedding_layer(int_sequences_input) x = layers.Conv1D(128, 5, activation="relu")(embedded_sequences) x = layers.MaxPooling1D(5)(x) x = layers.Conv1D(128, 5, activation="relu")(x) x = layers.MaxPooling1D(5)(x) x = layers.Conv1D(128, 5, activation="relu")(x) x = layers.GlobalMaxPooling1D()(x) x = layers.Dense(128, activation="relu")(x) x = layers.Dropout(0.5)(x) preds = layers.Dense(len(class_names), activation="softmax")(x) model = keras.Model(int_sequences_input, preds) model.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ embedding (<span style="color: #0087ff; text-decoration-color: #0087ff">Embedding</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">100</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,000,200</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv1d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64,128</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling1d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; 
text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv1d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">82,048</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling1d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv1d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv1D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">82,048</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ global_max_pooling1d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalMaxPooling1D</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,512</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,580</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,247,516</span> (8.57 MB) 
</pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,247,516</span> (8.57 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> --- ## Train the model First, convert our list-of-strings data to NumPy arrays of integer indices. The arrays are right-padded. ```python x_train = vectorizer(np.array([[s] for s in train_samples])).numpy() x_val = vectorizer(np.array([[s] for s in val_samples])).numpy() y_train = np.array(train_labels) y_val = np.array(val_labels) ``` We use categorical crossentropy as our loss since we're doing softmax classification. Moreover, we use `sparse_categorical_crossentropy` since our labels are integers. ```python model.compile( loss="sparse_categorical_crossentropy", optimizer="rmsprop", metrics=["acc"] ) model.fit(x_train, y_train, batch_size=128, epochs=20, validation_data=(x_val, y_val)) ``` <div class="k-default-codeblock"> ``` Epoch 1/20 2/125 ━━━━━━━━━━━━━━━━━━━━ 9s 78ms/step - acc: 0.0352 - loss: 3.2164 WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1700434131.619687 6780 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 125/125 ━━━━━━━━━━━━━━━━━━━━ 22s 123ms/step - acc: 0.0926 - loss: 2.8961 - val_acc: 0.2451 - val_loss: 2.1965 Epoch 2/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 10s 78ms/step - acc: 0.2628 - loss: 2.1377 - val_acc: 0.4421 - val_loss: 1.6594 Epoch 3/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 10s 78ms/step - acc: 0.4504 - loss: 1.5765 - val_acc: 0.5849 - val_loss: 1.2577 Epoch 4/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 10s 76ms/step - acc: 0.5711 - loss: 1.2639 - val_acc: 0.6277 - val_loss: 1.1153 Epoch 5/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 9s 74ms/step - acc: 0.6430 - loss: 1.0318 - val_acc: 0.6684 - val_loss: 0.9902 Epoch 6/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 9s 72ms/step - acc: 0.6990 - loss: 0.8844 - val_acc: 0.6619 - val_loss: 1.0109 Epoch 7/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 9s 70ms/step - acc: 0.7330 - loss: 0.7614 - val_acc: 0.6832 - val_loss: 0.9585 Epoch 8/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 8s 68ms/step - acc: 0.7795 - loss: 0.6328 - val_acc: 0.6847 - val_loss: 0.9917 Epoch 9/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 8s 64ms/step - acc: 0.8203 - loss: 0.5242 - val_acc: 0.7187 - val_loss: 0.9224 Epoch 10/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 8s 60ms/step - acc: 0.8506 - loss: 0.4265 - val_acc: 0.7342 - val_loss: 0.9098 Epoch 11/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 7s 56ms/step - acc: 0.8756 - loss: 0.3659 - val_acc: 0.7204 - val_loss: 1.0022 Epoch 12/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 7s 54ms/step - acc: 0.8921 - loss: 0.3079 - val_acc: 0.7209 - val_loss: 1.0477 Epoch 13/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 7s 54ms/step - acc: 0.9077 - loss: 0.2767 - val_acc: 0.7169 - val_loss: 1.0915 Epoch 14/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 6s 50ms/step - acc: 0.9244 - loss: 0.2253 - val_acc: 0.7382 - val_loss: 1.1397 Epoch 15/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 6s 49ms/step - acc: 0.9301 - loss: 0.2054 - val_acc: 0.7562 - val_loss: 1.0984 Epoch 16/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 42ms/step - acc: 0.9373 - loss: 0.1769 - val_acc: 0.7387 - val_loss: 1.2294 Epoch 17/20 
125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 41ms/step - acc: 0.9467 - loss: 0.1626 - val_acc: 0.7009 - val_loss: 1.4906 Epoch 18/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 39ms/step - acc: 0.9471 - loss: 0.1544 - val_acc: 0.7184 - val_loss: 1.6050 Epoch 19/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - acc: 0.9532 - loss: 0.1388 - val_acc: 0.7407 - val_loss: 1.4360 Epoch 20/20 125/125 ━━━━━━━━━━━━━━━━━━━━ 5s 37ms/step - acc: 0.9519 - loss: 0.1388 - val_acc: 0.7309 - val_loss: 1.5327 <keras.src.callbacks.history.History at 0x7fbf50e6b910> ``` </div> --- ## Export an end-to-end model Now, we may want to export a `Model` object that takes as input a string of arbitrary length, rather than a sequence of indices. It would make the model much more portable, since you wouldn't have to worry about the input preprocessing pipeline. Our `vectorizer` is actually a Keras layer, so it's simple: ```python string_input = keras.Input(shape=(1,), dtype="string") x = vectorizer(string_input) preds = model(x) end_to_end_model = keras.Model(string_input, preds) probabilities = end_to_end_model( keras.ops.convert_to_tensor( [["this message is about computer graphics and 3D modeling"]] ) ) print(class_names[np.argmax(probabilities[0])]) ``` <div class="k-default-codeblock"> ``` comp.graphics ``` </div>
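If you want to see more than the single top class, a small helper like the one below (an illustrative sketch built only from the objects defined above; the function name is arbitrary) prints the `k` most probable newsgroups for a batch of raw strings:

```python
def show_top_predictions(texts, k=3):
    # The end-to-end model takes raw strings, so no manual vectorization is needed.
    inputs = keras.ops.convert_to_tensor([[t] for t in texts])
    probs = end_to_end_model(inputs).numpy()
    for text, p in zip(texts, probs):
        print(text)
        for i in np.argsort(p)[::-1][:k]:
            print(f"  {class_names[i]}: {p[i]:.3f}")


show_top_predictions(["this message is about computer graphics and 3D modeling"])
```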
keras-io/examples/nlp/md/pretrained_word_embeddings.md/0
{ "file_path": "keras-io/examples/nlp/md/pretrained_word_embeddings.md", "repo_id": "keras-io", "token_count": 9211 }
90
<jupyter_start><jupyter_text>Classification with TensorFlow Decision Forests**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2022/01/25**Last modified:** 2022/01/25**Description:** Using TensorFlow Decision Forests for structured data classification. Introduction[TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests)is a collection of state-of-the-art algorithms of Decision Forest modelsthat are compatible with Keras APIs.The models include [Random Forests](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/RandomForestModel),[Gradient Boosted Trees](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel),and [CART](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/CartModel),and can be used for regression, classification, and ranking task.For a beginner's guide to TensorFlow Decision Forests,please refer to this [tutorial](https://www.tensorflow.org/decision_forests/tutorials/beginner_colab).This example uses Gradient Boosted Trees model in binary classification ofstructured data, and covers the following scenarios:1. Build a decision forests model by specifying the input feature usage.2. Implement a custom *Binary Target encoder* as a [Keras Preprocessing layer](https://keras.io/api/layers/preprocessing_layers/)to encode the categorical features with respect to their target value co-occurrences,and then use the encoded features to build a decision forests model.3. Encode the categorical features as [embeddings](https://keras.io/api/layers/core_layers/embedding),train these embeddings in a simple NN model, and then use thetrained embeddings as inputs to build decision forests model.This example uses TensorFlow 2.7 or higher,as well as [TensorFlow Decision Forests](https://www.tensorflow.org/decision_forests),which you can install using the following command:```pythonpip install -U tensorflow_decision_forests``` Setup<jupyter_code>import math import urllib import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_decision_forests as tfdf<jupyter_output><empty_output><jupyter_text>Prepare the dataThis example uses the[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/Census-Income+%28KDD%29)provided by the [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).The task is binary classification to determine whether a person makes over 50K a year.The dataset includes ~300K instances with 41 input features: 7 numerical featuresand 34 categorical features.First we load the data from the UCI Machine Learning Repository into a Pandas DataFrame.<jupyter_code>BASE_PATH = "https://kdd.ics.uci.edu/databases/census-income/census-income" CSV_HEADER = [ l.decode("utf-8").split(":")[0].replace(" ", "_") for l in urllib.request.urlopen(f"{BASE_PATH}.names") if not l.startswith(b"|") ][2:] CSV_HEADER.append("income_level") train_data = pd.read_csv(f"{BASE_PATH}.data.gz", header=None, names=CSV_HEADER,) test_data = pd.read_csv(f"{BASE_PATH}.test.gz", header=None, names=CSV_HEADER,)<jupyter_output><empty_output><jupyter_text>Define dataset metadataHere, we define the metadata of the dataset that will be useful for encodingthe input features with respect to their types.<jupyter_code># Target column name. TARGET_COLUMN_NAME = "income_level" # The labels of the target columns. 
TARGET_LABELS = [" - 50000.", " 50000+."] # Weight column name. WEIGHT_COLUMN_NAME = "instance_weight" # Numeric feature names. NUMERIC_FEATURE_NAMES = [ "age", "wage_per_hour", "capital_gains", "capital_losses", "dividends_from_stocks", "num_persons_worked_for_employer", "weeks_worked_in_year", ] # Categorical features and their vocabulary lists. CATEGORICAL_FEATURE_NAMES = [ "class_of_worker", "detailed_industry_recode", "detailed_occupation_recode", "education", "enroll_in_edu_inst_last_wk", "marital_stat", "major_industry_code", "major_occupation_code", "race", "hispanic_origin", "sex", "member_of_a_labor_union", "reason_for_unemployment", "full_or_part_time_employment_stat", "tax_filer_stat", "region_of_previous_residence", "state_of_previous_residence", "detailed_household_and_family_stat", "detailed_household_summary_in_household", "migration_code-change_in_msa", "migration_code-change_in_reg", "migration_code-move_within_reg", "live_in_this_house_1_year_ago", "migration_prev_res_in_sunbelt", "family_members_under_18", "country_of_birth_father", "country_of_birth_mother", "country_of_birth_self", "citizenship", "own_business_or_self_employed", "fill_inc_questionnaire_for_veteran's_admin", "veterans_benefits", "year", ]<jupyter_output><empty_output><jupyter_text>Now we perform basic data preparation.<jupyter_code>def prepare_dataframe(dataframe): # Convert the target labels from string to integer. dataframe[TARGET_COLUMN_NAME] = dataframe[TARGET_COLUMN_NAME].map( TARGET_LABELS.index ) # Cast the categorical features to string. for feature_name in CATEGORICAL_FEATURE_NAMES: dataframe[feature_name] = dataframe[feature_name].astype(str) prepare_dataframe(train_data) prepare_dataframe(test_data)<jupyter_output><empty_output><jupyter_text>Now let's show the shapes of the training and test dataframes, and display some instances.<jupyter_code>print(f"Train data shape: {train_data.shape}") print(f"Test data shape: {test_data.shape}") print(train_data.head().T)<jupyter_output><empty_output><jupyter_text>Configure hyperparametersYou can find all the parameters of the Gradient Boosted Tree model in the[documentation](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel)<jupyter_code># Maximum number of decision trees. The effective number of trained trees can be smaller if early stopping is enabled. NUM_TREES = 250 # Minimum number of examples in a node. MIN_EXAMPLES = 6 # Maximum depth of the tree. max_depth=1 means that all trees will be roots. MAX_DEPTH = 5 # Ratio of the dataset (sampling without replacement) used to train individual trees for the random sampling method. SUBSAMPLE = 0.65 # Control the sampling of the datasets used to train individual trees. SAMPLING_METHOD = "RANDOM" # Ratio of the training dataset used to monitor the training. Require to be >0 if early stopping is enabled. VALIDATION_RATIO = 0.1<jupyter_output><empty_output><jupyter_text>Implement a training and evaluation procedureThe `run_experiment()` method is responsible loading the train and test datasets,training a given model, and evaluating the trained model.Note that when training a Decision Forests model, only one epoch is needed toread the full dataset. 
Any extra steps will result in unnecessarily slower training. Therefore, the default `num_epochs=1` is used in the `run_experiment()` method.<jupyter_code>def run_experiment(model, train_data, test_data, num_epochs=1, batch_size=None):
    train_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
        train_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
    )
    test_dataset = tfdf.keras.pd_dataframe_to_tf_dataset(
        test_data, label=TARGET_COLUMN_NAME, weight=WEIGHT_COLUMN_NAME
    )

    model.fit(train_dataset, epochs=num_epochs, batch_size=batch_size)
    _, accuracy = model.evaluate(test_dataset, verbose=0)
    print(f"Test accuracy: {round(accuracy * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Experiment 1: Decision Forests with raw features. Specify model input feature usages. You can attach semantics to each feature to control how it is used by the model. If not specified, the semantics are inferred from the representation type. It is recommended to specify the [feature usages](https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/FeatureUsage) explicitly to avoid incorrect inferred semantics. For example, a categorical value identifier (integer) will be inferred as numerical, while it is semantically categorical. For numerical features, you can set the `discretized` parameter to the number of buckets by which the numerical feature should be discretized. This makes the training faster but may lead to worse models.<jupyter_code>def specify_feature_usages():
    feature_usages = []

    for feature_name in NUMERIC_FEATURE_NAMES:
        feature_usage = tfdf.keras.FeatureUsage(
            name=feature_name, semantic=tfdf.keras.FeatureSemantic.NUMERICAL
        )
        feature_usages.append(feature_usage)

    for feature_name in CATEGORICAL_FEATURE_NAMES:
        feature_usage = tfdf.keras.FeatureUsage(
            name=feature_name, semantic=tfdf.keras.FeatureSemantic.CATEGORICAL
        )
        feature_usages.append(feature_usage)

    return feature_usages<jupyter_output><empty_output><jupyter_text>Create a Gradient Boosted Trees model. When compiling a decision forests model, you may only provide extra evaluation metrics. The loss is specified in the model construction, and the optimizer is irrelevant to decision forests models.<jupyter_code>def create_gbt_model():
    # See all the model parameters in https://www.tensorflow.org/decision_forests/api_docs/python/tfdf/keras/GradientBoostedTreesModel
    gbt_model = tfdf.keras.GradientBoostedTreesModel(
        features=specify_feature_usages(),
        exclude_non_specified_features=True,
        num_trees=NUM_TREES,
        max_depth=MAX_DEPTH,
        min_examples=MIN_EXAMPLES,
        subsample=SUBSAMPLE,
        validation_ratio=VALIDATION_RATIO,
        task=tfdf.keras.Task.CLASSIFICATION,
    )
    gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")])
    return gbt_model<jupyter_output><empty_output><jupyter_text>Train and evaluate the model<jupyter_code>gbt_model = create_gbt_model()
run_experiment(gbt_model, train_data, test_data)<jupyter_output><empty_output><jupyter_text>Inspect the model. The `model.summary()` method will display several types of information about your decision trees model: model type, task, input features, and feature importance.<jupyter_code>print(gbt_model.summary())<jupyter_output><empty_output><jupyter_text>Experiment 2: Decision Forests with target encoding. [Target encoding](https://dl.acm.org/doi/10.1145/507533.507538) is a common preprocessing technique for categorical features that converts them into numerical features. Using categorical features with high cardinality as-is may lead to overfitting. Target encoding aims to replace each categorical
feature value with one or morenumerical values that represent its co-occurrence with the target labels.More precisely, given a categorical feature, the binary target encoder in this examplewill produce three new numerical features:1. `positive_frequency`: How many times each feature value occurred with a positive target label.2. `negative_frequency`: How many times each feature value occurred with a negative target label.3. `positive_probability`: The probability that the target label is positive,given the feature value, which is computed as`positive_frequency / (positive_frequency + negative_frequency + correction)`.The `correction` term is added in to make the division more stable for rare categorical values.The default value for `correction` is 1.0.Note that target encoding is effective with models that cannot automaticallylearn dense representations to categorical features, such as decision forestsor kernel methods. If neural network models are used, its recommended toencode categorical features as embeddings. Implement Binary Target EncoderFor simplicity, we assume that the inputs for the `adapt` and `call` methodsare in the expected data types and shapes, so no validation logic is added.It is recommended to pass the `vocabulary_size` of the categorical feature to the`BinaryTargetEncoding` constructor. If not specified, it will be computed duringthe `adapt()` method execution.<jupyter_code>class BinaryTargetEncoding(layers.Layer): def __init__(self, vocabulary_size=None, correction=1.0, **kwargs): super().__init__(**kwargs) self.vocabulary_size = vocabulary_size self.correction = correction def adapt(self, data): # data is expected to be an integer numpy array to a Tensor shape [num_exmples, 2]. # This contains feature values for a given feature in the dataset, and target values. # Convert the data to a tensor. data = tf.convert_to_tensor(data) # Separate the feature values and target values feature_values = tf.cast(data[:, 0], tf.dtypes.int32) target_values = tf.cast(data[:, 1], tf.dtypes.bool) # Compute the vocabulary_size of not specified. if self.vocabulary_size is None: self.vocabulary_size = tf.unique(feature_values).y.shape[0] # Filter the data where the target label is positive. positive_indices = tf.where(condition=target_values) postive_feature_values = tf.gather_nd( params=feature_values, indices=positive_indices ) # Compute how many times each feature value occurred with a positive target label. positive_frequency = tf.math.unsorted_segment_sum( data=tf.ones( shape=(postive_feature_values.shape[0], 1), dtype=tf.dtypes.float64 ), segment_ids=postive_feature_values, num_segments=self.vocabulary_size, ) # Filter the data where the target label is negative. negative_indices = tf.where(condition=tf.math.logical_not(target_values)) negative_feature_values = tf.gather_nd( params=feature_values, indices=negative_indices ) # Compute how many times each feature value occurred with a negative target label. negative_frequency = tf.math.unsorted_segment_sum( data=tf.ones( shape=(negative_feature_values.shape[0], 1), dtype=tf.dtypes.float64 ), segment_ids=negative_feature_values, num_segments=self.vocabulary_size, ) # Compute positive probability for the input feature values. positive_probability = positive_frequency / ( positive_frequency + negative_frequency + self.correction ) # Concatenate the computed statistics for traget_encoding. 
target_encoding_statistics = tf.cast( tf.concat( [positive_frequency, negative_frequency, positive_probability], axis=1 ), dtype=tf.dtypes.float32, ) self.target_encoding_statistics = tf.constant(target_encoding_statistics) def call(self, inputs): # inputs is expected to be an integer numpy array to a Tensor shape [num_exmples, 1]. # This includes the feature values for a given feature in the dataset. # Raise an error if the target encoding statistics are not computed. if self.target_encoding_statistics == None: raise ValueError( f"You need to call the adapt method to compute target encoding statistics." ) # Convert the inputs to a tensor. inputs = tf.convert_to_tensor(inputs) # Cast the inputs int64 a tensor. inputs = tf.cast(inputs, tf.dtypes.int64) # Lookup target encoding statistics for the input feature values. target_encoding_statistics = tf.cast( tf.gather_nd(self.target_encoding_statistics, inputs), dtype=tf.dtypes.float32, ) return target_encoding_statistics<jupyter_output><empty_output><jupyter_text>Let's test the binary target encoder<jupyter_code>data = tf.constant( [ [0, 1], [2, 0], [0, 1], [1, 1], [1, 1], [2, 0], [1, 0], [0, 1], [2, 1], [1, 0], [0, 1], [2, 0], [0, 1], [1, 1], [1, 1], [2, 0], [1, 0], [0, 1], [2, 0], ] ) binary_target_encoder = BinaryTargetEncoding() binary_target_encoder.adapt(data) print(binary_target_encoder([[0], [1], [2]]))<jupyter_output><empty_output><jupyter_text>Create model inputs<jupyter_code>def create_model_inputs(): inputs = {} for feature_name in NUMERIC_FEATURE_NAMES: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype=tf.float32 ) for feature_name in CATEGORICAL_FEATURE_NAMES: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype=tf.string ) return inputs<jupyter_output><empty_output><jupyter_text>Implement a feature encoding with target encoding<jupyter_code>def create_target_encoder(): inputs = create_model_inputs() target_values = train_data[[TARGET_COLUMN_NAME]].to_numpy() encoded_features = [] for feature_name in inputs: if feature_name in CATEGORICAL_FEATURE_NAMES: # Get the vocabulary of the categorical feature. vocabulary = sorted( [str(value) for value in list(train_data[feature_name].unique())] ) # Create a lookup to convert string values to an integer indices. # Since we are not using a mask token nor expecting any out of vocabulary # (oov) token, we set mask_token to None and num_oov_indices to 0. lookup = layers.StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=0 ) # Convert the string input values into integer indices. value_indices = lookup(inputs[feature_name]) # Prepare the data to adapt the target encoding. print("### Adapting target encoding for:", feature_name) feature_values = train_data[[feature_name]].to_numpy().astype(str) feature_value_indices = lookup(feature_values) data = tf.concat([feature_value_indices, target_values], axis=1) feature_encoder = BinaryTargetEncoding() feature_encoder.adapt(data) # Convert the feature value indices to target encoding representations. encoded_feature = feature_encoder(tf.expand_dims(value_indices, -1)) else: # Expand the dimensions of the numerical input feature and use it as-is. encoded_feature = tf.expand_dims(inputs[feature_name], -1) # Add the encoded feature to the list. encoded_features.append(encoded_feature) # Concatenate all the encoded features. encoded_features = tf.concat(encoded_features, axis=1) # Create and return a Keras model with encoded features as outputs. 
return keras.Model(inputs=inputs, outputs=encoded_features)<jupyter_output><empty_output><jupyter_text>Create a Gradient Boosted Trees model with a preprocessorIn this scenario, we use the target encoding as a preprocessor for the Gradient Boosted Tree model,and let the model infer semantics of the input features.<jupyter_code>def create_gbt_with_preprocessor(preprocessor): gbt_model = tfdf.keras.GradientBoostedTreesModel( preprocessing=preprocessor, num_trees=NUM_TREES, max_depth=MAX_DEPTH, min_examples=MIN_EXAMPLES, subsample=SUBSAMPLE, validation_ratio=VALIDATION_RATIO, task=tfdf.keras.Task.CLASSIFICATION, ) gbt_model.compile(metrics=[keras.metrics.BinaryAccuracy(name="accuracy")]) return gbt_model<jupyter_output><empty_output><jupyter_text>Train and evaluate the model<jupyter_code>gbt_model = create_gbt_with_preprocessor(create_target_encoder()) run_experiment(gbt_model, train_data, test_data)<jupyter_output><empty_output><jupyter_text>Experiment 3: Decision Forests with trained embeddingsIn this scenario, we build an encoder model that codes the categoricalfeatures to embeddings, where the size of the embedding for a given categoricalfeature is the square root to the size of its vocabulary.We train these embeddings in a simple NN model through backpropagation.After the embedding encoder is trained, we used it as a preprocessor to theinput features of a Gradient Boosted Tree model.Note that the embeddings and a decision forest model cannot be trainedsynergically in one phase, since decision forest models do not train with backpropagation.Rather, embeddings has to be trained in an initial phase,and then used as static inputs to the decision forest model. Implement feature encoding with embeddings<jupyter_code>def create_embedding_encoder(size=None): inputs = create_model_inputs() encoded_features = [] for feature_name in inputs: if feature_name in CATEGORICAL_FEATURE_NAMES: # Get the vocabulary of the categorical feature. vocabulary = sorted( [str(value) for value in list(train_data[feature_name].unique())] ) # Create a lookup to convert string values to an integer indices. # Since we are not using a mask token nor expecting any out of vocabulary # (oov) token, we set mask_token to None and num_oov_indices to 0. lookup = layers.StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=0 ) # Convert the string input values into integer indices. value_index = lookup(inputs[feature_name]) # Create an embedding layer with the specified dimensions vocabulary_size = len(vocabulary) embedding_size = int(math.sqrt(vocabulary_size)) feature_encoder = layers.Embedding( input_dim=len(vocabulary), output_dim=embedding_size ) # Convert the index values to embedding representations. encoded_feature = feature_encoder(value_index) else: # Expand the dimensions of the numerical input feature and use it as-is. encoded_feature = tf.expand_dims(inputs[feature_name], -1) # Add the encoded feature to the list. encoded_features.append(encoded_feature) # Concatenate all the encoded features. encoded_features = layers.concatenate(encoded_features, axis=1) # Apply dropout. encoded_features = layers.Dropout(rate=0.25)(encoded_features) # Perform non-linearity projection. encoded_features = layers.Dense( units=size if size else encoded_features.shape[-1], activation="gelu" )(encoded_features) # Create and return a Keras model with encoded features as outputs. 
return keras.Model(inputs=inputs, outputs=encoded_features)<jupyter_output><empty_output><jupyter_text>Build an NN model to train the embeddings<jupyter_code>def create_nn_model(encoder): inputs = create_model_inputs() embeddings = encoder(inputs) output = layers.Dense(units=1, activation="sigmoid")(embeddings) nn_model = keras.Model(inputs=inputs, outputs=output) nn_model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.BinaryCrossentropy(), metrics=[keras.metrics.BinaryAccuracy("accuracy")], ) return nn_model embedding_encoder = create_embedding_encoder(size=64) run_experiment( create_nn_model(embedding_encoder), train_data, test_data, num_epochs=5, batch_size=256, )<jupyter_output><empty_output><jupyter_text>Train and evaluate a Gradient Boosted Tree model with embeddings<jupyter_code>gbt_model = create_gbt_with_preprocessor(embedding_encoder) run_experiment(gbt_model, train_data, test_data)<jupyter_output><empty_output>
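<jupyter_text>As a hedged follow-up that is not part of the original notebook, you can also inspect the variable importances of the final Gradient Boosted Trees model (when a preprocessor is used, these refer to the preprocessed inputs rather than the raw columns). The sketch below assumes the `make_inspector()` and `variable_importances()` utilities are available in your version of TensorFlow Decision Forests.<jupyter_code># Build an inspector for the model trained on top of the embedding encoder.
inspector = gbt_model.make_inspector()

# Each entry maps an importance measure (e.g. "NUM_AS_ROOT") to a ranked list of features.
for importance_name, importances in inspector.variable_importances().items():
    print(importance_name)
    for feature, score in importances[:5]:
        print(f"  {feature}: {score}")<jupyter_output><empty_output>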
keras-io/examples/structured_data/ipynb/classification_with_tfdf.ipynb/0
{ "file_path": "keras-io/examples/structured_data/ipynb/classification_with_tfdf.ipynb", "repo_id": "keras-io", "token_count": 8576 }
91
""" Title: Handwriting recognition Authors: [A_K_Nain](https://twitter.com/A_K_Nain), [Sayak Paul](https://twitter.com/RisingSayak) Date created: 2021/08/16 Last modified: 2023/07/06 Description: Training a handwriting recognition model with variable-length sequences. Accelerator: GPU """ """ ## Introduction This example shows how the [Captcha OCR](https://keras.io/examples/vision/captcha_ocr/) example can be extended to the [IAM Dataset](https://fki.tic.heia-fr.ch/databases/iam-handwriting-database), which has variable length ground-truth targets. Each sample in the dataset is an image of some handwritten text, and its corresponding target is the string present in the image. The IAM Dataset is widely used across many OCR benchmarks, so we hope this example can serve as a good starting point for building OCR systems. """ """ ## Data collection """ """shell wget -q https://github.com/sayakpaul/Handwriting-Recognizer-in-Keras/releases/download/v1.0.0/IAM_Words.zip unzip -qq IAM_Words.zip mkdir data mkdir data/words tar -xf IAM_Words/words.tgz -C data/words mv IAM_Words/words.txt data """ """ Preview how the dataset is organized. Lines prepended by "#" are just metadata information. """ """shell head -20 data/words.txt """ """ ## Imports """ from tensorflow.keras.layers import StringLookup from tensorflow import keras import matplotlib.pyplot as plt import tensorflow as tf import numpy as np import os np.random.seed(42) tf.random.set_seed(42) """ ## Dataset splitting """ base_path = "data" words_list = [] words = open(f"{base_path}/words.txt", "r").readlines() for line in words: if line[0] == "#": continue if line.split(" ")[1] != "err": # We don't need to deal with errored entries. words_list.append(line) len(words_list) np.random.shuffle(words_list) """ We will split the dataset into three subsets with a 90:5:5 ratio (train:validation:test). """ split_idx = int(0.9 * len(words_list)) train_samples = words_list[:split_idx] test_samples = words_list[split_idx:] val_split_idx = int(0.5 * len(test_samples)) validation_samples = test_samples[:val_split_idx] test_samples = test_samples[val_split_idx:] assert len(words_list) == len(train_samples) + len(validation_samples) + len( test_samples ) print(f"Total training samples: {len(train_samples)}") print(f"Total validation samples: {len(validation_samples)}") print(f"Total test samples: {len(test_samples)}") """ ## Data input pipeline We start building our data input pipeline by first preparing the image paths. """ base_image_path = os.path.join(base_path, "words") def get_image_paths_and_labels(samples): paths = [] corrected_samples = [] for i, file_line in enumerate(samples): line_split = file_line.strip() line_split = line_split.split(" ") # Each line split will have this format for the corresponding image: # part1/part1-part2/part1-part2-part3.png image_name = line_split[0] partI = image_name.split("-")[0] partII = image_name.split("-")[1] img_path = os.path.join( base_image_path, partI, partI + "-" + partII, image_name + ".png" ) if os.path.getsize(img_path): paths.append(img_path) corrected_samples.append(file_line.split("\n")[0]) return paths, corrected_samples train_img_paths, train_labels = get_image_paths_and_labels(train_samples) validation_img_paths, validation_labels = get_image_paths_and_labels(validation_samples) test_img_paths, test_labels = get_image_paths_and_labels(test_samples) """ Then we prepare the ground-truth labels. """ # Find maximum length and the size of the vocabulary in the training data. 
train_labels_cleaned = []
characters = set()
max_len = 0

for label in train_labels:
    label = label.split(" ")[-1].strip()
    for char in label:
        characters.add(char)

    max_len = max(max_len, len(label))
    train_labels_cleaned.append(label)

characters = sorted(list(characters))

print("Maximum length: ", max_len)
print("Vocab size: ", len(characters))

# Check some label samples.
train_labels_cleaned[:10]

"""
Now we clean the validation and the test labels as well.
"""


def clean_labels(labels):
    cleaned_labels = []
    for label in labels:
        label = label.split(" ")[-1].strip()
        cleaned_labels.append(label)
    return cleaned_labels


validation_labels_cleaned = clean_labels(validation_labels)
test_labels_cleaned = clean_labels(test_labels)

"""
### Building the character vocabulary

Keras provides different preprocessing layers to deal with different modalities of data.
[This guide](https://keras.io/api/layers/preprocessing_layers/) provides a comprehensive introduction.
Our example involves preprocessing labels at the character level. This means that if there
are two labels, e.g. "cat" and "dog", then our character vocabulary should be
{a, c, d, g, o, t} (without any special tokens). We use the
[`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup/)
layer for this purpose.
"""

AUTOTUNE = tf.data.AUTOTUNE

# Mapping characters to integers.
char_to_num = StringLookup(vocabulary=list(characters), mask_token=None)

# Mapping integers back to original characters.
num_to_char = StringLookup(
    vocabulary=char_to_num.get_vocabulary(), mask_token=None, invert=True
)

"""
### Resizing images without distortion

Instead of square images, many OCR models work with rectangular images. This will become
clearer in a moment when we visualize a few samples from the dataset. While aspect-unaware
resizing of square images does not introduce a significant amount of distortion, this is
not the case for rectangular images. But resizing images to a uniform size is a
requirement for mini-batching. So we need to perform our resizing such that the following
criteria are met:

* Aspect ratio is preserved.
* Content of the images is not affected.
"""


def distortion_free_resize(image, img_size):
    w, h = img_size
    image = tf.image.resize(image, size=(h, w), preserve_aspect_ratio=True)

    # Check the amount of padding that needs to be done.
    pad_height = h - tf.shape(image)[0]
    pad_width = w - tf.shape(image)[1]

    # Only necessary if you want to do the same amount of padding on both sides.
    if pad_height % 2 != 0:
        height = pad_height // 2
        pad_height_top = height + 1
        pad_height_bottom = height
    else:
        pad_height_top = pad_height_bottom = pad_height // 2

    if pad_width % 2 != 0:
        width = pad_width // 2
        pad_width_left = width + 1
        pad_width_right = width
    else:
        pad_width_left = pad_width_right = pad_width // 2

    image = tf.pad(
        image,
        paddings=[
            [pad_height_top, pad_height_bottom],
            [pad_width_left, pad_width_right],
            [0, 0],
        ],
    )

    image = tf.transpose(image, perm=[1, 0, 2])
    image = tf.image.flip_left_right(image)
    return image


"""
If we just go with the plain resizing then the images would look like so:

![](https://i.imgur.com/eqq3s4N.png)

Notice how this resizing would have introduced unnecessary stretching.
""" """ ### Putting the utilities together """ batch_size = 64 padding_token = 99 image_width = 128 image_height = 32 def preprocess_image(image_path, img_size=(image_width, image_height)): image = tf.io.read_file(image_path) image = tf.image.decode_png(image, 1) image = distortion_free_resize(image, img_size) image = tf.cast(image, tf.float32) / 255.0 return image def vectorize_label(label): label = char_to_num(tf.strings.unicode_split(label, input_encoding="UTF-8")) length = tf.shape(label)[0] pad_amount = max_len - length label = tf.pad(label, paddings=[[0, pad_amount]], constant_values=padding_token) return label def process_images_labels(image_path, label): image = preprocess_image(image_path) label = vectorize_label(label) return {"image": image, "label": label} def prepare_dataset(image_paths, labels): dataset = tf.data.Dataset.from_tensor_slices((image_paths, labels)).map( process_images_labels, num_parallel_calls=AUTOTUNE ) return dataset.batch(batch_size).cache().prefetch(AUTOTUNE) """ ## Prepare `tf.data.Dataset` objects """ train_ds = prepare_dataset(train_img_paths, train_labels_cleaned) validation_ds = prepare_dataset(validation_img_paths, validation_labels_cleaned) test_ds = prepare_dataset(test_img_paths, test_labels_cleaned) """ ## Visualize a few samples """ for data in train_ds.take(1): images, labels = data["image"], data["label"] _, ax = plt.subplots(4, 4, figsize=(15, 8)) for i in range(16): img = images[i] img = tf.image.flip_left_right(img) img = tf.transpose(img, perm=[1, 0, 2]) img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8) img = img[:, :, 0] # Gather indices where label!= padding_token. label = labels[i] indices = tf.gather(label, tf.where(tf.math.not_equal(label, padding_token))) # Convert to string. label = tf.strings.reduce_join(num_to_char(indices)) label = label.numpy().decode("utf-8") ax[i // 4, i % 4].imshow(img, cmap="gray") ax[i // 4, i % 4].set_title(label) ax[i // 4, i % 4].axis("off") plt.show() """ You will notice that the content of original image is kept as faithful as possible and has been padded accordingly. """ """ ## Model Our model will use the CTC loss as an endpoint layer. For a detailed understanding of the CTC loss, refer to [this post](https://distill.pub/2017/ctc/). """ class CTCLayer(keras.layers.Layer): def __init__(self, name=None): super().__init__(name=name) self.loss_fn = keras.backend.ctc_batch_cost def call(self, y_true, y_pred): batch_len = tf.cast(tf.shape(y_true)[0], dtype="int64") input_length = tf.cast(tf.shape(y_pred)[1], dtype="int64") label_length = tf.cast(tf.shape(y_true)[1], dtype="int64") input_length = input_length * tf.ones(shape=(batch_len, 1), dtype="int64") label_length = label_length * tf.ones(shape=(batch_len, 1), dtype="int64") loss = self.loss_fn(y_true, y_pred, input_length, label_length) self.add_loss(loss) # At test time, just return the computed predictions. return y_pred def build_model(): # Inputs to the model input_img = keras.Input(shape=(image_width, image_height, 1), name="image") labels = keras.layers.Input(name="label", shape=(None,)) # First conv block. x = keras.layers.Conv2D( 32, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same", name="Conv1", )(input_img) x = keras.layers.MaxPooling2D((2, 2), name="pool1")(x) # Second conv block. 
x = keras.layers.Conv2D( 64, (3, 3), activation="relu", kernel_initializer="he_normal", padding="same", name="Conv2", )(x) x = keras.layers.MaxPooling2D((2, 2), name="pool2")(x) # We have used two max pool with pool size and strides 2. # Hence, downsampled feature maps are 4x smaller. The number of # filters in the last layer is 64. Reshape accordingly before # passing the output to the RNN part of the model. new_shape = ((image_width // 4), (image_height // 4) * 64) x = keras.layers.Reshape(target_shape=new_shape, name="reshape")(x) x = keras.layers.Dense(64, activation="relu", name="dense1")(x) x = keras.layers.Dropout(0.2)(x) # RNNs. x = keras.layers.Bidirectional( keras.layers.LSTM(128, return_sequences=True, dropout=0.25) )(x) x = keras.layers.Bidirectional( keras.layers.LSTM(64, return_sequences=True, dropout=0.25) )(x) # +2 is to account for the two special tokens introduced by the CTC loss. # The recommendation comes here: https://git.io/J0eXP. x = keras.layers.Dense( len(char_to_num.get_vocabulary()) + 2, activation="softmax", name="dense2" )(x) # Add CTC layer for calculating CTC loss at each step. output = CTCLayer(name="ctc_loss")(labels, x) # Define the model. model = keras.models.Model( inputs=[input_img, labels], outputs=output, name="handwriting_recognizer" ) # Optimizer. opt = keras.optimizers.Adam() # Compile the model and return. model.compile(optimizer=opt) return model # Get the model. model = build_model() model.summary() """ ## Evaluation metric [Edit Distance](https://en.wikipedia.org/wiki/Edit_distance) is the most widely used metric for evaluating OCR models. In this section, we will implement it and use it as a callback to monitor our model. """ """ We first segregate the validation images and their labels for convenience. """ validation_images = [] validation_labels = [] for batch in validation_ds: validation_images.append(batch["image"]) validation_labels.append(batch["label"]) """ Now, we create a callback to monitor the edit distances. """ def calculate_edit_distance(labels, predictions): # Get a single batch and convert its labels to sparse tensors. saprse_labels = tf.cast(tf.sparse.from_dense(labels), dtype=tf.int64) # Make predictions and convert them to sparse tensors. input_len = np.ones(predictions.shape[0]) * predictions.shape[1] predictions_decoded = keras.backend.ctc_decode( predictions, input_length=input_len, greedy=True )[0][0][:, :max_len] sparse_predictions = tf.cast( tf.sparse.from_dense(predictions_decoded), dtype=tf.int64 ) # Compute individual edit distances and average them out. edit_distances = tf.edit_distance( sparse_predictions, saprse_labels, normalize=False ) return tf.reduce_mean(edit_distances) class EditDistanceCallback(keras.callbacks.Callback): def __init__(self, pred_model): super().__init__() self.prediction_model = pred_model def on_epoch_end(self, epoch, logs=None): edit_distances = [] for i in range(len(validation_images)): labels = validation_labels[i] predictions = self.prediction_model.predict(validation_images[i]) edit_distances.append(calculate_edit_distance(labels, predictions).numpy()) print( f"Mean edit distance for epoch {epoch + 1}: {np.mean(edit_distances):.4f}" ) """ ## Training Now we are ready to kick off model training. """ epochs = 10 # To get good results this should be at least 50. model = build_model() prediction_model = keras.models.Model( model.get_layer(name="image").input, model.get_layer(name="dense2").output ) edit_distance_callback = EditDistanceCallback(prediction_model) # Train the model. 
history = model.fit( train_ds, validation_data=validation_ds, epochs=epochs, callbacks=[edit_distance_callback], ) """ ## Inference """ # A utility function to decode the output of the network. def decode_batch_predictions(pred): input_len = np.ones(pred.shape[0]) * pred.shape[1] # Use greedy search. For complex tasks, you can use beam search. results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0][ :, :max_len ] # Iterate over the results and get back the text. output_text = [] for res in results: res = tf.gather(res, tf.where(tf.math.not_equal(res, -1))) res = tf.strings.reduce_join(num_to_char(res)).numpy().decode("utf-8") output_text.append(res) return output_text # Let's check results on some test samples. for batch in test_ds.take(1): batch_images = batch["image"] _, ax = plt.subplots(4, 4, figsize=(15, 8)) preds = prediction_model.predict(batch_images) pred_texts = decode_batch_predictions(preds) for i in range(16): img = batch_images[i] img = tf.image.flip_left_right(img) img = tf.transpose(img, perm=[1, 0, 2]) img = (img * 255.0).numpy().clip(0, 255).astype(np.uint8) img = img[:, :, 0] title = f"Prediction: {pred_texts[i]}" ax[i // 4, i % 4].imshow(img, cmap="gray") ax[i // 4, i % 4].set_title(title) ax[i // 4, i % 4].axis("off") plt.show() """ To get better results the model should be trained for at least 50 epochs. """ """ ## Final remarks * The `prediction_model` is fully compatible with TensorFlow Lite. If you are interested, you can use it inside a mobile application. You may find [this notebook](https://github.com/tulasiram58827/ocr_tflite/blob/main/colabs/captcha_ocr_tflite.ipynb) to be useful in this regard. * Not all the training examples are perfectly aligned as observed in this example. This can hurt model performance for complex sequences. To this end, we can leverage Spatial Transformer Networks ([Jaderberg et al.](https://arxiv.org/abs/1506.02025)) that can help the model learn affine transformations that maximize its performance. """
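"""
As a hedged illustration that is not part of the original example: the TensorFlow Lite
compatibility mentioned above could be exercised roughly as sketched below. This assumes
TensorFlow is imported as `tf` (as elsewhere in this script) and that `prediction_model`
is still in scope; the output file name is only a placeholder.
"""

converter = tf.lite.TFLiteConverter.from_keras_model(prediction_model)
# CTC decoding is performed outside the exported model, so the builtin op set is
# usually enough; SELECT_TF_OPS is listed only as a fallback for unsupported ops.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,
    tf.lite.OpsSet.SELECT_TF_OPS,
]
tflite_model = converter.convert()

with open("handwriting_recognizer.tflite", "wb") as f:
    f.write(tflite_model)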
keras-io/examples/vision/handwriting_recognition.py/0
{ "file_path": "keras-io/examples/vision/handwriting_recognition.py", "repo_id": "keras-io", "token_count": 6529 }
92
<jupyter_start><jupyter_text>Focal Modulation: A replacement for Self-Attention**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498), [Ritwik Raha](https://twitter.com/ritwik_raha)**Date created:** 2023/01/25**Last modified:** 2023/02/15**Description:** Image classification with Focal Modulation Networks. IntroductionThis tutorial aims to provide a comprehensive guide to the implementation ofFocal Modulation Networks, as presented in[Yang et al.](https://arxiv.org/abs/2203.11926).This tutorial will provide a formal, minimalistic approach to implementing FocalModulation Networks and explore its potential applications in the field of Deep Learning.**Problem statement**The Transformer architecture ([Vaswani et al.](https://arxiv.org/abs/1706.03762)),which has become the de facto standard in most Natural Language Processing tasks, hasalso been applied to the field of computer vision, e.g. VisionTransformers ([Dosovitskiy et al.](https://arxiv.org/abs/2010.11929v2)).> In Transformers, the self-attention (SA) is arguably the key to its success whichenables input-dependent global interactions, in contrast to convolution operation whichconstraints interactions in a local region with a shared kernel.The **Attention** module is mathematically written as shown in **Equation 1**.| || :--: || Equation 1: The mathematical equation of attention (Source: Aritra and Ritwik) |Where:- `Q` is the query- `K` is the key- `V` is the value- `d_k` is the dimension of the keyWith **self-attention**, the query, key, and value are all sourced from the inputsequence. Let us rewrite the attention equation for self-attention as shown in **Equation2**.| || :--: || Equation 2: The mathematical equation of self-attention (Source: Aritra and Ritwik) |Upon looking at the equation of self-attention, we see that it is a quadratic equation.Therefore, as the number of tokens increase, so does the computation time (cost too). Tomitigate this problem and make Transformers more interpretable, Yang et al.have tried to replace the Self-Attention module with better components.**The Solution**Yang et al. introduce the Focal Modulation layer to serve as aseamless replacement for the Self-Attention Layer. The layer boasts highinterpretability, making it a valuable tool for Deep Learning practitioners.In this tutorial, we will delve into the practical application of this layer by trainingthe entire model on the CIFAR-10 dataset and visually interpreting the layer'sperformance.Note: We try to align our implementation with the[official implementation](https://github.com/microsoft/FocalNet). Setup and ImportsWe use tensorflow version `2.11.0` for this tutorial.<jupyter_code>import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.optimizers.experimental import AdamW from typing import Optional, Tuple, List from matplotlib import pyplot as plt from random import randint # Set seed for reproducibility. tf.keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Global ConfigurationWe do not have any strong rationale behind choosing these hyperparameters. 
Please feelfree to change the configuration and train the model.<jupyter_code># DATA TRAIN_SLICE = 40000 BUFFER_SIZE = 2048 BATCH_SIZE = 1024 AUTO = tf.data.AUTOTUNE INPUT_SHAPE = (32, 32, 3) IMAGE_SIZE = 48 NUM_CLASSES = 10 # OPTIMIZER LEARNING_RATE = 1e-4 WEIGHT_DECAY = 1e-4 # TRAINING EPOCHS = 25<jupyter_output><empty_output><jupyter_text>Load and process the CIFAR-10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() (x_train, y_train), (x_val, y_val) = ( (x_train[:TRAIN_SLICE], y_train[:TRAIN_SLICE]), (x_train[TRAIN_SLICE:], y_train[TRAIN_SLICE:]), )<jupyter_output><empty_output><jupyter_text>Build the augmentationsWe use the `keras.Sequential` API to compose all the individual augmentation stepsinto one API.<jupyter_code># Build the `train` augmentation pipeline. train_aug = keras.Sequential( [ layers.Rescaling(1 / 255.0), layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20), layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE), layers.RandomFlip("horizontal"), ], name="train_data_augmentation", ) # Build the `val` and `test` data pipeline. test_aug = keras.Sequential( [ layers.Rescaling(1 / 255.0), layers.Resizing(IMAGE_SIZE, IMAGE_SIZE), ], name="test_data_augmentation", )<jupyter_output><empty_output><jupyter_text>Build `tf.data` pipeline<jupyter_code>train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_ds = ( train_ds.map( lambda image, label: (train_aug(image), label), num_parallel_calls=AUTO ) .shuffle(BUFFER_SIZE) .batch(BATCH_SIZE) .prefetch(AUTO) ) val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_ds = ( val_ds.map(lambda image, label: (test_aug(image), label), num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) test_ds = ( test_ds.map(lambda image, label: (test_aug(image), label), num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) )<jupyter_output><empty_output><jupyter_text>ArchitectureWe pause here to take a quick look at the Architecture of the Focal Modulation Network.**Figure 1** shows how every individual layer is compiled into a single model. This givesus a bird's eye view of the entire architecture.| || :--: || Figure 1: A diagram of the Focal Modulation model (Source: Aritra and Ritwik) |We dive deep into each of these layers in the following sections. This is the order wewill follow:- Patch Embedding Layer- Focal Modulation Block - Multi-Layer Perceptron - Focal Modulation Layer - Hierarchical Contextualization - Gated Aggregation - Building Focal Modulation Block- Building the Basic LayerTo better understand the architecture in a format we are well versed in, let us see howthe Focal Modulation Network would look when drawn like a Transformer architecture.**Figure 2** shows the encoder layer of a traditional Transformer architecture where SelfAttention is replaced with the Focal Modulation layer.The blue blocks represent the Focal Modulation block. A stackof these blocks builds a single Basic Layer. The green blocksrepresent the Focal Modulation layer.| || :--: || Figure 2: The Entire Architecture (Source: Aritra and Ritwik) | Patch Embedding LayerThe patch embedding layer is used to patchify the input images and project them into alatent space. This layer is also used as the down-sampling layer in the architecture.<jupyter_code>class PatchEmbed(layers.Layer): """Image patch embedding layer, also acts as the down-sampling layer. Args: image_size (Tuple[int]): Input image resolution. 
patch_size (Tuple[int]): Patch spatial resolution. embed_dim (int): Embedding dimension. """ def __init__( self, image_size: Tuple[int] = (224, 224), patch_size: Tuple[int] = (4, 4), embed_dim: int = 96, **kwargs, ): super().__init__(**kwargs) patch_resolution = [ image_size[0] // patch_size[0], image_size[1] // patch_size[1], ] self.image_size = image_size self.patch_size = patch_size self.embed_dim = embed_dim self.patch_resolution = patch_resolution self.num_patches = patch_resolution[0] * patch_resolution[1] self.proj = layers.Conv2D( filters=embed_dim, kernel_size=patch_size, strides=patch_size ) self.flatten = layers.Reshape(target_shape=(-1, embed_dim)) self.norm = keras.layers.LayerNormalization(epsilon=1e-7) def call(self, x: tf.Tensor) -> Tuple[tf.Tensor, int, int, int]: """Patchifies the image and converts into tokens. Args: x: Tensor of shape (B, H, W, C) Returns: A tuple of the processed tensor, height of the projected feature map, width of the projected feature map, number of channels of the projected feature map. """ # Project the inputs. x = self.proj(x) # Obtain the shape from the projected tensor. height = tf.shape(x)[1] width = tf.shape(x)[2] channels = tf.shape(x)[3] # B, H, W, C -> B, H*W, C x = self.norm(self.flatten(x)) return x, height, width, channels<jupyter_output><empty_output><jupyter_text>Focal Modulation blockA Focal Modulation block can be considered as a single Transformer Block with the SelfAttention (SA) module being replaced with Focal Modulation module, as we saw in **Figure2**.Let us recall how a focal modulation block is supposed to look like with the aid of the**Figure 3**.| || :--: || Figure 3: The isolated view of the Focal Modulation Block (Source: Aritra and Ritwik) |The Focal Modulation Block consists of:- Multilayer Perceptron- Focal Modulation layer Multilayer Perceptron<jupyter_code>def MLP( in_features: int, hidden_features: Optional[int] = None, out_features: Optional[int] = None, mlp_drop_rate: float = 0.0, ): hidden_features = hidden_features or in_features out_features = out_features or in_features return keras.Sequential( [ layers.Dense(units=hidden_features, activation=keras.activations.gelu), layers.Dense(units=out_features), layers.Dropout(rate=mlp_drop_rate), ] )<jupyter_output><empty_output><jupyter_text>Focal Modulation layerIn a typical Transformer architecture, for each visual token (**query**) `x_i in R^C` inan input feature map `X in R^{HxWxC}` a **generic encoding process** produces a featurerepresentation `y_i in R^C`.The encoding process consists of **interaction** (with its surroundings for e.g. a dotproduct), and **aggregation** (over the contexts for e.g weighted mean).We will talk about two types of encoding here:- Interaction and then Aggregation in **Self-Attention**- Aggregation and then Interaction in **Focal Modulation****Self-Attention**| || :--: || **Figure 4**: Self-Attention module. (Source: Aritra and Ritwik) || || :--: || **Equation 3:** Aggregation and Interaction in Self-Attention(Surce: Aritra and Ritwik)|As shown in **Figure 4** the query and the key interact (in the interaction step) witheach other to output the attention scores. The weighted aggregation of the value comesnext, known as the aggregation step.**Focal Modulation**| || :--: || **Figure 5**: Focal Modulation module. (Source: Aritra and Ritwik) || || :--: || **Equation 4:** Aggregation and Interaction in Focal Modulation (Source: Aritra and Ritwik) |**Figure 5** depicts the Focal Modulation layer. `q()` is the query projectionfunction. 
It is a **linear layer** that projects the query into a latent space. `m ()` isthe context aggregation function. Unlike self-attention, theaggregation step takes place in focal modulation before the interaction step. While `q()` is pretty straightforward to understand, the context aggregation function`m()` is more complex. Therefore, this section will focus on `m()`.| || :--: || **Figure 6**: Context Aggregation function `m()`. (Source: Aritra and Ritwik) |The context aggregation function `m()` consists of two parts as shown in **Figure 6**:- Hierarchical Contextualization- Gated Aggregation Hierarchical Contextualization| || :--: || **Figure 7**: Hierarchical Contextualization (Source: Aritra and Ritwik) |In **Figure 7**, we see that the input is first projected linearly. This linear projectionproduces `Z^0`. Where `Z^0` can be expressed as follows:| || :--: || Equation 5: Linear projection of `Z^0` (Source: Aritra and Ritwik) |`Z^0` is then passed on to a series of Depth-Wise (DWConv) Conv and[GeLU](https://www.tensorflow.org/api_docs/python/tf/keras/activations/gelu) layers. Theauthors term each block of DWConv and GeLU as levels denoted by `l`. In **Figure 6** wehave two levels. Mathematically this is represented as:| || :--: || Equation 6: Levels of the modulation layer (Source: Aritra and Ritwik) |where `l in {1, ... , L}`The final feature map goes through a Global Average Pooling Layer. This can be expressedas follows:| || :--: || Equation 7: Average Pooling of the final feature (Source: Aritra and Ritwik)| Gated Aggregation| || :--: || **Figure 8**: Gated Aggregation (Source: Aritra and Ritwik) |Now that we have `L+1` intermediate feature maps by virtue of the HierarchicalContextualization step, we need a gating mechanism that lets some features pass andprohibits others. This can be implemented with the attention module.Later in the tutorial, we will visualize these gates to better understand theirusefulness.First, we build the weights for aggregation. Here we apply a **linear layer** on the inputfeature map that projects it into `L+1` dimensions.| || :--: || Eqation 8: Gates (Source: Aritra and Ritwik) |Next we perform the weighted aggregation over the contexts.| || :--: || Eqation 9: Final feature map (Source: Aritra and Ritwik) |To enable communication across different channels, we use another linear layer `h()`to obtain the modulator| || :--: || Eqation 10: Modulator (Source: Aritra and Ritwik) |To sum up the Focal Modulation layer we have:| || :--: || Eqation 11: Focal Modulation Layer (Source: Aritra and Ritwik) |<jupyter_code>class FocalModulationLayer(layers.Layer): """The Focal Modulation layer includes query projection & context aggregation. Args: dim (int): Projection dimension. focal_window (int): Window size for focal modulation. focal_level (int): The current focal level. focal_factor (int): Factor of focal modulation. proj_drop_rate (float): Rate of dropout. """ def __init__( self, dim: int, focal_window: int, focal_level: int, focal_factor: int = 2, proj_drop_rate: float = 0.0, **kwargs, ): super().__init__(**kwargs) self.dim = dim self.focal_window = focal_window self.focal_level = focal_level self.focal_factor = focal_factor self.proj_drop_rate = proj_drop_rate # Project the input feature into a new feature space using a # linear layer. Note the `units` used. We will be projecting the input # feature all at once and split the projection into query, context, # and gates. 
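# Note added for clarity (not in the original notebook): the `units` below expand to
# `dim` (query) + `dim` (context) + `focal_level + 1` (gates), matching the three-way
# `tf.split` performed later in `call()`.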
self.initial_proj = layers.Dense( units=(2 * self.dim) + (self.focal_level + 1), use_bias=True, ) self.focal_layers = list() self.kernel_sizes = list() for idx in range(self.focal_level): kernel_size = (self.focal_factor * idx) + self.focal_window depth_gelu_block = keras.Sequential( [ layers.ZeroPadding2D(padding=(kernel_size // 2, kernel_size // 2)), layers.Conv2D( filters=self.dim, kernel_size=kernel_size, activation=keras.activations.gelu, groups=self.dim, use_bias=False, ), ] ) self.focal_layers.append(depth_gelu_block) self.kernel_sizes.append(kernel_size) self.activation = keras.activations.gelu self.gap = layers.GlobalAveragePooling2D(keepdims=True) self.modulator_proj = layers.Conv2D( filters=self.dim, kernel_size=(1, 1), use_bias=True, ) self.proj = layers.Dense(units=self.dim) self.proj_drop = layers.Dropout(self.proj_drop_rate) def call(self, x: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor: """Forward pass of the layer. Args: x: Tensor of shape (B, H, W, C) """ # Apply the linear projecion to the input feature map x_proj = self.initial_proj(x) # Split the projected x into query, context and gates query, context, self.gates = tf.split( value=x_proj, num_or_size_splits=[self.dim, self.dim, self.focal_level + 1], axis=-1, ) # Context aggregation context = self.focal_layers[0](context) context_all = context * self.gates[..., 0:1] for idx in range(1, self.focal_level): context = self.focal_layers[idx](context) context_all += context * self.gates[..., idx : idx + 1] # Build the global context context_global = self.activation(self.gap(context)) context_all += context_global * self.gates[..., self.focal_level :] # Focal Modulation self.modulator = self.modulator_proj(context_all) x_output = query * self.modulator # Project the output and apply dropout x_output = self.proj(x_output) x_output = self.proj_drop(x_output) return x_output<jupyter_output><empty_output><jupyter_text>The Focal Modulation blockFinally, we have all the components we need to build the Focal Modulation block. Here wetake the MLP and Focal Modulation layer together and build the Focal Modulation block.<jupyter_code>class FocalModulationBlock(layers.Layer): """Combine FFN and Focal Modulation Layer. Args: dim (int): Number of input channels. input_resolution (Tuple[int]): Input resulotion. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. drop (float): Dropout rate. drop_path (float): Stochastic depth rate. focal_level (int): Number of focal levels. focal_window (int): Focal window size at first focal level """ def __init__( self, dim: int, input_resolution: Tuple[int], mlp_ratio: float = 4.0, drop: float = 0.0, drop_path: float = 0.0, focal_level: int = 1, focal_window: int = 3, **kwargs, ): super().__init__(**kwargs) self.dim = dim self.input_resolution = input_resolution self.mlp_ratio = mlp_ratio self.focal_level = focal_level self.focal_window = focal_window self.norm = layers.LayerNormalization(epsilon=1e-5) self.modulation = FocalModulationLayer( dim=self.dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop_rate=drop, ) mlp_hidden_dim = int(self.dim * self.mlp_ratio) self.mlp = MLP( in_features=self.dim, hidden_features=mlp_hidden_dim, mlp_drop_rate=drop, ) def call(self, x: tf.Tensor, height: int, width: int, channels: int) -> tf.Tensor: """Processes the input tensor through the focal modulation block. 
Args: x (tf.Tensor): Inputs of the shape (B, L, C) height (int): The height of the feature map width (int): The width of the feature map channels (int): The number of channels of the feature map Returns: The processed tensor. """ shortcut = x # Focal Modulation x = tf.reshape(x, shape=(-1, height, width, channels)) x = self.modulation(x) x = tf.reshape(x, shape=(-1, height * width, channels)) # FFN x = shortcut + x x = x + self.mlp(self.norm(x)) return x<jupyter_output><empty_output><jupyter_text>The Basic LayerThe basic layer consists of a collection of Focal Modulation blocks. This isillustrated in **Figure 9**.| || :--: || **Figure 9**: Basic Layer, a collection of focal modulation blocks. (Source: Aritra and Ritwik) |Notice how in **Fig. 9** there are more than one focal modulation blocks denoted by `Nx`.This shows how the Basic Layer is a collection of Focal Modulation blocks.<jupyter_code>class BasicLayer(layers.Layer): """Collection of Focal Modulation Blocks. Args: dim (int): Dimensions of the model. out_dim (int): Dimension used by the Patch Embedding Layer. input_resolution (Tuple[int]): Input image resolution. depth (int): The number of Focal Modulation Blocks. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. drop (float): Dropout rate. downsample (tf.keras.layers.Layer): Downsampling layer at the end of the layer. focal_level (int): The current focal level. focal_window (int): Focal window used. """ def __init__( self, dim: int, out_dim: int, input_resolution: Tuple[int], depth: int, mlp_ratio: float = 4.0, drop: float = 0.0, downsample=None, focal_level: int = 1, focal_window: int = 1, **kwargs, ): super().__init__(**kwargs) self.dim = dim self.input_resolution = input_resolution self.depth = depth self.blocks = [ FocalModulationBlock( dim=dim, input_resolution=input_resolution, mlp_ratio=mlp_ratio, drop=drop, focal_level=focal_level, focal_window=focal_window, ) for i in range(self.depth) ] # Downsample layer at the end of the layer if downsample is not None: self.downsample = downsample( image_size=input_resolution, patch_size=(2, 2), embed_dim=out_dim, ) else: self.downsample = None def call( self, x: tf.Tensor, height: int, width: int, channels: int ) -> Tuple[tf.Tensor, int, int, int]: """Forward pass of the layer. Args: x (tf.Tensor): Tensor of shape (B, L, C) height (int): Height of feature map width (int): Width of feature map channels (int): Embed Dim of feature map Returns: A tuple of the processed tensor, changed height, width, and dim of the tensor. """ # Apply Focal Modulation Blocks for block in self.blocks: x = block(x, height, width, channels) # Except the last Basic Layer, all the layers have # downsample at the end of it. if self.downsample is not None: x = tf.reshape(x, shape=(-1, height, width, channels)) x, height_o, width_o, channels_o = self.downsample(x) else: height_o, width_o, channels_o = height, width, channels return x, height_o, width_o, channels_o<jupyter_output><empty_output><jupyter_text>The Focal Modulation Network modelThis is the model that ties everything together.It consists of a collection of Basic Layers with a classification head.For a recap of how this is structured refer to **Figure 1**.<jupyter_code>class FocalModulationNetwork(keras.Model): """The Focal Modulation Network. Parameters: image_size (Tuple[int]): Spatial size of images used. patch_size (Tuple[int]): Patch size of each patch. num_classes (int): Number of classes used for classification. embed_dim (int): Patch embedding dimension. 
depths (List[int]): Depth of each Focal Transformer block. mlp_ratio (float): Ratio of expansion for the intermediate layer of MLP. drop_rate (float): The dropout rate for FM and MLP layers. focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. focal_windows (list): The focal window size at all stages. """ def __init__( self, image_size: Tuple[int] = (48, 48), patch_size: Tuple[int] = (4, 4), num_classes: int = 10, embed_dim: int = 256, depths: List[int] = [2, 3, 2], mlp_ratio: float = 4.0, drop_rate: float = 0.1, focal_levels=[2, 2, 2], focal_windows=[3, 3, 3], **kwargs, ): super().__init__(**kwargs) self.num_layers = len(depths) embed_dim = [embed_dim * (2**i) for i in range(self.num_layers)] self.num_classes = num_classes self.embed_dim = embed_dim self.num_features = embed_dim[-1] self.mlp_ratio = mlp_ratio self.patch_embed = PatchEmbed( image_size=image_size, patch_size=patch_size, embed_dim=embed_dim[0], ) num_patches = self.patch_embed.num_patches patches_resolution = self.patch_embed.patch_resolution self.patches_resolution = patches_resolution self.pos_drop = layers.Dropout(drop_rate) self.basic_layers = list() for i_layer in range(self.num_layers): layer = BasicLayer( dim=embed_dim[i_layer], out_dim=embed_dim[i_layer + 1] if (i_layer < self.num_layers - 1) else None, input_resolution=( patches_resolution[0] // (2**i_layer), patches_resolution[1] // (2**i_layer), ), depth=depths[i_layer], mlp_ratio=self.mlp_ratio, drop=drop_rate, downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None, focal_level=focal_levels[i_layer], focal_window=focal_windows[i_layer], ) self.basic_layers.append(layer) self.norm = keras.layers.LayerNormalization(epsilon=1e-7) self.avgpool = layers.GlobalAveragePooling1D() self.flatten = layers.Flatten() self.head = layers.Dense(self.num_classes, activation="softmax") def call(self, x: tf.Tensor) -> tf.Tensor: """Forward pass of the layer. Args: x: Tensor of shape (B, H, W, C) Returns: The logits. """ # Patch Embed the input images. x, height, width, channels = self.patch_embed(x) x = self.pos_drop(x) for idx, layer in enumerate(self.basic_layers): x, height, width, channels = layer(x, height, width, channels) x = self.norm(x) x = self.avgpool(x) x = self.flatten(x) x = self.head(x) return x<jupyter_output><empty_output><jupyter_text>Train the modelNow with all the components in place and the architecture actually built, we are ready toput it to good use.In this section, we train our Focal Modulation model on the CIFAR-10 dataset. Visualization CallbackA key feature of the Focal Modulation Network is explicit input-dependency. This meansthe modulator is calculated by looking at the local features around the target location,so it depends on the input. In very simple terms, this makes interpretation easy. We cansimply lay down the gating values and the original image, next to each other to see howthe gating mechanism works.The authors of the paper visualize the gates and the modulator in order to focus on theinterpretability of the Focal Modulation layer. 
Below is a visualizationcallback that shows the gates and modulator of a specific layer in the model while themodel trains.We will notice later that as the model trains, the visualizations get better.The gates appear to selectively permit certain aspects of the input image to passthrough, while gently disregarding others, ultimately leading to improved classificationaccuracy.<jupyter_code>def display_grid( test_images: tf.Tensor, gates: tf.Tensor, modulator: tf.Tensor, ): """Displays the image with the gates and modulator overlayed. Args: test_images (tf.Tensor): A batch of test images. gates (tf.Tensor): The gates of the Focal Modualtion Layer. modulator (tf.Tensor): The modulator of the Focal Modulation Layer. """ fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(25, 5)) # Radomly sample an image from the batch. index = randint(0, BATCH_SIZE - 1) orig_image = test_images[index] gate_image = gates[index] modulator_image = modulator[index] # Original Image ax[0].imshow(orig_image) ax[0].set_title("Original:") ax[0].axis("off") for index in range(1, 5): img = ax[index].imshow(orig_image) if index != 4: overlay_image = gate_image[..., index - 1] title = f"G {index}:" else: overlay_image = tf.norm(modulator_image, ord=2, axis=-1) title = f"MOD:" ax[index].imshow( overlay_image, cmap="inferno", alpha=0.6, extent=img.get_extent() ) ax[index].set_title(title) ax[index].axis("off") plt.axis("off") plt.show() plt.close()<jupyter_output><empty_output><jupyter_text>TrainMonitor<jupyter_code># Taking a batch of test inputs to measure the model's progress. test_images, test_labels = next(iter(test_ds)) upsampler = tf.keras.layers.UpSampling2D( size=(4, 4), interpolation="bilinear", ) class TrainMonitor(keras.callbacks.Callback): def __init__(self, epoch_interval=None): self.epoch_interval = epoch_interval def on_epoch_end(self, epoch, logs=None): if self.epoch_interval and epoch % self.epoch_interval == 0: _ = self.model(test_images) # Take the mid layer for visualization gates = self.model.basic_layers[1].blocks[-1].modulation.gates gates = upsampler(gates) modulator = self.model.basic_layers[1].blocks[-1].modulation.modulator modulator = upsampler(modulator) # Display the grid of gates and modulator. display_grid(test_images=test_images, gates=gates, modulator=modulator)<jupyter_output><empty_output><jupyter_text>Learning Rate scheduler<jupyter_code># Some code is taken from: # https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2. class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule): def __init__( self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps ): super().__init__() self.learning_rate_base = learning_rate_base self.total_steps = total_steps self.warmup_learning_rate = warmup_learning_rate self.warmup_steps = warmup_steps self.pi = tf.constant(np.pi) def __call__(self, step): if self.total_steps < self.warmup_steps: raise ValueError("Total_steps must be larger or equal to warmup_steps.") cos_annealed_lr = tf.cos( self.pi * (tf.cast(step, tf.float32) - self.warmup_steps) / float(self.total_steps - self.warmup_steps) ) learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr) if self.warmup_steps > 0: if self.learning_rate_base < self.warmup_learning_rate: raise ValueError( "Learning_rate_base must be larger or equal to " "warmup_learning_rate." 
) slope = ( self.learning_rate_base - self.warmup_learning_rate ) / self.warmup_steps warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate learning_rate = tf.where( step < self.warmup_steps, warmup_rate, learning_rate ) return tf.where( step > self.total_steps, 0.0, learning_rate, name="learning_rate" ) total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS) warmup_epoch_percentage = 0.15 warmup_steps = int(total_steps * warmup_epoch_percentage) scheduled_lrs = WarmUpCosine( learning_rate_base=LEARNING_RATE, total_steps=total_steps, warmup_learning_rate=0.0, warmup_steps=warmup_steps, )<jupyter_output><empty_output><jupyter_text>Initialize, compile and train the model<jupyter_code>focal_mod_net = FocalModulationNetwork() optimizer = AdamW(learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY) # Compile and train the model. focal_mod_net.compile( optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) history = focal_mod_net.fit( train_ds, epochs=EPOCHS, validation_data=val_ds, callbacks=[TrainMonitor(epoch_interval=10)], )<jupyter_output><empty_output><jupyter_text>Plot loss and accuracy<jupyter_code>plt.plot(history.history["loss"], label="loss") plt.plot(history.history["val_loss"], label="val_loss") plt.legend() plt.show() plt.plot(history.history["accuracy"], label="accuracy") plt.plot(history.history["val_accuracy"], label="val_accuracy") plt.legend() plt.show()<jupyter_output><empty_output><jupyter_text>Test visualizationsLet's test our model on some test images and see how the gates look like.<jupyter_code>test_images, test_labels = next(iter(test_ds)) _ = focal_mod_net(test_images) # Take the mid layer for visualization gates = focal_mod_net.basic_layers[1].blocks[-1].modulation.gates gates = upsampler(gates) modulator = focal_mod_net.basic_layers[1].blocks[-1].modulation.modulator modulator = upsampler(modulator) # Plot the test images with the gates and modulator overlayed. for row in range(5): display_grid( test_images=test_images, gates=gates, modulator=modulator, )<jupyter_output><empty_output>
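<jupyter_text>As a small, hedged addition that is not part of the original notebook: the trained network can also be scored on the held-out test split, assuming `focal_mod_net` and `test_ds` are still in scope.<jupyter_code># Evaluate the trained Focal Modulation Network on the test set.
test_loss, test_accuracy = focal_mod_net.evaluate(test_ds)
print(f"Test accuracy: {test_accuracy * 100:.2f}%")<jupyter_output><empty_output>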
keras-io/examples/vision/ipynb/focal_modulation_network.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/focal_modulation_network.ipynb", "repo_id": "keras-io", "token_count": 13283 }
93
<jupyter_start><jupyter_text>Masked image modeling with Autoencoders**Author:** [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/12/20**Last modified:** 2021/12/21**Description:** Implementing Masked Autoencoders for self-supervised pretraining. IntroductionIn deep learning, models with growing **capacity** and **capability** can easily overfiton large datasets (ImageNet-1K). In the field of natural language processing, theappetite for data has been **successfully addressed** by self-supervised pretraining.In the academic paper[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)by He et. al. the authors propose a simple yet effective method to pretrain largevision models (here [ViT Huge](https://arxiv.org/abs/2010.11929)). Inspired fromthe pretraining algorithm of BERT ([Devlin et al.](https://arxiv.org/abs/1810.04805)),they mask patches of an image and, through an autoencoder predict the masked patches.In the spirit of "masked language modeling", this pretraining task could be referredto as "masked image modeling".In this example, we implement[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)with the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. Afterpretraining a scaled down version of ViT, we also implement the linear evaluationpipeline on CIFAR-10.This implementation covers (MAE refers to Masked Autoencoder):- The masking algorithm- MAE encoder- MAE decoder- Evaluation with linear probingAs a reference, we reuse some of the code presented in[this example](https://keras.io/examples/vision/image_classification_with_vision_transformer/). Imports<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import keras from keras import layers import matplotlib.pyplot as plt import numpy as np import random # Setting seeds for reproducibility. SEED = 42 keras.utils.set_random_seed(SEED)<jupyter_output><empty_output><jupyter_text>Hyperparameters for pretrainingPlease feel free to change the hyperparameters and check your results. The best way toget an intuition about the architecture is to experiment with it. Our hyperparameters areheavily inspired by the design guidelines laid out by the authors in[the original paper](https://arxiv.org/abs/2111.06377).<jupyter_code># DATA BUFFER_SIZE = 1024 BATCH_SIZE = 256 AUTO = tf.data.AUTOTUNE INPUT_SHAPE = (32, 32, 3) NUM_CLASSES = 10 # OPTIMIZER LEARNING_RATE = 5e-3 WEIGHT_DECAY = 1e-4 # PRETRAINING EPOCHS = 100 # AUGMENTATION IMAGE_SIZE = 48 # We will resize input images to this size. PATCH_SIZE = 6 # Size of the patches to be extracted from the input images. NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2 MASK_PROPORTION = 0.75 # We have found 75% masking to give us the best results. # ENCODER and DECODER LAYER_NORM_EPS = 1e-6 ENC_PROJECTION_DIM = 128 DEC_PROJECTION_DIM = 64 ENC_NUM_HEADS = 4 ENC_LAYERS = 6 DEC_NUM_HEADS = 4 DEC_LAYERS = ( 2 # The decoder is lightweight but should be reasonably deep for reconstruction. ) ENC_TRANSFORMER_UNITS = [ ENC_PROJECTION_DIM * 2, ENC_PROJECTION_DIM, ] # Size of the transformer layers. 
DEC_TRANSFORMER_UNITS = [ DEC_PROJECTION_DIM * 2, DEC_PROJECTION_DIM, ]<jupyter_output><empty_output><jupyter_text>Load and prepare the CIFAR-10 dataset<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() (x_train, y_train), (x_val, y_val) = ( (x_train[:40000], y_train[:40000]), (x_train[40000:], y_train[40000:]), ) print(f"Training samples: {len(x_train)}") print(f"Validation samples: {len(x_val)}") print(f"Testing samples: {len(x_test)}") train_ds = tf.data.Dataset.from_tensor_slices(x_train) train_ds = train_ds.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).prefetch(AUTO) val_ds = tf.data.Dataset.from_tensor_slices(x_val) val_ds = val_ds.batch(BATCH_SIZE).prefetch(AUTO) test_ds = tf.data.Dataset.from_tensor_slices(x_test) test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)<jupyter_output><empty_output><jupyter_text>Data augmentationIn previous self-supervised pretraining methodologies([SimCLR](https://arxiv.org/abs/2002.05709) alike), we have noticed that the dataaugmentation pipeline plays an important role. On the other hand the authors of thispaper point out that Masked Autoencoders **do not** rely on augmentations. They propose asimple augmentation pipeline of:- Resizing- Random cropping (fixed-sized or random sized)- Random horizontal flipping<jupyter_code>def get_train_augmentation_model(): model = keras.Sequential( [ layers.Rescaling(1 / 255.0), layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20), layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE), layers.RandomFlip("horizontal"), ], name="train_data_augmentation", ) return model def get_test_augmentation_model(): model = keras.Sequential( [ layers.Rescaling(1 / 255.0), layers.Resizing(IMAGE_SIZE, IMAGE_SIZE), ], name="test_data_augmentation", ) return model<jupyter_output><empty_output><jupyter_text>A layer for extracting patches from imagesThis layer takes images as input and divides them into patches. The layer also includestwo utility method:- `show_patched_image` -- Takes a batch of images and its corresponding patches to plot arandom pair of image and patches.- `reconstruct_from_patch` -- Takes a single instance of patches and stitches themtogether into the original image.<jupyter_code>class Patches(layers.Layer): def __init__(self, patch_size=PATCH_SIZE, **kwargs): super().__init__(**kwargs) self.patch_size = patch_size # Assuming the image has three channels each patch would be # of size (patch_size, patch_size, 3). self.resize = layers.Reshape((-1, patch_size * patch_size * 3)) def call(self, images): # Create patches from the input images patches = tf.image.extract_patches( images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding="VALID", ) # Reshape the patches to (batch, num_patches, patch_area) and return it. patches = self.resize(patches) return patches def show_patched_image(self, images, patches): # This is a utility function which accepts a batch of images and its # corresponding patches and help visualize one image and its patches # side by side. 
idx = np.random.choice(patches.shape[0]) print(f"Index selected: {idx}.") plt.figure(figsize=(4, 4)) plt.imshow(keras.utils.array_to_img(images[idx])) plt.axis("off") plt.show() n = int(np.sqrt(patches.shape[1])) plt.figure(figsize=(4, 4)) for i, patch in enumerate(patches[idx]): ax = plt.subplot(n, n, i + 1) patch_img = tf.reshape(patch, (self.patch_size, self.patch_size, 3)) plt.imshow(keras.utils.img_to_array(patch_img)) plt.axis("off") plt.show() # Return the index chosen to validate it outside the method. return idx # taken from https://stackoverflow.com/a/58082878/10319735 def reconstruct_from_patch(self, patch): # This utility function takes patches from a *single* image and # reconstructs it back into the image. This is useful for the train # monitor callback. num_patches = patch.shape[0] n = int(np.sqrt(num_patches)) patch = tf.reshape(patch, (num_patches, self.patch_size, self.patch_size, 3)) rows = tf.split(patch, n, axis=0) rows = [tf.concat(tf.unstack(x), axis=1) for x in rows] reconstructed = tf.concat(rows, axis=0) return reconstructed<jupyter_output><empty_output><jupyter_text>Let's visualize the image patches.<jupyter_code># Get a batch of images. image_batch = next(iter(train_ds)) # Augment the images. augmentation_model = get_train_augmentation_model() augmented_images = augmentation_model(image_batch) # Define the patch layer. patch_layer = Patches() # Get the patches from the batched images. patches = patch_layer(images=augmented_images) # Now pass the images and the corresponding patches # to the `show_patched_image` method. random_index = patch_layer.show_patched_image(images=augmented_images, patches=patches) # Chose the same chose image and try reconstructing the patches # into the original image. image = patch_layer.reconstruct_from_patch(patches[random_index]) plt.imshow(image) plt.axis("off") plt.show()<jupyter_output><empty_output><jupyter_text>Patch encoding with maskingQuoting the paper> Following ViT, we divide an image into regular non-overlapping patches. Then we samplea subset of patches and mask (i.e., remove) the remaining ones. Our sampling strategy isstraightforward: we sample random patches without replacement, following a uniformdistribution. We simply refer to this as “random sampling”.This layer includes masking and encoding the patches.The utility methods of the layer are:- `get_random_indices` -- Provides the mask and unmask indices.- `generate_masked_image` -- Takes patches and unmask indices, results in a random maskedimage. This is an essential utility method for our training monitor callback (definedlater).<jupyter_code>class PatchEncoder(layers.Layer): def __init__( self, patch_size=PATCH_SIZE, projection_dim=ENC_PROJECTION_DIM, mask_proportion=MASK_PROPORTION, downstream=False, **kwargs, ): super().__init__(**kwargs) self.patch_size = patch_size self.projection_dim = projection_dim self.mask_proportion = mask_proportion self.downstream = downstream # This is a trainable mask token initialized randomly from a normal # distribution. self.mask_token = tf.Variable( tf.random.normal([1, patch_size * patch_size * 3]), trainable=True ) def build(self, input_shape): (_, self.num_patches, self.patch_area) = input_shape # Create the projection layer for the patches. self.projection = layers.Dense(units=self.projection_dim) # Create the positional embedding layer. self.position_embedding = layers.Embedding( input_dim=self.num_patches, output_dim=self.projection_dim ) # Number of patches that will be masked. 
self.num_mask = int(self.mask_proportion * self.num_patches) def call(self, patches): # Get the positional embeddings. batch_size = tf.shape(patches)[0] positions = tf.range(start=0, limit=self.num_patches, delta=1) pos_embeddings = self.position_embedding(positions[tf.newaxis, ...]) pos_embeddings = tf.tile( pos_embeddings, [batch_size, 1, 1] ) # (B, num_patches, projection_dim) # Embed the patches. patch_embeddings = ( self.projection(patches) + pos_embeddings ) # (B, num_patches, projection_dim) if self.downstream: return patch_embeddings else: mask_indices, unmask_indices = self.get_random_indices(batch_size) # The encoder input is the unmasked patch embeddings. Here we gather # all the patches that should be unmasked. unmasked_embeddings = tf.gather( patch_embeddings, unmask_indices, axis=1, batch_dims=1 ) # (B, unmask_numbers, projection_dim) # Get the unmasked and masked position embeddings. We will need them # for the decoder. unmasked_positions = tf.gather( pos_embeddings, unmask_indices, axis=1, batch_dims=1 ) # (B, unmask_numbers, projection_dim) masked_positions = tf.gather( pos_embeddings, mask_indices, axis=1, batch_dims=1 ) # (B, mask_numbers, projection_dim) # Repeat the mask token number of mask times. # Mask tokens replace the masks of the image. mask_tokens = tf.repeat(self.mask_token, repeats=self.num_mask, axis=0) mask_tokens = tf.repeat( mask_tokens[tf.newaxis, ...], repeats=batch_size, axis=0 ) # Get the masked embeddings for the tokens. masked_embeddings = self.projection(mask_tokens) + masked_positions return ( unmasked_embeddings, # Input to the encoder. masked_embeddings, # First part of input to the decoder. unmasked_positions, # Added to the encoder outputs. mask_indices, # The indices that were masked. unmask_indices, # The indices that were unmaksed. ) def get_random_indices(self, batch_size): # Create random indices from a uniform distribution and then split # it into mask and unmask indices. rand_indices = tf.argsort( tf.random.uniform(shape=(batch_size, self.num_patches)), axis=-1 ) mask_indices = rand_indices[:, : self.num_mask] unmask_indices = rand_indices[:, self.num_mask :] return mask_indices, unmask_indices def generate_masked_image(self, patches, unmask_indices): # Choose a random patch and it corresponding unmask index. idx = np.random.choice(patches.shape[0]) patch = patches[idx] unmask_index = unmask_indices[idx] # Build a numpy array of same shape as patch. new_patch = np.zeros_like(patch) # Iterate of the new_patch and plug the unmasked patches. count = 0 for i in range(unmask_index.shape[0]): new_patch[unmask_index[i]] = patch[unmask_index[i]] return new_patch, idx<jupyter_output><empty_output><jupyter_text>Let's see the masking process in action on a sample image.<jupyter_code># Create the patch encoder layer. patch_encoder = PatchEncoder() # Get the embeddings and positions. ( unmasked_embeddings, masked_embeddings, unmasked_positions, mask_indices, unmask_indices, ) = patch_encoder(patches=patches) # Show a maksed patch image. 
new_patch, random_index = patch_encoder.generate_masked_image(patches, unmask_indices) plt.figure(figsize=(10, 10)) plt.subplot(1, 2, 1) img = patch_layer.reconstruct_from_patch(new_patch) plt.imshow(keras.utils.array_to_img(img)) plt.axis("off") plt.title("Masked") plt.subplot(1, 2, 2) img = augmented_images[random_index] plt.imshow(keras.utils.array_to_img(img)) plt.axis("off") plt.title("Original") plt.show()<jupyter_output><empty_output><jupyter_text>MLPThis serves as the fully connected feed forward network of the transformer architecture.<jupyter_code>def mlp(x, dropout_rate, hidden_units): for units in hidden_units: x = layers.Dense(units, activation=tf.nn.gelu)(x) x = layers.Dropout(dropout_rate)(x) return x<jupyter_output><empty_output><jupyter_text>MAE encoderThe MAE encoder is ViT. The only point to note here is that the encoder outputs a layernormalized output.<jupyter_code>def create_encoder(num_heads=ENC_NUM_HEADS, num_layers=ENC_LAYERS): inputs = layers.Input((None, ENC_PROJECTION_DIM)) x = inputs for _ in range(num_layers): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x) # Create a multi-head attention layer. attention_output = layers.MultiHeadAttention( num_heads=num_heads, key_dim=ENC_PROJECTION_DIM, dropout=0.1 )(x1, x1) # Skip connection 1. x2 = layers.Add()([attention_output, x]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2) # MLP. x3 = mlp(x3, hidden_units=ENC_TRANSFORMER_UNITS, dropout_rate=0.1) # Skip connection 2. x = layers.Add()([x3, x2]) outputs = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x) return keras.Model(inputs, outputs, name="mae_encoder")<jupyter_output><empty_output><jupyter_text>MAE decoderThe authors point out that they use an **asymmetric** autoencoder model. They use alightweight decoder that takes "<10% computation per token vs. the encoder". We are notspecific with the "<10% computation" in our implementation but have used a smallerdecoder (both in terms of depth and projection dimensions).<jupyter_code>def create_decoder( num_layers=DEC_LAYERS, num_heads=DEC_NUM_HEADS, image_size=IMAGE_SIZE ): inputs = layers.Input((NUM_PATCHES, ENC_PROJECTION_DIM)) x = layers.Dense(DEC_PROJECTION_DIM)(inputs) for _ in range(num_layers): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x) # Create a multi-head attention layer. attention_output = layers.MultiHeadAttention( num_heads=num_heads, key_dim=DEC_PROJECTION_DIM, dropout=0.1 )(x1, x1) # Skip connection 1. x2 = layers.Add()([attention_output, x]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x2) # MLP. x3 = mlp(x3, hidden_units=DEC_TRANSFORMER_UNITS, dropout_rate=0.1) # Skip connection 2. x = layers.Add()([x3, x2]) x = layers.LayerNormalization(epsilon=LAYER_NORM_EPS)(x) x = layers.Flatten()(x) pre_final = layers.Dense(units=image_size * image_size * 3, activation="sigmoid")(x) outputs = layers.Reshape((image_size, image_size, 3))(pre_final) return keras.Model(inputs, outputs, name="mae_decoder")<jupyter_output><empty_output><jupyter_text>MAE trainerThis is the trainer module. We wrap the encoder and decoder inside of a `tf.keras.Model`subclass. 
This allows us to customize what happens in the `model.fit()` loop.<jupyter_code>class MaskedAutoencoder(keras.Model): def __init__( self, train_augmentation_model, test_augmentation_model, patch_layer, patch_encoder, encoder, decoder, **kwargs, ): super().__init__(**kwargs) self.train_augmentation_model = train_augmentation_model self.test_augmentation_model = test_augmentation_model self.patch_layer = patch_layer self.patch_encoder = patch_encoder self.encoder = encoder self.decoder = decoder def calculate_loss(self, images, test=False): # Augment the input images. if test: augmented_images = self.test_augmentation_model(images) else: augmented_images = self.train_augmentation_model(images) # Patch the augmented images. patches = self.patch_layer(augmented_images) # Encode the patches. ( unmasked_embeddings, masked_embeddings, unmasked_positions, mask_indices, unmask_indices, ) = self.patch_encoder(patches) # Pass the unmaksed patche to the encoder. encoder_outputs = self.encoder(unmasked_embeddings) # Create the decoder inputs. encoder_outputs = encoder_outputs + unmasked_positions decoder_inputs = tf.concat([encoder_outputs, masked_embeddings], axis=1) # Decode the inputs. decoder_outputs = self.decoder(decoder_inputs) decoder_patches = self.patch_layer(decoder_outputs) loss_patch = tf.gather(patches, mask_indices, axis=1, batch_dims=1) loss_output = tf.gather(decoder_patches, mask_indices, axis=1, batch_dims=1) # Compute the total loss. total_loss = self.compute_loss(y=loss_patch, y_pred=loss_output) return total_loss, loss_patch, loss_output def train_step(self, images): with tf.GradientTape() as tape: total_loss, loss_patch, loss_output = self.calculate_loss(images) # Apply gradients. train_vars = [ self.train_augmentation_model.trainable_variables, self.patch_layer.trainable_variables, self.patch_encoder.trainable_variables, self.encoder.trainable_variables, self.decoder.trainable_variables, ] grads = tape.gradient(total_loss, train_vars) tv_list = [] for grad, var in zip(grads, train_vars): for g, v in zip(grad, var): tv_list.append((g, v)) self.optimizer.apply_gradients(tv_list) # Report progress. results = {} for metric in self.metrics: metric.update_state(loss_patch, loss_output) results[metric.name] = metric.result() return results def test_step(self, images): total_loss, loss_patch, loss_output = self.calculate_loss(images, test=True) # Update the trackers. results = {} for metric in self.metrics: metric.update_state(loss_patch, loss_output) results[metric.name] = metric.result() return results<jupyter_output><empty_output><jupyter_text>Model initialization<jupyter_code>train_augmentation_model = get_train_augmentation_model() test_augmentation_model = get_test_augmentation_model() patch_layer = Patches() patch_encoder = PatchEncoder() encoder = create_encoder() decoder = create_decoder() mae_model = MaskedAutoencoder( train_augmentation_model=train_augmentation_model, test_augmentation_model=test_augmentation_model, patch_layer=patch_layer, patch_encoder=patch_encoder, encoder=encoder, decoder=decoder, )<jupyter_output><empty_output><jupyter_text>Training callbacks Visualization callback<jupyter_code># Taking a batch of test inputs to measure model's progress. 
test_images = next(iter(test_ds)) class TrainMonitor(keras.callbacks.Callback): def __init__(self, epoch_interval=None): self.epoch_interval = epoch_interval def on_epoch_end(self, epoch, logs=None): if self.epoch_interval and epoch % self.epoch_interval == 0: test_augmented_images = self.model.test_augmentation_model(test_images) test_patches = self.model.patch_layer(test_augmented_images) ( test_unmasked_embeddings, test_masked_embeddings, test_unmasked_positions, test_mask_indices, test_unmask_indices, ) = self.model.patch_encoder(test_patches) test_encoder_outputs = self.model.encoder(test_unmasked_embeddings) test_encoder_outputs = test_encoder_outputs + test_unmasked_positions test_decoder_inputs = tf.concat( [test_encoder_outputs, test_masked_embeddings], axis=1 ) test_decoder_outputs = self.model.decoder(test_decoder_inputs) # Show a maksed patch image. test_masked_patch, idx = self.model.patch_encoder.generate_masked_image( test_patches, test_unmask_indices ) print(f"\nIdx chosen: {idx}") original_image = test_augmented_images[idx] masked_image = self.model.patch_layer.reconstruct_from_patch( test_masked_patch ) reconstructed_image = test_decoder_outputs[idx] fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15, 5)) ax[0].imshow(original_image) ax[0].set_title(f"Original: {epoch:03d}") ax[1].imshow(masked_image) ax[1].set_title(f"Masked: {epoch:03d}") ax[2].imshow(reconstructed_image) ax[2].set_title(f"Resonstructed: {epoch:03d}") plt.show() plt.close()<jupyter_output><empty_output><jupyter_text>Learning rate scheduler<jupyter_code># Some code is taken from: # https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2. class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule): def __init__( self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps ): super().__init__() self.learning_rate_base = learning_rate_base self.total_steps = total_steps self.warmup_learning_rate = warmup_learning_rate self.warmup_steps = warmup_steps self.pi = tf.constant(np.pi) def __call__(self, step): if self.total_steps < self.warmup_steps: raise ValueError("Total_steps must be larger or equal to warmup_steps.") cos_annealed_lr = tf.cos( self.pi * (tf.cast(step, tf.float32) - self.warmup_steps) / float(self.total_steps - self.warmup_steps) ) learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr) if self.warmup_steps > 0: if self.learning_rate_base < self.warmup_learning_rate: raise ValueError( "Learning_rate_base must be larger or equal to " "warmup_learning_rate." ) slope = ( self.learning_rate_base - self.warmup_learning_rate ) / self.warmup_steps warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate learning_rate = tf.where( step < self.warmup_steps, warmup_rate, learning_rate ) return tf.where( step > self.total_steps, 0.0, learning_rate, name="learning_rate" ) total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS) warmup_epoch_percentage = 0.15 warmup_steps = int(total_steps * warmup_epoch_percentage) scheduled_lrs = WarmUpCosine( learning_rate_base=LEARNING_RATE, total_steps=total_steps, warmup_learning_rate=0.0, warmup_steps=warmup_steps, ) lrs = [scheduled_lrs(step) for step in range(total_steps)] plt.plot(lrs) plt.xlabel("Step", fontsize=14) plt.ylabel("LR", fontsize=14) plt.show() # Assemble the callbacks. 
train_callbacks = [TrainMonitor(epoch_interval=5)]<jupyter_output><empty_output><jupyter_text>Model compilation and training<jupyter_code>optimizer = keras.optimizers.AdamW( learning_rate=scheduled_lrs, weight_decay=WEIGHT_DECAY ) # Compile and pretrain the model. mae_model.compile( optimizer=optimizer, loss=keras.losses.MeanSquaredError(), metrics=["mae"] ) history = mae_model.fit( train_ds, epochs=EPOCHS, validation_data=val_ds, callbacks=train_callbacks, ) # Measure its performance. loss, mae = mae_model.evaluate(test_ds) print(f"Loss: {loss:.2f}") print(f"MAE: {mae:.2f}")<jupyter_output><empty_output><jupyter_text>Evaluation with linear probing Extract the encoder model along with other layers<jupyter_code># Extract the augmentation layers. train_augmentation_model = mae_model.train_augmentation_model test_augmentation_model = mae_model.test_augmentation_model # Extract the patchers. patch_layer = mae_model.patch_layer patch_encoder = mae_model.patch_encoder patch_encoder.downstream = True # Swtich the downstream flag to True. # Extract the encoder. encoder = mae_model.encoder # Pack as a model. downstream_model = keras.Sequential( [ layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)), patch_layer, patch_encoder, encoder, layers.BatchNormalization(), # Refer to A.1 (Linear probing). layers.GlobalAveragePooling1D(), layers.Dense(NUM_CLASSES, activation="softmax"), ], name="linear_probe_model", ) # Only the final classification layer of the `downstream_model` should be trainable. for layer in downstream_model.layers[:-1]: layer.trainable = False downstream_model.summary()<jupyter_output><empty_output><jupyter_text>We are using average pooling to extract learned representations from the MAE encoder.Another approach would be to use a learnable dummy token inside the encoder duringpretraining (resembling the [CLS] token). Then we can extract representations from thattoken during the downstream tasks. Prepare datasets for linear probing<jupyter_code>def prepare_data(images, labels, is_train=True): if is_train: augmentation_model = train_augmentation_model else: augmentation_model = test_augmentation_model dataset = tf.data.Dataset.from_tensor_slices((images, labels)) if is_train: dataset = dataset.shuffle(BUFFER_SIZE) dataset = dataset.batch(BATCH_SIZE).map( lambda x, y: (augmentation_model(x), y), num_parallel_calls=AUTO ) return dataset.prefetch(AUTO) train_ds = prepare_data(x_train, y_train) val_ds = prepare_data(x_train, y_train, is_train=False) test_ds = prepare_data(x_test, y_test, is_train=False)<jupyter_output><empty_output><jupyter_text>Perform linear probing<jupyter_code>linear_probe_epochs = 50 linear_prob_lr = 0.1 warm_epoch_percentage = 0.1 steps = int((len(x_train) // BATCH_SIZE) * linear_probe_epochs) warmup_steps = int(steps * warm_epoch_percentage) scheduled_lrs = WarmUpCosine( learning_rate_base=linear_prob_lr, total_steps=steps, warmup_learning_rate=0.0, warmup_steps=warmup_steps, ) optimizer = keras.optimizers.SGD(learning_rate=scheduled_lrs, momentum=0.9) downstream_model.compile( optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"] ) downstream_model.fit(train_ds, validation_data=val_ds, epochs=linear_probe_epochs) loss, accuracy = downstream_model.evaluate(test_ds) accuracy = round(accuracy * 100, 2) print(f"Accuracy on the test set: {accuracy}%.")<jupyter_output><empty_output>
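<jupyter_text>A final, hedged note that is not part of the original notebook: the pretrained MAE encoder weights can be stored for reuse in other downstream tasks. The file name below is only illustrative.<jupyter_code># Save the pretrained encoder weights (recent Keras versions expect the
# `.weights.h5` suffix).
mae_model.encoder.save_weights("mae_encoder.weights.h5")

# A freshly built encoder with the same architecture could reload them later:
# new_encoder = create_encoder()
# new_encoder.load_weights("mae_encoder.weights.h5")<jupyter_output><empty_output>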
keras-io/examples/vision/ipynb/masked_image_modeling.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/masked_image_modeling.ipynb", "repo_id": "keras-io", "token_count": 12289 }
94
<jupyter_start><jupyter_text>Point cloud classification with PointNet**Author:** [David Griffiths](https://dgriffiths3.github.io)**Date created:** 2020/05/25**Last modified:** 2024/01/09**Description:** Implementation of PointNet for ModelNet10 classification. Point cloud classification IntroductionClassification, detection and segmentation of unordered 3D point sets i.e. point cloudsis a core problem in computer vision. This example implements the seminal point clouddeep learning paper [PointNet (Qi et al., 2017)](https://arxiv.org/abs/1612.00593). For adetailed intoduction on PointNet see [this blogpost](https://medium.com/@luis_gonzales/an-in-depth-look-at-pointnet-111d7efdaa1a). SetupIf using colab first install trimesh with `!pip install trimesh`.<jupyter_code>import os import glob import trimesh import numpy as np from tensorflow import data as tf_data from keras import ops import keras from keras import layers from matplotlib import pyplot as plt keras.utils.set_random_seed(seed=42)<jupyter_output><empty_output><jupyter_text>Load datasetWe use the ModelNet10 model dataset, the smaller 10 class version of the ModelNet40dataset. First download the data:<jupyter_code>DATA_DIR = keras.utils.get_file( "modelnet.zip", "http://3dvision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip", extract=True, ) DATA_DIR = os.path.join(os.path.dirname(DATA_DIR), "ModelNet10")<jupyter_output><empty_output><jupyter_text>We can use the `trimesh` package to read and visualize the `.off` mesh files.<jupyter_code>mesh = trimesh.load(os.path.join(DATA_DIR, "chair/train/chair_0001.off")) mesh.show()<jupyter_output><empty_output><jupyter_text>To convert a mesh file to a point cloud we first need to sample points on the meshsurface. `.sample()` performs a unifrom random sampling. Here we sample at 2048 locationsand visualize in `matplotlib`.<jupyter_code>points = mesh.sample(2048) fig = plt.figure(figsize=(5, 5)) ax = fig.add_subplot(111, projection="3d") ax.scatter(points[:, 0], points[:, 1], points[:, 2]) ax.set_axis_off() plt.show()<jupyter_output><empty_output><jupyter_text>To generate a `tf.data.Dataset()` we need to first parse through the ModelNet datafolders. Each mesh is loaded and sampled into a point cloud before being added to astandard python list and converted to a `numpy` array. We also store the currentenumerate index value as the object label and use a dictionary to recall this later.<jupyter_code>def parse_dataset(num_points=2048): train_points = [] train_labels = [] test_points = [] test_labels = [] class_map = {} folders = glob.glob(os.path.join(DATA_DIR, "[!README]*")) for i, folder in enumerate(folders): print("processing class: {}".format(os.path.basename(folder))) # store folder name with ID so we can retrieve later class_map[i] = folder.split("/")[-1] # gather all files train_files = glob.glob(os.path.join(folder, "train/*")) test_files = glob.glob(os.path.join(folder, "test/*")) for f in train_files: train_points.append(trimesh.load(f).sample(num_points)) train_labels.append(i) for f in test_files: test_points.append(trimesh.load(f).sample(num_points)) test_labels.append(i) return ( np.array(train_points), np.array(test_points), np.array(train_labels), np.array(test_labels), class_map, )<jupyter_output><empty_output><jupyter_text>Set the number of points to sample and batch size and parse the dataset. 
This can take~5minutes to complete.<jupyter_code>NUM_POINTS = 2048 NUM_CLASSES = 10 BATCH_SIZE = 32 train_points, test_points, train_labels, test_labels, CLASS_MAP = parse_dataset( NUM_POINTS )<jupyter_output><empty_output><jupyter_text>Our data can now be read into a `tf.data.Dataset()` object. We set the shuffle buffersize to the entire size of the dataset as prior to this the data is ordered by class.Data augmentation is important when working with point cloud data. We create aaugmentation function to jitter and shuffle the train dataset.<jupyter_code>def augment(points, label): # jitter points points += keras.random.uniform(points.shape, -0.005, 0.005, dtype="float64") # shuffle points points = keras.random.shuffle(points) return points, label train_size = 0.8 dataset = tf_data.Dataset.from_tensor_slices((train_points, train_labels)) test_dataset = tf_data.Dataset.from_tensor_slices((test_points, test_labels)) train_dataset_size = int(len(dataset) * train_size) dataset = dataset.shuffle(len(train_points)).map(augment) test_dataset = test_dataset.shuffle(len(test_points)).batch(BATCH_SIZE) train_dataset = dataset.take(train_dataset_size).batch(BATCH_SIZE) validation_dataset = dataset.skip(train_dataset_size).batch(BATCH_SIZE)<jupyter_output><empty_output><jupyter_text>Build a modelEach convolution and fully-connected layer (with exception for end layers) consits ofConvolution / Dense -> Batch Normalization -> ReLU Activation.<jupyter_code>def conv_bn(x, filters): x = layers.Conv1D(filters, kernel_size=1, padding="valid")(x) x = layers.BatchNormalization(momentum=0.0)(x) return layers.Activation("relu")(x) def dense_bn(x, filters): x = layers.Dense(filters)(x) x = layers.BatchNormalization(momentum=0.0)(x) return layers.Activation("relu")(x)<jupyter_output><empty_output><jupyter_text>PointNet consists of two core components. The primary MLP network, and the transformernet (T-net). The T-net aims to learn an affine transformation matrix by its own mininetwork. The T-net is used twice. The first time to transform the input features (n, 3)into a canonical representation. The second is an affine transformation for alignment infeature space (n, 3). As per the original paper we constrain the transformation to beclose to an orthogonal matrix (i.e. 
||X*X^T - I|| = 0).<jupyter_code>class OrthogonalRegularizer(keras.regularizers.Regularizer): def __init__(self, num_features, l2reg=0.001): self.num_features = num_features self.l2reg = l2reg self.eye = ops.eye(num_features) def __call__(self, x): x = ops.reshape(x, (-1, self.num_features, self.num_features)) xxt = ops.tensordot(x, x, axes=(2, 2)) xxt = ops.reshape(xxt, (-1, self.num_features, self.num_features)) return ops.sum(self.l2reg * ops.square(xxt - self.eye))<jupyter_output><empty_output><jupyter_text>We can then define a general function to build T-net layers.<jupyter_code>def tnet(inputs, num_features): # Initalise bias as the indentity matrix bias = keras.initializers.Constant(np.eye(num_features).flatten()) reg = OrthogonalRegularizer(num_features) x = conv_bn(inputs, 32) x = conv_bn(x, 64) x = conv_bn(x, 512) x = layers.GlobalMaxPooling1D()(x) x = dense_bn(x, 256) x = dense_bn(x, 128) x = layers.Dense( num_features * num_features, kernel_initializer="zeros", bias_initializer=bias, activity_regularizer=reg, )(x) feat_T = layers.Reshape((num_features, num_features))(x) # Apply affine transformation to input features return layers.Dot(axes=(2, 1))([inputs, feat_T])<jupyter_output><empty_output><jupyter_text>The main network can be then implemented in the same manner where the t-net mini modelscan be dropped in a layers in the graph. Here we replicate the network architecturepublished in the original paper but with half the number of weights at each layer as weare using the smaller 10 class ModelNet dataset.<jupyter_code>inputs = keras.Input(shape=(NUM_POINTS, 3)) x = tnet(inputs, 3) x = conv_bn(x, 32) x = conv_bn(x, 32) x = tnet(x, 32) x = conv_bn(x, 32) x = conv_bn(x, 64) x = conv_bn(x, 512) x = layers.GlobalMaxPooling1D()(x) x = dense_bn(x, 256) x = layers.Dropout(0.3)(x) x = dense_bn(x, 128) x = layers.Dropout(0.3)(x) outputs = layers.Dense(NUM_CLASSES, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs, name="pointnet") model.summary()<jupyter_output><empty_output><jupyter_text>Train modelOnce the model is defined it can be trained like any other standard classification modelusing `.compile()` and `.fit()`.<jupyter_code>model.compile( loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.Adam(learning_rate=0.001), metrics=["sparse_categorical_accuracy"], ) model.fit(train_dataset, epochs=20, validation_data=validation_dataset)<jupyter_output><empty_output><jupyter_text>Visualize predictionsWe can use matplotlib to visualize our trained model performance.<jupyter_code>data = test_dataset.take(1) points, labels = list(data)[0] points = points[:8, ...] labels = labels[:8, ...] # run test data through model preds = model.predict(points) preds = ops.argmax(preds, -1) points = points.numpy() # plot points with predicted class and label fig = plt.figure(figsize=(15, 10)) for i in range(8): ax = fig.add_subplot(2, 4, i + 1, projection="3d") ax.scatter(points[i, :, 0], points[i, :, 1], points[i, :, 2]) ax.set_title( "pred: {:}, label: {:}".format( CLASS_MAP[preds[i].numpy()], CLASS_MAP[labels.numpy()[i]] ) ) ax.set_axis_off() plt.show()<jupyter_output><empty_output>
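<jupyter_text>A quick, illustrative sanity check (not part of the original example): the `OrthogonalRegularizer` defined above can be probed directly. A flattened identity matrix is perfectly orthogonal, so its penalty should be approximately zero, while a random matrix should receive a positive penalty. The inputs below are arbitrary examples.<jupyter_code># Illustrative check of the orthogonality penalty defined above.
reg = OrthogonalRegularizer(num_features=3, l2reg=0.001)

# Flattened identity matrix: X @ X^T equals I, so the penalty is ~0.
identity_flat = ops.convert_to_tensor(np.eye(3).flatten()[None, :].astype("float32"))
print(float(reg(identity_flat)))

# Random matrix: generally not orthogonal, so the penalty is positive.
random_flat = ops.convert_to_tensor(np.random.rand(1, 9).astype("float32"))
print(float(reg(random_flat)))<jupyter_output><empty_output>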
keras-io/examples/vision/ipynb/pointnet.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/pointnet.ipynb", "repo_id": "keras-io", "token_count": 3461 }
95
<jupyter_start><jupyter_text>Image classification with Swin Transformers**Author:** [Rishit Dagli](https://twitter.com/rishit_dagli)**Date created:** 2021/09/08**Last modified:** 2021/09/08**Description:** Image classification using Swin Transformers, a general-purpose backbone for computer vision. This example implements[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)by Liu et al. for image classification, and demonstrates it on the[CIFAR-100 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).Swin Transformer (**S**hifted **Win**dow Transformer) can serve as ageneral-purpose backbone for computer vision. Swin Transformer is a hierarchicalTransformer whose representations are computed with _shifted windows_. Theshifted window scheme brings greater efficiency by limiting self-attentioncomputation to non-overlapping local windows while also allowing forcross-window connections. This architecture has the flexibility to modelinformation at various scales and has a linear computational complexity withrespect to image size.This example requires TensorFlow 2.5 or higher. Setup<jupyter_code>import matplotlib.pyplot as plt import numpy as np import tensorflow as tf # For tf.data and preprocessing only. import keras from keras import layers from keras import ops<jupyter_output><empty_output><jupyter_text>Configure the hyperparametersA key parameter to pick is the `patch_size`, the size of the input patches.In order to use each pixel as an individual input, you can set `patch_size` to`(1, 1)`. Below, we take inspiration from the original paper settings fortraining on ImageNet-1K, keeping most of the original settings for this example.<jupyter_code>num_classes = 100 input_shape = (32, 32, 3) patch_size = (2, 2) # 2-by-2 sized patches dropout_rate = 0.03 # Dropout rate num_heads = 8 # Attention heads embed_dim = 64 # Embedding dimension num_mlp = 256 # MLP layer size # Convert embedded patches to query, key, and values with a learnable additive # value qkv_bias = True window_size = 2 # Size of attention window shift_size = 1 # Size of shifting window image_dimension = 32 # Initial image size num_patch_x = input_shape[0] // patch_size[0] num_patch_y = input_shape[1] // patch_size[1] learning_rate = 1e-3 batch_size = 128 num_epochs = 40 validation_split = 0.1 weight_decay = 0.0001 label_smoothing = 0.1<jupyter_output><empty_output><jupyter_text>Prepare the dataWe load the CIFAR-100 dataset through `keras.datasets`,normalize the images, and convert the integer labels to one-hot encoded vectors.<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) num_train_samples = int(len(x_train) * (1 - validation_split)) num_val_samples = len(x_train) - num_train_samples x_train, x_val = np.split(x_train, [num_train_samples]) y_train, y_val = np.split(y_train, [num_train_samples]) print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}") print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}") plt.figure(figsize=(10, 10)) for i in range(25): plt.subplot(5, 5, i + 1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(x_train[i]) plt.show()<jupyter_output><empty_output><jupyter_text>Helper functionsWe create two helper functions to help us get a sequence ofpatches from the image, merge patches, and apply 
dropout.<jupyter_code>def window_partition(x, window_size): _, height, width, channels = x.shape patch_num_y = height // window_size patch_num_x = width // window_size x = ops.reshape( x, ( -1, patch_num_y, window_size, patch_num_x, window_size, channels, ), ) x = ops.transpose(x, (0, 1, 3, 2, 4, 5)) windows = ops.reshape(x, (-1, window_size, window_size, channels)) return windows def window_reverse(windows, window_size, height, width, channels): patch_num_y = height // window_size patch_num_x = width // window_size x = ops.reshape( windows, ( -1, patch_num_y, patch_num_x, window_size, window_size, channels, ), ) x = ops.transpose(x, (0, 1, 3, 2, 4, 5)) x = ops.reshape(x, (-1, height, width, channels)) return x<jupyter_output><empty_output><jupyter_text>Window based multi-head self-attentionUsually Transformers perform global self-attention, where the relationshipsbetween a token and all other tokens are computed. The global computation leadsto quadratic complexity with respect to the number of tokens. Here, as the[original paper](https://arxiv.org/abs/2103.14030) suggests, we computeself-attention within local windows, in a non-overlapping manner. Globalself-attention leads to quadratic computational complexity in the number ofpatches, whereas window-based self-attention leads to linear complexity and iseasily scalable.<jupyter_code>class WindowAttention(layers.Layer): def __init__( self, dim, window_size, num_heads, qkv_bias=True, dropout_rate=0.0, **kwargs, ): super().__init__(**kwargs) self.dim = dim self.window_size = window_size self.num_heads = num_heads self.scale = (dim // num_heads) ** -0.5 self.qkv = layers.Dense(dim * 3, use_bias=qkv_bias) self.dropout = layers.Dropout(dropout_rate) self.proj = layers.Dense(dim) num_window_elements = (2 * self.window_size[0] - 1) * ( 2 * self.window_size[1] - 1 ) self.relative_position_bias_table = self.add_weight( shape=(num_window_elements, self.num_heads), initializer=keras.initializers.Zeros(), trainable=True, ) coords_h = np.arange(self.window_size[0]) coords_w = np.arange(self.window_size[1]) coords_matrix = np.meshgrid(coords_h, coords_w, indexing="ij") coords = np.stack(coords_matrix) coords_flatten = coords.reshape(2, -1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.transpose([1, 2, 0]) relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.relative_position_index = keras.Variable( initializer=relative_position_index, shape=relative_position_index.shape, dtype="int", trainable=False, ) def call(self, x, mask=None): _, size, channels = x.shape head_dim = channels // self.num_heads x_qkv = self.qkv(x) x_qkv = ops.reshape(x_qkv, (-1, size, 3, self.num_heads, head_dim)) x_qkv = ops.transpose(x_qkv, (2, 0, 3, 1, 4)) q, k, v = x_qkv[0], x_qkv[1], x_qkv[2] q = q * self.scale k = ops.transpose(k, (0, 1, 3, 2)) attn = q @ k num_window_elements = self.window_size[0] * self.window_size[1] relative_position_index_flat = ops.reshape(self.relative_position_index, (-1,)) relative_position_bias = ops.take( self.relative_position_bias_table, relative_position_index_flat, axis=0, ) relative_position_bias = ops.reshape( relative_position_bias, (num_window_elements, num_window_elements, -1), ) relative_position_bias = ops.transpose(relative_position_bias, (2, 0, 1)) attn = attn + ops.expand_dims(relative_position_bias, axis=0) if mask 
is not None: nW = mask.shape[0] mask_float = ops.cast( ops.expand_dims(ops.expand_dims(mask, axis=1), axis=0), "float32", ) attn = ops.reshape(attn, (-1, nW, self.num_heads, size, size)) + mask_float attn = ops.reshape(attn, (-1, self.num_heads, size, size)) attn = keras.activations.softmax(attn, axis=-1) else: attn = keras.activations.softmax(attn, axis=-1) attn = self.dropout(attn) x_qkv = attn @ v x_qkv = ops.transpose(x_qkv, (0, 2, 1, 3)) x_qkv = ops.reshape(x_qkv, (-1, size, channels)) x_qkv = self.proj(x_qkv) x_qkv = self.dropout(x_qkv) return x_qkv<jupyter_output><empty_output><jupyter_text>The complete Swin Transformer modelFinally, we put together the complete Swin Transformer by replacing the standardmulti-head attention (MHA) with shifted windows attention. As suggested in theoriginal paper, we create a model comprising of a shifted window-based MHAlayer, followed by a 2-layer MLP with GELU nonlinearity in between, applying`LayerNormalization` before each MSA layer and each MLP, and a residualconnection after each of these layers.Notice that we only create a simple MLP with 2 Dense and2 Dropout layers. Often you will see models using ResNet-50 as the MLP which isquite standard in the literature. However in this paper the authors use a2-layer MLP with GELU nonlinearity in between.<jupyter_code>class SwinTransformer(layers.Layer): def __init__( self, dim, num_patch, num_heads, window_size=7, shift_size=0, num_mlp=1024, qkv_bias=True, dropout_rate=0.0, **kwargs, ): super().__init__(**kwargs) self.dim = dim # number of input dimensions self.num_patch = num_patch # number of embedded patches self.num_heads = num_heads # number of attention heads self.window_size = window_size # size of window self.shift_size = shift_size # size of window shift self.num_mlp = num_mlp # number of MLP nodes self.norm1 = layers.LayerNormalization(epsilon=1e-5) self.attn = WindowAttention( dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, dropout_rate=dropout_rate, ) self.drop_path = layers.Dropout(dropout_rate) self.norm2 = layers.LayerNormalization(epsilon=1e-5) self.mlp = keras.Sequential( [ layers.Dense(num_mlp), layers.Activation(keras.activations.gelu), layers.Dropout(dropout_rate), layers.Dense(dim), layers.Dropout(dropout_rate), ] ) if min(self.num_patch) < self.window_size: self.shift_size = 0 self.window_size = min(self.num_patch) def build(self, input_shape): if self.shift_size == 0: self.attn_mask = None else: height, width = self.num_patch h_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) w_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) mask_array = np.zeros((1, height, width, 1)) count = 0 for h in h_slices: for w in w_slices: mask_array[:, h, w, :] = count count += 1 mask_array = ops.convert_to_tensor(mask_array) # mask array to windows mask_windows = window_partition(mask_array, self.window_size) mask_windows = ops.reshape( mask_windows, [-1, self.window_size * self.window_size] ) attn_mask = ops.expand_dims(mask_windows, axis=1) - ops.expand_dims( mask_windows, axis=2 ) attn_mask = ops.where(attn_mask != 0, -100.0, attn_mask) attn_mask = ops.where(attn_mask == 0, 0.0, attn_mask) self.attn_mask = keras.Variable( initializer=attn_mask, shape=attn_mask.shape, dtype=attn_mask.dtype, trainable=False, ) def call(self, x, training=False): height, width = self.num_patch _, num_patches_before, channels = x.shape 
x_skip = x x = self.norm1(x) x = ops.reshape(x, (-1, height, width, channels)) if self.shift_size > 0: shifted_x = ops.roll( x, shift=[-self.shift_size, -self.shift_size], axis=[1, 2] ) else: shifted_x = x x_windows = window_partition(shifted_x, self.window_size) x_windows = ops.reshape( x_windows, (-1, self.window_size * self.window_size, channels) ) attn_windows = self.attn(x_windows, mask=self.attn_mask) attn_windows = ops.reshape( attn_windows, (-1, self.window_size, self.window_size, channels), ) shifted_x = window_reverse( attn_windows, self.window_size, height, width, channels ) if self.shift_size > 0: x = ops.roll( shifted_x, shift=[self.shift_size, self.shift_size], axis=[1, 2] ) else: x = shifted_x x = ops.reshape(x, (-1, height * width, channels)) x = self.drop_path(x, training=training) x = x_skip + x x_skip = x x = self.norm2(x) x = self.mlp(x) x = self.drop_path(x) x = x_skip + x return x<jupyter_output><empty_output><jupyter_text>Model training and evaluation Extract and embed patchesWe first create 3 layers to help us extract, embed and merge patches from theimages on top of which we will later use the Swin Transformer class we built.<jupyter_code># Using tf ops since it is only used in tf.data. def patch_extract(images): batch_size = tf.shape(images)[0] patches = tf.image.extract_patches( images=images, sizes=(1, patch_size[0], patch_size[1], 1), strides=(1, patch_size[0], patch_size[1], 1), rates=(1, 1, 1, 1), padding="VALID", ) patch_dim = patches.shape[-1] patch_num = patches.shape[1] return tf.reshape(patches, (batch_size, patch_num * patch_num, patch_dim)) class PatchEmbedding(layers.Layer): def __init__(self, num_patch, embed_dim, **kwargs): super().__init__(**kwargs) self.num_patch = num_patch self.proj = layers.Dense(embed_dim) self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim) def call(self, patch): pos = ops.arange(start=0, stop=self.num_patch) return self.proj(patch) + self.pos_embed(pos) class PatchMerging(keras.layers.Layer): def __init__(self, num_patch, embed_dim): super().__init__() self.num_patch = num_patch self.embed_dim = embed_dim self.linear_trans = layers.Dense(2 * embed_dim, use_bias=False) def call(self, x): height, width = self.num_patch _, _, C = x.shape x = ops.reshape(x, (-1, height, width, C)) x0 = x[:, 0::2, 0::2, :] x1 = x[:, 1::2, 0::2, :] x2 = x[:, 0::2, 1::2, :] x3 = x[:, 1::2, 1::2, :] x = ops.concatenate((x0, x1, x2, x3), axis=-1) x = ops.reshape(x, (-1, (height // 2) * (width // 2), 4 * C)) return self.linear_trans(x)<jupyter_output><empty_output><jupyter_text>Prepare the tf.data.DatasetWe do all the steps, which do not have trainable weights with tf.data.Prepare the training, validation and testing sets.<jupyter_code>def augment(x): x = tf.image.random_crop(x, size=(image_dimension, image_dimension, 3)) x = tf.image.random_flip_left_right(x) return x dataset = ( tf.data.Dataset.from_tensor_slices((x_train, y_train)) .map(lambda x, y: (augment(x), y)) .batch(batch_size=batch_size) .map(lambda x, y: (patch_extract(x), y)) .prefetch(tf.data.experimental.AUTOTUNE) ) dataset_val = ( tf.data.Dataset.from_tensor_slices((x_val, y_val)) .batch(batch_size=batch_size) .map(lambda x, y: (patch_extract(x), y)) .prefetch(tf.data.experimental.AUTOTUNE) ) dataset_test = ( tf.data.Dataset.from_tensor_slices((x_test, y_test)) .batch(batch_size=batch_size) .map(lambda x, y: (patch_extract(x), y)) .prefetch(tf.data.experimental.AUTOTUNE) )<jupyter_output><empty_output><jupyter_text>Build the modelWe put together the Swin 
Transformer model.<jupyter_code>input = layers.Input(shape=(256, 12)) x = PatchEmbedding(num_patch_x * num_patch_y, embed_dim)(input) x = SwinTransformer( dim=embed_dim, num_patch=(num_patch_x, num_patch_y), num_heads=num_heads, window_size=window_size, shift_size=0, num_mlp=num_mlp, qkv_bias=qkv_bias, dropout_rate=dropout_rate, )(x) x = SwinTransformer( dim=embed_dim, num_patch=(num_patch_x, num_patch_y), num_heads=num_heads, window_size=window_size, shift_size=shift_size, num_mlp=num_mlp, qkv_bias=qkv_bias, dropout_rate=dropout_rate, )(x) x = PatchMerging((num_patch_x, num_patch_y), embed_dim=embed_dim)(x) x = layers.GlobalAveragePooling1D()(x) output = layers.Dense(num_classes, activation="softmax")(x)<jupyter_output><empty_output><jupyter_text>Train on CIFAR-100We train the model on CIFAR-100. Here, we only train the modelfor 40 epochs to keep the training time short in this example.In practice, you should train for 150 epochs to reach convergence.<jupyter_code>model = keras.Model(input, output) model.compile( loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing), optimizer=keras.optimizers.AdamW( learning_rate=learning_rate, weight_decay=weight_decay ), metrics=[ keras.metrics.CategoricalAccuracy(name="accuracy"), keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"), ], ) history = model.fit( dataset, batch_size=batch_size, epochs=num_epochs, validation_data=dataset_val, )<jupyter_output><empty_output><jupyter_text>Let's visualize the training progress of the model.<jupyter_code>plt.plot(history.history["loss"], label="train_loss") plt.plot(history.history["val_loss"], label="val_loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.title("Train and Validation Losses Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show()<jupyter_output><empty_output><jupyter_text>Let's display the final results of the training on CIFAR-100.<jupyter_code>loss, accuracy, top_5_accuracy = model.evaluate(dataset_test) print(f"Test loss: {round(loss, 2)}") print(f"Test accuracy: {round(accuracy * 100, 2)}%") print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")<jupyter_output><empty_output>
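<jupyter_text>A small, illustrative sanity check (not part of the original example): `window_partition` and `window_reverse` should be exact inverses of one another. The input shape below is arbitrary; its spatial dimensions only need to be divisible by `window_size`.<jupyter_code># Round-trip check: partition an arbitrary feature map into windows and rebuild it.
dummy = ops.convert_to_tensor(np.random.rand(1, 8, 8, embed_dim).astype("float32"))
windows = window_partition(dummy, window_size)  # (num_windows, window_size, window_size, C)
restored = window_reverse(windows, window_size, 8, 8, embed_dim)
print(np.allclose(ops.convert_to_numpy(dummy), ops.convert_to_numpy(restored)))  # expect True<jupyter_output><empty_output>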
keras-io/examples/vision/ipynb/swin_transformers.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/swin_transformers.ipynb", "repo_id": "keras-io", "token_count": 8515 }
96
# Image classification with EANet (External Attention Transformer) **Author:** [ZhiYong Chang](https://github.com/czy00000)<br> **Date created:** 2021/10/19<br> **Last modified:** 2023/07/18<br> **Description:** Image classification with a Transformer that leverages external attention. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/eanet.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/eanet.py) --- ## Introduction This example implements the [EANet](https://arxiv.org/abs/2105.02358) model for image classification, and demonstrates it on the CIFAR-100 dataset. EANet introduces a novel attention mechanism named ***external attention***, based on two external, small, learnable, and shared memories, which can be implemented easily by simply using two cascaded linear layers and two normalization layers. It conveniently replaces self-attention as used in existing architectures. External attention has linear complexity, as it only implicitly considers the correlations between all samples. --- ## Setup ```python import keras from keras import layers from keras import ops import matplotlib.pyplot as plt ``` --- ## Prepare the data ```python num_classes = 100 input_shape = (32, 32, 3) (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}") print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}") ``` <div class="k-default-codeblock"> ``` Downloading data from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz 169001437/169001437 ━━━━━━━━━━━━━━━━━━━━ 3s 0us/step x_train shape: (50000, 32, 32, 3) - y_train shape: (50000, 100) x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 100) ``` </div> --- ## Configure the hyperparameters ```python weight_decay = 0.0001 learning_rate = 0.001 label_smoothing = 0.1 validation_split = 0.2 batch_size = 128 num_epochs = 50 patch_size = 2 # Size of the patches to be extracted from the input images. num_patches = (input_shape[0] // patch_size) ** 2 # Number of patch embedding_dim = 64 # Number of hidden units. mlp_dim = 64 dim_coefficient = 4 num_heads = 4 attention_dropout = 0.2 projection_dropout = 0.2 num_transformer_blocks = 8 # Number of repetitions of the transformer layer print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ") print(f"Patches per image: {num_patches}") ``` <div class="k-default-codeblock"> ``` Patch size: 2 X 2 = 4 Patches per image: 256 ``` </div> --- ## Use data augmentation ```python data_augmentation = keras.Sequential( [ layers.Normalization(), layers.RandomFlip("horizontal"), layers.RandomRotation(factor=0.1), layers.RandomContrast(factor=0.1), layers.RandomZoom(height_factor=0.2, width_factor=0.2), ], name="data_augmentation", ) # Compute the mean and the variance of the training data for normalization. 
data_augmentation.layers[0].adapt(x_train) ``` --- ## Implement the patch extraction and encoding layer ```python class PatchExtract(layers.Layer): def __init__(self, patch_size, **kwargs): super().__init__(**kwargs) self.patch_size = patch_size def call(self, x): B, C = ops.shape(x)[0], ops.shape(x)[-1] x = ops.image.extract_patches(x, self.patch_size) x = ops.reshape(x, (B, -1, self.patch_size * self.patch_size * C)) return x class PatchEmbedding(layers.Layer): def __init__(self, num_patch, embed_dim, **kwargs): super().__init__(**kwargs) self.num_patch = num_patch self.proj = layers.Dense(embed_dim) self.pos_embed = layers.Embedding(input_dim=num_patch, output_dim=embed_dim) def call(self, patch): pos = ops.arange(start=0, stop=self.num_patch, step=1) return self.proj(patch) + self.pos_embed(pos) ``` --- ## Implement the external attention block ```python def external_attention( x, dim, num_heads, dim_coefficient=4, attention_dropout=0, projection_dropout=0, ): _, num_patch, channel = x.shape assert dim % num_heads == 0 num_heads = num_heads * dim_coefficient x = layers.Dense(dim * dim_coefficient)(x) # create tensor [batch_size, num_patches, num_heads, dim*dim_coefficient//num_heads] x = ops.reshape(x, (-1, num_patch, num_heads, dim * dim_coefficient // num_heads)) x = ops.transpose(x, axes=[0, 2, 1, 3]) # a linear layer M_k attn = layers.Dense(dim // dim_coefficient)(x) # normalize attention map attn = layers.Softmax(axis=2)(attn) # dobule-normalization attn = layers.Lambda( lambda attn: ops.divide( attn, ops.convert_to_tensor(1e-9) + ops.sum(attn, axis=-1, keepdims=True), ) )(attn) attn = layers.Dropout(attention_dropout)(attn) # a linear layer M_v x = layers.Dense(dim * dim_coefficient // num_heads)(attn) x = ops.transpose(x, axes=[0, 2, 1, 3]) x = ops.reshape(x, [-1, num_patch, dim * dim_coefficient]) # a linear layer to project original dim x = layers.Dense(dim)(x) x = layers.Dropout(projection_dropout)(x) return x ``` --- ## Implement the MLP block ```python def mlp(x, embedding_dim, mlp_dim, drop_rate=0.2): x = layers.Dense(mlp_dim, activation=ops.gelu)(x) x = layers.Dropout(drop_rate)(x) x = layers.Dense(embedding_dim)(x) x = layers.Dropout(drop_rate)(x) return x ``` --- ## Implement the Transformer block ```python def transformer_encoder( x, embedding_dim, mlp_dim, num_heads, dim_coefficient, attention_dropout, projection_dropout, attention_type="external_attention", ): residual_1 = x x = layers.LayerNormalization(epsilon=1e-5)(x) if attention_type == "external_attention": x = external_attention( x, embedding_dim, num_heads, dim_coefficient, attention_dropout, projection_dropout, ) elif attention_type == "self_attention": x = layers.MultiHeadAttention( num_heads=num_heads, key_dim=embedding_dim, dropout=attention_dropout, )(x, x) x = layers.add([x, residual_1]) residual_2 = x x = layers.LayerNormalization(epsilon=1e-5)(x) x = mlp(x, embedding_dim, mlp_dim) x = layers.add([x, residual_2]) return x ``` --- ## Implement the EANet model The EANet model leverages external attention. The computational complexity of traditional self attention is `O(d * N ** 2)`, where `d` is the embedding size, and `N` is the number of patch. the authors find that most pixels are closely related to just a few other pixels, and an `N`-to-`N` attention matrix may be redundant. So, they propose as an alternative an external attention module where the computational complexity of external attention is `O(d * S * N)`. 
As `d` and `S` are hyper-parameters, the proposed algorithm is linear in the number of pixels. In fact, this is equivalent to a drop patch operation, because a lot of information contained in a patch in an image is redundant and unimportant. ```python def get_model(attention_type="external_attention"): inputs = layers.Input(shape=input_shape) # Image augment x = data_augmentation(inputs) # Extract patches. x = PatchExtract(patch_size)(x) # Create patch embedding. x = PatchEmbedding(num_patches, embedding_dim)(x) # Create Transformer block. for _ in range(num_transformer_blocks): x = transformer_encoder( x, embedding_dim, mlp_dim, num_heads, dim_coefficient, attention_dropout, projection_dropout, attention_type, ) x = layers.GlobalAveragePooling1D()(x) outputs = layers.Dense(num_classes, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) return model ``` --- ## Train on CIFAR-100 ```python model = get_model(attention_type="external_attention") model.compile( loss=keras.losses.CategoricalCrossentropy(label_smoothing=label_smoothing), optimizer=keras.optimizers.AdamW( learning_rate=learning_rate, weight_decay=weight_decay ), metrics=[ keras.metrics.CategoricalAccuracy(name="accuracy"), keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy"), ], ) history = model.fit( x_train, y_train, batch_size=batch_size, epochs=num_epochs, validation_split=validation_split, ) ``` <div class="k-default-codeblock"> ``` Epoch 1/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 56s 101ms/step - accuracy: 0.0367 - loss: 4.5081 - top-5-accuracy: 0.1369 - val_accuracy: 0.0659 - val_loss: 4.5736 - val_top-5-accuracy: 0.2277 Epoch 2/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 97ms/step - accuracy: 0.0970 - loss: 4.0453 - top-5-accuracy: 0.2965 - val_accuracy: 0.0624 - val_loss: 5.2273 - val_top-5-accuracy: 0.2178 Epoch 3/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.1287 - loss: 3.8706 - top-5-accuracy: 0.3621 - val_accuracy: 0.0690 - val_loss: 5.9141 - val_top-5-accuracy: 0.2342 Epoch 4/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.1569 - loss: 3.7600 - top-5-accuracy: 0.4071 - val_accuracy: 0.0806 - val_loss: 5.7599 - val_top-5-accuracy: 0.2510 Epoch 5/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.1839 - loss: 3.6534 - top-5-accuracy: 0.4437 - val_accuracy: 0.0954 - val_loss: 5.6725 - val_top-5-accuracy: 0.2772 Epoch 6/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.1983 - loss: 3.5784 - top-5-accuracy: 0.4643 - val_accuracy: 0.1050 - val_loss: 5.5299 - val_top-5-accuracy: 0.2898 Epoch 7/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.2142 - loss: 3.5126 - top-5-accuracy: 0.4879 - val_accuracy: 0.1108 - val_loss: 5.5076 - val_top-5-accuracy: 0.2995 Epoch 8/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.2277 - loss: 3.4624 - top-5-accuracy: 0.5044 - val_accuracy: 0.1157 - val_loss: 5.3608 - val_top-5-accuracy: 0.3065 Epoch 9/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.2360 - loss: 3.4188 - top-5-accuracy: 0.5191 - val_accuracy: 0.1200 - val_loss: 5.4690 - val_top-5-accuracy: 0.3106 Epoch 10/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.2444 - loss: 3.3684 - top-5-accuracy: 0.5387 - val_accuracy: 0.1286 - val_loss: 5.1677 - val_top-5-accuracy: 0.3263 Epoch 11/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.2532 - loss: 3.3380 - top-5-accuracy: 0.5425 - val_accuracy: 0.1161 - val_loss: 5.5990 - val_top-5-accuracy: 0.3166 Epoch 12/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 
0.2646 - loss: 3.2978 - top-5-accuracy: 0.5537 - val_accuracy: 0.1244 - val_loss: 5.5238 - val_top-5-accuracy: 0.3181 Epoch 13/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.2722 - loss: 3.2706 - top-5-accuracy: 0.5663 - val_accuracy: 0.1304 - val_loss: 5.2244 - val_top-5-accuracy: 0.3392 Epoch 14/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.2773 - loss: 3.2406 - top-5-accuracy: 0.5707 - val_accuracy: 0.1358 - val_loss: 5.2482 - val_top-5-accuracy: 0.3431 Epoch 15/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.2839 - loss: 3.2050 - top-5-accuracy: 0.5855 - val_accuracy: 0.1288 - val_loss: 5.3406 - val_top-5-accuracy: 0.3388 Epoch 16/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.2881 - loss: 3.1856 - top-5-accuracy: 0.5918 - val_accuracy: 0.1402 - val_loss: 5.2058 - val_top-5-accuracy: 0.3502 Epoch 17/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3006 - loss: 3.1596 - top-5-accuracy: 0.5992 - val_accuracy: 0.1410 - val_loss: 5.2260 - val_top-5-accuracy: 0.3476 Epoch 18/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3047 - loss: 3.1334 - top-5-accuracy: 0.6068 - val_accuracy: 0.1348 - val_loss: 5.2521 - val_top-5-accuracy: 0.3415 Epoch 19/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3058 - loss: 3.1203 - top-5-accuracy: 0.6125 - val_accuracy: 0.1433 - val_loss: 5.1966 - val_top-5-accuracy: 0.3570 Epoch 20/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3105 - loss: 3.0968 - top-5-accuracy: 0.6141 - val_accuracy: 0.1404 - val_loss: 5.3623 - val_top-5-accuracy: 0.3497 Epoch 21/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3161 - loss: 3.0748 - top-5-accuracy: 0.6247 - val_accuracy: 0.1486 - val_loss: 5.0754 - val_top-5-accuracy: 0.3740 Epoch 22/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3233 - loss: 3.0536 - top-5-accuracy: 0.6288 - val_accuracy: 0.1472 - val_loss: 5.3110 - val_top-5-accuracy: 0.3545 Epoch 23/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3281 - loss: 3.0272 - top-5-accuracy: 0.6387 - val_accuracy: 0.1408 - val_loss: 5.4392 - val_top-5-accuracy: 0.3524 Epoch 24/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3363 - loss: 3.0089 - top-5-accuracy: 0.6389 - val_accuracy: 0.1395 - val_loss: 5.3579 - val_top-5-accuracy: 0.3555 Epoch 25/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3386 - loss: 2.9958 - top-5-accuracy: 0.6427 - val_accuracy: 0.1550 - val_loss: 5.1783 - val_top-5-accuracy: 0.3655 Epoch 26/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3474 - loss: 2.9824 - top-5-accuracy: 0.6496 - val_accuracy: 0.1448 - val_loss: 5.3971 - val_top-5-accuracy: 0.3596 Epoch 27/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3500 - loss: 2.9647 - top-5-accuracy: 0.6532 - val_accuracy: 0.1519 - val_loss: 5.1895 - val_top-5-accuracy: 0.3665 Epoch 28/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 98ms/step - accuracy: 0.3561 - loss: 2.9414 - top-5-accuracy: 0.6604 - val_accuracy: 0.1470 - val_loss: 5.4482 - val_top-5-accuracy: 0.3600 Epoch 29/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3572 - loss: 2.9410 - top-5-accuracy: 0.6593 - val_accuracy: 0.1572 - val_loss: 5.1866 - val_top-5-accuracy: 0.3795 Epoch 30/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 100ms/step - accuracy: 0.3561 - loss: 2.9263 - top-5-accuracy: 0.6670 - val_accuracy: 0.1638 - val_loss: 5.0637 - val_top-5-accuracy: 0.3934 Epoch 31/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3621 - loss: 2.9050 - 
top-5-accuracy: 0.6730 - val_accuracy: 0.1589 - val_loss: 5.2504 - val_top-5-accuracy: 0.3835 Epoch 32/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3675 - loss: 2.8898 - top-5-accuracy: 0.6754 - val_accuracy: 0.1690 - val_loss: 5.0613 - val_top-5-accuracy: 0.3950 Epoch 33/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3771 - loss: 2.8710 - top-5-accuracy: 0.6784 - val_accuracy: 0.1596 - val_loss: 5.1941 - val_top-5-accuracy: 0.3784 Epoch 34/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3797 - loss: 2.8536 - top-5-accuracy: 0.6880 - val_accuracy: 0.1686 - val_loss: 5.1522 - val_top-5-accuracy: 0.3879 Epoch 35/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3792 - loss: 2.8504 - top-5-accuracy: 0.6871 - val_accuracy: 0.1525 - val_loss: 5.2875 - val_top-5-accuracy: 0.3735 Epoch 36/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3868 - loss: 2.8278 - top-5-accuracy: 0.6950 - val_accuracy: 0.1573 - val_loss: 5.2148 - val_top-5-accuracy: 0.3797 Epoch 37/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.3869 - loss: 2.8129 - top-5-accuracy: 0.6973 - val_accuracy: 0.1562 - val_loss: 5.4344 - val_top-5-accuracy: 0.3646 Epoch 38/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3866 - loss: 2.8129 - top-5-accuracy: 0.6977 - val_accuracy: 0.1610 - val_loss: 5.2807 - val_top-5-accuracy: 0.3772 Epoch 39/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3934 - loss: 2.7990 - top-5-accuracy: 0.7006 - val_accuracy: 0.1681 - val_loss: 5.0741 - val_top-5-accuracy: 0.3967 Epoch 40/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.3947 - loss: 2.7863 - top-5-accuracy: 0.7065 - val_accuracy: 0.1612 - val_loss: 5.1039 - val_top-5-accuracy: 0.3885 Epoch 41/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.4030 - loss: 2.7687 - top-5-accuracy: 0.7092 - val_accuracy: 0.1592 - val_loss: 5.1138 - val_top-5-accuracy: 0.3837 Epoch 42/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.4013 - loss: 2.7706 - top-5-accuracy: 0.7071 - val_accuracy: 0.1718 - val_loss: 5.1391 - val_top-5-accuracy: 0.3938 Epoch 43/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.4062 - loss: 2.7569 - top-5-accuracy: 0.7137 - val_accuracy: 0.1593 - val_loss: 5.3004 - val_top-5-accuracy: 0.3781 Epoch 44/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 97ms/step - accuracy: 0.4109 - loss: 2.7429 - top-5-accuracy: 0.7129 - val_accuracy: 0.1823 - val_loss: 5.0221 - val_top-5-accuracy: 0.4038 Epoch 45/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.4074 - loss: 2.7312 - top-5-accuracy: 0.7212 - val_accuracy: 0.1706 - val_loss: 5.1799 - val_top-5-accuracy: 0.3898 Epoch 46/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 95ms/step - accuracy: 0.4175 - loss: 2.7121 - top-5-accuracy: 0.7202 - val_accuracy: 0.1701 - val_loss: 5.1674 - val_top-5-accuracy: 0.3910 Epoch 47/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 31s 101ms/step - accuracy: 0.4187 - loss: 2.7178 - top-5-accuracy: 0.7227 - val_accuracy: 0.1764 - val_loss: 5.0161 - val_top-5-accuracy: 0.4027 Epoch 48/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.4180 - loss: 2.7045 - top-5-accuracy: 0.7246 - val_accuracy: 0.1709 - val_loss: 5.0650 - val_top-5-accuracy: 0.3907 Epoch 49/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.4264 - loss: 2.6857 - top-5-accuracy: 0.7276 - val_accuracy: 0.1591 - val_loss: 5.3416 - val_top-5-accuracy: 0.3732 Epoch 50/50 313/313 ━━━━━━━━━━━━━━━━━━━━ 30s 96ms/step - accuracy: 0.4245 - loss: 2.6878 - top-5-accuracy: 0.7271 - val_accuracy: 
0.1778 - val_loss: 5.1093 - val_top-5-accuracy: 0.3987 ``` </div> ### Let's visualize the training progress of the model. ```python plt.plot(history.history["loss"], label="train_loss") plt.plot(history.history["val_loss"], label="val_loss") plt.xlabel("Epochs") plt.ylabel("Loss") plt.title("Train and Validation Losses Over Epochs", fontsize=14) plt.legend() plt.grid() plt.show() ``` ![png](/img/examples/vision/eanet/eanet_24_0.png) ### Let's display the final results of the test on CIFAR-100. ```python loss, accuracy, top_5_accuracy = model.evaluate(x_test, y_test) print(f"Test loss: {round(loss, 2)}") print(f"Test accuracy: {round(accuracy * 100, 2)}%") print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%") ``` <div class="k-default-codeblock"> ``` 313/313 ━━━━━━━━━━━━━━━━━━━━ 3s 9ms/step - accuracy: 0.1774 - loss: 5.0871 - top-5-accuracy: 0.3963 Test loss: 5.15 Test accuracy: 17.26% Test top 5 accuracy: 38.94% ``` </div> EANet just replaces self attention in Vit with external attention. The traditional Vit achieved a ~73% test top-5 accuracy and ~41 top-1 accuracy after training 50 epochs, but with 0.6M parameters. Under the same experimental environment and the same hyperparameters, The EANet model we just trained has just 0.3M parameters, and it gets us to ~73% test top-5 accuracy and ~43% top-1 accuracy. This fully demonstrates the effectiveness of external attention. We only show the training process of EANet, you can train Vit under the same experimental conditions and observe the test results.
keras-io/examples/vision/md/eanet.md/0
{ "file_path": "keras-io/examples/vision/md/eanet.md", "repo_id": "keras-io", "token_count": 8946 }
97
# Image segmentation with a U-Net-like architecture **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2019/03/20<br> **Last modified:** 2020/04/20<br> **Description:** Image segmentation model trained from scratch on the Oxford Pets dataset. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/oxford_pets_image_segmentation.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/oxford_pets_image_segmentation.py) --- ## Download the data ```python !!wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz !!wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz ! !curl -O https://thor.robots.ox.ac.uk/datasets/pets/images.tar.gz !curl -O https://thor.robots.ox.ac.uk/datasets/pets/annotations.tar.gz ! !tar -xf images.tar.gz !tar -xf annotations.tar.gz ``` <div class="k-default-codeblock"> ``` % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 755M 100 755M 0 0 21.3M 0 0:00:35 0:00:35 --:--:-- 22.2M % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 18.2M 100 18.2M 0 0 7977k 0 0:00:02 0:00:02 --:--:-- 7974k ``` </div> --- ## Prepare paths of input images and target segmentation masks ```python import os input_dir = "images/" target_dir = "annotations/trimaps/" img_size = (160, 160) num_classes = 3 batch_size = 32 input_img_paths = sorted( [ os.path.join(input_dir, fname) for fname in os.listdir(input_dir) if fname.endswith(".jpg") ] ) target_img_paths = sorted( [ os.path.join(target_dir, fname) for fname in os.listdir(target_dir) if fname.endswith(".png") and not fname.startswith(".") ] ) print("Number of samples:", len(input_img_paths)) for input_path, target_path in zip(input_img_paths[:10], target_img_paths[:10]): print(input_path, "|", target_path) ``` <div class="k-default-codeblock"> ``` Number of samples: 7390 images/Abyssinian_1.jpg | annotations/trimaps/Abyssinian_1.png images/Abyssinian_10.jpg | annotations/trimaps/Abyssinian_10.png images/Abyssinian_100.jpg | annotations/trimaps/Abyssinian_100.png images/Abyssinian_101.jpg | annotations/trimaps/Abyssinian_101.png images/Abyssinian_102.jpg | annotations/trimaps/Abyssinian_102.png images/Abyssinian_103.jpg | annotations/trimaps/Abyssinian_103.png images/Abyssinian_104.jpg | annotations/trimaps/Abyssinian_104.png images/Abyssinian_105.jpg | annotations/trimaps/Abyssinian_105.png images/Abyssinian_106.jpg | annotations/trimaps/Abyssinian_106.png images/Abyssinian_107.jpg | annotations/trimaps/Abyssinian_107.png ``` </div> --- ## What does one input image and corresponding segmentation mask look like? 
```python from IPython.display import Image, display from keras.utils import load_img from PIL import ImageOps # Display input image #7 display(Image(filename=input_img_paths[9])) # Display auto-contrast version of corresponding target (per-pixel categories) img = ImageOps.autocontrast(load_img(target_img_paths[9])) display(img) ``` ![jpeg](/img/examples/vision/oxford_pets_image_segmentation/oxford_pets_image_segmentation_6_0.jpg) ![png](/img/examples/vision/oxford_pets_image_segmentation/oxford_pets_image_segmentation_6_1.png) --- ## Prepare dataset to load & vectorize batches of data ```python import keras import numpy as np from tensorflow import data as tf_data from tensorflow import image as tf_image from tensorflow import io as tf_io def get_dataset( batch_size, img_size, input_img_paths, target_img_paths, max_dataset_len=None, ): """Returns a TF Dataset.""" def load_img_masks(input_img_path, target_img_path): input_img = tf_io.read_file(input_img_path) input_img = tf_io.decode_png(input_img, channels=3) input_img = tf_image.resize(input_img, img_size) input_img = tf_image.convert_image_dtype(input_img, "float32") target_img = tf_io.read_file(target_img_path) target_img = tf_io.decode_png(target_img, channels=1) target_img = tf_image.resize(target_img, img_size, method="nearest") target_img = tf_image.convert_image_dtype(target_img, "uint8") # Ground truth labels are 1, 2, 3. Subtract one to make them 0, 1, 2: target_img -= 1 return input_img, target_img # For faster debugging, limit the size of data if max_dataset_len: input_img_paths = input_img_paths[:max_dataset_len] target_img_paths = target_img_paths[:max_dataset_len] dataset = tf_data.Dataset.from_tensor_slices((input_img_paths, target_img_paths)) dataset = dataset.map(load_img_masks, num_parallel_calls=tf_data.AUTOTUNE) return dataset.batch(batch_size) ``` --- ## Prepare U-Net Xception-style model ```python from keras import layers def get_model(img_size, num_classes): inputs = keras.Input(shape=img_size + (3,)) ### [First half of the network: downsampling inputs] ### # Entry block x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) previous_block_activation = x # Set aside residual # Blocks 1, 2, 3 are identical apart from the feature depth. 
for filters in [64, 128, 256]: x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.SeparableConv2D(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.MaxPooling2D(3, strides=2, padding="same")(x) # Project residual residual = layers.Conv2D(filters, 1, strides=2, padding="same")( previous_block_activation ) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual ### [Second half of the network: upsampling inputs] ### for filters in [256, 128, 64, 32]: x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) x = layers.Conv2DTranspose(filters, 3, padding="same")(x) x = layers.BatchNormalization()(x) x = layers.UpSampling2D(2)(x) # Project residual residual = layers.UpSampling2D(2)(previous_block_activation) residual = layers.Conv2D(filters, 1, padding="same")(residual) x = layers.add([x, residual]) # Add back residual previous_block_activation = x # Set aside next residual # Add a per-pixel classification layer outputs = layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(x) # Define the model model = keras.Model(inputs, outputs) return model # Build model model = get_model(img_size, num_classes) model.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩ │ input_layer │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">896</span> │ input_layer[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalization │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: 
#00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">128</span> │ conv2d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ activation[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">2,400</span> │ activation_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │ separable_conv2d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>]… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ 
(<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">4,736</span> │ activation_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │ separable_conv2d_1[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ max_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">2,112</span> │ activation[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ max_pooling2d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ conv2d_1[<span style="color: #00af00; 
text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">8,896</span> │ activation_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │ separable_conv2d_2[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">17,664</span> │ activation_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ 
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │ separable_conv2d_3[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ max_pooling2d_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">8,320</span> │ add[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ max_pooling2d_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ conv2d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ 
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">34,176</span> │ activation_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │ separable_conv2d_4[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_6 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ separable_conv2d_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">68,096</span> │ activation_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">SeparableConv2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │ separable_conv2d_5[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ max_pooling2d_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; 
text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">33,024</span> │ add_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ max_pooling2d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ conv2d_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_7 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">590,080</span> │ activation_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; 
text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │ conv2d_transpose[<span style="color: #00af00; text-decoration-color: #00af00">0</span>]… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_8 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">590,080</span> │ activation_8[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │ conv2d_transpose_1[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: 
#0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">65,792</span> │ up_sampling2d_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ up_sampling2d[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>], │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ conv2d_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_9 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">295,040</span> │ activation_9[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │ conv2d_transpose_2[<span 
style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_10 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">147,584</span> │ activation_10[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, <span style="color: #00af00; text-decoration-color: #00af00">20</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">512</span> │ conv2d_transpose_3[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_3 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ 
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">32,896</span> │ up_sampling2d_3[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ up_sampling2d_2[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ conv2d_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_11 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">73,792</span> │ activation_11[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │ conv2d_transpose_4[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: 
#0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_12 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │ activation_12[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">256</span> │ conv2d_transpose_5[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_5 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_4 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; 
text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">8,256</span> │ up_sampling2d_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ up_sampling2d_4[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ conv2d_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_13 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_6 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">18,464</span> │ activation_13[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">128</span> │ conv2d_transpose_6[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ activation_14 │ (<span style="color: 
#00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Activation</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_transpose_7 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ activation_14[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ batch_normalizatio… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, <span style="color: #00af00; text-decoration-color: #00af00">80</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">128</span> │ conv2d_transpose_7[<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">BatchNormalizatio…</span> │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_7 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ add_5[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ up_sampling2d_6 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ batch_normalization… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; 
text-decoration-color: #00af00">2,080</span> │ up_sampling2d_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ add_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Add</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ up_sampling2d_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ │ conv2d_7[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ conv2d_8 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, <span style="color: #00af00; text-decoration-color: #00af00">160</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">867</span> │ add_6[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ │ <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ │ │ └─────────────────────┴───────────────────┴─────────┴──────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,058,979</span> (7.85 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,055,203</span> (7.84 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">3,776</span> (14.75 KB) </pre> --- ## Set aside a validation split ```python import random # Split our img paths into a training and a validation set val_samples = 1000 random.Random(1337).shuffle(input_img_paths) random.Random(1337).shuffle(target_img_paths) train_input_img_paths = input_img_paths[:-val_samples] train_target_img_paths = target_img_paths[:-val_samples] val_input_img_paths = input_img_paths[-val_samples:] val_target_img_paths = target_img_paths[-val_samples:] # Instantiate dataset for each split # Limit input files in `max_dataset_len` for faster epoch training time. # Remove the `max_dataset_len` arg when running with full dataset. 
train_dataset = get_dataset( batch_size, img_size, train_input_img_paths, train_target_img_paths, max_dataset_len=1000, ) valid_dataset = get_dataset( batch_size, img_size, val_input_img_paths, val_target_img_paths ) ``` --- ## Train the model ```python # Configure the model for training. # We use the "sparse" version of categorical_crossentropy # because our target data is integers. model.compile( optimizer=keras.optimizers.Adam(1e-4), loss="sparse_categorical_crossentropy" ) callbacks = [ keras.callbacks.ModelCheckpoint("oxford_segmentation.keras", save_best_only=True) ] # Train the model, doing validation at the end of each epoch. epochs = 50 model.fit( train_dataset, epochs=epochs, validation_data=valid_dataset, callbacks=callbacks, verbose=2, ) ``` <div class="k-default-codeblock"> ``` Epoch 1/50 WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1700414690.172044 2226172 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 62s - 2s/step - loss: 1.6363 - val_loss: 2.2226 Epoch 2/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 94ms/step - loss: 0.9223 - val_loss: 1.8273 Epoch 3/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 82ms/step - loss: 0.7894 - val_loss: 2.0044 Epoch 4/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.7174 - val_loss: 2.3480 Epoch 5/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 82ms/step - loss: 0.6695 - val_loss: 2.7528 Epoch 6/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.6325 - val_loss: 3.1453 Epoch 7/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.6012 - val_loss: 3.5611 Epoch 8/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.5730 - val_loss: 4.0003 Epoch 9/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 85ms/step - loss: 0.5466 - val_loss: 4.4798 Epoch 10/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 86ms/step - loss: 0.5210 - val_loss: 5.0245 Epoch 11/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.4958 - val_loss: 5.5950 Epoch 12/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.4706 - val_loss: 6.1534 Epoch 13/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 85ms/step - loss: 0.4453 - val_loss: 6.6107 Epoch 14/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.4202 - val_loss: 6.8010 Epoch 15/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.3956 - val_loss: 6.6751 Epoch 16/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.3721 - val_loss: 6.0800 Epoch 17/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.3506 - val_loss: 5.1820 Epoch 18/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 82ms/step - loss: 0.3329 - val_loss: 4.0350 Epoch 19/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 4s - 114ms/step - loss: 0.3216 - val_loss: 3.0513 Epoch 20/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 
94ms/step - loss: 0.3595 - val_loss: 2.2567 Epoch 21/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 100ms/step - loss: 0.4417 - val_loss: 1.5873 Epoch 22/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 101ms/step - loss: 0.3531 - val_loss: 1.5798 Epoch 23/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 96ms/step - loss: 0.3353 - val_loss: 1.5525 Epoch 24/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 95ms/step - loss: 0.3392 - val_loss: 1.4625 Epoch 25/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 95ms/step - loss: 0.3596 - val_loss: 0.8867 Epoch 26/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 94ms/step - loss: 0.3528 - val_loss: 0.8021 Epoch 27/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 92ms/step - loss: 0.3237 - val_loss: 0.7986 Epoch 28/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 89ms/step - loss: 0.3198 - val_loss: 0.8533 Epoch 29/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.3272 - val_loss: 1.0588 Epoch 30/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 88ms/step - loss: 0.3164 - val_loss: 1.1889 Epoch 31/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 85ms/step - loss: 0.2987 - val_loss: 0.9518 Epoch 32/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.2749 - val_loss: 0.9011 Epoch 33/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.2595 - val_loss: 0.8872 Epoch 34/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.2552 - val_loss: 1.0221 Epoch 35/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 82ms/step - loss: 0.2628 - val_loss: 1.1553 Epoch 36/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 85ms/step - loss: 0.2788 - val_loss: 2.1549 Epoch 37/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 94ms/step - loss: 0.2870 - val_loss: 1.6282 Epoch 38/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 89ms/step - loss: 0.2702 - val_loss: 1.3201 Epoch 39/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 91ms/step - loss: 0.2569 - val_loss: 1.2364 Epoch 40/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 106ms/step - loss: 0.2523 - val_loss: 1.3673 Epoch 41/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 86ms/step - loss: 0.2570 - val_loss: 1.3999 Epoch 42/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 87ms/step - loss: 0.2680 - val_loss: 0.9976 Epoch 43/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.2558 - val_loss: 1.0209 Epoch 44/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 85ms/step - loss: 0.2403 - val_loss: 1.3271 Epoch 45/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.2414 - val_loss: 1.1993 Epoch 46/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 84ms/step - loss: 0.2516 - val_loss: 1.0532 Epoch 47/50 Corrupt JPEG data: 240 extraneous bytes before marker 0xd9 32/32 - 3s - 83ms/step - loss: 0.2695 - val_loss: 1.1183 Epoch 48/50 Corrupt JPEG data: 240 extraneous bytes 
before marker 0xd9
32/32 - 3s - 87ms/step - loss: 0.2555 - val_loss: 1.0432
Epoch 49/50
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
32/32 - 3s - 82ms/step - loss: 0.2290 - val_loss: 0.9444
Epoch 50/50
Corrupt JPEG data: 240 extraneous bytes before marker 0xd9
32/32 - 3s - 83ms/step - loss: 0.1994 - val_loss: 1.2182

<keras.src.callbacks.history.History at 0x7fe01842dab0>

```
</div>

---
## Visualize predictions

```python
# Generate predictions for all images in the validation set
val_dataset = get_dataset(
    batch_size, img_size, val_input_img_paths, val_target_img_paths
)
val_preds = model.predict(val_dataset)


def display_mask(i):
    """Quick utility to display a model's prediction."""
    mask = np.argmax(val_preds[i], axis=-1)
    mask = np.expand_dims(mask, axis=-1)
    img = ImageOps.autocontrast(keras.utils.array_to_img(mask))
    display(img)


# Display results for validation image #10
i = 10

# Display input image
display(Image(filename=val_input_img_paths[i]))

# Display ground-truth target mask
img = ImageOps.autocontrast(load_img(val_target_img_paths[i]))
display(img)

# Display mask predicted by our model
display_mask(i)  # Note that the model only sees inputs at 160x160.
```

<div class="k-default-codeblock">

```
 32/32 ━━━━━━━━━━━━━━━━━━━━ 5s 100ms/step
```

</div>

![jpeg](/img/examples/vision/oxford_pets_image_segmentation/oxford_pets_image_segmentation_16_1.jpg)

![png](/img/examples/vision/oxford_pets_image_segmentation/oxford_pets_image_segmentation_16_2.png)

![png](/img/examples/vision/oxford_pets_image_segmentation/oxford_pets_image_segmentation_16_3.png)
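Because the `ModelCheckpoint` callback above was configured with `save_best_only=True`, the model with the lowest validation loss is saved to `oxford_segmentation.keras`, while the in-memory `model` holds the weights from the final epoch. The snippet below is a minimal sketch of how you could reload that best checkpoint and predict a mask for a single image; the `predict_single_mask` helper is hypothetical, and the 160x160 target size is assumed from the `img_size` used throughout this example.

```python
import numpy as np
import keras

# Reload the best checkpoint written by the ModelCheckpoint callback.
best_model = keras.models.load_model("oxford_segmentation.keras")


def predict_single_mask(image_path, image_size=(160, 160)):
    """Hypothetical helper: load one image, run the model, return a class mask."""
    img = keras.utils.load_img(image_path, target_size=image_size)
    x = keras.utils.img_to_array(img)  # (160, 160, 3) floats in [0, 255]
    x = np.expand_dims(x, axis=0)  # add a batch dimension
    preds = best_model.predict(x)  # (1, 160, 160, 3) per-pixel class scores
    return np.argmax(preds[0], axis=-1)  # (160, 160) integer class mask


mask = predict_single_mask(val_input_img_paths[i])
```

The returned mask can be displayed the same way as in `display_mask` above: expand the last axis and pass it through `ImageOps.autocontrast(keras.utils.array_to_img(...))`.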
keras-io/examples/vision/md/oxford_pets_image_segmentation.md/0
{ "file_path": "keras-io/examples/vision/md/oxford_pets_image_segmentation.md", "repo_id": "keras-io", "token_count": 30725 }
98
"""
Title: RandAugment for Image Classification for Improved Robustness
Authors: [Sayak Paul](https://twitter.com/RisingSayak), [Sachin Prasad](https://github.com/sachinprasadhs)
Date created: 2021/03/13
Last modified: 2023/12/12
Description: RandAugment for training an image classification model with improved robustness.
Accelerator: GPU
"""

"""
Data augmentation is a very useful technique that can help to improve the translational
invariance of convolutional neural networks (CNNs). RandAugment is a stochastic data
augmentation routine for vision data and was proposed in
[RandAugment: Practical automated data augmentation with a reduced search space](https://arxiv.org/abs/1909.13719).
It is composed of strong augmentation transforms like color jitters, Gaussian blurs,
saturations, etc. along with more traditional augmentation transforms such as
random crops.

The parameters of these transforms are tuned for a given dataset and a network
architecture. The authors of RandAugment also provide pseudocode of RandAugment in the
original paper (Figure 2).

Recently, it has been a key component of works like
[Noisy Student Training](https://arxiv.org/abs/1911.04252) and
[Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848).
It has also been central to the
success of [EfficientNets](https://arxiv.org/abs/1905.11946).

This example requires the `keras_cv` package, which can be installed with:

```shell
pip install keras-cv
```
"""

"""
## Imports & setup
"""

import os

os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import keras_cv
from keras import ops
from keras import layers
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds

tfds.disable_progress_bar()
keras.utils.set_random_seed(42)

"""
## Load the CIFAR10 dataset

For this example, we will be using the
[CIFAR10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
"""

(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(f"Total training examples: {len(x_train)}")
print(f"Total test examples: {len(x_test)}")

"""
## Define hyperparameters
"""

AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 128
EPOCHS = 1
IMAGE_SIZE = 72

"""
## Initialize `RandAugment` object

Now, we will initialize a `RandAugment` object from the `keras_cv.layers` module with
the parameters suggested by the RandAugment authors.
"""

rand_augment = keras_cv.layers.RandAugment(
    value_range=(0, 255), augmentations_per_image=3, magnitude=0.8
)

"""
## Create TensorFlow `Dataset` objects
"""

train_ds_rand = (
    tf.data.Dataset.from_tensor_slices((x_train, y_train))
    .shuffle(BATCH_SIZE * 100)
    .batch(BATCH_SIZE)
    .map(
        lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
        num_parallel_calls=AUTO,
    )
    .map(
        lambda x, y: (rand_augment(tf.cast(x, tf.uint8)), y),
        num_parallel_calls=AUTO,
    )
    .prefetch(AUTO)
)

test_ds = (
    tf.data.Dataset.from_tensor_slices((x_test, y_test))
    .batch(BATCH_SIZE)
    .map(
        lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
        num_parallel_calls=AUTO,
    )
    .prefetch(AUTO)
)

"""
For comparison purposes, let's also define a simple augmentation pipeline consisting of
random flips, random rotations, and random zooms.
"""

simple_aug = keras.Sequential(
    [
        layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(factor=0.02),
        layers.RandomZoom(height_factor=0.2, width_factor=0.2),
    ]
)

# Now, map the augmentation pipeline to our training dataset
train_ds_simple = (
    tf.data.Dataset.from_tensor_slices((x_train, y_train))
    .shuffle(BATCH_SIZE * 100)
    .batch(BATCH_SIZE)
    .map(lambda x, y: (simple_aug(x), y), num_parallel_calls=AUTO)
    .prefetch(AUTO)
)

"""
## Visualize the dataset augmented with RandAugment
"""

sample_images, _ = next(iter(train_ds_rand))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image.numpy().astype("int"))
    plt.axis("off")

"""
You are encouraged to run the above code block a couple of times to see different
variations.
"""

"""
## Visualize the dataset augmented with `simple_aug`
"""

sample_images, _ = next(iter(train_ds_simple))
plt.figure(figsize=(10, 10))
for i, image in enumerate(sample_images[:9]):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image.numpy().astype("int"))
    plt.axis("off")

"""
## Define a model building utility function

Now, we define a CNN model that is based on the
[ResNet50V2 architecture](https://arxiv.org/abs/1603.05027). Also, notice that
the model already includes a rescaling layer. This eliminates the need to do any
separate preprocessing on our dataset and is especially useful for deployment purposes.
"""


def get_training_model():
    resnet50_v2 = keras.applications.ResNet50V2(
        weights=None,
        include_top=True,
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
        classes=10,
    )
    model = keras.Sequential(
        [
            layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
            layers.Rescaling(scale=1.0 / 127.5, offset=-1),
            resnet50_v2,
        ]
    )
    return model


get_training_model().summary()

"""
We will train this network on two different versions of our dataset:

* One augmented with RandAugment.
* Another one augmented with `simple_aug`.

Since RandAugment is known to enhance the robustness of models to common perturbations
and corruptions, we will also evaluate our models on the CIFAR-10-C dataset, proposed in
[Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261)
by Hendrycks et al. The CIFAR-10-C dataset
consists of 19 different image corruptions and perturbations (for example speckle noise,
fog, Gaussian blur, etc.), each at varying severity levels. For this example we will
be using the following configuration:
[`cifar10_corrupted/saturate_5`](https://www.tensorflow.org/datasets/catalog/cifar10_corrupted#cifar10_corruptedsaturate_5).
The images from this configuration look like so:

![](https://storage.googleapis.com/tfds-data/visualization/fig/cifar10_corrupted-saturate_5-1.0.0.png)

In the interest of reproducibility, we serialize the initial random weights of our
model.
"""

initial_model = get_training_model()
initial_model.save_weights("initial.weights.h5")

"""
## Train model with RandAugment
"""

rand_aug_model = get_training_model()
rand_aug_model.load_weights("initial.weights.h5")
rand_aug_model.compile(
    loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
rand_aug_model.fit(train_ds_rand, validation_data=test_ds, epochs=EPOCHS)
_, test_acc = rand_aug_model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_acc * 100))

"""
## Train model with `simple_aug`
"""

simple_aug_model = get_training_model()
simple_aug_model.load_weights("initial.weights.h5")
simple_aug_model.compile(
    loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
simple_aug_model.fit(train_ds_simple, validation_data=test_ds, epochs=EPOCHS)
_, test_acc = simple_aug_model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_acc * 100))

"""
## Load the CIFAR-10-C dataset and evaluate performance
"""

# Load and prepare the CIFAR-10-C dataset
# (If it's not already downloaded, it takes ~10 minutes to download)
cifar_10_c = tfds.load("cifar10_corrupted/saturate_5", split="test", as_supervised=True)
cifar_10_c = cifar_10_c.batch(BATCH_SIZE).map(
    lambda x, y: (tf.image.resize(x, (IMAGE_SIZE, IMAGE_SIZE)), y),
    num_parallel_calls=AUTO,
)

# Evaluate `rand_aug_model`
_, test_acc = rand_aug_model.evaluate(cifar_10_c, verbose=0)
print(
    "Accuracy with RandAugment on CIFAR-10-C (saturate_5): {:.2f}%".format(
        test_acc * 100
    )
)

# Evaluate `simple_aug_model`
_, test_acc = simple_aug_model.evaluate(cifar_10_c, verbose=0)
print(
    "Accuracy with simple_aug on CIFAR-10-C (saturate_5): {:.2f}%".format(
        test_acc * 100
    )
)

"""
For the purpose of this example, we trained the models for only a single epoch. On the
CIFAR-10-C dataset, the model trained with RandAugment can perform better, with a higher
accuracy (for example, 76.64% in one experiment), than the model trained with
`simple_aug` (e.g., 64.80%). RandAugment can also help stabilize the training.

In the notebook, you may notice that, at the expense of increased training time with
RandAugment, we are able to carve out far better performance on the CIFAR-10-C dataset.
You can run the same experiments with the other corruption and perturbation settings
that come with the CIFAR-10-C dataset and see if RandAugment helps.

You can also experiment with different values of `augmentations_per_image` and
`magnitude` (the `n` and `m` knobs from the paper) in the `RandAugment` object. In the
[original paper](https://arxiv.org/abs/1909.13719), the authors show
the impact of the individual augmentation transforms for a particular task and a range of
ablation studies. You are welcome to check them out.

RandAugment has been very effective at improving the robustness of deep models for
computer vision, as shown in works like [Noisy Student Training](https://arxiv.org/abs/1911.04252) and
[FixMatch](https://arxiv.org/abs/2001.07685). This makes RandAugment quite a useful
recipe for training different vision models.

You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/randaugment)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/randaugment).
"""
keras-io/examples/vision/randaugment.py/0
{ "file_path": "keras-io/examples/vision/randaugment.py", "repo_id": "keras-io", "token_count": 3423 }
99
""" Title: Video Classification with a CNN-RNN Architecture Author: [Sayak Paul](https://twitter.com/RisingSayak) Date created: 2021/05/28 Last modified: 2023/12/08 Description: Training a video classifier with transfer learning and a recurrent model on the UCF101 dataset. Accelerator: GPU """ """ This example demonstrates video classification, an important use-case with applications in recommendations, security, and so on. We will be using the [UCF101 dataset](https://www.crcv.ucf.edu/data/UCF101.php) to build our video classifier. The dataset consists of videos categorized into different actions, like cricket shot, punching, biking, etc. This dataset is commonly used to build action recognizers, which are an application of video classification. A video consists of an ordered sequence of frames. Each frame contains *spatial* information, and the sequence of those frames contains *temporal* information. To model both of these aspects, we use a hybrid architecture that consists of convolutions (for spatial processing) as well as recurrent layers (for temporal processing). Specifically, we'll use a Convolutional Neural Network (CNN) and a Recurrent Neural Network (RNN) consisting of [GRU layers](https://keras.io/api/layers/recurrent_layers/gru/). This kind of hybrid architecture is popularly known as a **CNN-RNN**. This example requires TensorFlow 2.5 or higher, as well as TensorFlow Docs, which can be installed using the following command: """ """shell pip install -q git+https://github.com/tensorflow/docs """ """ ## Data collection In order to keep the runtime of this example relatively short, we will be using a subsampled version of the original UCF101 dataset. You can refer to [this notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb) to know how the subsampling was done. """ """shell !wget -q https://github.com/sayakpaul/Action-Recognition-in-TensorFlow/releases/download/v1.0.0/ucf101_top5.tar.gz tar xf ucf101_top5.tar.gz """ """ ## Setup """ import os import keras from imutils import paths import matplotlib.pyplot as plt import pandas as pd import numpy as np import imageio import cv2 from IPython.display import Image """ ## Define hyperparameters """ IMG_SIZE = 224 BATCH_SIZE = 64 EPOCHS = 10 MAX_SEQ_LENGTH = 20 NUM_FEATURES = 2048 """ ## Data preparation """ train_df = pd.read_csv("train.csv") test_df = pd.read_csv("test.csv") print(f"Total videos for training: {len(train_df)}") print(f"Total videos for testing: {len(test_df)}") train_df.sample(10) """ One of the many challenges of training video classifiers is figuring out a way to feed the videos to a network. [This blog post](https://blog.coast.ai/five-video-classification-methods-implemented-in-keras-and-tensorflow-99cad29cc0b5) discusses five such methods. Since a video is an ordered sequence of frames, we could just extract the frames and put them in a 3D tensor. But the number of frames may differ from video to video which would prevent us from stacking them into batches (unless we use padding). As an alternative, we can **save video frames at a fixed interval until a maximum frame count is reached**. In this example we will do the following: 1. Capture the frames of a video. 2. Extract frames from the videos until a maximum frame count is reached. 3. In the case, where a video's frame count is lesser than the maximum frame count we will pad the video with zeros. 
Note that this workflow is identical to [problems involving text sequences](https://developers.google.com/machine-learning/guides/text-classification/). Videos of the UCF101 dataset are [known](https://www.crcv.ucf.edu/papers/UCF101_CRCV-TR-12-01.pdf) to not contain extreme variations in objects and actions across frames. Because of this, it may be okay to only consider a few frames for the learning task. But this approach may not generalize well to other video classification problems. We will be using [OpenCV's `VideoCapture()` method](https://docs.opencv.org/master/dd/d43/tutorial_py_video_display.html) to read frames from videos. """ # The following two methods are taken from this tutorial: # https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub def crop_center_square(frame): y, x = frame.shape[0:2] min_dim = min(y, x) start_x = (x // 2) - (min_dim // 2) start_y = (y // 2) - (min_dim // 2) return frame[start_y : start_y + min_dim, start_x : start_x + min_dim] def load_video(path, max_frames=0, resize=(IMG_SIZE, IMG_SIZE)): cap = cv2.VideoCapture(path) frames = [] try: while True: ret, frame = cap.read() if not ret: break frame = crop_center_square(frame) frame = cv2.resize(frame, resize) frame = frame[:, :, [2, 1, 0]] frames.append(frame) if len(frames) == max_frames: break finally: cap.release() return np.array(frames) """ We can use a pre-trained network to extract meaningful features from the extracted frames. The [`Keras Applications`](https://keras.io/api/applications/) module provides a number of state-of-the-art models pre-trained on the [ImageNet-1k dataset](http://image-net.org/). We will be using the [InceptionV3 model](https://arxiv.org/abs/1512.00567) for this purpose. """ def build_feature_extractor(): feature_extractor = keras.applications.InceptionV3( weights="imagenet", include_top=False, pooling="avg", input_shape=(IMG_SIZE, IMG_SIZE, 3), ) preprocess_input = keras.applications.inception_v3.preprocess_input inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3)) preprocessed = preprocess_input(inputs) outputs = feature_extractor(preprocessed) return keras.Model(inputs, outputs, name="feature_extractor") feature_extractor = build_feature_extractor() """ The labels of the videos are strings. Neural networks do not understand string values, so they must be converted to some numerical form before they are fed to the model. Here we will use the [`StringLookup`](https://keras.io/api/layers/preprocessing_layers/categorical/string_lookup) layer to encode the class labels as integers. """ label_processor = keras.layers.StringLookup( num_oov_indices=0, vocabulary=np.unique(train_df["tag"]) ) print(label_processor.get_vocabulary()) """ Finally, we can put all the pieces together to create our data processing utility. """ def prepare_all_videos(df, root_dir): num_samples = len(df) video_paths = df["video_name"].values.tolist() labels = df["tag"].values labels = keras.ops.convert_to_numpy(label_processor(labels[..., None])) # `frame_masks` and `frame_features` are what we will feed to our sequence model. # `frame_masks` will contain a bunch of booleans denoting if a timestep is # masked with padding or not. frame_masks = np.zeros(shape=(num_samples, MAX_SEQ_LENGTH), dtype="bool") frame_features = np.zeros( shape=(num_samples, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32" ) # For each video. for idx, path in enumerate(video_paths): # Gather all its frames and add a batch dimension. frames = load_video(os.path.join(root_dir, path)) frames = frames[None, ...]
# Initialize placeholders to store the masks and features of the current video. temp_frame_mask = np.zeros( shape=( 1, MAX_SEQ_LENGTH, ), dtype="bool", ) temp_frame_features = np.zeros( shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32" ) # Extract features from the frames of the current video. for i, batch in enumerate(frames): video_length = batch.shape[0] length = min(MAX_SEQ_LENGTH, video_length) for j in range(length): temp_frame_features[i, j, :] = feature_extractor.predict( batch[None, j, :], verbose=0, ) temp_frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked frame_features[idx,] = temp_frame_features.squeeze() frame_masks[idx,] = temp_frame_mask.squeeze() return (frame_features, frame_masks), labels train_data, train_labels = prepare_all_videos(train_df, "train") test_data, test_labels = prepare_all_videos(test_df, "test") print(f"Frame features in train set: {train_data[0].shape}") print(f"Frame masks in train set: {train_data[1].shape}") """ The above code block will take ~20 minutes to execute depending on the machine it's being executed. """ """ ## The sequence model Now, we can feed this data to a sequence model consisting of recurrent layers like `GRU`. """ # Utility for our sequence model. def get_sequence_model(): class_vocab = label_processor.get_vocabulary() frame_features_input = keras.Input((MAX_SEQ_LENGTH, NUM_FEATURES)) mask_input = keras.Input((MAX_SEQ_LENGTH,), dtype="bool") # Refer to the following tutorial to understand the significance of using `mask`: # https://keras.io/api/layers/recurrent_layers/gru/ x = keras.layers.GRU(16, return_sequences=True)( frame_features_input, mask=mask_input ) x = keras.layers.GRU(8)(x) x = keras.layers.Dropout(0.4)(x) x = keras.layers.Dense(8, activation="relu")(x) output = keras.layers.Dense(len(class_vocab), activation="softmax")(x) rnn_model = keras.Model([frame_features_input, mask_input], output) rnn_model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) return rnn_model # Utility for running experiments. def run_experiment(): filepath = "/tmp/video_classifier/ckpt.weights.h5" checkpoint = keras.callbacks.ModelCheckpoint( filepath, save_weights_only=True, save_best_only=True, verbose=1 ) seq_model = get_sequence_model() history = seq_model.fit( [train_data[0], train_data[1]], train_labels, validation_split=0.3, epochs=EPOCHS, callbacks=[checkpoint], ) seq_model.load_weights(filepath) _, accuracy = seq_model.evaluate([test_data[0], test_data[1]], test_labels) print(f"Test accuracy: {round(accuracy * 100, 2)}%") return history, seq_model _, sequence_model = run_experiment() """ **Note**: To keep the runtime of this example relatively short, we just used a few training examples. This number of training examples is low with respect to the sequence model being used that has 99,909 trainable parameters. You are encouraged to sample more data from the UCF101 dataset using [the notebook](https://colab.research.google.com/github/sayakpaul/Action-Recognition-in-TensorFlow/blob/main/Data_Preparation_UCF101.ipynb) mentioned above and train the same model. """ """ ## Inference """ def prepare_single_video(frames): frames = frames[None, ...] 
frame_mask = np.zeros( shape=( 1, MAX_SEQ_LENGTH, ), dtype="bool", ) frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32") for i, batch in enumerate(frames): video_length = batch.shape[0] length = min(MAX_SEQ_LENGTH, video_length) for j in range(length): frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :]) frame_mask[i, :length] = 1 # 1 = not masked, 0 = masked return frame_features, frame_mask def sequence_prediction(path): class_vocab = label_processor.get_vocabulary() frames = load_video(os.path.join("test", path)) frame_features, frame_mask = prepare_single_video(frames) probabilities = sequence_model.predict([frame_features, frame_mask])[0] for i in np.argsort(probabilities)[::-1]: print(f" {class_vocab[i]}: {probabilities[i] * 100:5.2f}%") return frames # This utility is for visualization. # Referenced from: # https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub def to_gif(images): converted_images = images.astype(np.uint8) imageio.mimsave("animation.gif", converted_images, duration=100) return Image("animation.gif") test_video = np.random.choice(test_df["video_name"].values.tolist()) print(f"Test video path: {test_video}") test_frames = sequence_prediction(test_video) to_gif(test_frames[:MAX_SEQ_LENGTH]) """ ## Next steps * In this example, we made use of transfer learning for extracting meaningful features from video frames. You could also fine-tune the pre-trained network to notice how that affects the end results. * For speed-accuracy trade-offs, you can try out other models present inside `keras.applications`. * Try different combinations of `MAX_SEQ_LENGTH` to observe how that affects the performance. * Train on a higher number of classes and see if you are able to get good performance. * Following [this tutorial](https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub), try a [pre-trained action recognition model](https://arxiv.org/abs/1705.07750) from DeepMind. * Rolling-averaging can be useful technique for video classification and it can be combined with a standard image classification model to infer on videos. [This tutorial](https://www.pyimagesearch.com/2019/07/15/video-classification-with-keras-and-deep-learning/) will help understand how to use rolling-averaging with an image classifier. * When there are variations in between the frames of a video not all the frames might be equally important to decide its category. In those situations, putting a [self-attention layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Attention) in the sequence model will likely yield better results. * Following [this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-11), you can implement Transformers-based models for processing videos. """
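"""
As an illustration of the self-attention suggestion in the list above, here is a small,
hedged sketch (not part of the original example) of an alternative sequence head. It
replaces the GRU stack with a `MultiHeadAttention` block over the precomputed frame
features; mask handling is intentionally omitted to keep the sketch short.
"""


def get_attention_model(num_heads=2, key_dim=64):
    class_vocab = label_processor.get_vocabulary()

    frame_features_input = keras.Input((MAX_SEQ_LENGTH, NUM_FEATURES))
    # Let every frame attend to every other frame, then pool over time.
    x = keras.layers.MultiHeadAttention(num_heads=num_heads, key_dim=key_dim)(
        frame_features_input, frame_features_input
    )
    x = keras.layers.GlobalAveragePooling1D()(x)
    x = keras.layers.Dropout(0.4)(x)
    x = keras.layers.Dense(8, activation="relu")(x)
    output = keras.layers.Dense(len(class_vocab), activation="softmax")(x)

    attention_model = keras.Model(frame_features_input, output)
    attention_model.compile(
        loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
    )
    return attention_model


# It can be trained on the same precomputed features, e.g.:
# get_attention_model().fit(train_data[0], train_labels, validation_split=0.3, epochs=EPOCHS)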
keras-io/examples/vision/video_classification.py/0
{ "file_path": "keras-io/examples/vision/video_classification.py", "repo_id": "keras-io", "token_count": 4964 }
100
<jupyter_start><jupyter_text>Distributed training with Keras 3**Author:** [Qianli Zhu](https://github.com/qlzh727)**Date created:** 2023/11/07**Last modified:** 2023/11/07**Description:** Complete guide to the distribution API for multi-backend Keras. IntroductionThe Keras distribution API is a new interface designed to facilitatedistributed deep learning across a variety of backends like JAX, TensorFlow andPyTorch. This powerful API introduces a suite of tools enabling data and modelparallelism, allowing for efficient scaling of deep learning models on multipleaccelerators and hosts. Whether leveraging the power of GPUs or TPUs, the APIprovides a streamlined approach to initializing distributed environments,defining device meshes, and orchestrating the layout of tensors acrosscomputational resources. Through classes like `DataParallel` and`ModelParallel`, it abstracts the complexity involved in parallel computation,making it easier for developers to accelerate their machine learningworkflows. How it worksThe Keras distribution API provides a global programming model that allowsdevelopers to compose applications that operate on tensors in a global context(as if working with a single device) whileautomatically managing distribution across many devices. The API leverages theunderlying framework (e.g. JAX) to distribute the program and tensors according to thesharding directives through a procedure called single program, multiple data(SPMD) expansion.By decoupling the application from sharding directives, the API enables runningthe same application on a single device, multiple devices, or even multipleclients, while preserving its global semantics.<jupyter_code>!pip install keras==3.0.0 --upgrade --quiet<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import os # The distribution API is only implemented for the JAX backend for now. os.environ["KERAS_BACKEND"] = "jax" import keras from keras import layers import jax import numpy as np from tensorflow import data as tf_data # For dataset input.<jupyter_output><empty_output><jupyter_text>`DeviceMesh` and `TensorLayout`The `keras.distribution.DeviceMesh` class in Keras distribution API represents a cluster ofcomputational devices configured for distributed computation. It aligns withsimilar concepts in [`jax.sharding.Mesh`](https://jax.readthedocs.io/en/latest/jax.sharding.htmljax.sharding.Mesh) and[`tf.dtensor.Mesh`](https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh),where it's used to map the physical devices to a logical mesh structure.The `TensorLayout` class then specifies how tensors are distributed across the`DeviceMesh`, detailing the sharding of tensors along specified axes thatcorrespond to the names of the axes in the `DeviceMesh`.You can find more detailed concept explainers in the[TensorFlow DTensor guide](https://www.tensorflow.org/guide/dtensor_overviewdtensors_model_of_distributed_tensors).<jupyter_code># Retrieve the local available gpu devices. devices = jax.devices("gpu") # Assume it has 8 local GPUs. # Define a 2x4 device mesh with data and model parallel axes mesh = keras.distribution.DeviceMesh( shape=(2, 4), axis_names=["data", "model"], devices=devices ) # A 2D layout, which describes how a tensor is distributed across the # mesh. The layout can be visualized as a 2D grid with "model" as rows and # "data" as columns, and it is a [4, 2] grid when it mapped to the physcial # devices on the mesh. 
layout_2d = keras.distribution.TensorLayout(axes=("model", "data"), device_mesh=mesh) # A 4D layout which could be used for data parallel of a image input. replicated_layout_4d = keras.distribution.TensorLayout( axes=("data", None, None, None), device_mesh=mesh )<jupyter_output><empty_output><jupyter_text>DistributionThe `Distribution` class in Keras serves as a foundational abstract class designedfor developing custom distribution strategies. It encapsulates the core logicneeded to distribute a model's variables, input data, and intermediatecomputations across a device mesh. As an end user, you won't have to interactdirectly with this class, but its subclasses like `DataParallel` or`ModelParallel`. DataParallelThe `DataParallel` class in the Keras distribution API is designed for thedata parallelism strategy in distributed training, where the model weights arereplicated across all devices in the `DeviceMesh`, and each device processes aportion of the input data.Here is a sample usage of this class.<jupyter_code># Create DataParallel with list of devices. # As a shortcut, the devices can be skipped, # and Keras will detect all local available devices. # E.g. data_parallel = DataParallel() data_parallel = keras.distribution.DataParallel(devices=devices) # Or you can choose to create DataParallel with a 1D `DeviceMesh`. mesh_1d = keras.distribution.DeviceMesh( shape=(8,), axis_names=["data"], devices=devices ) data_parallel = keras.distribution.DataParallel(device_mesh=mesh_1d) inputs = np.random.normal(size=(128, 28, 28, 1)) labels = np.random.normal(size=(128, 10)) dataset = tf_data.Dataset.from_tensor_slices((inputs, labels)).batch(16) # Set the global distribution. keras.distribution.set_distribution(data_parallel) # Note that all the model weights from here on are replicated to # all the devices of the `DeviceMesh`. This includes the RNG # state, optimizer states, metrics, etc. The dataset fed into `model.fit` or # `model.evaluate` will be split evenly on the batch dimension, and sent to # all the devices. You don't have to do any manual aggregration of losses, # since all the computation happens in a global context. inputs = layers.Input(shape=(28, 28, 1)) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = keras.Model(inputs=inputs, outputs=y) model.compile(loss="mse") model.fit(dataset, epochs=3) model.evaluate(dataset)<jupyter_output><empty_output><jupyter_text>`ModelParallel` and `LayoutMap``ModelParallel` will be mostly useful when model weights are too large to fiton a single accelerator. This setting allows you to spit your model weights oractivation tensors across all the devices on the `DeviceMesh`, and enable thehorizontal scaling for the large models.Unlike the `DataParallel` model where all weights are fully replicated,the weights layout under `ModelParallel` usually need some customization forbest performances. We introduce `LayoutMap` to let you specify the`TensorLayout` for any weights and intermediate tensors from global perspective.`LayoutMap` is a dict-like object that maps a string to `TensorLayout`instances. It behaves differently from a normal Python dict in that the stringkey is treated as a regex when retrieving the value. The class allows you todefine the naming schema of `TensorLayout` and then retrieve the corresponding`TensorLayout` instance. 
Typically, the key used to queryis the `variable.path` attribute, which is the identifier of the variable.As a shortcut, a tuple or list of axisnames is also allowed when inserting a value, and it will be converted to`TensorLayout`.The `LayoutMap` can also optionally contain a `DeviceMesh` to populate the`TensorLayout.device_mesh` if it is not set. When retrieving a layout with akey, and if there isn't an exact match, all existing keys in the layout map willbe treated as regex and matched against the input key again. If there aremultiple matches, a `ValueError` is raised. If no matches are found, `None` isreturned.<jupyter_code>mesh_2d = keras.distribution.DeviceMesh( shape=(2, 4), axis_names=["data", "model"], devices=devices ) layout_map = keras.distribution.LayoutMap(mesh_2d) # The rule below means that for any weights that match with d1/kernel, it # will be sharded with model dimensions (4 devices), same for the d1/bias. # All other weights will be fully replicated. layout_map["d1/kernel"] = (None, "model") layout_map["d1/bias"] = ("model",) # You can also set the layout for the layer output like layout_map["d2/output"] = ("data", None) model_parallel = keras.distribution.ModelParallel( mesh_2d, layout_map, batch_dim_name="data" ) keras.distribution.set_distribution(model_parallel) inputs = layers.Input(shape=(28, 28, 1)) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu", name="d1")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax", name="d2")(y) model = keras.Model(inputs=inputs, outputs=y) # The data will be sharded across the "data" dimension of the method, which # has 2 devices. model.compile(loss="mse") model.fit(dataset, epochs=3) model.evaluate(dataset)<jupyter_output><empty_output><jupyter_text>It is also easy to change the mesh structure to tune the computation betweenmore data parallel or model parallel. You can do this by adjusting the shape ofthe mesh. And no changes are needed for any other code.<jupyter_code>full_data_parallel_mesh = keras.distribution.DeviceMesh( shape=(8, 1), axis_names=["data", "model"], devices=devices ) more_data_parallel_mesh = keras.distribution.DeviceMesh( shape=(4, 2), axis_names=["data", "model"], devices=devices ) more_model_parallel_mesh = keras.distribution.DeviceMesh( shape=(2, 4), axis_names=["data", "model"], devices=devices ) full_model_parallel_mesh = keras.distribution.DeviceMesh( shape=(1, 8), axis_names=["data", "model"], devices=devices )<jupyter_output><empty_output>
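<jupyter_text>As a small convenience sketch (not part of the original guide), the helper below wraps the `ModelParallel` pattern shown above: it builds a 2D `DeviceMesh` for a given data/model split and activates the distribution with the provided sharding rules. It only reuses APIs that already appear in this guide; the example rules passed at the end are just an illustration.<jupyter_code>def set_model_parallel(data_size, model_size, rules, devices=devices):
    # Rows shard the batch ("data"); columns shard the weights ("model").
    mesh = keras.distribution.DeviceMesh(
        shape=(data_size, model_size), axis_names=["data", "model"], devices=devices
    )
    layout_map = keras.distribution.LayoutMap(mesh)
    for pattern, axes in rules.items():
        layout_map[pattern] = axes
    distribution = keras.distribution.ModelParallel(
        mesh, layout_map, batch_dim_name="data"
    )
    keras.distribution.set_distribution(distribution)
    return distribution


# Example: 4-way data parallel x 2-way model parallel, sharding only d1's weights.
set_model_parallel(4, 2, {"d1/kernel": (None, "model"), "d1/bias": ("model",)})<jupyter_output><empty_output>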
keras-io/guides/ipynb/distribution.ipynb/0
{ "file_path": "keras-io/guides/ipynb/distribution.ipynb", "repo_id": "keras-io", "token_count": 2713 }
101
<jupyter_start><jupyter_text>Pretraining a Transformer from scratch with KerasNLP**Author:** [Matthew Watson](https://github.com/mattdangerw/)**Date created:** 2022/04/18**Last modified:** 2023/07/15**Description:** Use KerasNLP to train a Transformer model from scratch. KerasNLP aims to make it easy to build state-of-the-art text processing models. In thisguide, we will show how library components simplify pretraining and fine-tuning aTransformer model from scratch.This guide is broken into three parts:1. *Setup*, task definition, and establishing a baseline.2. *Pretraining* a Transformer model.3. *Fine-tuning* the Transformer model on our classification task. SetupThe following guide uses Keras 3 to work in any of `tensorflow`, `jax` or`torch`. We select the `jax` backend below, which will give us a particularlyfast train step below, but feel free to mix it up.<jupyter_code>!pip install -q --upgrade keras-nlp !pip install -q --upgrade keras # Upgrade to Keras 3. import os os.environ["KERAS_BACKEND"] = "jax" # or "tensorflow" or "torch" import keras_nlp import tensorflow as tf import keras<jupyter_output><empty_output><jupyter_text>Next up, we can download two datasets.- [SST-2](https://paperswithcode.com/sota/sentiment-analysis-on-sst-2-binary) a textclassification dataset and our "end goal". This dataset is often used to benchmarklanguage models.- [WikiText-103](https://paperswithcode.com/dataset/wikitext-103): A medium sizedcollection of featured articles from English Wikipedia, which we will use forpretraining.Finally, we will download a WordPiece vocabulary, to do sub-word tokenization later on inthis guide.<jupyter_code># Download pretraining data. keras.utils.get_file( origin="https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip", extract=True, ) wiki_dir = os.path.expanduser("~/.keras/datasets/wikitext-103-raw/") # Download finetuning data. keras.utils.get_file( origin="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip", extract=True, ) sst_dir = os.path.expanduser("~/.keras/datasets/SST-2/") # Download vocabulary data. vocab_file = keras.utils.get_file( origin="https://storage.googleapis.com/tensorflow/keras-nlp/examples/bert/bert_vocab_uncased.txt", )<jupyter_output><empty_output><jupyter_text>Next, we define some hyperparameters we will use during training.<jupyter_code># Preprocessing params. PRETRAINING_BATCH_SIZE = 128 FINETUNING_BATCH_SIZE = 32 SEQ_LENGTH = 128 MASK_RATE = 0.25 PREDICTIONS_PER_SEQ = 32 # Model params. NUM_LAYERS = 3 MODEL_DIM = 256 INTERMEDIATE_DIM = 512 NUM_HEADS = 4 DROPOUT = 0.1 NORM_EPSILON = 1e-5 # Training params. PRETRAINING_LEARNING_RATE = 5e-4 PRETRAINING_EPOCHS = 8 FINETUNING_LEARNING_RATE = 5e-5 FINETUNING_EPOCHS = 3<jupyter_output><empty_output><jupyter_text>Load dataWe load our data with [tf.data](https://www.tensorflow.org/guide/data), which will allowus to define input pipelines for tokenizing and preprocessing text.<jupyter_code># Load SST-2. sst_train_ds = tf.data.experimental.CsvDataset( sst_dir + "train.tsv", [tf.string, tf.int32], header=True, field_delim="\t" ).batch(FINETUNING_BATCH_SIZE) sst_val_ds = tf.data.experimental.CsvDataset( sst_dir + "dev.tsv", [tf.string, tf.int32], header=True, field_delim="\t" ).batch(FINETUNING_BATCH_SIZE) # Load wikitext-103 and filter out short lines. 
wiki_train_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.train.raw") .filter(lambda x: tf.strings.length(x) > 100) .batch(PRETRAINING_BATCH_SIZE) ) wiki_val_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.valid.raw") .filter(lambda x: tf.strings.length(x) > 100) .batch(PRETRAINING_BATCH_SIZE) ) # Take a peek at the sst-2 dataset. print(sst_train_ds.unbatch().batch(4).take(1).get_single_element())<jupyter_output><empty_output><jupyter_text>You can see that our `SST-2` dataset contains relatively short snippets of movie review text. Our goal is to predict the sentiment of the snippet. A label of 1 indicates positive sentiment, and a label of 0 negative sentiment. Establish a baseline As a first step, we will establish a baseline of good performance. We don't actually need KerasNLP for this, we can just use core Keras layers. We will train a simple bag-of-words model, where we learn a positive or negative weight for each word in our vocabulary. A sample's score is simply the sum of the weights of all words that are present in the sample.<jupyter_code># This layer will turn our input sentence into a list of 1s and 0s the same size # as our vocabulary, indicating whether a word is present or absent. multi_hot_layer = keras.layers.TextVectorization( max_tokens=4000, output_mode="multi_hot" ) multi_hot_layer.adapt(sst_train_ds.map(lambda x, y: x)) multi_hot_ds = sst_train_ds.map(lambda x, y: (multi_hot_layer(x), y)) multi_hot_val_ds = sst_val_ds.map(lambda x, y: (multi_hot_layer(x), y)) # We then learn a logistic regression on top of that layer, and that's our entire # baseline model! inputs = keras.Input(shape=(4000,), dtype="int32") outputs = keras.layers.Dense(1, activation="sigmoid")(inputs) baseline_model = keras.Model(inputs, outputs) baseline_model.compile(loss="binary_crossentropy", metrics=["accuracy"]) baseline_model.fit(multi_hot_ds, validation_data=multi_hot_val_ds, epochs=5)<jupyter_output><empty_output><jupyter_text>A bag-of-words approach can be fast and surprisingly powerful, especially when input examples contain a large number of words. With shorter sequences, it can hit a performance ceiling. To do better, we would like to build a model that can evaluate words *in context*. Instead of evaluating each word in a void, we need to use the information contained in the *entire ordered sequence* of our input. This runs us into a problem. `SST-2` is a very small dataset, and there's simply not enough example text to attempt to build a larger, more parameterized model that can learn on a sequence. We would quickly start to overfit and memorize our training set, without any increase in our ability to generalize to unseen examples. Enter **pretraining**, which will allow us to learn on a larger corpus, and transfer our knowledge to the `SST-2` task. And enter **KerasNLP**, which will allow us to pretrain a particularly powerful model, the Transformer, with ease. Pretraining To beat our baseline, we will leverage the `WikiText103` dataset, an unlabeled collection of Wikipedia articles that is much bigger than `SST-2`. We are going to train a *transformer*, a highly expressive model which will learn to embed each word in our input as a low dimensional vector. Our Wikipedia dataset has no labels, so we will use an unsupervised training objective called the *Masked Language Modeling* (MaskedLM) objective. Essentially, we will be playing a big game of "guess the missing word". For each input sample we will obscure 25% of our input data, and train our model to predict the parts we covered up. 
Preprocess data for the MaskedLM taskOur text preprocessing for the MaskedLM task will occur in two stages.1. Tokenize input text into integer sequences of token ids.2. Mask certain positions in our input to predict on.To tokenize, we can use a `keras_nlp.tokenizers.Tokenizer` -- the KerasNLP building blockfor transforming text into sequences of integer token ids.In particular, we will use `keras_nlp.tokenizers.WordPieceTokenizer` which does*sub-word* tokenization. Sub-word tokenization is popular when training models on largetext corpora. Essentially, it allows our model to learn from uncommon words, while notrequiring a massive vocabulary of every word in our training set.The second thing we need to do is mask our input for the MaskedLM task. To do this, we can use`keras_nlp.layers.MaskedLMMaskGenerator`, which will randomly select a set of tokens in eachinput and mask them out.The tokenizer and the masking layer can both be used inside a call to[tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/Datasetmap).We can use `tf.data` to efficiently pre-compute each batch on the CPU, while our GPU or TPUworks on training with the batch that came before. Because our masking layer willchoose new words to mask each time, each epoch over our dataset will give us a totallynew set of labels to train on.<jupyter_code># Setting sequence_length will trim or pad the token outputs to shape # (batch_size, SEQ_LENGTH). tokenizer = keras_nlp.tokenizers.WordPieceTokenizer( vocabulary=vocab_file, sequence_length=SEQ_LENGTH, lowercase=True, strip_accents=True, ) # Setting mask_selection_length will trim or pad the mask outputs to shape # (batch_size, PREDICTIONS_PER_SEQ). masker = keras_nlp.layers.MaskedLMMaskGenerator( vocabulary_size=tokenizer.vocabulary_size(), mask_selection_rate=MASK_RATE, mask_selection_length=PREDICTIONS_PER_SEQ, mask_token_id=tokenizer.token_to_id("[MASK]"), ) def preprocess(inputs): inputs = tokenizer(inputs) outputs = masker(inputs) # Split the masking layer outputs into a (features, labels, and weights) # tuple that we can use with keras.Model.fit(). features = { "token_ids": outputs["token_ids"], "mask_positions": outputs["mask_positions"], } labels = outputs["mask_ids"] weights = outputs["mask_weights"] return features, labels, weights # We use prefetch() to pre-compute preprocessed batches on the fly on the CPU. pretrain_ds = wiki_train_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) pretrain_val_ds = wiki_val_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) # Preview a single input example. # The masks will change each time you run the cell. print(pretrain_val_ds.take(1).get_single_element())<jupyter_output><empty_output><jupyter_text>The above block sorts our dataset into a `(features, labels, weights)` tuple, which can bepassed directly to `keras.Model.fit()`.We have two features:1. `"token_ids"`, where some tokens have been replaced with our mask token id.2. `"mask_positions"`, which keeps track of which tokens we masked out.Our labels are simply the ids we masked out.Because not all sequences will have the same number of masks, we also keep a`sample_weight` tensor, which removes padded labels from our loss function by giving themzero weight. 
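To make the structure above concrete, here is a small sketch (not in the original guide) that pulls one preprocessed batch and prints the shape of each piece:<jupyter_code>features, labels, weights = pretrain_val_ds.take(1).get_single_element()
print("token_ids:", features["token_ids"].shape)  # (PRETRAINING_BATCH_SIZE, SEQ_LENGTH)
print("mask_positions:", features["mask_positions"].shape)  # (PRETRAINING_BATCH_SIZE, PREDICTIONS_PER_SEQ)
print("labels (mask_ids):", labels.shape)  # (PRETRAINING_BATCH_SIZE, PREDICTIONS_PER_SEQ)
print("weights (mask_weights):", weights.shape)  # (PRETRAINING_BATCH_SIZE, PREDICTIONS_PER_SEQ)<jupyter_output><empty_output><jupyter_text>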
Create the Transformer encoderKerasNLP provides all the building blocks to quickly build a Transformer encoder.We use `keras_nlp.layers.TokenAndPositionEmbedding` to first embed our input token ids.This layer simultaneously learns two embeddings -- one for words in a sentence and anotherfor integer positions in a sentence. The output embedding is simply the sum of the two.Then we can add a series of `keras_nlp.layers.TransformerEncoder` layers. These are thebread and butter of the Transformer model, using an attention mechanism to attend todifferent parts of the input sentence, followed by a multi-layer perceptron block.The output of this model will be a encoded vector per input token id. Unlike thebag-of-words model we used as a baseline, this model will embed each token accounting forthe context in which it appeared.<jupyter_code>inputs = keras.Input(shape=(SEQ_LENGTH,), dtype="int32") # Embed our tokens with a positional embedding. embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding( vocabulary_size=tokenizer.vocabulary_size(), sequence_length=SEQ_LENGTH, embedding_dim=MODEL_DIM, ) outputs = embedding_layer(inputs) # Apply layer normalization and dropout to the embedding. outputs = keras.layers.LayerNormalization(epsilon=NORM_EPSILON)(outputs) outputs = keras.layers.Dropout(rate=DROPOUT)(outputs) # Add a number of encoder blocks for i in range(NUM_LAYERS): outputs = keras_nlp.layers.TransformerEncoder( intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS, dropout=DROPOUT, layer_norm_epsilon=NORM_EPSILON, )(outputs) encoder_model = keras.Model(inputs, outputs) encoder_model.summary()<jupyter_output><empty_output><jupyter_text>Pretrain the TransformerYou can think of the `encoder_model` as it's own modular unit, it is the piece of ourmodel that we are really interested in for our downstream task. However we still need toset up the encoder to train on the MaskedLM task; to do that we attach a`keras_nlp.layers.MaskedLMHead`.This layer will take as one input the token encodings, and as another the positions wemasked out in the original input. It will gather the token encodings we masked, andtransform them back in predictions over our entire vocabulary.With that, we are ready to compile and run pretraining. If you are running this in aColab, note that this will take about an hour. Training Transformer is famously computeintensive, so even this relatively small Transformer will take some time.<jupyter_code># Create the pretraining model by attaching a masked language model head. inputs = { "token_ids": keras.Input(shape=(SEQ_LENGTH,), dtype="int32", name="token_ids"), "mask_positions": keras.Input( shape=(PREDICTIONS_PER_SEQ,), dtype="int32", name="mask_positions" ), } # Encode the tokens. encoded_tokens = encoder_model(inputs["token_ids"]) # Predict an output word for each masked input token. # We use the input token embedding to project from our encoded vectors to # vocabulary logits, which has been shown to improve training efficiency. outputs = keras_nlp.layers.MaskedLMHead( token_embedding=embedding_layer.token_embedding, activation="softmax", )(encoded_tokens, mask_positions=inputs["mask_positions"]) # Define and compile our pretraining model. pretraining_model = keras.Model(inputs, outputs) pretraining_model.compile( loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.AdamW(PRETRAINING_LEARNING_RATE), weighted_metrics=["sparse_categorical_accuracy"], jit_compile=True, ) # Pretrain the model on our wiki text dataset. 
pretraining_model.fit( pretrain_ds, validation_data=pretrain_val_ds, epochs=PRETRAINING_EPOCHS, steps_per_epoch=2, ) # Save this base model for further finetuning. encoder_model.save("encoder_model.keras")<jupyter_output><empty_output><jupyter_text>Fine-tuningAfter pretraining, we can now fine-tune our model on the `SST-2` dataset. We canleverage the ability of the encoder we build to predict on words in context to boostour performance on the downstream task. Preprocess data for classificationPreprocessing for fine-tuning is much simpler than for our pretraining MaskedLM task. We justtokenize our input sentences and we are ready for training!<jupyter_code>def preprocess(sentences, labels): return tokenizer(sentences), labels # We use prefetch() to pre-compute preprocessed batches on the fly on our CPU. finetune_ds = sst_train_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) finetune_val_ds = sst_val_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) # Preview a single input example. print(finetune_val_ds.take(1).get_single_element())<jupyter_output><empty_output><jupyter_text>Fine-tune the TransformerTo go from our encoded token output to a classification prediction, we need to attachanother "head" to our Transformer model. We can afford to be simple here. We poolthe encoded tokens together, and use a single dense layer to make a prediction.<jupyter_code># Reload the encoder model from disk so we can restart fine-tuning from scratch. encoder_model = keras.models.load_model("encoder_model.keras", compile=False) # Take as input the tokenized input. inputs = keras.Input(shape=(SEQ_LENGTH,), dtype="int32") # Encode and pool the tokens. encoded_tokens = encoder_model(inputs) pooled_tokens = keras.layers.GlobalAveragePooling1D()(encoded_tokens[0]) # Predict an output label. outputs = keras.layers.Dense(1, activation="sigmoid")(pooled_tokens) # Define and compile our fine-tuning model. finetuning_model = keras.Model(inputs, outputs) finetuning_model.compile( loss="binary_crossentropy", optimizer=keras.optimizers.AdamW(FINETUNING_LEARNING_RATE), metrics=["accuracy"], ) # Finetune the model for the SST-2 task. finetuning_model.fit( finetune_ds, validation_data=finetune_val_ds, epochs=FINETUNING_EPOCHS, steps_per_epoch=2, )<jupyter_output><empty_output>
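<jupyter_text>As a final, hedged sketch (not part of the original guide), we can try the fine-tuned classifier on a couple of raw sentences. We reuse the same `tokenizer` and read the sigmoid output as the probability of positive sentiment.<jupyter_code>sentences = tf.constant(
    [
        "an absolutely delightful and moving film",
        "a dull, lifeless script with nothing to say",
    ]
)
# Tokenize exactly as we did for the SST-2 data, then predict.
probs = finetuning_model.predict(tokenizer(sentences))
for sentence, prob in zip(sentences.numpy(), probs[:, 0]):
    print(f"{prob:.3f} positive -- {sentence.decode('utf-8')}")<jupyter_output><empty_output>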
keras-io/guides/ipynb/keras_nlp/transformer_pretraining.ipynb/0
{ "file_path": "keras-io/guides/ipynb/keras_nlp/transformer_pretraining.ipynb", "repo_id": "keras-io", "token_count": 5268 }
102
<jupyter_start><jupyter_text>Working with RNNs**Authors:** Scott Zhu, Francois Chollet**Date created:** 2019/07/08**Last modified:** 2023/07/10**Description:** Complete guide to using & customizing RNN layers. IntroductionRecurrent neural networks (RNN) are a class of neural networks that is powerful formodeling sequence data such as time series or natural language.Schematically, a RNN layer uses a `for` loop to iterate over the timesteps of asequence, while maintaining an internal state that encodes information about thetimesteps it has seen so far.The Keras RNN API is designed with a focus on:- **Ease of use**: the built-in `keras.layers.RNN`, `keras.layers.LSTM`,`keras.layers.GRU` layers enable you to quickly build recurrent models withouthaving to make difficult configuration choices.- **Ease of customization**: You can also define your own RNN cell layer (the innerpart of the `for` loop) with custom behavior, and use it with the generic`keras.layers.RNN` layer (the `for` loop itself). This allows you to quicklyprototype different research ideas in a flexible way with minimal code. Setup<jupyter_code>import numpy as np import tensorflow as tf import keras from keras import layers<jupyter_output><empty_output><jupyter_text>Built-in RNN layers: a simple example There are three built-in RNN layers in Keras:1. `keras.layers.SimpleRNN`, a fully-connected RNN where the output from previoustimestep is to be fed to next timestep.2. `keras.layers.GRU`, first proposed in[Cho et al., 2014](https://arxiv.org/abs/1406.1078).3. `keras.layers.LSTM`, first proposed in[Hochreiter & Schmidhuber, 1997](https://www.bioinf.jku.at/publications/older/2604.pdf).In early 2015, Keras had the first reusable open-source Python implementations of LSTMand GRU.Here is a simple example of a `Sequential` model that processes sequences of integers,embeds each integer into a 64-dimensional vector, then processes the sequence ofvectors using a `LSTM` layer.<jupyter_code>model = keras.Sequential() # Add an Embedding layer expecting input vocab of size 1000, and # output embedding dimension of size 64. model.add(layers.Embedding(input_dim=1000, output_dim=64)) # Add a LSTM layer with 128 internal units. model.add(layers.LSTM(128)) # Add a Dense layer with 10 units. model.add(layers.Dense(10)) model.summary()<jupyter_output><empty_output><jupyter_text>Built-in RNNs support a number of useful features:- Recurrent dropout, via the `dropout` and `recurrent_dropout` arguments- Ability to process an input sequence in reverse, via the `go_backwards` argument- Loop unrolling (which can lead to a large speedup when processing short sequences onCPU), via the `unroll` argument- ...and more.For more information, see the[RNN API documentation](https://keras.io/api/layers/recurrent_layers/). Outputs and statesBy default, the output of a RNN layer contains a single vector per sample. This vectoris the RNN cell output corresponding to the last timestep, containing informationabout the entire input sequence. The shape of this output is `(batch_size, units)`where `units` corresponds to the `units` argument passed to the layer's constructor.A RNN layer can also return the entire sequence of outputs for each sample (one vectorper timestep per sample), if you set `return_sequences=True`. 
The shape of this outputis `(batch_size, timesteps, units)`.<jupyter_code>model = keras.Sequential() model.add(layers.Embedding(input_dim=1000, output_dim=64)) # The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256) model.add(layers.GRU(256, return_sequences=True)) # The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128) model.add(layers.SimpleRNN(128)) model.add(layers.Dense(10)) model.summary()<jupyter_output><empty_output><jupyter_text>In addition, a RNN layer can return its final internal state(s). The returned statescan be used to resume the RNN execution later, or[to initialize another RNN](https://arxiv.org/abs/1409.3215).This setting is commonly used in theencoder-decoder sequence-to-sequence model, where the encoder final state is used asthe initial state of the decoder.To configure a RNN layer to return its internal state, set the `return_state` parameterto `True` when creating the layer. Note that `LSTM` has 2 state tensors, but `GRU`only has one.To configure the initial state of the layer, just call the layer with additionalkeyword argument `initial_state`.Note that the shape of the state needs to match the unit size of the layer, like in theexample below.<jupyter_code>encoder_vocab = 1000 decoder_vocab = 2000 encoder_input = layers.Input(shape=(None,)) encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)( encoder_input ) # Return states in addition to output output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")( encoder_embedded ) encoder_state = [state_h, state_c] decoder_input = layers.Input(shape=(None,)) decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)( decoder_input ) # Pass the 2 states to a new LSTM layer, as initial state decoder_output = layers.LSTM(64, name="decoder")( decoder_embedded, initial_state=encoder_state ) output = layers.Dense(10)(decoder_output) model = keras.Model([encoder_input, decoder_input], output) model.summary()<jupyter_output><empty_output><jupyter_text>RNN layers and RNN cellsIn addition to the built-in RNN layers, the RNN API also provides cell-level APIs.Unlike RNN layers, which processes whole batches of input sequences, the RNN cell onlyprocesses a single timestep.The cell is the inside of the `for` loop of a RNN layer. Wrapping a cell inside a`keras.layers.RNN` layer gives you a layer capable of processing batches ofsequences, e.g. `RNN(LSTMCell(10))`.Mathematically, `RNN(LSTMCell(10))` produces the same result as `LSTM(10)`. In fact,the implementation of this layer in TF v1.x was just creating the corresponding RNNcell and wrapping it in a RNN layer. However using the built-in `GRU` and `LSTM`layers enable the use of CuDNN and you may see better performance.There are three built-in RNN cells, each of them corresponding to the matching RNNlayer.- `keras.layers.SimpleRNNCell` corresponds to the `SimpleRNN` layer.- `keras.layers.GRUCell` corresponds to the `GRU` layer.- `keras.layers.LSTMCell` corresponds to the `LSTM` layer.The cell abstraction, together with the generic `keras.layers.RNN` class, make itvery easy to implement custom RNN architectures for your research. Cross-batch statefulnessWhen processing very long sequences (possibly infinite), you may want to use thepattern of **cross-batch statefulness**.Normally, the internal state of a RNN layer is reset every time it sees a new batch(i.e. every sample seen by the layer is assumed to be independent of the past). 
The layer will only maintain a state while processing a given sample. If you have very long sequences though, it is useful to break them into shorter sequences, and to feed these shorter sequences sequentially into a RNN layer without resetting the layer's state. That way, the layer can retain information about the entirety of the sequence, even though it's only seeing one sub-sequence at a time.

You can do this by setting `stateful=True` in the constructor.

If you have a sequence `s = [t0, t1, ... t1546, t1547]`, you would split it into e.g.

```
s1 = [t0, t1, ... t100]
s2 = [t101, ... t201]
...
s16 = [t1501, ... t1547]
```

Then you would process it via:

```python
lstm_layer = layers.LSTM(64, stateful=True)
for s in sub_sequences:
    output = lstm_layer(s)
```

When you want to clear the state, you can use `layer.reset_states()`.

> Note: In this setup, sample `i` in a given batch is assumed to be the continuation of sample `i` in the previous batch. This means that all batches should contain the same number of samples (batch size). E.g. if a batch contains `[sequence_A_from_t0_to_t100, sequence_B_from_t0_to_t100]`, the next batch should contain `[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200]`.

Here is a complete example:<jupyter_code>paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)

lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)

# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
lstm_layer.reset_states()<jupyter_output><empty_output><jupyter_text>RNN State Reuse

The recorded states of the RNN layer are not included in `layer.weights()`. If you would like to reuse the state from a RNN layer, you can retrieve the states value via `layer.states` and use it as the initial state for a new layer via the Keras functional API, like `new_layer(inputs, initial_state=layer.states)`, or via model subclassing.

Please also note that a Sequential model cannot be used in this case, since it only supports layers with a single input and output; the extra input of the initial state makes it impossible to use here.<jupyter_code>paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)

lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)

existing_state = lstm_layer.states

new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state)<jupyter_output><empty_output><jupyter_text>Bidirectional RNNs

For sequences other than time series (e.g. text), it is often the case that a RNN model can perform better if it not only processes the sequence from start to end, but also backwards. For example, to predict the next word in a sentence, it is often useful to have the context around the word, not just the words that come before it.

Keras provides an easy API for you to build such bidirectional RNNs: the `keras.layers.Bidirectional` wrapper.<jupyter_code>model = keras.Sequential()

model.add(
    layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10))
)
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(10))

model.summary()<jupyter_output><empty_output><jupyter_text>Under the hood, `Bidirectional` will copy the RNN layer passed in, and flip the `go_backwards` field of the newly copied layer, so that it will process the inputs in reverse order.

The output of the `Bidirectional` RNN will be, by default, the concatenation of the forward layer output and the backward layer output. If you need a different merging behavior, e.g. summation, change the `merge_mode` parameter in the `Bidirectional` wrapper constructor. For more details about `Bidirectional`, please check [the API docs](https://keras.io/api/layers/recurrent_layers/bidirectional/).

Performance optimization and CuDNN kernels

In TensorFlow 2.0, the built-in LSTM and GRU layers have been updated to leverage CuDNN kernels by default when a GPU is available. With this change, the prior `keras.layers.CuDNNLSTM/CuDNNGRU` layers have been deprecated, and you can build your model without worrying about the hardware it will run on.

Since the CuDNN kernel is built with certain assumptions, this means the layer **will not be able to use the CuDNN kernel if you change the defaults of the built-in LSTM or GRU layers**. E.g.:

- Changing the `activation` function from `tanh` to something else.
- Changing the `recurrent_activation` function from `sigmoid` to something else.
- Using `recurrent_dropout` > 0.
- Setting `unroll` to True, which forces LSTM/GRU to decompose the inner `tf.while_loop` into an unrolled `for` loop.
- Setting `use_bias` to False.
- Using masking when the input data is not strictly right padded (if the mask corresponds to strictly right padded data, CuDNN can still be used. This is the most common case).

For the detailed list of constraints, please see the documentation for the [LSTM](https://keras.io/api/layers/recurrent_layers/lstm/) and [GRU](https://keras.io/api/layers/recurrent_layers/gru/) layers.

Using CuDNN kernels when available

Let's build a simple LSTM model to demonstrate the performance difference. We'll use as input sequences the sequence of rows of MNIST digits (treating each row of pixels as a timestep), and we'll predict the digit's label.<jupyter_code>batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28

units = 64
output_size = 10  # labels are from 0 to 9


# Build the RNN model
def build_model(allow_cudnn_kernel=True):
    # CuDNN is only available at the layer level, and not at the cell level.
    # This means `LSTM(units)` will use the CuDNN kernel,
    # while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
    if allow_cudnn_kernel:
        # The LSTM layer with default options uses CuDNN.
        lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))
    else:
        # Wrapping a LSTMCell in a RNN layer will not use CuDNN.
        lstm_layer = keras.layers.RNN(
            keras.layers.LSTMCell(units), input_shape=(None, input_dim)
        )
    model = keras.models.Sequential(
        [
            lstm_layer,
            keras.layers.BatchNormalization(),
            keras.layers.Dense(output_size),
        ]
    )
    return model<jupyter_output><empty_output><jupyter_text>Let's load the MNIST dataset:<jupyter_code>mnist = keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]<jupyter_output><empty_output><jupyter_text>Let's create a model instance and train it.

We choose `sparse_categorical_crossentropy` as the loss function for the model. The output of the model has a shape of `[batch_size, 10]`. The target for the model is an integer vector, where each integer is in the range of 0 to 9.<jupyter_code>model = build_model(allow_cudnn_kernel=True)

model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="sgd",
    metrics=["accuracy"],
)

model.fit(
    x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)<jupyter_output><empty_output><jupyter_text>Now, let's compare to a model that does not use the CuDNN kernel:<jupyter_code>noncudnn_model = build_model(allow_cudnn_kernel=False)
noncudnn_model.set_weights(model.get_weights())
noncudnn_model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="sgd",
    metrics=["accuracy"],
)
noncudnn_model.fit(
    x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)<jupyter_output><empty_output><jupyter_text>When running on a machine with an NVIDIA GPU and CuDNN installed, the model built with CuDNN is much faster to train compared to the model that uses the regular TensorFlow kernel.

The same CuDNN-enabled model can also be used to run inference in a CPU-only environment. The `tf.device` annotation below is just forcing the device placement. The model will run on CPU by default if no GPU is available.

You simply don't have to worry about the hardware you're running on anymore. Isn't that pretty cool?<jupyter_code>import matplotlib.pyplot as plt

with tf.device("CPU:0"):
    cpu_model = build_model(allow_cudnn_kernel=True)
    cpu_model.set_weights(model.get_weights())
    result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
    print(
        "Predicted result is: %s, target result is: %s"
        % (result.numpy(), sample_label)
    )
    plt.imshow(sample, cmap=plt.get_cmap("gray"))<jupyter_output><empty_output><jupyter_text>RNNs with list/dict inputs, or nested inputs

Nested structures allow implementers to include more information within a single timestep. For example, a video frame could have audio and video input at the same time. The data shape in this case could be:

`[batch, timestep, {"video": [height, width, channel], "audio": [frequency]}]`

In another example, handwriting data could have both coordinates x and y for the current position of the pen, as well as pressure information. So the data representation could be:

`[batch, timestep, {"location": [x, y], "pressure": [force]}]`

The following code provides an example of how to build a custom RNN cell that accepts such structured inputs.

Define a custom cell that supports nested input/output

See [Making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/) for details on writing your own layers.<jupyter_code>@keras.saving.register_keras_serializable()
class NestedCell(keras.layers.Layer):
    def __init__(self, unit_1, unit_2, unit_3, **kwargs):
        self.unit_1 = unit_1
        self.unit_2 = unit_2
        self.unit_3 = unit_3
        self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
        self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
        super().__init__(**kwargs)

    def build(self, input_shapes):
        # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
        i1 = input_shapes[0][1]
        i2 = input_shapes[1][1]
        i3 = input_shapes[1][2]

        self.kernel_1 = self.add_weight(
            shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
        )
        self.kernel_2_3 = self.add_weight(
            shape=(i2, i3, self.unit_2, self.unit_3),
            initializer="uniform",
            name="kernel_2_3",
        )

    def call(self, inputs, states):
        # inputs should be in [(batch, input_1), (batch, input_2, input_3)]
        # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
        input_1, input_2 = tf.nest.flatten(inputs)
        s1, s2 = states

        output_1 = tf.matmul(input_1, self.kernel_1)
        output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)
        state_1 = s1 + output_1
        state_2_3 = s2 + output_2_3

        output = (output_1, output_2_3)
        new_states = (state_1, state_2_3)

        return output, new_states

    def get_config(self):
        return {"unit_1": self.unit_1, "unit_2": self.unit_2, "unit_3": self.unit_3}<jupyter_output><empty_output><jupyter_text>Build a RNN model with nested input/output

Let's build a Keras model that uses a `keras.layers.RNN` layer and the custom cell we just defined.<jupyter_code>unit_1 = 10
unit_2 = 20
unit_3 = 30

i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50

cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)

input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))

outputs = rnn((input_1, input_2))

model = keras.models.Model([input_1, input_2], outputs)

model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>Train the model with randomly generated data

Since there isn't a good candidate dataset for this model, we use random Numpy data for demonstration.<jupyter_code>input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]

model.fit(input_data, target_data, batch_size=batch_size)<jupyter_output><empty_output>
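<jupyter_text>As a quick sanity check, you could also run inference on the same nested inputs (a minimal usage sketch, not required for the workflow above): `model.predict()` returns the two nested outputs, one per entry of the cell's `output_size`.<jupyter_code># Usage sketch: the model returns one array per nested output.
predictions = model.predict(input_data)
print(predictions[0].shape)  # (num_samples, unit_1)
print(predictions[1].shape)  # (num_samples, unit_2, unit_3)<jupyter_output><empty_output>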
keras-io/guides/ipynb/working_with_rnns.ipynb/0
{ "file_path": "keras-io/guides/ipynb/working_with_rnns.ipynb", "repo_id": "keras-io", "token_count": 6567 }
103
""" Title: Pretraining a Transformer from scratch with KerasNLP Author: [Matthew Watson](https://github.com/mattdangerw/) Date created: 2022/04/18 Last modified: 2023/07/15 Description: Use KerasNLP to train a Transformer model from scratch. Accelerator: GPU Converted to Keras 3 by: [Anshuman Mishra](https://github.com/shivance) """ """ KerasNLP aims to make it easy to build state-of-the-art text processing models. In this guide, we will show how library components simplify pretraining and fine-tuning a Transformer model from scratch. This guide is broken into three parts: 1. *Setup*, task definition, and establishing a baseline. 2. *Pretraining* a Transformer model. 3. *Fine-tuning* the Transformer model on our classification task. """ """ ## Setup The following guide uses Keras 3 to work in any of `tensorflow`, `jax` or `torch`. We select the `jax` backend below, which will give us a particularly fast train step below, but feel free to mix it up. """ """shell pip install -q --upgrade keras-nlp pip install -q --upgrade keras # Upgrade to Keras 3. """ import os os.environ["KERAS_BACKEND"] = "jax" # or "tensorflow" or "torch" import keras_nlp import tensorflow as tf import keras """ Next up, we can download two datasets. - [SST-2](https://paperswithcode.com/sota/sentiment-analysis-on-sst-2-binary) a text classification dataset and our "end goal". This dataset is often used to benchmark language models. - [WikiText-103](https://paperswithcode.com/dataset/wikitext-103): A medium sized collection of featured articles from English Wikipedia, which we will use for pretraining. Finally, we will download a WordPiece vocabulary, to do sub-word tokenization later on in this guide. """ # Download pretraining data. keras.utils.get_file( origin="https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip", extract=True, ) wiki_dir = os.path.expanduser("~/.keras/datasets/wikitext-103-raw/") # Download finetuning data. keras.utils.get_file( origin="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip", extract=True, ) sst_dir = os.path.expanduser("~/.keras/datasets/SST-2/") # Download vocabulary data. vocab_file = keras.utils.get_file( origin="https://storage.googleapis.com/tensorflow/keras-nlp/examples/bert/bert_vocab_uncased.txt", ) """ Next, we define some hyperparameters we will use during training. """ # Preprocessing params. PRETRAINING_BATCH_SIZE = 128 FINETUNING_BATCH_SIZE = 32 SEQ_LENGTH = 128 MASK_RATE = 0.25 PREDICTIONS_PER_SEQ = 32 # Model params. NUM_LAYERS = 3 MODEL_DIM = 256 INTERMEDIATE_DIM = 512 NUM_HEADS = 4 DROPOUT = 0.1 NORM_EPSILON = 1e-5 # Training params. PRETRAINING_LEARNING_RATE = 5e-4 PRETRAINING_EPOCHS = 8 FINETUNING_LEARNING_RATE = 5e-5 FINETUNING_EPOCHS = 3 """ ### Load data We load our data with [tf.data](https://www.tensorflow.org/guide/data), which will allow us to define input pipelines for tokenizing and preprocessing text. """ # Load SST-2. sst_train_ds = tf.data.experimental.CsvDataset( sst_dir + "train.tsv", [tf.string, tf.int32], header=True, field_delim="\t" ).batch(FINETUNING_BATCH_SIZE) sst_val_ds = tf.data.experimental.CsvDataset( sst_dir + "dev.tsv", [tf.string, tf.int32], header=True, field_delim="\t" ).batch(FINETUNING_BATCH_SIZE) # Load wikitext-103 and filter out short lines. 
wiki_train_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.train.raw") .filter(lambda x: tf.strings.length(x) > 100) .batch(PRETRAINING_BATCH_SIZE) ) wiki_val_ds = ( tf.data.TextLineDataset(wiki_dir + "wiki.valid.raw") .filter(lambda x: tf.strings.length(x) > 100) .batch(PRETRAINING_BATCH_SIZE) ) # Take a peek at the SST-2 dataset. print(sst_train_ds.unbatch().batch(4).take(1).get_single_element()) """ You can see that our `SST-2` dataset contains relatively short snippets of movie review text. Our goal is to predict the sentiment of the snippet. A label of 1 indicates positive sentiment, and a label of 0 negative sentiment. """ """ ### Establish a baseline As a first step, we will establish a baseline of good performance. We don't actually need KerasNLP for this; we can just use core Keras layers. We will train a simple bag-of-words model, where we learn a positive or negative weight for each word in our vocabulary. A sample's score is simply the sum of the weights of all words that are present in the sample. """ # This layer will turn our input sentence into a list of 1s and 0s the same size # as our vocabulary, indicating whether a word is present or absent. multi_hot_layer = keras.layers.TextVectorization( max_tokens=4000, output_mode="multi_hot" ) multi_hot_layer.adapt(sst_train_ds.map(lambda x, y: x)) multi_hot_ds = sst_train_ds.map(lambda x, y: (multi_hot_layer(x), y)) multi_hot_val_ds = sst_val_ds.map(lambda x, y: (multi_hot_layer(x), y)) # We then learn a logistic regression over that layer, and that's our entire # baseline model! inputs = keras.Input(shape=(4000,), dtype="int32") outputs = keras.layers.Dense(1, activation="sigmoid")(inputs) baseline_model = keras.Model(inputs, outputs) baseline_model.compile(loss="binary_crossentropy", metrics=["accuracy"]) baseline_model.fit(multi_hot_ds, validation_data=multi_hot_val_ds, epochs=5) """ A bag-of-words approach can be fast and surprisingly powerful, especially when input examples contain a large number of words. With shorter sequences, it can hit a performance ceiling. To do better, we would like to build a model that can evaluate words *in context*. Instead of evaluating each word in a void, we need to use the information contained in the *entire ordered sequence* of our input. This runs us into a problem. `SST-2` is a very small dataset, and there's simply not enough example text to attempt to build a larger, more parameterized model that can learn on a sequence. We would quickly start to overfit and memorize our training set, without any increase in our ability to generalize to unseen examples. Enter **pretraining**, which will allow us to learn on a larger corpus, and transfer our knowledge to the `SST-2` task. And enter **KerasNLP**, which will allow us to pretrain a particularly powerful model, the Transformer, with ease. """ """ ## Pretraining To beat our baseline, we will leverage the `WikiText103` dataset, an unlabeled collection of Wikipedia articles that is much bigger than `SST-2`. We are going to train a *transformer*, a highly expressive model which will learn to embed each word in our input as a low dimensional vector. Our Wikipedia dataset has no labels, so we will use an unsupervised training objective called the *Masked Language Modeling* (MaskedLM) objective. Essentially, we will be playing a big game of "guess the missing word". For each input sample we will obscure 25% of our input data, and train our model to predict the parts we covered up. 
""" """ ### Preprocess data for the MaskedLM task Our text preprocessing for the MaskedLM task will occur in two stages. 1. Tokenize input text into integer sequences of token ids. 2. Mask certain positions in our input to predict on. To tokenize, we can use a `keras_nlp.tokenizers.Tokenizer` -- the KerasNLP building block for transforming text into sequences of integer token ids. In particular, we will use `keras_nlp.tokenizers.WordPieceTokenizer` which does *sub-word* tokenization. Sub-word tokenization is popular when training models on large text corpora. Essentially, it allows our model to learn from uncommon words, while not requiring a massive vocabulary of every word in our training set. The second thing we need to do is mask our input for the MaskedLM task. To do this, we can use `keras_nlp.layers.MaskedLMMaskGenerator`, which will randomly select a set of tokens in each input and mask them out. The tokenizer and the masking layer can both be used inside a call to [tf.data.Dataset.map](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#map). We can use `tf.data` to efficiently pre-compute each batch on the CPU, while our GPU or TPU works on training with the batch that came before. Because our masking layer will choose new words to mask each time, each epoch over our dataset will give us a totally new set of labels to train on. """ # Setting sequence_length will trim or pad the token outputs to shape # (batch_size, SEQ_LENGTH). tokenizer = keras_nlp.tokenizers.WordPieceTokenizer( vocabulary=vocab_file, sequence_length=SEQ_LENGTH, lowercase=True, strip_accents=True, ) # Setting mask_selection_length will trim or pad the mask outputs to shape # (batch_size, PREDICTIONS_PER_SEQ). masker = keras_nlp.layers.MaskedLMMaskGenerator( vocabulary_size=tokenizer.vocabulary_size(), mask_selection_rate=MASK_RATE, mask_selection_length=PREDICTIONS_PER_SEQ, mask_token_id=tokenizer.token_to_id("[MASK]"), ) def preprocess(inputs): inputs = tokenizer(inputs) outputs = masker(inputs) # Split the masking layer outputs into a (features, labels, and weights) # tuple that we can use with keras.Model.fit(). features = { "token_ids": outputs["token_ids"], "mask_positions": outputs["mask_positions"], } labels = outputs["mask_ids"] weights = outputs["mask_weights"] return features, labels, weights # We use prefetch() to pre-compute preprocessed batches on the fly on the CPU. pretrain_ds = wiki_train_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) pretrain_val_ds = wiki_val_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) # Preview a single input example. # The masks will change each time you run the cell. print(pretrain_val_ds.take(1).get_single_element()) """ The above block sorts our dataset into a `(features, labels, weights)` tuple, which can be passed directly to `keras.Model.fit()`. We have two features: 1. `"token_ids"`, where some tokens have been replaced with our mask token id. 2. `"mask_positions"`, which keeps track of which tokens we masked out. Our labels are simply the ids we masked out. Because not all sequences will have the same number of masks, we also keep a `sample_weight` tensor, which removes padded labels from our loss function by giving them zero weight. """ """ ### Create the Transformer encoder KerasNLP provides all the building blocks to quickly build a Transformer encoder. We use `keras_nlp.layers.TokenAndPositionEmbedding` to first embed our input token ids. 
This layer simultaneously learns two embeddings -- one for words in a sentence and another for integer positions in a sentence. The output embedding is simply the sum of the two. Then we can add a series of `keras_nlp.layers.TransformerEncoder` layers. These are the bread and butter of the Transformer model, using an attention mechanism to attend to different parts of the input sentence, followed by a multi-layer perceptron block. The output of this model will be an encoded vector per input token id. Unlike the bag-of-words model we used as a baseline, this model will embed each token accounting for the context in which it appeared. """ inputs = keras.Input(shape=(SEQ_LENGTH,), dtype="int32") # Embed our tokens with a positional embedding. embedding_layer = keras_nlp.layers.TokenAndPositionEmbedding( vocabulary_size=tokenizer.vocabulary_size(), sequence_length=SEQ_LENGTH, embedding_dim=MODEL_DIM, ) outputs = embedding_layer(inputs) # Apply layer normalization and dropout to the embedding. outputs = keras.layers.LayerNormalization(epsilon=NORM_EPSILON)(outputs) outputs = keras.layers.Dropout(rate=DROPOUT)(outputs) # Add a number of encoder blocks for i in range(NUM_LAYERS): outputs = keras_nlp.layers.TransformerEncoder( intermediate_dim=INTERMEDIATE_DIM, num_heads=NUM_HEADS, dropout=DROPOUT, layer_norm_epsilon=NORM_EPSILON, )(outputs) encoder_model = keras.Model(inputs, outputs) encoder_model.summary() """ ### Pretrain the Transformer You can think of the `encoder_model` as its own modular unit; it is the piece of our model that we are really interested in for our downstream task. However, we still need to set up the encoder to train on the MaskedLM task; to do that we attach a `keras_nlp.layers.MaskedLMHead`. This layer will take as one input the token encodings, and as another the positions we masked out in the original input. It will gather the token encodings we masked, and transform them back into predictions over our entire vocabulary. With that, we are ready to compile and run pretraining. If you are running this in a Colab, note that this will take about an hour. Training Transformers is famously compute-intensive, so even this relatively small Transformer will take some time. """ # Create the pretraining model by attaching a masked language model head. inputs = { "token_ids": keras.Input(shape=(SEQ_LENGTH,), dtype="int32", name="token_ids"), "mask_positions": keras.Input( shape=(PREDICTIONS_PER_SEQ,), dtype="int32", name="mask_positions" ), } # Encode the tokens. encoded_tokens = encoder_model(inputs["token_ids"]) # Predict an output word for each masked input token. # We use the input token embedding to project from our encoded vectors to # vocabulary logits, which has been shown to improve training efficiency. outputs = keras_nlp.layers.MaskedLMHead( token_embedding=embedding_layer.token_embedding, activation="softmax", )(encoded_tokens, mask_positions=inputs["mask_positions"]) # Define and compile our pretraining model. pretraining_model = keras.Model(inputs, outputs) pretraining_model.compile( loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.AdamW(PRETRAINING_LEARNING_RATE), weighted_metrics=["sparse_categorical_accuracy"], jit_compile=True, ) # Pretrain the model on our wiki text dataset. pretraining_model.fit( pretrain_ds, validation_data=pretrain_val_ds, epochs=PRETRAINING_EPOCHS, ) # Save this base model for further finetuning. encoder_model.save("encoder_model.keras") """ ## Fine-tuning After pretraining, we can now fine-tune our model on the `SST-2` dataset. 
We can leverage the ability of the encoder we build to predict on words in context to boost our performance on the downstream task. """ """ ### Preprocess data for classification Preprocessing for fine-tuning is much simpler than for our pretraining MaskedLM task. We just tokenize our input sentences and we are ready for training! """ def preprocess(sentences, labels): return tokenizer(sentences), labels # We use prefetch() to pre-compute preprocessed batches on the fly on our CPU. finetune_ds = sst_train_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) finetune_val_ds = sst_val_ds.map( preprocess, num_parallel_calls=tf.data.AUTOTUNE ).prefetch(tf.data.AUTOTUNE) # Preview a single input example. print(finetune_val_ds.take(1).get_single_element()) """ ### Fine-tune the Transformer To go from our encoded token output to a classification prediction, we need to attach another "head" to our Transformer model. We can afford to be simple here. We pool the encoded tokens together, and use a single dense layer to make a prediction. """ # Reload the encoder model from disk so we can restart fine-tuning from scratch. encoder_model = keras.models.load_model("encoder_model.keras", compile=False) # Take as input the tokenized input. inputs = keras.Input(shape=(SEQ_LENGTH,), dtype="int32") # Encode and pool the tokens. encoded_tokens = encoder_model(inputs) pooled_tokens = keras.layers.GlobalAveragePooling1D()(encoded_tokens[0]) # Predict an output label. outputs = keras.layers.Dense(1, activation="sigmoid")(pooled_tokens) # Define and compile our fine-tuning model. finetuning_model = keras.Model(inputs, outputs) finetuning_model.compile( loss="binary_crossentropy", optimizer=keras.optimizers.AdamW(FINETUNING_LEARNING_RATE), metrics=["accuracy"], ) # Finetune the model for the SST-2 task. finetuning_model.fit( finetune_ds, validation_data=finetune_val_ds, epochs=FINETUNING_EPOCHS, ) """ Pretraining was enough to boost our performance to 84%, and this is hardly the ceiling for Transformer models. You may have noticed during pretraining that our validation performance was still steadily increasing. Our model is still significantly undertrained. Training for more epochs, training a large Transformer, and training on more unlabeled text would all continue to boost performance significantly. One of the key goals of KerasNLP is to provide a modular approach to NLP model building. We have shown one approach to building a Transformer here, but KerasNLP supports an ever growing array of components for preprocessing text and building models. We hope it makes it easier to experiment on solutions to your natural language problems. """
keras-io/guides/keras_nlp/transformer_pretraining.py/0
{ "file_path": "keras-io/guides/keras_nlp/transformer_pretraining.py", "repo_id": "keras-io", "token_count": 5425 }
104
# Multi-GPU distributed training with PyTorch **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2023/06/29<br> **Last modified:** 2023/06/29<br> **Description:** Guide to multi-GPU training for Keras models with PyTorch. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/distributed_training_with_torch.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/distributed_training_with_torch.py) --- ## Introduction There are generally two ways to distribute computation across multiple devices: **Data parallelism**, where a single model gets replicated on multiple devices or multiple machines. Each of them processes different batches of data, then they merge their results. There exist many variants of this setup, that differ in how the different model replicas merge results, in whether they stay in sync at every batch or whether they are more loosely coupled, etc. **Model parallelism**, where different parts of a single model run on different devices, processing a single batch of data together. This works best with models that have a naturally-parallel architecture, such as models that feature multiple branches. This guide focuses on data parallelism, in particular **synchronous data parallelism**, where the different replicas of the model stay in sync after each batch they process. Synchronicity keeps the model convergence behavior identical to what you would see for single-device training. Specifically, this guide teaches you how to use PyTorch's `DistributedDataParallel` module wrapper to train Keras, with minimal changes to your code, on multiple GPUs (typically 2 to 16) installed on a single machine (single host, multi-device training). This is the most common setup for researchers and small-scale industry workflows. --- ## Setup Let's start by defining the function that creates the model that we will train, and the function that creates the dataset we will train on (MNIST in this case). ```python import os os.environ["KERAS_BACKEND"] = "torch" import torch import numpy as np import keras def get_model(): # Make a simple convnet with batch normalization and dropout. 
inputs = keras.Input(shape=(28, 28, 1)) x = keras.layers.Rescaling(1.0 / 255.0)(inputs) x = keras.layers.Conv2D(filters=12, kernel_size=3, padding="same", use_bias=False)( x ) x = keras.layers.BatchNormalization(scale=False, center=True)(x) x = keras.layers.ReLU()(x) x = keras.layers.Conv2D( filters=24, kernel_size=6, use_bias=False, strides=2, )(x) x = keras.layers.BatchNormalization(scale=False, center=True)(x) x = keras.layers.ReLU()(x) x = keras.layers.Conv2D( filters=32, kernel_size=6, padding="same", strides=2, name="large_k", )(x) x = keras.layers.BatchNormalization(scale=False, center=True)(x) x = keras.layers.ReLU()(x) x = keras.layers.GlobalAveragePooling2D()(x) x = keras.layers.Dense(256, activation="relu")(x) x = keras.layers.Dropout(0.5)(x) outputs = keras.layers.Dense(10)(x) model = keras.Model(inputs, outputs) return model def get_dataset(): # Load the data and split it between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") x_test = x_test.astype("float32") # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) # Create a TensorDataset dataset = torch.utils.data.TensorDataset( torch.from_numpy(x_train), torch.from_numpy(y_train) ) return dataset ``` Next, let's define a simple PyTorch training loop that targets a GPU (note the calls to `.cuda()`). ```python def train_model(model, dataloader, num_epochs, optimizer, loss_fn): for epoch in range(num_epochs): running_loss = 0.0 running_loss_count = 0 for batch_idx, (inputs, targets) in enumerate(dataloader): inputs = inputs.cuda(non_blocking=True) targets = targets.cuda(non_blocking=True) # Forward pass outputs = model(inputs) loss = loss_fn(outputs, targets) # Backward and optimize optimizer.zero_grad() loss.backward() optimizer.step() running_loss += loss.item() running_loss_count += 1 # Print loss statistics print( f"Epoch {epoch + 1}/{num_epochs}, " f"Loss: {running_loss / running_loss_count}" ) ``` --- ## Single-host, multi-device synchronous training In this setup, you have one machine with several GPUs on it (typically 2 to 16). Each device will run a copy of your model (called a **replica**). For simplicity, in what follows, we'll assume we're dealing with 8 GPUs, at no loss of generality. **How it works** At each step of training: - The current batch of data (called **global batch**) is split into 8 different sub-batches (called **local batches**). For instance, if the global batch has 512 samples, each of the 8 local batches will have 64 samples. - Each of the 8 replicas independently processes a local batch: they run a forward pass, then a backward pass, outputting the gradient of the weights with respect to the loss of the model on the local batch. - The weight updates originating from local gradients are efficiently merged across the 8 replicas. Because this is done at the end of every step, the replicas always stay in sync. In practice, the process of synchronously updating the weights of the model replicas is handled at the level of each individual weight variable. This is done through a **mirrored variable** object. **How to use it** To do single-host, multi-device synchronous training with a Keras model, you would use the `torch.nn.parallel.DistributedDataParallel` module wrapper. Here's how it works: - We use `torch.multiprocessing.start_processes` to start multiple Python processes, one per device. 
Each process will run the `per_device_launch_fn` function. - The `per_device_launch_fn` function does the following: - It uses `torch.distributed.init_process_group` and `torch.cuda.set_device` to configure the device to be used for that process. - It uses `torch.utils.data.distributed.DistributedSampler` and `torch.utils.data.DataLoader` to turn our data into a distributed data loader. - It also uses `torch.nn.parallel.DistributedDataParallel` to turn our model into a distributed PyTorch module. - It then calls the `train_model` function. - The `train_model` function will then run in each process, with the model using a separate device in each process. Here's the flow, where each step is split into its own utility function: ```python # Config num_gpu = torch.cuda.device_count() num_epochs = 2 batch_size = 64 print(f"Running on {num_gpu} GPUs") def setup_device(current_gpu_index, num_gpus): # Device setup os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "56492" device = torch.device("cuda:{}".format(current_gpu_index)) torch.distributed.init_process_group( backend="nccl", init_method="env://", world_size=num_gpus, rank=current_gpu_index, ) torch.cuda.set_device(device) def cleanup(): torch.distributed.destroy_process_group() def prepare_dataloader(dataset, current_gpu_index, num_gpus, batch_size): sampler = torch.utils.data.distributed.DistributedSampler( dataset, num_replicas=num_gpus, rank=current_gpu_index, shuffle=False, ) dataloader = torch.utils.data.DataLoader( dataset, sampler=sampler, batch_size=batch_size, shuffle=False, ) return dataloader def per_device_launch_fn(current_gpu_index, num_gpu): # Setup the process groups setup_device(current_gpu_index, num_gpu) dataset = get_dataset() model = get_model() # prepare the dataloader dataloader = prepare_dataloader(dataset, current_gpu_index, num_gpu, batch_size) # Instantiate the torch optimizer optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) # Instantiate the torch loss function loss_fn = torch.nn.CrossEntropyLoss() # Put model on device model = model.to(current_gpu_index) ddp_model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[current_gpu_index], output_device=current_gpu_index ) train_model(ddp_model, dataloader, num_epochs, optimizer, loss_fn) cleanup() ``` <div class="k-default-codeblock"> ``` Running on 0 GPUs /opt/conda/envs/keras-torch/lib/python3.10/site-packages/torch/cuda/__init__.py:611: UserWarning: Can't initialize NVML warnings.warn("Can't initialize NVML") ``` </div> Time to start multiple processes: ```python if __name__ == "__main__": # We use the "fork" method rather than "spawn" to support notebooks torch.multiprocessing.start_processes( per_device_launch_fn, args=(num_gpu,), nprocs=num_gpu, join=True, start_method="fork", ) ``` That's it!
keras-io/guides/md/distributed_training_with_torch.md/0
{ "file_path": "keras-io/guides/md/distributed_training_with_torch.md", "repo_id": "keras-io", "token_count": 3480 }
105
# Tune hyperparameters in your custom training loop **Authors:** Tom O'Malley, Haifeng Jin<br> **Date created:** 2019/10/28<br> **Last modified:** 2022/01/12<br> **Description:** Use `HyperModel.fit()` to tune training hyperparameters (such as batch size). <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_tuner/custom_tuner.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_tuner/custom_tuner.py) ```python !pip install keras-tuner -q ``` --- ## Introduction The `HyperModel` class in KerasTuner provides a convenient way to define your search space in a reusable object. You can override `HyperModel.build()` to define and hypertune the model itself. To hypertune the training process (e.g. by selecting the proper batch size, number of training epochs, or data augmentation setup), you can override `HyperModel.fit()`, where you can access: - The `hp` object, which is an instance of `keras_tuner.HyperParameters` - The model built by `HyperModel.build()` A basic example is shown in the "tune model training" section of [Getting Started with KerasTuner](https://keras.io/guides/keras_tuner/getting_started/#tune-model-training). --- ## Tuning the custom training loop In this guide, we will subclass the `HyperModel` class and write a custom training loop by overriding `HyperModel.fit()`. For how to write a custom training loop with Keras, you can refer to the guide [Writing a training loop from scratch](https://keras.io/guides/writing_a_training_loop_from_scratch/). First, we import the libraries we need, and we create datasets for training and validation. Here, we just use some random data for demonstration purposes. ```python import keras_tuner import tensorflow as tf import keras import numpy as np x_train = np.random.rand(1000, 28, 28, 1) y_train = np.random.randint(0, 10, (1000, 1)) x_val = np.random.rand(1000, 28, 28, 1) y_val = np.random.randint(0, 10, (1000, 1)) ``` Then, we subclass the `HyperModel` class as `MyHyperModel`. In `MyHyperModel.build()`, we build a simple Keras model to do image classification for 10 different classes. `MyHyperModel.fit()` accepts several arguments. Its signature is shown below: ```python def fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs): ``` * The `hp` argument is for defining the hyperparameters. * The `model` argument is the model returned by `MyHyperModel.build()`. * `x`, `y`, and `validation_data` are all custom-defined arguments. We will pass our data to them by calling `tuner.search(x=x, y=y, validation_data=(x_val, y_val))` later. You can define any number of them and give custom names. * The `callbacks` argument was intended to be used with `model.fit()`. KerasTuner put some helpful Keras callbacks in it, for example, the callback for checkpointing the model at its best epoch. We will manually call the callbacks in the custom training loop. Before we can call them, we need to assign our model to them with the following code so that they have access to the model for checkpointing. ```py for callback in callbacks: callback.model = model ``` In this example, we only called the `on_epoch_end()` method of the callbacks to help us checkpoint the model. You may also call other callback methods if needed. 
If you don't need to save the model, you don't need to use the callbacks. In the custom training loop, we tune the batch size of the dataset as we wrap the NumPy data into a `tf.data.Dataset`. Note that you can tune any preprocessing steps here as well. We also tune the learning rate of the optimizer. We will use the validation loss as the evaluation metric for the model. To compute the mean validation loss, we will use `keras.metrics.Mean()`, which averages the validation loss across the batches. We need to return the validation loss for the tuner to make a record. ```python class MyHyperModel(keras_tuner.HyperModel): def build(self, hp): """Builds a convolutional model.""" inputs = keras.Input(shape=(28, 28, 1)) x = keras.layers.Flatten()(inputs) x = keras.layers.Dense( units=hp.Choice("units", [32, 64, 128]), activation="relu" )(x) outputs = keras.layers.Dense(10)(x) return keras.Model(inputs=inputs, outputs=outputs) def fit(self, hp, model, x, y, validation_data, callbacks=None, **kwargs): # Convert the datasets to tf.data.Dataset. batch_size = hp.Int("batch_size", 32, 128, step=32, default=64) train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch( batch_size ) validation_data = tf.data.Dataset.from_tensor_slices(validation_data).batch( batch_size ) # Define the optimizer. optimizer = keras.optimizers.Adam( hp.Float("learning_rate", 1e-4, 1e-2, sampling="log", default=1e-3) ) loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # The metric to track validation loss. epoch_loss_metric = keras.metrics.Mean() # Function to run the train step. @tf.function def run_train_step(images, labels): with tf.GradientTape() as tape: logits = model(images) loss = loss_fn(labels, logits) # Add any regularization losses. if model.losses: loss += tf.math.add_n(model.losses) gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) # Function to run the validation step. @tf.function def run_val_step(images, labels): logits = model(images) loss = loss_fn(labels, logits) # Update the metric. epoch_loss_metric.update_state(loss) # Assign the model to the callbacks. for callback in callbacks: callback.set_model(model) # Record the best validation loss value best_epoch_loss = float("inf") # The custom training loop. for epoch in range(2): print(f"Epoch: {epoch}") # Iterate the training data to run the training step. for images, labels in train_ds: run_train_step(images, labels) # Iterate the validation data to run the validation step. for images, labels in validation_data: run_val_step(images, labels) # Calling the callbacks after epoch. epoch_loss = float(epoch_loss_metric.result().numpy()) for callback in callbacks: # The "my_metric" is the objective passed to the tuner. callback.on_epoch_end(epoch, logs={"my_metric": epoch_loss}) epoch_loss_metric.reset_state() print(f"Epoch loss: {epoch_loss}") best_epoch_loss = min(best_epoch_loss, epoch_loss) # Return the evaluation metric value. return best_epoch_loss ``` Now, we can initialize the tuner. Here, we use `Objective("my_metric", "min")` as our metric to be minimized. The objective name should be consistent with the one you use as the key in the `logs` passed to the 'on_epoch_end()' method of the callbacks. The callbacks need to use this value in the `logs` to find the best epoch to checkpoint the model. 
```python tuner = keras_tuner.RandomSearch( objective=keras_tuner.Objective("my_metric", "min"), max_trials=2, hypermodel=MyHyperModel(), directory="results", project_name="custom_training", overwrite=True, ) ``` We start the search by passing the arguments we defined in the signature of `MyHyperModel.fit()` to `tuner.search()`. ```python tuner.search(x=x_train, y=y_train, validation_data=(x_val, y_val)) ``` <div class="k-default-codeblock"> ``` Trial 2 Complete [00h 00m 02s] my_metric: 2.3025283813476562 ``` </div> <div class="k-default-codeblock"> ``` Best my_metric So Far: 2.3025283813476562 Total elapsed time: 00h 00m 04s ``` </div> Finally, we can retrieve the results. ```python best_hps = tuner.get_best_hyperparameters()[0] print(best_hps.values) best_model = tuner.get_best_models()[0] best_model.summary() ``` <div class="k-default-codeblock"> ``` {'units': 128, 'batch_size': 32, 'learning_rate': 0.0034272591820215972} ``` </div> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">784</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">100,480</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,290</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: 
#00af00">101,770</span> (397.54 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">101,770</span> (397.54 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> In summary, to tune the hyperparameters in your custom training loop, you just override `HyperModel.fit()` to train the model and return the evaluation results. With the provided callbacks, you can easily save the trained models at their best epochs and load the best models later. To find out more about the basics of KerasTuner, please see [Getting Started with KerasTuner](https://keras.io/guides/keras_tuner/getting_started/).
keras-io/guides/md/keras_tuner/custom_tuner.md/0
{ "file_path": "keras-io/guides/md/keras_tuner/custom_tuner.md", "repo_id": "keras-io", "token_count": 4811 }
106
# Writing a training loop from scratch in TensorFlow **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2019/03/01<br> **Last modified:** 2023/06/25<br> **Description:** Writing low-level training & evaluation loops in TensorFlow. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/writing_a_custom_training_loop_in_tensorflow.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/writing_a_custom_training_loop_in_tensorflow.py) --- ## Setup ```python import time import os # This guide can only be run with the TensorFlow backend. os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import keras import numpy as np ``` --- ## Introduction Keras provides default training and evaluation loops, `fit()` and `evaluate()`. Their usage is covered in the guide [Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/). If you want to customize the learning algorithm of your model while still leveraging the convenience of `fit()` (for instance, to train a GAN using `fit()`), you can subclass the `Model` class and implement your own `train_step()` method, which is called repeatedly during `fit()`. Now, if you want very low-level control over training & evaluation, you should write your own training & evaluation loops from scratch. This is what this guide is about. --- ## A first end-to-end example Let's consider a simple MNIST model: ```python def get_model(): inputs = keras.Input(shape=(784,), name="digits") x1 = keras.layers.Dense(64, activation="relu")(inputs) x2 = keras.layers.Dense(64, activation="relu")(x1) outputs = keras.layers.Dense(10, name="predictions")(x2) model = keras.Model(inputs=inputs, outputs=outputs) return model model = get_model() ``` Let's train it using mini-batch gradient with a custom training loop. First, we're going to need an optimizer, a loss function, and a dataset: ```python # Instantiate an optimizer. optimizer = keras.optimizers.Adam(learning_rate=1e-3) # Instantiate a loss function. loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Prepare the training dataset. batch_size = 32 (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = np.reshape(x_train, (-1, 784)) x_test = np.reshape(x_test, (-1, 784)) # Reserve 10,000 samples for validation. x_val = x_train[-10000:] y_val = y_train[-10000:] x_train = x_train[:-10000] y_train = y_train[:-10000] # Prepare the training dataset. train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size) # Prepare the validation dataset. val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_dataset = val_dataset.batch(batch_size) ``` Calling a model inside a `GradientTape` scope enables you to retrieve the gradients of the trainable weights of the layer with respect to a loss value. Using an optimizer instance, you can use these gradients to update these variables (which you can retrieve using `model.trainable_weights`). 
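As a minimal sketch (reusing the `model`, `loss_fn`, `optimizer`, and `train_dataset` defined above), a single gradient update on one batch looks like this; the full loop below simply wraps this logic in loops over epochs and batches:

```python
# Sketch of one update step, using the objects defined above.
x_batch, y_batch = next(iter(train_dataset))
with tf.GradientTape() as tape:
    logits = model(x_batch, training=True)  # Forward pass
    loss_value = loss_fn(y_batch, logits)  # Loss for this batch
grads = tape.gradient(loss_value, model.trainable_weights)  # Gradients w.r.t. the loss
optimizer.apply(grads, model.trainable_weights)  # One optimizer step
```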
Here's our training loop, step by step: - We open a `for` loop that iterates over epochs - For each epoch, we open a `for` loop that iterates over the dataset, in batches - For each batch, we open a `GradientTape()` scope - Inside this scope, we call the model (forward pass) and compute the loss - Outside the scope, we retrieve the gradients of the weights of the model with regard to the loss - Finally, we use the optimizer to update the weights of the model based on the gradients ```python epochs = 3 for epoch in range(epochs): print(f"\nStart of epoch {epoch}") # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): # Open a GradientTape to record the operations run # during the forward pass, which enables auto-differentiation. with tf.GradientTape() as tape: # Run the forward pass of the layer. # The operations that the layer applies # to its inputs are going to be recorded # on the GradientTape. logits = model(x_batch_train, training=True) # Logits for this minibatch # Compute the loss value for this minibatch. loss_value = loss_fn(y_batch_train, logits) # Use the gradient tape to automatically retrieve # the gradients of the trainable variables with respect to the loss. grads = tape.gradient(loss_value, model.trainable_weights) # Run one step of gradient descent by updating # the value of the variables to minimize the loss. optimizer.apply(grads, model.trainable_weights) # Log every 100 batches. if step % 100 == 0: print( f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}" ) print(f"Seen so far: {(step + 1) * batch_size} samples") ``` <div class="k-default-codeblock"> ``` Start of epoch 0 Training loss (for 1 batch) at step 0: 95.3300 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 2.5622 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 3.1138 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.6748 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 1.3308 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 1.9813 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 0.8640 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 1.0696 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.3662 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.9556 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.7459 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.0468 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.7392 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.8435 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.3859 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.4156 Seen so far: 48032 samples ``` </div> <div class="k-default-codeblock"> ``` Start of epoch 1 Training loss (for 1 batch) at step 0: 0.4045 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 0.5983 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 0.3154 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.7911 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.2607 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 0.2303 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 0.6048 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.7041 Seen so far: 22432 samples 
Training loss (for 1 batch) at step 800: 0.3669 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.6389 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.7739 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.3888 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.8133 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.2034 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.0768 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.1544 Seen so far: 48032 samples ``` </div> <div class="k-default-codeblock"> ``` Start of epoch 2 Training loss (for 1 batch) at step 0: 0.1250 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 0.0152 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 0.0917 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.1330 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.0884 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 0.2656 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 0.4375 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.2246 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.0748 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.1765 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.0130 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.4030 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.0667 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 1.0553 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.6513 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.0599 Seen so far: 48032 samples ``` </div> --- ## Low-level handling of metrics Let's add metrics monitoring to this basic loop. You can readily reuse the built-in metrics (or custom ones you wrote) in such training loops written from scratch. Here's the flow: - Instantiate the metric at the start of the loop - Call `metric.update_state()` after each batch - Call `metric.result()` when you need to display the current value of the metric - Call `metric.reset_state()` when you need to clear the state of the metric (typically at the end of an epoch) Let's use this knowledge to compute `SparseCategoricalAccuracy` on training and validation data at the end of each epoch: ```python # Get a fresh model model = get_model() # Instantiate an optimizer to train the model. optimizer = keras.optimizers.Adam(learning_rate=1e-3) # Instantiate a loss function. loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Prepare the metrics. train_acc_metric = keras.metrics.SparseCategoricalAccuracy() val_acc_metric = keras.metrics.SparseCategoricalAccuracy() ``` Here's our training & evaluation loop: ```python epochs = 2 for epoch in range(epochs): print(f"\nStart of epoch {epoch}") start_time = time.time() # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): with tf.GradientTape() as tape: logits = model(x_batch_train, training=True) loss_value = loss_fn(y_batch_train, logits) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply(grads, model.trainable_weights) # Update training metric. train_acc_metric.update_state(y_batch_train, logits) # Log every 100 batches. 
if step % 100 == 0: print( f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}" ) print(f"Seen so far: {(step + 1) * batch_size} samples") # Display metrics at the end of each epoch. train_acc = train_acc_metric.result() print(f"Training acc over epoch: {float(train_acc):.4f}") # Reset training metrics at the end of each epoch train_acc_metric.reset_state() # Run a validation loop at the end of each epoch. for x_batch_val, y_batch_val in val_dataset: val_logits = model(x_batch_val, training=False) # Update val metrics val_acc_metric.update_state(y_batch_val, val_logits) val_acc = val_acc_metric.result() val_acc_metric.reset_state() print(f"Validation acc: {float(val_acc):.4f}") print(f"Time taken: {time.time() - start_time:.2f}s") ``` <div class="k-default-codeblock"> ``` Start of epoch 0 Training loss (for 1 batch) at step 0: 89.1303 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 1.0351 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 2.9143 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 1.7842 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.9583 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 1.1100 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 2.1144 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.6801 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.6202 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 1.2570 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.3638 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 1.8402 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.7836 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.5147 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.4798 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.1653 Seen so far: 48032 samples Training acc over epoch: 0.7961 Validation acc: 0.8825 Time taken: 46.06s ``` </div> <div class="k-default-codeblock"> ``` Start of epoch 1 Training loss (for 1 batch) at step 0: 1.3917 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 0.2600 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 0.7206 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.4987 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.3410 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 0.6788 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 1.1355 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.1762 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.1801 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.3515 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.4344 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.2027 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.4649 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.6848 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.4594 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.3548 Seen so far: 48032 samples Training acc over epoch: 0.8896 Validation acc: 0.9094 Time taken: 43.49s ``` </div> --- ## Speeding-up your training step with `tf.function` The default runtime in TensorFlow is eager execution. 
As such, our training loop above executes eagerly. This is great for debugging, but graph compilation has a definite performance advantage. Describing your computation as a static graph enables the framework to apply global performance optimizations. This is impossible when the framework is constrained to greedily execute one operation after another, with no knowledge of what comes next. You can compile into a static graph any function that takes tensors as input. Just add a `@tf.function` decorator on it, like this: ```python @tf.function def train_step(x, y): with tf.GradientTape() as tape: logits = model(x, training=True) loss_value = loss_fn(y, logits) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply(grads, model.trainable_weights) train_acc_metric.update_state(y, logits) return loss_value ``` Let's do the same with the evaluation step: ```python @tf.function def test_step(x, y): val_logits = model(x, training=False) val_acc_metric.update_state(y, val_logits) ``` Now, let's re-run our training loop with this compiled training step: ```python epochs = 2 for epoch in range(epochs): print(f"\nStart of epoch {epoch}") start_time = time.time() # Iterate over the batches of the dataset. for step, (x_batch_train, y_batch_train) in enumerate(train_dataset): loss_value = train_step(x_batch_train, y_batch_train) # Log every 100 batches. if step % 100 == 0: print( f"Training loss (for 1 batch) at step {step}: {float(loss_value):.4f}" ) print(f"Seen so far: {(step + 1) * batch_size} samples") # Display metrics at the end of each epoch. train_acc = train_acc_metric.result() print(f"Training acc over epoch: {float(train_acc):.4f}") # Reset training metrics at the end of each epoch train_acc_metric.reset_state() # Run a validation loop at the end of each epoch. 
for x_batch_val, y_batch_val in val_dataset: test_step(x_batch_val, y_batch_val) val_acc = val_acc_metric.result() val_acc_metric.reset_state() print(f"Validation acc: {float(val_acc):.4f}") print(f"Time taken: {time.time() - start_time:.2f}s") ``` <div class="k-default-codeblock"> ``` Start of epoch 0 Training loss (for 1 batch) at step 0: 0.5366 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 0.2732 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 0.2478 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.0263 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.4845 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 0.2239 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 0.2242 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.2122 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.2856 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.1957 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.2946 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.3080 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.2326 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.6514 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.2018 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.2812 Seen so far: 48032 samples Training acc over epoch: 0.9104 Validation acc: 0.9199 Time taken: 5.73s ``` </div> <div class="k-default-codeblock"> ``` Start of epoch 1 Training loss (for 1 batch) at step 0: 0.3080 Seen so far: 32 samples Training loss (for 1 batch) at step 100: 0.3943 Seen so far: 3232 samples Training loss (for 1 batch) at step 200: 0.1657 Seen so far: 6432 samples Training loss (for 1 batch) at step 300: 0.1463 Seen so far: 9632 samples Training loss (for 1 batch) at step 400: 0.5359 Seen so far: 12832 samples Training loss (for 1 batch) at step 500: 0.1894 Seen so far: 16032 samples Training loss (for 1 batch) at step 600: 0.1801 Seen so far: 19232 samples Training loss (for 1 batch) at step 700: 0.1724 Seen so far: 22432 samples Training loss (for 1 batch) at step 800: 0.3997 Seen so far: 25632 samples Training loss (for 1 batch) at step 900: 0.6017 Seen so far: 28832 samples Training loss (for 1 batch) at step 1000: 0.1539 Seen so far: 32032 samples Training loss (for 1 batch) at step 1100: 0.1078 Seen so far: 35232 samples Training loss (for 1 batch) at step 1200: 0.8731 Seen so far: 38432 samples Training loss (for 1 batch) at step 1300: 0.3110 Seen so far: 41632 samples Training loss (for 1 batch) at step 1400: 0.6092 Seen so far: 44832 samples Training loss (for 1 batch) at step 1500: 0.2046 Seen so far: 48032 samples Training acc over epoch: 0.9189 Validation acc: 0.9358 Time taken: 3.17s ``` </div> Much faster, isn't it? --- ## Low-level handling of losses tracked by the model Layers & models recursively track any losses created during the forward pass by layers that call `self.add_loss(value)`. The resulting list of scalar loss values are available via the property `model.losses` at the end of the forward pass. If you want to be using these loss components, you should sum them and add them to the main loss in your training step. 
Consider this layer, which creates an activity regularization loss: ```python class ActivityRegularizationLayer(keras.layers.Layer): def call(self, inputs): self.add_loss(1e-2 * tf.reduce_sum(inputs)) return inputs ``` Let's build a really simple model that uses it: ```python inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu")(inputs) # Insert activity regularization as a layer x = ActivityRegularizationLayer()(x) x = keras.layers.Dense(64, activation="relu")(x) outputs = keras.layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) ``` Here's what our training step should look like now: ```python @tf.function def train_step(x, y): with tf.GradientTape() as tape: logits = model(x, training=True) loss_value = loss_fn(y, logits) # Add any extra losses created during the forward pass. loss_value += sum(model.losses) grads = tape.gradient(loss_value, model.trainable_weights) optimizer.apply(grads, model.trainable_weights) train_acc_metric.update_state(y, logits) return loss_value ``` --- ## Summary Now you know everything there is to know about using built-in training loops and writing your own from scratch. To conclude, here's a simple end-to-end example that ties together everything you've learned in this guide: a DCGAN trained on MNIST digits. --- ## End-to-end example: a GAN training loop from scratch You may be familiar with Generative Adversarial Networks (GANs). GANs can generate new images that look almost real, by learning the latent distribution of a training dataset of images (the "latent space" of the images). A GAN is made of two parts: a "generator" model that maps points in the latent space to points in image space, and a "discriminator" model, a classifier that can tell the difference between real images (from the training dataset) and fake images (the output of the generator network). A GAN training loop looks like this: 1) Train the discriminator. - Sample a batch of random points in the latent space. - Turn the points into fake images via the "generator" model. - Get a batch of real images and combine them with the generated images. - Train the "discriminator" model to classify generated vs. real images. 2) Train the generator. - Sample random points in the latent space. - Turn the points into fake images via the "generator" network. - Train the "generator" model to "fool" the discriminator into classifying the fake images as real. For a much more detailed overview of how GANs work, see [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python). Let's implement this training loop. 
First, create the discriminator meant to classify fake vs real digits: ```python discriminator = keras.Sequential( [ keras.Input(shape=(28, 28, 1)), keras.layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.GlobalMaxPooling2D(), keras.layers.Dense(1), ], name="discriminator", ) discriminator.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "discriminator"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">640</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ leaky_re_lu (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">73,856</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ leaky_re_lu_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ global_max_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ │ (<span style="color: #0087ff; 
text-decoration-color: #0087ff">GlobalMaxPooling2D</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">129</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">74,625</span> (291.50 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">74,625</span> (291.50 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Then let's create a generator network, that turns latent vectors into outputs of shape `(28, 28, 1)` (representing MNIST digits): ```python latent_dim = 128 generator = keras.Sequential( [ keras.Input(shape=(latent_dim,)), # We want to generate 128 coefficients to reshape into a 7x7x128 map keras.layers.Dense(7 * 7 * 128), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Reshape((7, 7, 128)), keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), keras.layers.LeakyReLU(negative_slope=0.2), keras.layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"), ], name="generator", ) ``` Here's the key bit: the training loop. As you can see it is quite straightforward. The training step function only takes 17 lines. ```python # Instantiate one optimizer for the discriminator and another for the generator. d_optimizer = keras.optimizers.Adam(learning_rate=0.0003) g_optimizer = keras.optimizers.Adam(learning_rate=0.0004) # Instantiate a loss function. loss_fn = keras.losses.BinaryCrossentropy(from_logits=True) @tf.function def train_step(real_images): # Sample random points in the latent space random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim)) # Decode them to fake images generated_images = generator(random_latent_vectors) # Combine them with real images combined_images = tf.concat([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((real_images.shape[0], 1))], axis=0 ) # Add random noise to the labels - important trick! 
labels += 0.05 * tf.random.uniform(labels.shape) # Train the discriminator with tf.GradientTape() as tape: predictions = discriminator(combined_images) d_loss = loss_fn(labels, predictions) grads = tape.gradient(d_loss, discriminator.trainable_weights) d_optimizer.apply(grads, discriminator.trainable_weights) # Sample random points in the latent space random_latent_vectors = tf.random.normal(shape=(batch_size, latent_dim)) # Assemble labels that say "all real images" misleading_labels = tf.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! with tf.GradientTape() as tape: predictions = discriminator(generator(random_latent_vectors)) g_loss = loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, generator.trainable_weights) g_optimizer.apply(grads, generator.trainable_weights) return d_loss, g_loss, generated_images ``` Let's train our GAN, by repeatedly calling `train_step` on batches of images. Since our discriminator and generator are convnets, you're going to want to run this code on a GPU. ```python # Prepare the dataset. We use both the training & test MNIST digits. batch_size = 64 (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() all_digits = np.concatenate([x_train, x_test]) all_digits = all_digits.astype("float32") / 255.0 all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) dataset = tf.data.Dataset.from_tensor_slices(all_digits) dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) epochs = 1 # In practice you need at least 20 epochs to generate nice digits. save_dir = "./" for epoch in range(epochs): print(f"\nStart epoch {epoch}") for step, real_images in enumerate(dataset): # Train the discriminator & generator on one batch of real images. d_loss, g_loss, generated_images = train_step(real_images) # Logging. if step % 100 == 0: # Print metrics print(f"discriminator loss at step {step}: {d_loss:.2f}") print(f"adversarial loss at step {step}: {g_loss:.2f}") # Save one generated image img = keras.utils.array_to_img(generated_images[0] * 255.0, scale=False) img.save(os.path.join(save_dir, f"generated_img_{step}.png")) # To limit execution time we stop after 10 steps. # Remove the lines below to actually train the model! if step > 10: break ``` <div class="k-default-codeblock"> ``` Start epoch 0 discriminator loss at step 0: 0.69 adversarial loss at step 0: 0.69 ``` </div> That's it! You'll get nice-looking fake MNIST digits after just ~30s of training on the Colab GPU.
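If you want to inspect the generator after training, the short sketch below shows one way to sample a few digits from it. It only reuses objects already defined in this guide (`generator`, `latent_dim`, and `keras.utils.array_to_img`); the sample count and output filenames are arbitrary illustrative choices, not part of the original example.

```python
# Sample a few digits from the trained generator (illustrative only).
num_samples = 9  # arbitrary choice
random_latent_vectors = tf.random.normal(shape=(num_samples, latent_dim))
generated_images = generator(random_latent_vectors)

for i in range(num_samples):
    # Rescale from [0, 1] to [0, 255] before converting to a PIL image.
    img = keras.utils.array_to_img(generated_images[i] * 255.0, scale=False)
    img.save(f"generated_digit_{i}.png")
```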
keras-io/guides/md/writing_a_custom_training_loop_in_tensorflow.md/0
{ "file_path": "keras-io/guides/md/writing_a_custom_training_loop_in_tensorflow.md", "repo_id": "keras-io", "token_count": 11845 }
107
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_nlp/modeling_layers/masked_lm_head/'" />
keras-io/redirects/api/keras_nlp/layers/mlm_head/index.html/0
{ "file_path": "keras-io/redirects/api/keras_nlp/layers/mlm_head/index.html", "repo_id": "keras-io", "token_count": 49 }
108
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/callbacks/'" />
keras-io/redirects/callbacks/index.html/0
{ "file_path": "keras-io/redirects/callbacks/index.html", "repo_id": "keras-io", "token_count": 32 }
109
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/core_layers/'" />
keras-io/redirects/layers/core/index.html/0
{ "file_path": "keras-io/redirects/layers/core/index.html", "repo_id": "keras-io", "token_count": 37 }
110
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/preprocessing/image/'" />
keras-io/redirects/preprocessing/image/index.html/0
{ "file_path": "keras-io/redirects/preprocessing/image/index.html", "repo_id": "keras-io", "token_count": 34 }
111
from pathlib import Path import tutobooks import copy import json import hashlib import string import re import yaml # The order of CONFIG is also used to generate the _toc.yaml for tensorflow.org. CONFIG = [ { "title": "The Sequential model", "source_name": "sequential_model", "target_name": "sequential_model", }, { "title": "The Functional API", "source_name": "functional_api", "target_name": "functional", }, { "title": "Training and evaluation with the built-in methods", "source_name": "training_with_built_in_methods", "target_name": "train_and_evaluate", }, { "title": "Making new Layers and Models via subclassing", "source_name": "making_new_layers_and_models_via_subclassing", "target_name": "custom_layers_and_models", }, { "title": "Save and load Keras models", "source_name": "serialization_and_saving", "target_name": "save_and_serialize", }, { "title": "Working with preprocessing layers", "source_name": "preprocessing_layers", "target_name": "preprocessing_layers", }, { "title": "Customize what happens in Model.fit", "source_name": "customizing_what_happens_in_fit", "target_name": "customizing_what_happens_in_fit", }, { "title": "Writing a training loop from scratch", "source_name": "writing_a_training_loop_from_scratch", "target_name": "writing_a_training_loop_from_scratch", }, { "title": "Recurrent Neural Networks (RNN) with Keras", "source_name": "working_with_rnns", "target_name": "rnn", }, { "title": "Masking and padding with Keras", "source_name": "understanding_masking_and_padding", "target_name": "masking_and_padding", }, { "title": "Writing your own callbacks", "source_name": "writing_your_own_callbacks", "target_name": "custom_callback", }, { "title": "Transfer learning and fine-tuning", "source_name": "transfer_learning", "target_name": "transfer_learning", }, { "title": "Training Keras models with TensorFlow Cloud", "source_name": "training_keras_models_on_cloud", "target_name": "training_keras_models_on_cloud", }, ] TF_BUTTONS_TEMPLATE = { "cell_type": "markdown", "metadata": { "colab_type": "text", }, "source": [ '<table class="tfo-notebook-buttons" align="left">\n', " <td>\n", ' <a target="_blank" href="https://www.tensorflow.org/guide/keras/TARGET_NAME"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>\n', " </td>\n", " <td>\n", ' <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/snapshot-keras/site/en/guide/keras/TARGET_NAME.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>\n', " </td>\n", " <td>\n", ' <a target="_blank" href="https://github.com/keras-team/keras-io/blob/master/guides/SOURCE_NAME.py"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>\n', " </td>\n", " <td>\n", ' <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/TARGET_NAME.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>\n', " </td>\n", "</table>", ], } TF_IPYNB_CELLS_TEMPLATE = [ { "cell_type": "markdown", "metadata": { "colab_type": "text", }, "source": ["##### Copyright 2020 The TensorFlow Authors."], }, { "cell_type": "code", "execution_count": 0, "metadata": { "cellView": "form", "colab": {}, "colab_type": "code", }, "outputs": [], "source": [ '#@title Licensed under the Apache License, Version 2.0 (the "License");\n', "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# 
https://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing, software\n", '# distributed under the License is distributed on an "AS IS" BASIS,\n', "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", "# See the License for the specific language governing permissions and\n", "# limitations under the License.", ], }, # Then: title # Then: buttons ] TF_IPYNB_BASE = { "metadata": { "colab": { "collapsed_sections": [], "name": "", # FILL ME "private_outputs": True, "provenance": [], "toc_visible": True, }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3", }, }, "nbformat": 4, "nbformat_minor": 0, } def generate_single_tf_guide(source_dir, target_dir, title, source_name, target_name): # Before we start, regenerate the ipynb. max_loc = tutobooks.MAX_LOC tutobooks.MAX_LOC = 400 py_path = (Path(source_dir).parent / source_name).with_suffix(".py") nb_path = (Path(source_dir) / source_name).with_suffix(".ipynb") tutobooks.py_to_nb(py_path, nb_path, fill_outputs=False) tutobooks.MAX_LOC = max_loc original_ipynb = json.loads(nb_path.read_text()) # Skip first title cell cells = original_ipynb["cells"][1:] # Strip Keras tags for cell in cells: if cell["cell_type"] == "markdown": new_lines = [] lines = cell["source"] num_lines = len(lines) for i in range(num_lines - 1): if lines[i].startswith('<div class="k-default-codeblock">') and lines[ i + 1 ].startswith("```"): continue elif lines[i].startswith("</div>") and lines[i - 1].startswith("```"): continue else: new_lines.append(lines[i]) if len(lines) >= 2 and not ( lines[-1].startswith("</div>") and lines[-2].startswith("```") ): new_lines.append(lines[-1]) if len(lines) < 2: new_lines.append(lines[-1]) cell["source"] = new_lines elif cell["cell_type"] == "code": lines = cell["source"] if not lines[0].strip(): lines = lines[1:] if not lines[-1].strip(): lines = lines[:-1] cell["source"] = lines # Add header cells header_cells = copy.deepcopy(TF_IPYNB_CELLS_TEMPLATE) # Add title cell header_cells.append( { "cell_type": "markdown", "metadata": {"colab_type": "text"}, "source": ["# " + title], } ) buttons = copy.deepcopy(TF_BUTTONS_TEMPLATE) for i in range(len(buttons["source"])): buttons["source"][i] = buttons["source"][i].replace("TARGET_NAME", target_name) buttons["source"][i] = buttons["source"][i].replace("SOURCE_NAME", source_name) header_cells.append(buttons) cells = header_cells + cells cell_count = 0 for cell in cells: cell_count += 1 str_to_hash = f"{cell_count} {cell['source']}" cell_id = hashlib.sha256(str_to_hash.encode("utf-8")).hexdigest() cell["metadata"]["id"] = cell_id[:12] notebook = {} for key in TF_IPYNB_BASE.keys(): notebook[key] = TF_IPYNB_BASE[key] notebook["metadata"]["colab"]["name"] = target_name + ".ipynb" notebook["cells"] = cells f = open(Path(target_dir) / (target_name + ".ipynb"), "w") json_st = json.dumps(notebook, indent=1, sort_keys=True) # Apply link conversion json_st = json_st.replace( "(/api/callbacks/", "(https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/", ) json_st = json_st.replace( "keras.io/api/layers/recurrent_layers/rnn/", "https://www.tensorflow.org/api_docs/python/tf/keras/layers/RNN/", ) json_st = json_st.replace( "https://keras.io/api/layers/recurrent_layers/gru/", "https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU/", ) json_st = json_st.replace( "https://keras.io/api/layers/recurrent_layers/lstm/", "https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM/", ) 
json_st = json_st.replace( "https://keras.io/api/layers/recurrent_layers/bidirectional/", "https://www.tensorflow.org/api_docs/python/tf/keras/layers/Bidirectional/", ) json_st = json_st.replace( "https://keras.io/api/callbacks/", "https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/", ) for entry in CONFIG: src = entry["source_name"] dst = entry["target_name"] json_st = re.sub( r"(?is)]\((\s*)/guides/" + src, "](https://www.tensorflow.org/guide/keras/" + dst, json_st, ) json_st = re.sub( r"(?is)(\s+)/guides/" + src, "https://www.tensorflow.org/guide/keras/" + dst, json_st, ) f.write(json_st) f.close() def generate_toc(target_dir): target_dir = Path(target_dir) toc = [] for config in CONFIG: toc.append( { "title": config["title"], "path": str(Path("/guide/keras") / config["target_name"]), } ) toc_dict = {"toc": toc} with open(str(target_dir / "_toc.yaml"), "w") as toc_file: yaml.dump(toc_dict, toc_file, sort_keys=False) def generate_tf_guides(): generate_toc(target_dir="../tf") for entry in CONFIG: generate_single_tf_guide( source_dir="../guides/ipynb/", target_dir="../tf/", title=entry["title"], source_name=entry["source_name"], target_name=entry["target_name"], )
keras-io/scripts/generate_tf_guides.py/0
{ "file_path": "keras-io/scripts/generate_tf_guides.py", "repo_id": "keras-io", "token_count": 5010 }
112
# KerasCV API KerasCV is a library of modular computer vision oriented Keras components. These components include models, layers, metrics, losses, callbacks, and utility functions. For an introduction to the library see the [KerasCV home page](/keras_cv). {{toc}}
keras-io/templates/api/keras_cv/index.md/0
{ "file_path": "keras-io/templates/api/keras_cv/index.md", "repo_id": "keras-io", "token_count": 73 }
113
# KerasTuner HyperModels The `HyperModel` base class encapsulates the search space so that it can be shared and reused. A `HyperModel` subclass only needs to implement a `build(self, hp)` method, which creates a `keras.Model` using the `hp` argument to define the hyperparameters and returns the model instance. A simple code example follows. ```python class MyHyperModel(kt.HyperModel): def build(self, hp): model = keras.Sequential() model.add(keras.layers.Dense( hp.Choice('units', [8, 16, 32]), activation='relu')) model.add(keras.layers.Dense(1, activation='relu')) model.compile(loss='mse') return model ``` You can pass a `HyperModel` instance to the `Tuner` as the search space. ```python tuner = kt.RandomSearch( MyHyperModel(), objective='val_loss', max_trials=5) ``` There are also built-in `HyperModel` subclasses (e.g. `HyperResNet`, `HyperXception`) that you can use directly, so you don't need to write your own search space. ```python tuner = kt.RandomSearch( HyperResNet(input_shape=(28, 28, 1), classes=10), objective='val_loss', max_trials=5) ``` {{toc}}
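As a usage sketch (not part of the original page), running the search with one of the tuners defined above and retrieving the best model might look like the following. The training arrays, epoch count, and validation data are placeholders for illustration.

```python
# Placeholder data: assume `x_train`, `y_train`, `x_val`, `y_val` exist.
tuner.search(x_train, y_train, epochs=5, validation_data=(x_val, y_val))

# Retrieve the best model and the best hyperparameter values found.
best_model = tuner.get_best_models(num_models=1)[0]
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hp.values)  # e.g. {'units': 16}
```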
keras-io/templates/api/keras_tuner/hypermodels/index.md/0
{ "file_path": "keras-io/templates/api/keras_tuner/hypermodels/index.md", "repo_id": "keras-io", "token_count": 417 }
114
# Optimizers ## Usage with `compile()` & `fit()` An optimizer is one of the two arguments required for compiling a Keras model: ```python import keras from keras import layers model = keras.Sequential() model.add(layers.Dense(64, kernel_initializer='uniform', input_shape=(10,))) model.add(layers.Activation('softmax')) opt = keras.optimizers.Adam(learning_rate=0.01) model.compile(loss='categorical_crossentropy', optimizer=opt) ``` You can either instantiate an optimizer before passing it to `model.compile()` , as in the above example, or you can pass it by its string identifier. In the latter case, the default parameters for the optimizer will be used. ```python # pass optimizer by name: default parameters will be used model.compile(loss='categorical_crossentropy', optimizer='adam') ``` --- ## Learning rate decay / scheduling You can use a [learning rate schedule](/api/optimizers/learning_rate_schedules) to modulate how the learning rate of your optimizer changes over time: ```python lr_schedule = keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=1e-2, decay_steps=10000, decay_rate=0.9) optimizer = keras.optimizers.SGD(learning_rate=lr_schedule) ``` Check out [the learning rate schedule API documentation](/api/optimizers/learning_rate_schedules) for a list of available schedules. --- ## Available optimizers {{toc}} --- ## Core Optimizer API These methods and attributes are common to all Keras optimizers. {{autogenerated}}
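As an illustrative sketch (not part of the generated API listing above), an optimizer can also be stepped manually inside a custom training loop. Here `model`, `loss_fn`, `x_batch`, and `y_batch` are assumed to be defined elsewhere, the TensorFlow backend is assumed for `tf.GradientTape`, and the update uses the `optimizer.apply(grads, variables)` method.

```python
import tensorflow as tf

optimizer = keras.optimizers.Adam(learning_rate=1e-3)

# One manual optimization step; `model`, `loss_fn`, `x_batch`, and
# `y_batch` are assumed to exist.
with tf.GradientTape() as tape:
    logits = model(x_batch, training=True)
    loss_value = loss_fn(y_batch, logits)

grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply(grads, model.trainable_weights)
```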
keras-io/templates/api/optimizers/index.md/0
{ "file_path": "keras-io/templates/api/optimizers/index.md", "repo_id": "keras-io", "token_count": 470 }
115
# Hyperparameter Tuning These guides cover KerasTuner best practices. ## Available guides {{toc}}
keras-io/templates/guides/keras_tuner/index.md/0
{ "file_path": "keras-io/templates/guides/keras_tuner/index.md", "repo_id": "keras-io", "token_count": 29 }
116
set -e set -x cd "${KOKORO_ROOT}/" sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 PYTHON_BINARY="/usr/bin/python3.9" "${PYTHON_BINARY}" -m venv venv source venv/bin/activate # Check the python version python --version python3 --version export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:" # Check cuda nvidia-smi nvcc --version cd "src/github/keras-nlp" pip install -U pip setuptools psutil if [ "${KERAS2:-0}" == "1" ] then echo "Keras2 detected." pip install -r requirements-common.txt --progress-bar off pip install tensorflow-text==2.15 tensorflow[and-cuda]~=2.15 keras-core elif [ "$KERAS_BACKEND" == "tensorflow" ] then echo "TensorFlow backend detected." pip install -r requirements-tensorflow-cuda.txt --progress-bar off elif [ "$KERAS_BACKEND" == "jax" ] then echo "JAX backend detected." pip install -r requirements-jax-cuda.txt --progress-bar off elif [ "$KERAS_BACKEND" == "torch" ] then echo "PyTorch backend detected." pip install -r requirements-torch-cuda.txt --progress-bar off fi pip install --no-deps -e "." --progress-bar off # Run Extra Large Tests for Continuous builds if [ "${RUN_XLARGE:-0}" == "1" ] then pytest keras_nlp --check_gpu --run_large --run_extra_large \ --cov=keras-nlp else pytest keras_nlp --check_gpu --run_large \ --cov=keras-nlp fi
keras-nlp/.kokoro/github/ubuntu/gpu/build.sh/0
{ "file_path": "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh", "repo_id": "keras-nlp", "token_count": 566 }
117
# Release Process ⚠️ This doc is intended for maintainers of the KerasNLP library. Steps below require push access to the base repository. However, all are welcome to use this process for other projects, or suggest improvements! ## Overview Our release process consists of two main components: - Adding a new release to the [keras-nlp](https://pypi.org/project/keras-nlp/) project on the Python Package Index (pypi). - Updating our documentation on [keras.io](https://keras.io/keras_nlp/) to match the release. We follow [semantic versioning](https://semver.org/) for our releases, and have a different process when releasing major/minor versions (e.g. 1.0 or 1.2) vs a "patch" release (1.0.1). Both are covered below. ## Creating a new major or minor release Use the following steps to create an `X.Y.0` release. 1. Similar to the Keras and TensorFlow repositories, we keep a named branch `rX.Y` for each minor release. We need to set this up. If you have not, please set `upstream` as `keras-team/keras-nlp` by running: ```shell git remote add upstream https://github.com/keras-team/keras-nlp.git ``` From the master branch, create a new branch with a name matching the first two digits of the upcoming release: ```shell git fetch --all git checkout --no-track -b rX.Y upstream/master git push -u upstream rX.Y ``` This branch will now be used for all subsequent `X.Y.Z` releases, e.g., `0.2.1` should still use branch `r0.2` instead of creating `r0.2.1`. 2. Before we officially push a new stable release to pypi, it is good practice to test out a [development release](https://pythonpackaging.info/07-Package-Release.html#Versioning-your-code) of the package. Development releases will have version numbers like `X.Y.0.dev0`, and critically will never be installed by default by `pip`. Make a PR following [this template](https://github.com/keras-team/keras-nlp/pull/456/files) to update our version number to look like `X.Y.0.dev0`. This PR should be based off our new release branch instead of the master branch. You can use the following commands: ```shell git fetch --all git checkout --no-track -b version-bump-X.Y.0.dev0 upstream/rX.Y # Update both setup.py and keras_nlp/__init__.py with an editor. git commit -m "Version bump to X.Y.0.dev0" git push -u origin version-bump-X.Y.0.dev0 ``` On github, make a PR targeting the new release branch instead of the master branch, and ask someone to review. 3. On github, we can now create the `X.Y.0.dev0` release. Use [this link](https://github.com/keras-team/keras-nlp/releases/new) to kick it off. This release should be titled `X.Y.0.dev0` and should create a new tag with the same name on publish. You can use the following screenshot as a reference. ![Release page screenshot](.github/assets/release_screenshot.png) Making a github release will automatically kick off a pypi release, as configured by [this file](.github/workflows/publish-to-pypi.yml). 4. Wait a few minutes until the release appears on pypi, then test out the release by running `pip install keras-nlp==X.Y.0.dev0`. Try to test the package thoroughly! It is a good idea to run through a few of our guides with the new version. Fix any bugs you find, and repeat steps 2 and 3 with a new dev number (e.g. `X.Y.0.dev1`) until you are confident in the release. It is important that we make any fixes to the master branch first, and then cherry-pick them to the release branch. Given a commit hash `e32e9ded`, you can cherry-pick a change as follows. ```shell git checkout rX.Y # Make sure we are exactly up to date with the upstream branch. 
git fetch --all git reset --hard upstream/rX.Y # Cherry pick as many times as you need. git cherry-pick e32e9ded git push upstream rX.Y ``` 5. Before cutting the final release, we should try previewing our documentation on keras.io. This will help catch bugs with our symbol export and docstrings. The keras.io [README](https://github.com/keras-team/keras-io/) contains instructions on building and previewing the site, and you can use [this PR](https://github.com/keras-team/keras-io/pull/1134) as a reference for what to change. Ask fchollet@ to review. During development of the branch, you can pin the keras-nlp dev release in the keras-io `requirements.txt` file. Remember to update this to the official release before we merge the PR. 6. We are now ready to cut the official release! Make a PR similar to step 2, but updating the release number to `X.Y.0` (no `.dev0` suffix). Land the PR. Confirm that the latest commit on our release branch is green before making the actual release! We should not release if there are any test failures. Make a release similar to step 3, but updating the tag and title to `X.Y.0`. Leave "Set as pre-release" unchecked and check the box that says "Set as the latest release". Click "Publish release" when ready. 7. Now that our release is done, we should bump the version number on our master branch. Let `Ŷ = Y + 1`. Our new master branch version should look like `X.Ŷ.0`. ```shell git fetch --all git checkout --no-track -b version-bump-X.Ŷ.0 upstream/master # Update both setup.py and keras_nlp/__init__.py with an editor. git commit -m "Version bump to X.Ŷ.0" git push -u origin version-bump-X.Ŷ.0 ``` Create and land a PR with this change to the master branch. ## Creating a new patch release Use the following steps to create a "patch" `X.Y.Z` release. We do this when we do not yet want to release everything on our master branch, but still would like to push certain fixes out to our users. 1. We need to bring in code changes to the release branch. Whenever possible these should be changes that are also on the master branch, which we cherry-pick for the release. Given a commit hash `e32e9ded`, you can cherry-pick a change to the release branch as follows. ```shell git checkout rX.Y # Make sure we are exactly up to date with the upstream branch. git fetch --all git reset --hard upstream/rX.Y # Cherry pick as many times as you need. git cherry-pick e32e9ded git push upstream rX.Y ``` 2. Before we officially push a new stable release to pypi, it is good practice to test out a [development release](https://pythonpackaging.info/07-Package-Release.html#Versioning-your-code) of the package. Development releases will have version numbers like `X.Y.Z.dev0`, and critically will never be installed by default by `pip`. Make a PR following [this template](https://github.com/keras-team/keras-nlp/pull/456/files) to update our version number to look like `X.Y.Z.dev0`. This PR should be based off our new release branch. You can use the following commands. ```shell git fetch --all git checkout --no-track -b version-bump-X.Y.Z.dev0 upstream/rX.Y # Update both setup.py and keras_nlp/__init__.py with an editor. git commit -m "Version bump to X.Y.Z.dev0" git push -u origin version-bump-X.Y.Z.dev0 ``` On github, make a PR from your fork to the new release branch, and ask someone to review. 3. On github, we can now create the `X.Y.Z.dev0` release. Use [this link](https://github.com/keras-team/keras-nlp/releases/new). 
This release should be titled `X.Y.Z.dev0`, and create a new tag with the same name on publish. Refer to the screenshot above for details on the github release page setup. Making a github release will automatically kick off a pypi release, as configured by [this file](.github/workflows/publish-to-pypi.yml). 4. Wait a few minutes until the release appears on pypi, then test out the release by running `pip install keras-nlp==X.Y.Z.dev0`. Try to test the package thoroughly! It is a good idea to run through a few of our guides with the new version. Fix any bugs you find, and repeat steps 2 and 3 with a new dev number (e.g. `X.Y.Z.dev1`) until you are confident in the release. 5. Before cutting the final release, we should try previewing our documentation on keras.io. This will help catch bugs with our symbol export and docstrings. The keras.io [README](https://github.com/keras-team/keras-io/) contains instructions on building and previewing the site, and you can use [this PR](https://github.com/keras-team/keras-io/pull/1134) as a reference for what to change. Ask fchollet@ to review. During development of the branch, you can pin the keras-nlp dev release in the keras-io `requirements.txt` file. Remember to update this to the official release before we merge the PR. 6. We are now ready to cut the official release! Make a PR similar to step 2, but updating the release number to `X.Y.Z` (no `.dev0` suffix). Land the PR. Confirm that the latest commit on our release branch is green before making the actual release! We should not release if there are any test failures. Make a release similar to step 3, but updating the tag and title to `X.Y.Z`. Leave "Set as pre-release" unchecked and check the box that says "Set as the latest release" if `X.Y` is the latest stable release series. Click "Publish release" when ready.
keras-nlp/RELEASE_PROCESS.md/0
{ "file_path": "keras-nlp/RELEASE_PROCESS.md", "repo_id": "keras-nlp", "token_count": 2917 }
118
# GLUE Finetuning Script This script is written to help you evaluate your model on GLUE benchmarking. It provides the functionalities below: - Load and preprocess GLUE data. - Finetuning your Keras text classification model. - Generate GLUE submission files. To use the script, you need to change the code to load your pretrained model, and run the command below: ```shell python glue.py --task_name="mrpc" --batch_size=32 \ --submission_directory="glue_submissions/" ``` By default the script finetunes on the tiniest BERT model we have available (this will be fast but not top performing). To make a real GLUE leaderboard submission, you need to call the finetuning on all tasks, then enter the submission directory then zip the submission files: ```shell for task in cola sst2 mrpc rte stsb qnli qqp; do python glue.py --task_name="$task" --submission_directory="glue_submissions/" done python glue.py --task_name="mnli_matched" \ --submission_directory="glue_submissions/" \ --save_finetuning_model="saved/mnli" python glue.py --task_name="mnli_mismatched" \ --submission_directory="glue_submissions/" \ --load_finetuning_model="saved/mnli" python glue.py --task_name="ax" \ --submission_directory="glue_submissions/" \ --load_finetuning_model="saved/mnli" cd glue_submissions zip -r submission.zip *.tsv ``` Please note that `mnli_matched`, `mnli_mismatched` and `ax` share the same training set, so we only train once on `mnli_matched` and use the saved model to evaluate on `mnli_mismatched` and `ax`. GLUE submission requires the `submission.zip` contains `.tsv` file for all tasks, otherwise it will be a failed submission. An empty `.tsv` will also fail because it checks the content. If you only want to evaluate on certain tasks, you can download the sample submission, and put the `.tsv` files for tasks you don't run inside your submission file. For example if you don't want to run the `ax` task, then you can do: ``` curl -O https://gluebenchmark.com/assets/CBOW.zip unzip CBOW.zip -d sample_submissions cp sample_submissions/AX.tsv glue_submissions ``` ## How to Use the Script To use this script on your model, you need to do 3 things: 1. Implement your custom preprocessing in `preprocess_fn()`. 2. Load your pretrained model. 3. Make the finetune model with your model. Code needing customization is wrapped between comment `Custom code block starts` and `Custom code block ends`. See instructions on each step below. ### Custom Preprocessing In all GLUE dataset, each record comes with features of one or two sentences, and one label. In the script, we load GLUE dataset in the format `(features, labels)`, where `features` is a tuple of either 1 sentence or 2 sentences. Your need to write custom preprocessing logic to convert to data to the required input of your model. For example, in the current script (finetuning for KerasNLP BERT), it is doing: ```python bert_preprocessor = keras_nlp.models.BertPreprocessor.from_preset( "bert_tiny_en_uncased" ) def preprocess_fn(feature, label): return bert_preprocessor(feature), label ``` It uses the `BertPreprocessor` to convert input feature formats. ### Load Pretrained Model As long as it is a Keras model, you can use it with this script. ### Make the Finetuning Model Users need to make a classification model based on your pretrained model for evaluation purposes. For example, [`BertClassifier`](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/bert/bert_classifier.py) takes a `Bert` model as backbone, and adds a dense layer on top of it. 
Please pay attention that different model could use different classifier structure, e.g., in [RoBERTa](https://github.com/huggingface/transformers/blob/94b3f544a1f5e04b78d87a2ae32a7ac252e22e31/src/transformers/models/roberta/modeling_roberta.py#L1437-L1456), it has 2 dense layers. If you are using pretrained model from an OSS package, please find the correct classifier. If you use a custom model, you can start experimenting with a simple dense layer, and adjust the structure based on its performance. ## Flags Table | Flags Name | Explanation | Default | |---------------------------- |------------------------------------------------- |--------- | | task_name | The name of the GLUE task to finetune on. | "mrpc" | | batch_size | Data batch size | 32 | | epochs | Number of epochs to run finetuning. | 2 | | learning_rate | The optimizer's learning rate. | 5e-5 | | tpu_name | The name of TPU to connect to. | None | | submission_directory | The file path to save the glue submission file. | None | | load_finetuning_model | The path to load the finetuning model. | None | | save_finetuning_model | The path to save the finetuning model. | None |
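To make the finetuning-model step above more concrete, here is a minimal sketch of one possible classifier head on a KerasNLP backbone. The preset, head structure, and class count are illustrative assumptions, not the script's actual implementation; for BERT you can also simply use `keras_nlp.models.BertClassifier.from_preset(...)` as mentioned above.

```python
import keras
import keras_nlp

# Illustrative sketch only: preset, head, and class count are assumptions.
backbone = keras_nlp.models.BertBackbone.from_preset("bert_tiny_en_uncased")

inputs = backbone.input  # dict of token_ids / segment_ids / padding_mask
pooled = backbone(inputs)["pooled_output"]
outputs = keras.layers.Dense(2)(pooled)  # 2 classes, e.g. for MRPC

finetuning_model = keras.Model(inputs, outputs)
finetuning_model.compile(
    optimizer=keras.optimizers.Adam(5e-5),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
```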
keras-nlp/examples/glue_benchmark/README.md/0
{ "file_path": "keras-nlp/examples/glue_benchmark/README.md", "repo_id": "keras-nlp", "token_count": 1785 }
119
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.backend import config from keras_nlp.backend import ops from keras_nlp.backend import random from keras_nlp.layers.modeling.cached_multi_head_attention import ( CachedMultiHeadAttention, ) from keras_nlp.tests.test_case import TestCase class CachedMultiHeadAttentionTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( cls=CachedMultiHeadAttention, init_kwargs={ "num_heads": 2, "key_dim": 4, "dropout": 0.1, }, input_data={ "query": random.uniform(shape=(2, 4, 6)), "value": random.uniform(shape=(2, 4, 6)), }, expected_output_shape=(2, 4, 6), expected_num_trainable_weights=8, expected_num_non_trainable_variables=1, # Keras 2 does not handle mixed precision correctly when not set # globally. run_precision_checks=config.keras_3(), ) def test_cache_call_is_correct(self): batch_size = 2 seq_len = 5 num_heads = 2 key_dim = 4 hidden_dim = num_heads * key_dim input_shape = (batch_size, seq_len, hidden_dim) x = random.uniform(shape=input_shape) input_cache = ops.zeros((batch_size, 2, seq_len, num_heads, key_dim)) # Use a causal mask. mask = ops.tril(ops.ones((seq_len, seq_len))) outputs = ops.zeros_like(x) layer = CachedMultiHeadAttention(num_heads=num_heads, key_dim=key_dim) no_loop_outputs, no_loop_cache = layer( x, x, cache=input_cache, cache_update_index=0, attention_mask=mask, ) def loop_body(i, outputs, cache): # Compute the rest tokens. next_input = ops.slice(x, (0, i, 0), (batch_size, 1, hidden_dim)) next_mask = ops.slice(mask, (i, 0), (1, seq_len)) next_output, cache = layer( query=next_input, value=next_input, cache=cache, cache_update_index=i, attention_mask=next_mask, ) outputs = ops.slice_update(outputs, [0, i, 0], next_output) return i + 1, outputs, cache def call(outputs, cache): _, outputs, cache = ops.while_loop( cond=lambda i, outputs, cache: i < seq_len, body=loop_body, loop_vars=[0, outputs, cache], ) return outputs, cache output, output_cache = call(outputs, input_cache) self.assertAllClose(output, no_loop_outputs) self.assertAllClose(output_cache, no_loop_cache)
keras-nlp/keras_nlp/layers/modeling/cached_multi_head_attention_test.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/modeling/cached_multi_head_attention_test.py", "repo_id": "keras-nlp", "token_count": 1560 }
120
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import parameterized from keras_nlp.backend import ops from keras_nlp.backend import random from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder from keras_nlp.tests.test_case import TestCase class TransformerDecoderTest(TestCase): @parameterized.named_parameters( ("without_norm_first", False), ("with_norm_first", True), ) def test_layer_behaviors(self, normalize_first): self.run_layer_test( cls=TransformerDecoder, init_kwargs={ "intermediate_dim": 4, "num_heads": 2, "normalize_first": normalize_first, "activation": "relu", "layer_norm_epsilon": 1e-05, "kernel_initializer": "HeNormal", "bias_initializer": "Zeros", "dropout": 0.1, }, input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), expected_num_trainable_weights=16, expected_num_non_trainable_variables=3, # dropout rng seeds ) @parameterized.named_parameters( ("without_norm_first", False), ("with_norm_first", True), ) def test_layer_behaviors_with_cross_attention(self, normalize_first): self.run_layer_test( cls=TransformerDecoder, init_kwargs={ "intermediate_dim": 4, "num_heads": 2, "normalize_first": normalize_first, "activation": "relu", "layer_norm_epsilon": 1e-05, "kernel_initializer": "HeNormal", "bias_initializer": "Zeros", "dropout": 0.1, }, input_data={ "decoder_sequence": random.uniform(shape=(2, 4, 6)), "encoder_sequence": random.uniform(shape=(2, 4, 6)), }, expected_output_shape=(2, 4, 6), expected_num_trainable_weights=26, expected_num_non_trainable_variables=5, # dropout rng seeds ) def test_invalid_calls(self): encoder_input = ops.zeros((2, 4, 6)) decoder_input = ops.zeros((2, 4, 6)) # with cross-attention. decoder = TransformerDecoder( intermediate_dim=4, num_heads=2, ) decoder(decoder_input, encoder_input) # should raise ValueError if encoder_input is not provided with self.assertRaises(ValueError): decoder(decoder_input) # without cross-attention. 
decoder = TransformerDecoder( intermediate_dim=4, num_heads=2, ) decoder(decoder_input) # should raise ValueError if encoder_input is provided with self.assertRaises(ValueError): decoder(decoder_input, encoder_input) def test_value_error_when_invalid_kernel_inititalizer(self): with self.assertRaises(ValueError): TransformerDecoder( intermediate_dim=4, num_heads=2, dropout=0.5, kernel_initializer="Invalid", ) def test_mask_propagation(self): decoder = TransformerDecoder( intermediate_dim=4, num_heads=2, ) decoder_sequence = random.uniform(shape=[1, 4, 6]) encoder_sequence = random.uniform(shape=[1, 4, 6]) mask = ops.array([[True, True, False, False]]) decoder_sequence._keras_mask = mask outputs = decoder(decoder_sequence, encoder_sequence) self.assertAllEqual(outputs._keras_mask, mask) def test_mask_propagation_without_cross_attention(self): decoder = TransformerDecoder( intermediate_dim=4, num_heads=2, ) decoder_sequence = random.uniform(shape=[1, 4, 6]) mask = ops.array([[True, True, False, False]]) decoder_sequence._keras_mask = mask outputs = decoder(decoder_sequence) self.assertAllEqual(outputs._keras_mask, mask) def test_cache_call_is_correct(self): batch_size, seq_len, num_heads, key_dim = 2, 5, 2, 4 hidden_dim = num_heads * key_dim input_shape = (batch_size, seq_len, hidden_dim) x = random.uniform(shape=input_shape) input_cache = ops.zeros((batch_size, 2, seq_len, num_heads, key_dim)) outputs = ops.zeros_like(x) layer = TransformerDecoder( intermediate_dim=4, num_heads=num_heads, ) no_loop_outputs, no_loop_cache = layer( x, self_attention_cache=input_cache, self_attention_cache_update_index=0, ) def loop_body(i, outputs, cache): # Compute the rest tokens. next_input = ops.slice(x, (0, i, 0), (batch_size, 1, hidden_dim)) next_output, cache = layer( decoder_sequence=next_input, self_attention_cache=cache, self_attention_cache_update_index=i, ) outputs = ops.slice_update(outputs, [0, i, 0], next_output) return i + 1, outputs, cache def call(outputs, cache): _, outputs, cache = ops.while_loop( cond=lambda i, outputs, cache: i < seq_len, body=loop_body, loop_vars=[0, outputs, cache], ) return outputs, cache output, output_cache = call(outputs, input_cache) self.assertAllClose(output, no_loop_outputs) self.assertAllClose(output_cache, no_loop_cache) def test_cache_call_is_correct_with_cross_attention(self): batch_size, seq_len, num_heads, key_dim = 2, 5, 2, 4 hidden_dim = num_heads * key_dim input_shape = (batch_size, seq_len, hidden_dim) cache_shape = (batch_size, 2, seq_len, num_heads, key_dim) decoder_sequence = random.uniform(shape=input_shape) encoder_sequence = random.uniform(shape=input_shape) empty_cache = ops.zeros(cache_shape) outputs = ops.zeros_like(decoder_sequence) layer = TransformerDecoder( intermediate_dim=4, num_heads=num_heads, ) no_loop_outputs, no_loop_self_cache, no_loop_cross_cache = layer( decoder_sequence, encoder_sequence, self_attention_cache=empty_cache, self_attention_cache_update_index=0, cross_attention_cache=empty_cache, cross_attention_cache_update_index=0, ) def loop_body(i, outputs, self_cache, cross_cache): # Compute the rest tokens. 
start, size = (0, i, 0), (batch_size, 1, hidden_dim) next_input = ops.slice(decoder_sequence, start, size) next_output, self_cache, cross_cache = layer( decoder_sequence=next_input, encoder_sequence=encoder_sequence, self_attention_cache=self_cache, self_attention_cache_update_index=i, cross_attention_cache=cross_cache, ) outputs = ops.slice_update(outputs, start, next_output) return i + 1, outputs, self_cache, cross_cache def call(outputs, self_cache, cross_cache): _, outputs, self_cache, cross_cache = ops.while_loop( cond=lambda i, outputs, self_cache, cross_cache: i < seq_len, body=loop_body, loop_vars=[0, outputs, self_cache, cross_cache], ) return outputs, self_cache, cross_cache output, self_cache, cross_cache = call( outputs, empty_cache, no_loop_cross_cache ) self.assertAllClose(output, no_loop_outputs) self.assertAllClose(self_cache, no_loop_self_cache) self.assertAllClose(cross_cache, no_loop_cross_cache) def test_different_feature_dimension_for_encoder_and_decoder_sequence(self): decoder = TransformerDecoder( intermediate_dim=4, num_heads=2, ) decoder_sequence = random.uniform(shape=[1, 4, 6]) encoder_sequence = random.uniform(shape=[1, 4, 5]) decoder(decoder_sequence, encoder_sequence)
keras-nlp/keras_nlp/layers/modeling/transformer_decoder_test.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/modeling/transformer_decoder_test.py", "repo_id": "keras-nlp", "token_count": 4234 }
121
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.tests.test_case import TestCase class StartEndPackerTest(TestCase): def test_dense_input(self): input_data = [5, 6, 7] start_end_packer = StartEndPacker(sequence_length=5) output = start_end_packer(input_data) expected_output = [5, 6, 7, 0, 0] self.assertAllEqual(output, expected_output) def test_dense_2D_input(self): input_data = [[5, 6, 7]] start_end_packer = StartEndPacker(sequence_length=5) output = start_end_packer(input_data) expected_output = [[5, 6, 7, 0, 0]] self.assertAllEqual(output, expected_output) def test_ragged_input(self): input_data = [[5, 6, 7], [8, 9, 10, 11]] start_end_packer = StartEndPacker(sequence_length=5) output = start_end_packer(input_data) expected_output = [[5, 6, 7, 0, 0], [8, 9, 10, 11, 0]] self.assertAllEqual(output, expected_output) def test_start_end_token(self): input_data = [[5, 6, 7], [8, 9, 10, 11]] start_end_packer = StartEndPacker( sequence_length=6, start_value=1, end_value=2 ) output = start_end_packer(input_data) expected_output = [[1, 5, 6, 7, 2, 0], [1, 8, 9, 10, 11, 2]] self.assertAllEqual(output, expected_output) def test_multiple_start_end_tokens(self): input_data = [[5, 6, 7], [8, 9, 10, 11, 12, 13]] start_end_packer = StartEndPacker( sequence_length=8, start_value=[1, 2], end_value=[3, 4], pad_value=0, ) output = start_end_packer(input_data) expected_output = [[1, 2, 5, 6, 7, 3, 4, 0], [1, 2, 8, 9, 10, 11, 3, 4]] self.assertAllEqual(output, expected_output) def test_start_end_padding_value(self): input_data = [[5, 6, 7], [8, 9, 10, 11]] start_end_packer = StartEndPacker( sequence_length=7, start_value=1, end_value=2, pad_value=3 ) output = start_end_packer(input_data) expected_output = [[1, 5, 6, 7, 2, 3, 3], [1, 8, 9, 10, 11, 2, 3]] self.assertAllEqual(output, expected_output) def test_end_token_value_during_truncation(self): input_data = [[5, 6], [8, 9, 10, 11, 12, 13]] start_end_packer = StartEndPacker( sequence_length=5, start_value=1, end_value=2, pad_value=0 ) output = start_end_packer(input_data) expected_output = [[1, 5, 6, 2, 0], [1, 8, 9, 10, 2]] self.assertAllEqual(output, expected_output) def test_string_input(self): input_data = [["KerasNLP", "is", "awesome"], ["amazing"]] start_end_packer = StartEndPacker( sequence_length=5, start_value="[START]", end_value="[END]", pad_value="[PAD]", ) output = start_end_packer(input_data) expected_output = [ ["[START]", "KerasNLP", "is", "awesome", "[END]"], ["[START]", "amazing", "[END]", "[PAD]", "[PAD]"], ] self.assertAllEqual(output, expected_output) def test_string_input_with_multiple_special_values(self): input_data = [["KerasNLP", "is", "awesome"], ["amazing"]] start_end_packer = StartEndPacker( sequence_length=6, start_value=["[END]", "[START]"], end_value="[END]", pad_value="[PAD]", ) output = start_end_packer(input_data) expected_output = [ ["[END]", "[START]", "KerasNLP", "is", "awesome", 
"[END]"], ["[END]", "[START]", "amazing", "[END]", "[PAD]", "[PAD]"], ] self.assertAllEqual(output, expected_output) def test_special_token_dtype_error(self): with self.assertRaises(ValueError): StartEndPacker(sequence_length=5, start_value=1.0) def test_batch(self): start_end_packer = StartEndPacker( sequence_length=7, start_value=1, end_value=2, pad_value=3 ) ds = tf.data.Dataset.from_tensor_slices( tf.ragged.constant([[5, 6, 7], [8, 9, 10, 11]]) ) ds = ds.batch(2).map(start_end_packer) output = ds.take(1).get_single_element() exp_output = [[1, 5, 6, 7, 2, 3, 3], [1, 8, 9, 10, 11, 2, 3]] self.assertAllEqual(output, exp_output) def test_call_overrides(self): x = [5, 6, 7] packer = StartEndPacker(start_value=1, end_value=2, sequence_length=4) self.assertAllEqual(packer(x), [1, 5, 6, 2]) self.assertAllEqual(packer(x, add_start_value=False), [5, 6, 7, 2]) self.assertAllEqual(packer(x, add_end_value=False), [1, 5, 6, 7]) self.assertAllEqual(packer(x, sequence_length=2), [1, 2]) def test_get_config(self): start_end_packer = StartEndPacker( sequence_length=512, start_value=10, end_value=20, pad_value=100, name="start_end_packer_test", ) config = start_end_packer.get_config() expected_config_subset = { "sequence_length": 512, "start_value": 10, "end_value": 20, "pad_value": 100, } self.assertEqual(config, {**config, **expected_config_subset})
keras-nlp/keras_nlp/layers/preprocessing/start_end_packer_test.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/preprocessing/start_end_packer_test.py", "repo_id": "keras-nlp", "token_count": 2836 }
122
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest

from keras_nlp.backend import ops
from keras_nlp.models.albert.albert_backbone import AlbertBackbone
from keras_nlp.tests.test_case import TestCase


class AlbertBackboneTest(TestCase):
    def setUp(self):
        self.init_kwargs = {
            "vocabulary_size": 10,
            "num_layers": 2,
            "num_heads": 2,
            "num_groups": 1,
            "num_inner_repetitions": 1,
            "embedding_dim": 16,
            "hidden_dim": 2,
            "intermediate_dim": 4,
            "max_sequence_length": 5,
        }
        self.input_data = {
            "token_ids": ops.ones((2, 5), dtype="int32"),
            "segment_ids": ops.zeros((2, 5), dtype="int32"),
            "padding_mask": ops.ones((2, 5), dtype="int32"),
        }

    def test_backbone_basics(self):
        self.run_backbone_test(
            cls=AlbertBackbone,
            init_kwargs=self.init_kwargs,
            input_data=self.input_data,
            expected_output_shape={
                "sequence_output": (2, 5, 2),
                "pooled_output": (2, 2),
            },
        )

    def test_error_for_invalid_num_groups(self):
        with self.assertRaises(ValueError):
            self.model = AlbertBackbone(
                vocabulary_size=10,
                num_layers=3,
                num_heads=2,
                num_groups=2,
                num_inner_repetitions=1,
                embedding_dim=4,
                hidden_dim=64,
                intermediate_dim=128,
            )

    @pytest.mark.large
    def test_saved_model(self):
        self.run_model_saving_test(
            cls=AlbertBackbone,
            init_kwargs=self.init_kwargs,
            input_data=self.input_data,
        )

    @pytest.mark.large
    def test_smallest_preset(self):
        self.run_preset_test(
            cls=AlbertBackbone,
            preset="albert_base_en_uncased",
            input_data={
                "token_ids": ops.array([[2, 13, 1, 3]], dtype="int32"),
                "segment_ids": ops.zeros((1, 4), dtype="int32"),
                "padding_mask": ops.ones((1, 4), dtype="int32"),
            },
            expected_output_shape={
                "sequence_output": (1, 4, 768),
                "pooled_output": (1, 768),
            },
            # The forward pass from a preset should be stable!
            expected_partial_output={
                "sequence_output": (
                    ops.array(
                        [1.830863, 1.698645, -1.819195, -0.53382, -0.38114]
                    )
                ),
                "pooled_output": (
                    ops.array(
                        [0.328261, -0.415397, -0.388745, 0.156846, 0.657874]
                    )
                ),
            },
        )

    @pytest.mark.extra_large
    def test_all_presets(self):
        for preset in AlbertBackbone.presets:
            self.run_preset_test(
                cls=AlbertBackbone,
                preset=preset,
                input_data=self.input_data,
            )
keras-nlp/keras_nlp/models/albert/albert_backbone_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/albert/albert_backbone_test.py", "repo_id": "keras-nlp", "token_count": 1946 }
123
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_nlp.api_export import keras_nlp_export from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.models.bart.bart_presets import backbone_presets from keras_nlp.models.bart.bart_tokenizer import BartTokenizer from keras_nlp.models.preprocessor import Preprocessor from keras_nlp.utils.keras_utils import ( convert_inputs_to_list_of_tensor_segments, ) from keras_nlp.utils.keras_utils import pack_x_y_sample_weight from keras_nlp.utils.python_utils import classproperty @keras_nlp_export("keras_nlp.models.BartPreprocessor") class BartPreprocessor(Preprocessor): """A BART preprocessing layer which tokenizes and packs inputs. This preprocessing layer will do three things: 1. Tokenize both encoder inputs and decoder inputs using the `tokenizer`. Both inputs can contain only one segment. 2. Add the appropriate special tokens - `"<s>"`, `"</s>"` and `"<pad>"`. 3. Construct a dictionary with keys `"encoder_token_ids"`, `"encoder_padding_mask"`, `"decoder_token_ids"`, `"decoder_padding_mask"` that can be passed directly to a BART model. Args: tokenizer: A `keras_nlp.models.BartTokenizer` instance. encoder_sequence_length: The length of the packed encoder inputs. decoder_sequence_length: The length of the packed decoder inputs. Call arguments: x: A dictionary with `encoder_text` and `decoder_text` as its keys. Each value in the dictionary should be a tensor of single string sequences. Inputs may be batched or unbatched. Raw python inputs will be converted to tensors. y: Any label data. Will be passed through unaltered. sample_weight: Any label weight data. Will be passed through unaltered. Examples: Directly calling the layer on data. ```python preprocessor = keras_nlp.models.BartPreprocessor.from_preset("bart_base_en") # Preprocess unbatched inputs. inputs = { "encoder_text": "The fox was sleeping.", "decoder_text": "The fox was awake." } preprocessor(inputs) # Preprocess batched inputs. inputs = { "encoder_text": ["The fox was sleeping.", "The lion was quiet."], "decoder_text": ["The fox was awake.", "The lion was roaring."] } preprocessor(inputs) # Custom vocabulary. vocab = { "<s>": 0, "<pad>": 1, "</s>": 2, "Ġafter": 5, "noon": 6, "Ġsun": 7, } merges = ["Ġ a", "Ġ s", "Ġ n", "e r", "n o", "o n", "Ġs u", "Ġa f", "no on"] merges += ["Ġsu n", "Ġaf t", "Ġaft er"] tokenizer = keras_nlp.models.BartTokenizer( vocabulary=vocab, merges=merges, ) preprocessor = keras_nlp.models.BartPreprocessor( tokenizer=tokenizer, encoder_sequence_length=20, decoder_sequence_length=10, ) inputs = { "encoder_text": "The fox was sleeping.", "decoder_text": "The fox was awake." } preprocessor(inputs) ``` Mapping with `tf.data.Dataset`. ```python preprocessor = keras_nlp.models.BartPreprocessor.from_preset("bart_base_en") # Map labeled single sentences. 
features = { "encoder_text": tf.constant( ["The fox was sleeping.", "The lion was quiet."] ), "decoder_text": tf.constant( ["The fox was awake.", "The lion was silent."] ) } labels = tf.constant(["True", "False"]) ds = tf.data.Dataset.from_tensor_slices((features, labels)) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map unlabeled single sentences. features = { "encoder_text": tf.constant( ["The fox was sleeping.", "The lion was quiet."] ), "decoder_text": tf.constant( ["The fox was awake.", "The lion was roaring."] ) } ds = tf.data.Dataset.from_tensor_slices(features) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) ``` """ def __init__( self, tokenizer, encoder_sequence_length=1024, decoder_sequence_length=1024, **kwargs, ): super().__init__(**kwargs) self.tokenizer = tokenizer self.encoder_packer = None self.decoder_packer = None self.encoder_sequence_length = encoder_sequence_length self.decoder_sequence_length = decoder_sequence_length def build(self, input_shape): # Defer packer creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. # TODO: Use `MultiSegmentPacker` instead of `StartEndPacker` once we # want to move to multi-segment packing and have improved # `MultiSegmentPacker`'s performance. self.encoder_packer = StartEndPacker( start_value=self.tokenizer.start_token_id, end_value=self.tokenizer.end_token_id, pad_value=self.tokenizer.pad_token_id, sequence_length=self.encoder_sequence_length, return_padding_mask=True, ) # The decoder is packed a bit differently; the format is as follows: # `[end_token_id, start_token_id, tokens..., end_token_id, padding...]`. self.decoder_packer = StartEndPacker( start_value=[ self.tokenizer.end_token_id, self.tokenizer.start_token_id, ], end_value=self.tokenizer.end_token_id, pad_value=self.tokenizer.pad_token_id, sequence_length=self.decoder_sequence_length, return_padding_mask=True, ) self.built = True def call( self, x, y=None, sample_weight=None, *, encoder_sequence_length=None, decoder_sequence_length=None, # `sequence_length` is an alias for `decoder_sequence_length` sequence_length=None, ): if not ( isinstance(x, dict) and all(k in x for k in ("encoder_text", "decoder_text")) ): raise ValueError( '`x` must be a dictionary, containing the keys `"encoder_text"`' f' and `"decoder_text"`. Received x={x}.' ) if encoder_sequence_length is None: encoder_sequence_length = self.encoder_sequence_length decoder_sequence_length = decoder_sequence_length or sequence_length if decoder_sequence_length is None: decoder_sequence_length = self.decoder_sequence_length encoder_text = x["encoder_text"] decoder_text = x["decoder_text"] encoder_text = convert_inputs_to_list_of_tensor_segments(encoder_text) decoder_text = convert_inputs_to_list_of_tensor_segments(decoder_text) if len(encoder_text) > 1 or len(decoder_text) > 1: raise ValueError( '`BARTPreprocessor` requires both `"encoder_text"` and ' f'`"decoder_text"` to contain only one segment, but received ' f"{len(encoder_text)} and {len(decoder_text)}, respectively." 
) encoder_inputs = self.tokenizer(encoder_text[0]) encoder_token_ids, encoder_padding_mask = self.encoder_packer( encoder_inputs, sequence_length=encoder_sequence_length, ) decoder_inputs = self.tokenizer(decoder_text[0]) decoder_token_ids, decoder_padding_mask = self.decoder_packer( decoder_inputs, sequence_length=decoder_sequence_length, ) x = { "encoder_token_ids": encoder_token_ids, "encoder_padding_mask": encoder_padding_mask, "decoder_token_ids": decoder_token_ids, "decoder_padding_mask": decoder_padding_mask, } return pack_x_y_sample_weight(x, y, sample_weight) def get_config(self): config = super().get_config() config.update( { "encoder_sequence_length": self.encoder_sequence_length, "decoder_sequence_length": self.decoder_sequence_length, } ) return config @property def encoder_sequence_length(self): """The padded length of encoder input sequences.""" return self._encoder_sequence_length @encoder_sequence_length.setter def encoder_sequence_length(self, value): self._encoder_sequence_length = value if self.encoder_packer is not None: self.encoder_packer.sequence_length = value @property def decoder_sequence_length(self): """The padded length of decoder input sequences.""" return self._decoder_sequence_length @decoder_sequence_length.setter def decoder_sequence_length(self, value): self._decoder_sequence_length = value if self.decoder_packer is not None: self.decoder_packer.sequence_length = value @property def sequence_length(self): """Alias for `decoder_sequence_length`.""" return self.decoder_sequence_length @sequence_length.setter def sequence_length(self, value): self.decoder_sequence_length = value @classproperty def tokenizer_cls(cls): return BartTokenizer @classproperty def presets(cls): return copy.deepcopy(backbone_presets)
keras-nlp/keras_nlp/models/bart/bart_preprocessor.py/0
{ "file_path": "keras-nlp/keras_nlp/models/bart/bart_preprocessor.py", "repo_id": "keras-nlp", "token_count": 4286 }
124
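The decoder packing scheme described in the `build()` comment of `BartPreprocessor` above, `[end_token_id, start_token_id, tokens..., end_token_id, padding...]`, can be reproduced with `StartEndPacker` on its own. The sketch below is not part of the dataset; the token IDs (start=0, end=2, pad=1) are placeholder values chosen for illustration, not necessarily those of any real BART preset.

```python
from keras_nlp.layers import StartEndPacker

decoder_packer = StartEndPacker(
    start_value=[2, 0],  # [end_token_id, start_token_id] (placeholder IDs)
    end_value=2,         # end_token_id
    pad_value=1,         # pad_token_id
    sequence_length=8,
    return_padding_mask=True,
)
token_ids, padding_mask = decoder_packer([[5, 6, 7]])
# token_ids    -> [[2, 0, 5, 6, 7, 2, 1, 1]]
# padding_mask -> [[1, 1, 1, 1, 1, 1, 0, 0]]
```

Passing `start_value` as a list is what lets the packed decoder sequence begin with two special tokens, exactly as the preprocessor's `decoder_packer` does.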
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer from keras_nlp.tests.test_case import TestCase class DebertaV3TokenizerTest(TestCase): def setUp(self): # Generated using create_deberta_v3_test_proto.py proto = os.path.join( self.get_test_data_dir(), "deberta_v3_test_vocab.spm" ) self.tokenizer = DebertaV3Tokenizer(proto=proto) self.init_kwargs = {"proto": proto} self.input_data = ["the quick brown fox", "the earth is round"] def test_tokenizer_basics(self): self.run_preprocessing_layer_test( cls=DebertaV3Tokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output=[[5, 10, 6, 8], [5, 7, 9, 11]], ) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): DebertaV3Tokenizer( # Generated using create_no_special_token_proto.py proto=os.path.join( self.get_test_data_dir(), "no_special_token_vocab.spm" ) ) def test_mask_token_handling(self): tokenizer = DebertaV3Tokenizer(**self.init_kwargs) self.assertEqual(tokenizer.get_vocabulary()[4], "[MASK]") self.assertEqual(tokenizer.id_to_token(4), "[MASK]") self.assertEqual(tokenizer.token_to_id("[MASK]"), 4) input_data = [[5, 10, 6, 8, self.tokenizer.mask_token_id]] output = tokenizer.detokenize(input_data) self.assertEqual(output, ["the quick brown fox"]) @pytest.mark.large def test_smallest_preset(self): self.run_preset_test( cls=DebertaV3Tokenizer, preset="deberta_v3_extra_small_en", input_data=["The quick brown fox."], expected_output=[[279, 1538, 3258, 16123, 260]], ) @pytest.mark.extra_large def test_all_presets(self): for preset in DebertaV3Tokenizer.presets: self.run_preset_test( cls=DebertaV3Tokenizer, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py", "repo_id": "keras-nlp", "token_count": 1257 }
125
# Copyright 2024 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pytest from keras_nlp.models.gemma.gemma_preprocessor import GemmaPreprocessor from keras_nlp.models.gemma.gemma_tokenizer import GemmaTokenizer from keras_nlp.tests.test_case import TestCase @pytest.mark.keras_3_only class GemmaPreprocessorTest(TestCase): def setUp(self): self.tokenizer = GemmaTokenizer( proto=os.path.join( self.get_test_data_dir(), "gemma_test_vocab.spm" ), ) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, } self.input_data = ["the quick brown fox"] def test_preprocessor_basics(self): self.run_preprocessing_layer_test( cls=GemmaPreprocessor, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output={ "token_ids": [[1, 4, 9, 5, 7, 2, 0, 0]], "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]], }, ) def test_no_start_end_token(self): input_data = ["the quick brown fox"] * 4 preprocessor = GemmaPreprocessor( tokenizer=self.tokenizer, sequence_length=8, add_start_token=False, add_end_token=False, ) x = preprocessor(input_data) self.assertAllEqual(x["token_ids"], [[4, 9, 5, 7, 0, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) def test_sequence_length_override(self): input_data = "the quick brown fox" preprocessor = GemmaPreprocessor(**self.init_kwargs) x = preprocessor(input_data, sequence_length=4) self.assertAllEqual(x["token_ids"], [1, 4, 9, 2]) @pytest.mark.extra_large def test_all_presets(self): for preset in GemmaPreprocessor.presets: self.run_preset_test( cls=GemmaPreprocessor, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/gemma/gemma_preprocessor_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/gemma/gemma_preprocessor_test.py", "repo_id": "keras-nlp", "token_count": 1161 }
126
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding from keras_nlp.utils.keras_utils import clone_initializer class LlamaAttention(keras.layers.Layer): """Grouped query attention for Llama models""" def __init__( self, num_query_heads, num_key_value_heads, rope_scaling_factor=1.0, kernel_initializer="glorot_uniform", rope_max_wavelength=10000, max_sequence_length=512, **kwargs, ): super().__init__(**kwargs) self.num_query_heads = num_query_heads self.num_key_value_heads = num_key_value_heads self.num_key_value_groups = num_query_heads // num_key_value_heads self.kernel_initializer = keras.initializers.get(kernel_initializer) self.max_sequence_length = max_sequence_length self.rope_scaling_factor = rope_scaling_factor self.rope_max_wavelength = rope_max_wavelength def build(self, inputs_shape): self.hidden_dim = inputs_shape[-1] self.attn_head_size = self.hidden_dim // self.num_query_heads # Einsum variables: # b = batch size # q = query length # k = key/value length # m = model dim # u = num query heads # v = num key/value heads # h = head dim self._query_dense = keras.layers.EinsumDense( equation="bqm,muh->bquh", output_shape=(None, self.num_query_heads, self.attn_head_size), kernel_initializer=clone_initializer(self.kernel_initializer), dtype=self.dtype_policy, name="query", ) self._query_dense.build(inputs_shape) self._key_dense = keras.layers.EinsumDense( equation="bkm,mvh->bkvh", output_shape=(None, self.num_key_value_heads, self.attn_head_size), kernel_initializer=clone_initializer(self.kernel_initializer), dtype=self.dtype_policy, name="key", ) self._key_dense.build(inputs_shape) self._value_dense = keras.layers.EinsumDense( equation="bkm,mvh->bkvh", output_shape=(None, self.num_key_value_heads, self.attn_head_size), kernel_initializer=clone_initializer(self.kernel_initializer), dtype=self.dtype_policy, name="value", ) self._value_dense.build(inputs_shape) self._softmax = keras.layers.Softmax( axis=-1, dtype="float32", name="attention_softmax", ) self._output_dense = keras.layers.EinsumDense( equation="bqm,mh->bqh", output_shape=(None, self.hidden_dim), kernel_initializer=clone_initializer(self.kernel_initializer), dtype=self.dtype_policy, name="attention_output", ) self._output_dense.build(inputs_shape) self._rotary_embedding_layer = RotaryEmbedding( max_wavelength=self.rope_max_wavelength, scaling_factor=self.rope_scaling_factor, dtype=self.dtype_policy, ) self._rotary_embedding_layer.build(inputs_shape) self.built = True def call( self, hidden_states, attention_mask=None, cache=None, cache_update_index=None, ): query = self._query_dense(hidden_states) if cache is not None: key_cache = cache[:, 0, ...] value_cache = cache[:, 1, ...] 
if cache_update_index is None: key = key_cache value = value_cache else: key_update = self._key_dense(hidden_states) value_update = self._value_dense(hidden_states) start = [0, cache_update_index, 0, 0] key = ops.slice_update(key_cache, start, key_update) value = ops.slice_update(value_cache, start, value_update) cache = ops.stack((key, value), axis=1) else: if cache_update_index is not None: raise ValueError( "`cache_update_index` should not be set if `cache` is " f"`None`. Received: cache={cache}, " f"cache_update_index={cache_update_index}" ) key = self._key_dense(hidden_states) value = self._value_dense(hidden_states) query = self._rotary_embedding_layer(query) key = self._rotary_embedding_layer(key) key = ops.tile(key, [1, 1, self.num_key_value_groups, 1]) value = ops.tile(value, [1, 1, self.num_key_value_groups, 1]) attention_output, attention_scores = self._compute_attention( query, key, value, attention_mask ) attention_output_shape = ops.shape(attention_output) attention_output = ops.reshape( attention_output, [ attention_output_shape[0], attention_output_shape[1], self.hidden_dim, ], ) attention_output = self._output_dense(attention_output) if cache is not None: return (attention_output, cache) return attention_output def _masked_softmax(self, attention_scores, attention_mask=None): if attention_mask is not None: mask_expansion_axis = -3 for _ in range( len(attention_scores.shape) - len(attention_mask.shape) ): attention_mask = ops.expand_dims( attention_mask, axis=mask_expansion_axis ) return self._softmax(attention_scores, attention_mask) def _compute_attention(self, query, key, value, attention_mask=None): attention_scores = ops.einsum("aecd,abcd->acbe", key, query) norm_factor = ops.sqrt( ops.convert_to_tensor(self.attn_head_size, self.compute_dtype) ) attention_scores /= norm_factor attention_scores = self._masked_softmax( attention_scores, attention_mask ) attention_scores = ops.cast(attention_scores, self.compute_dtype) attention_output = ops.einsum( "acbe,aecd->abcd", attention_scores, value ) return attention_output, attention_scores def get_config(self): config = super().get_config() config.update( { "num_query_heads": self.num_query_heads, "hidden_dim": self.hidden_dim, "kernel_initializer": keras.initializers.serialize( self.kernel_initializer ), "rope_max_wavelength": self.rope_max_wavelength, "rope_scaling_factor": self.rope_scaling_factor, "num_key_value_heads": self.num_key_value_heads, "max_sequence_length": self.max_sequence_length, } ) return config
keras-nlp/keras_nlp/models/llama/llama_attention.py/0
{ "file_path": "keras-nlp/keras_nlp/models/llama/llama_attention.py", "repo_id": "keras-nlp", "token_count": 3645 }
127
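A note on the grouped-query attention implemented by `LlamaAttention` above: each key/value head is shared by `num_query_heads // num_key_value_heads` query heads, which is why `call()` tiles the key and value tensors along the head axis before the attention einsums. The NumPy sketch below only illustrates that shape arithmetic under assumed head counts; it is not the layer's implementation.

```python
import numpy as np

batch, seq_len, head_dim = 1, 4, 16
num_query_heads, num_key_value_heads = 8, 2  # assumed head counts
num_key_value_groups = num_query_heads // num_key_value_heads  # 4

# Shapes as produced by the EinsumDense projections in `build()`:
query = np.zeros((batch, seq_len, num_query_heads, head_dim))
key = np.zeros((batch, seq_len, num_key_value_heads, head_dim))

# `ops.tile(key, [1, 1, num_key_value_groups, 1])` expands the 2 key/value
# heads across the head axis so it lines up with the 8 query heads.
key = np.tile(key, (1, 1, num_key_value_groups, 1))
assert key.shape == query.shape  # (1, 4, 8, 16)
```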
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy

from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker
from keras_nlp.models.mistral.mistral_presets import backbone_presets
from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer
from keras_nlp.models.preprocessor import Preprocessor
from keras_nlp.utils.keras_utils import (
    convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.python_utils import classproperty


@keras_nlp_export("keras_nlp.models.MistralPreprocessor")
class MistralPreprocessor(Preprocessor):
    """A Mistral preprocessing layer which tokenizes and packs inputs.

    This preprocessing layer will do three things:

     1. Tokenize any number of input segments using the `tokenizer`.
     2. Pack the inputs together using a `keras_nlp.layers.StartEndPacker`
        with the appropriate tokens.
     3. Construct a dictionary with keys `"token_ids"`, and `"padding_mask"`
        that can be passed directly to `keras_nlp.models.MistralBackbone`.

    This layer can be used directly with `tf.data.Dataset.map` to preprocess
    string data in the `(x, y, sample_weight)` format used by
    `keras.Model.fit`.

    Args:
        tokenizer: A `keras_nlp.models.MistralTokenizer` instance.
        sequence_length: The length of the packed inputs.
        add_start_token: If `True`, the preprocessor will prepend the tokenizer
            start token to each input sequence. Default is `True`.
        add_end_token: If `True`, the preprocessor will append the tokenizer
            end token to each input sequence. Default is `False`.

    Call arguments:
        x: A tensor of single string sequences, or a tuple of multiple
            tensor sequences to be packed together. Inputs may be batched or
            unbatched. For single sequences, raw python inputs will be
            converted to tensors. For multiple sequences, pass tensors
            directly.
        y: Any label data. Will be passed through unaltered.
        sample_weight: Any label weight data. Will be passed through unaltered.
        sequence_length: Pass to override the configured `sequence_length` of
            the layer.

    Examples:

    Directly calling the layer on data.
    ```python
    preprocessor = keras_nlp.models.MistralPreprocessor.from_preset(
        "mistral_base_en"
    )

    # Tokenize and pack a single sentence.
    preprocessor("The quick brown fox jumped.")

    # Tokenize a batch of single sentences.
    preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])

    # Preprocess a batch of sentence pairs.
    # When handling multiple sequences, always convert to tensors first!
    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
    preprocessor((first, second))
    ```

    Mapping with `tf.data.Dataset`.
```python preprocessor = keras_nlp.models.MistralPreprocessor.from_preset( "mistral_base_en" ) first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) second = tf.constant(["The fox tripped.", "Oh look, a whale."]) label = tf.constant([1, 1]) # Map labeled single sentences. ds = tf.data.Dataset.from_tensor_slices((first, label)) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map unlabeled single sentences. ds = tf.data.Dataset.from_tensor_slices(first) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map labeled sentence pairs. ds = tf.data.Dataset.from_tensor_slices(((first, second), label)) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map unlabeled sentence pairs. ds = tf.data.Dataset.from_tensor_slices((first, second)) # Watch out for tf.data's default unpacking of tuples here! # Best to invoke the `preprocessor` directly in this case. ds = ds.map( lambda first, second: preprocessor(x=(first, second)), num_parallel_calls=tf.data.AUTOTUNE, ) ``` """ def __init__( self, tokenizer, sequence_length=1024, add_start_token=True, add_end_token=False, **kwargs, ): super().__init__(**kwargs) self.tokenizer = tokenizer self.packer = None self.add_start_token = add_start_token self.add_end_token = add_end_token self.sequence_length = sequence_length def build(self, input_shape): # Defer packer creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. self.packer = StartEndPacker( start_value=self.tokenizer.start_token_id, end_value=self.tokenizer.end_token_id, sequence_length=self.sequence_length, return_padding_mask=True, ) self.built = True def get_config(self): config = super().get_config() config.update( { "sequence_length": self.sequence_length, "add_start_token": self.add_start_token, "add_end_token": self.add_end_token, } ) return config def call( self, x, y=None, sample_weight=None, sequence_length=None, ): x = convert_inputs_to_list_of_tensor_segments(x) if len(x) != 1: raise ValueError( "Mistral requires each input feature to contain only " f"one segment, but received {len(x)}. If you are using Mistral" " for a multi-segment classification task, please refer to " "classification models like BERT or RoBERTa." ) sequence_length = sequence_length or self.sequence_length token_ids, padding_mask = self.packer( self.tokenizer(x[0]), sequence_length=sequence_length, add_start_value=self.add_start_token, add_end_value=self.add_end_token, ) x = { "token_ids": token_ids, "padding_mask": padding_mask, } return pack_x_y_sample_weight(x, y, sample_weight) @property def sequence_length(self): """The padded length of model input sequences.""" return self._sequence_length @sequence_length.setter def sequence_length(self, value): self._sequence_length = value if self.packer is not None: self.packer.sequence_length = value @classproperty def tokenizer_cls(cls): return MistralTokenizer @classproperty def presets(cls): return copy.deepcopy(backbone_presets)
keras-nlp/keras_nlp/models/mistral/mistral_preprocessor.py/0
{ "file_path": "keras-nlp/keras_nlp/models/mistral/mistral_preprocessor.py", "repo_id": "keras-nlp", "token_count": 2920 }
128
# Copyright 2022 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from keras_nlp.backend import ops from keras_nlp.models.whisper.whisper_backbone import WhisperBackbone from keras_nlp.tests.test_case import TestCase @pytest.mark.tf_only class WhisperBackboneTest(TestCase): def setUp(self): self.init_kwargs = { "vocabulary_size": 10, "num_layers": 2, "num_heads": 2, "hidden_dim": 2, "intermediate_dim": 4, "max_encoder_sequence_length": 6, "max_decoder_sequence_length": 6, } self.input_data = { "encoder_features": ops.ones((2, 5, 80), dtype="float32"), "decoder_token_ids": ops.ones((2, 5), dtype="int32"), "decoder_padding_mask": ops.ones((2, 5), dtype="int32"), } def test_backbone_basics(self): self.run_backbone_test( cls=WhisperBackbone, init_kwargs=self.init_kwargs, input_data=self.input_data, expected_output_shape={ "encoder_sequence_output": (2, 3, 2), "decoder_sequence_output": (2, 5, 2), }, ) def test_key_projection_bias_absence(self): backbone = WhisperBackbone(**self.init_kwargs) # Check only for the first encoder layer and first decoder layer. self.assertIsNone( backbone.get_layer( "transformer_encoder_layer_0" )._self_attention_layer._key_dense.bias ) self.assertIsNone( backbone.get_layer( "transformer_decoder_layer_0" )._self_attention_layer._key_dense.bias ) self.assertIsNone( backbone.get_layer( "transformer_decoder_layer_0" )._cross_attention_layer._key_dense.bias ) @pytest.mark.large def test_saved_model(self): self.run_model_saving_test( cls=WhisperBackbone, init_kwargs=self.init_kwargs, input_data=self.input_data, ) @pytest.mark.large def test_smallest_preset(self): self.run_preset_test( cls=WhisperBackbone, preset="whisper_tiny_en", input_data={ "encoder_features": ops.ones((1, 3000, 80)), "decoder_token_ids": ops.array( [[50257, 50362, 464, 2068, 7586, 21831, 13, 50256, 50256]] ), "decoder_padding_mask": ops.array( [[1, 1, 1, 1, 1, 1, 1, 1, 0]] ), }, expected_output_shape={ "encoder_sequence_output": (1, 1500, 384), "decoder_sequence_output": (1, 9, 384), }, # The forward pass from a preset should be stable! expected_partial_output={ "encoder_sequence_output": ops.array( [-0.21382, -0.48528, 0.42348, -1.33874, -0.14191] ), "decoder_sequence_output": ops.array( [13.238, 1.051, 8.348, -20.012, -5.022] ), }, ) @pytest.mark.extra_large def test_all_presets(self): for preset in WhisperBackbone.presets: self.run_preset_test( cls=WhisperBackbone, preset=preset, input_data=self.input_data, )
keras-nlp/keras_nlp/models/whisper/whisper_backbone_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/whisper/whisper_backbone_test.py", "repo_id": "keras-nlp", "token_count": 2028 }
129
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tree

from keras_nlp.api_export import keras_nlp_export
from keras_nlp.backend import ops
from keras_nlp.samplers.sampler import Sampler


@keras_nlp_export("keras_nlp.samplers.ContrastiveSampler")
class ContrastiveSampler(Sampler):
    """Contrastive Sampler class.

    This sampler implements the contrastive search algorithm. In short, the
    sampler chooses the token having the max "score" as the next token. The
    "score" is a weighted sum of the token's probability and the negated max
    similarity against previous tokens. By using this joint score, the
    contrastive sampler reduces the tendency to repeat previously seen tokens.

    Args:
        k: int, the `k` value of top-k. Next token will be chosen from k
            tokens.
        alpha: float, the weight of the negated max similarity in the joint
            score computation. The larger the value of `alpha`, the more the
            score relies on the similarity rather than the token probability.
        seed: int. The random seed. Defaults to `None`.

    Call arguments:
        {{call_args}}

    Examples:
    ```python
    causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en")

    # Pass by name to compile.
    causal_lm.compile(sampler="contrastive")
    causal_lm.generate(["Keras is a"])

    # Pass by object to compile.
    sampler = keras_nlp.samplers.ContrastiveSampler(k=5)
    causal_lm.compile(sampler=sampler)
    causal_lm.generate(["Keras is a"])
    ```
    """

    def __init__(
        self,
        k=5,
        alpha=0.6,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.k = k
        self.alpha = alpha

    def __call__(
        self,
        next,
        prompt,
        cache=None,
        index=0,
        mask=None,
        end_token_id=None,
        hidden_states=None,
        model=None,
    ):
        if hidden_states is None:
            raise ValueError(
                "`ContrastiveSampler` requires passing a `hidden_states`, "
                "but received `None`."
            )
        batch_size, max_length = ops.shape(prompt)[0], ops.shape(prompt)[1]
        index = ops.cast(index, "int32")

        def create_beams(x):
            """Add initial beam state."""
            x = ops.repeat(x, self.k, axis=0)
            flat_shape = (batch_size * self.k,) + ops.shape(x)[1:]
            return ops.reshape(x, flat_shape)

        def flatten_beams(x):
            """Combine the beam dim and batch dim."""
            flat_shape = (batch_size * self.k,) + ops.shape(x)[2:]
            return ops.reshape(x, flat_shape)

        def unflatten_beams(x):
            """Separate the beam dim and batch dim."""
            unflat_shape = (batch_size, self.k) + ops.shape(x)[1:]
            return ops.reshape(x, unflat_shape)

        mask = ops.zeros_like(prompt, dtype="bool") if mask is None else mask
        # Compute initial logits.
        logits, _, cache = next(prompt, cache, index)
        # `ops.while_loop` will not accept `None` as a value for `loop_vars`.
        has_cache = cache is not None
        cache = cache if has_cache else ()

        def cond(prompt, cache, index, logits, hidden_states):
            if end_token_id is None:
                return True
            # Stop if all sequences have produced a *new* end_token_id.
            end_tokens = (prompt == end_token_id) & (~mask)
            prompt_done = ops.any(end_tokens, axis=-1)
            return ops.logical_not(ops.all(prompt_done))

        def body(prompt, cache, index, logits, hidden_states):
            # Compute the softmax distribution for the next token.
probabilities = self.compute_probabilities(logits) # Replicate for `self.k` times to find the best token in top-k # candidates. prompt_beams = create_beams(prompt) mask_beams = create_beams(mask) hidden_states_beams = create_beams(hidden_states) cache_beams = None if has_cache: cache_beams = tree.map_structure(create_beams, cache) # Get top-k candidate tokens and their probabilities. top_k_probabilities, top_k_indices = ops.top_k( probabilities, k=self.k, sorted=False ) next_token_probabilities = flatten_beams(top_k_probabilities) next_token = flatten_beams(top_k_indices) next_token = ops.cast(next_token, prompt.dtype) next_token = ops.where( mask_beams[:, index], prompt_beams[:, index], next_token ) # Update the prompt with the next token. next_token = ops.expand_dims(next_token, -1) prompt_beams = ops.slice_update( prompt_beams, [0, index], next_token ) # Compute the logits and hidden states for top-k candidate tokens. next_logits, next_hidden_states_beams, cache_beams = next( prompt_beams, cache_beams, index + 1 ) # Compute the max similarity score for top-k candidate tokens # against previous tokens. similarity_scores = self.similarity( hidden_states_beams, next_hidden_states_beams ) # Replace all future indices with -1, the lowest similarity score. score_mask = ops.expand_dims(ops.arange(max_length) < index, 0) similarity_scores = ops.where(score_mask, similarity_scores, -1) max_similarity_scores = ops.cast( ops.max(similarity_scores, axis=1), dtype=next_token_probabilities.dtype, ) # The final score of each candidate token is weighted sum of # probability and similarity against previous tokens. accumulated_scores = ( (1 - self.alpha) * next_token_probabilities - self.alpha * max_similarity_scores ) # Unflatten variables to shape [batch_size, self.k, ...] for # gather purpose. unflat_score = unflatten_beams(accumulated_scores) unflat_prompt = unflatten_beams(prompt_beams) unflat_next_logits = unflatten_beams(next_logits) unflat_next_hidden_states = unflatten_beams( next_hidden_states_beams ) best_token_indices = ops.argmax(unflat_score, axis=1) def gather_best_token(beams): indices = best_token_indices for axis in range(1, len(beams.shape)): indices = ops.expand_dims(indices, axis=axis) best = ops.take_along_axis( beams, indices, axis=1, ) return ops.squeeze(best, axis=1) prompt = gather_best_token(unflat_prompt) # We avoid recomputing forward pass for each token by updating the # cache/hidden_states using the output, and pass the logits to # next iteration step. logits = gather_best_token(unflat_next_logits) next_hidden_states = gather_best_token(unflat_next_hidden_states) if has_cache: cache = tree.map_structure(unflatten_beams, cache_beams) cache = tree.map_structure(gather_best_token, cache) hidden_states = ops.slice_update( hidden_states, [0, index, 0], next_hidden_states[:, None, :], ) return (prompt, cache, index + 1, logits, hidden_states) prompt, _, _, _, _ = self.run_loop( cond=cond, body=body, loop_vars=(prompt, cache, index, logits, hidden_states), maximum_iterations=(max_length - index), model=model, ) return prompt def similarity(self, h1, h2): h2 = ops.expand_dims(h2, -1) h1_norm = ops.sqrt(ops.sum(h1 * h1, axis=-1)) h2_norm = ops.sqrt(ops.sum(h2 * h2, axis=-2)) return ops.squeeze(ops.matmul(h1, h2), axis=-1) / (h1_norm * h2_norm) def get_config(self): config = super().get_config() config.update( { "k": self.k, "alpha": self.alpha, } ) return config
keras-nlp/keras_nlp/samplers/contrastive_sampler.py/0
{ "file_path": "keras-nlp/keras_nlp/samplers/contrastive_sampler.py", "repo_id": "keras-nlp", "token_count": 4169 }
130
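The joint score that `ContrastiveSampler` maximizes is computed inside `body()` above as `(1 - alpha) * probability - alpha * max_similarity`. The toy numbers below are made up purely to show how this penalty can steer generation away from repetitive continuations; they do not come from any real model.

```python
alpha = 0.6  # same default as the sampler above

candidates = {
    # token: (probability, max similarity to previously generated states)
    "the": (0.50, 0.95),    # likely, but nearly identical to earlier context
    "zebra": (0.20, 0.10),  # less likely, but novel
}
for token, (prob, max_sim) in candidates.items():
    score = (1 - alpha) * prob - alpha * max_sim
    print(token, round(score, 2))
# "the"   -> 0.4 * 0.50 - 0.6 * 0.95 = -0.37
# "zebra" -> 0.4 * 0.20 - 0.6 * 0.10 =  0.02  (higher score, so it is chosen)
```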
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import doctest import io import os import sys import unittest import numpy as np import pytest import sentencepiece import tensorflow as tf import keras_nlp from keras_nlp.backend import keras from keras_nlp.tests.doc_tests import docstring_lib from keras_nlp.tests.doc_tests import fenced_docstring_lib from keras_nlp.tests.doc_tests.fenced_docstring_lib import ( astor, # For checking conditional import. ) PACKAGE = "keras_nlp." def find_modules(): keras_nlp_modules = [] for name, module in sys.modules.items(): if name.startswith(PACKAGE): keras_nlp_modules.append(module) return keras_nlp_modules @pytest.fixture(scope="session") def docstring_module(pytestconfig): return pytestconfig.getoption("docstring_module") @pytest.mark.tf_only def test_docstrings(docstring_module): keras_nlp_modules = find_modules() # As of this writing, it doesn't seem like pytest support load_tests # protocol for unittest: # https://docs.pytest.org/en/7.1.x/how-to/unittest.html # So we run the unittest.TestSuite manually and report the results back. runner = unittest.TextTestRunner() suite = unittest.TestSuite() for module in keras_nlp_modules: if docstring_module and docstring_module not in module.__name__: continue print(f"Adding tests for docstrings in {module.__name__}") suite.addTest( doctest.DocTestSuite( module, test_finder=doctest.DocTestFinder(exclude_empty=False), extraglobs={ "tf": tf, "np": np, "os": os, "keras": keras, "keras_nlp": keras_nlp, }, checker=docstring_lib.DoctestOutputChecker(), optionflags=( doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL | doctest.DONT_ACCEPT_BLANKLINE ), ) ) result = runner.run(suite) if not result.wasSuccessful(): print(result) assert result.wasSuccessful() @pytest.mark.tf_only @pytest.mark.extra_large @pytest.mark.skipif( astor is None, reason="This test requires `astor`. Please `pip install astor` to run.", ) def test_fenced_docstrings(docstring_module): """Tests fenced code blocks in docstrings. This can only be run manually and will take many minutes. Run with: `pytest keras_nlp/tests/doc_tests/docstring_test.py --run_extra_large` To restrict the docstring you test, you can pass an additional --docstring_module flag. 
For example, to run only "bert" module tests: `pytest keras_nlp/tests/doc_tests/docstring_test.py --run_extra_large --docstring_module "models.bert"` """ keras_nlp_modules = find_modules() runner = unittest.TextTestRunner() suite = unittest.TestSuite() for module in keras_nlp_modules: if docstring_module and docstring_module not in module.__name__: continue print(f"Adding tests for fenced docstrings in {module.__name__}") suite.addTest( doctest.DocTestSuite( module, test_finder=doctest.DocTestFinder( exclude_empty=False, parser=fenced_docstring_lib.FencedCellParser( fence_label="python" ), ), globs={ "_print_if_not_none": fenced_docstring_lib._print_if_not_none }, extraglobs={ "tf": tf, "np": np, "os": os, "keras": keras, "keras_nlp": keras_nlp, "io": io, "sentencepiece": sentencepiece, }, checker=docstring_lib.DoctestOutputChecker(), optionflags=( doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL | doctest.DONT_ACCEPT_BLANKLINE ), ) ) result = runner.run(suite) if not result.wasSuccessful(): print(result) assert result.wasSuccessful()
keras-nlp/keras_nlp/tests/doc_tests/docstring_test.py/0
{ "file_path": "keras-nlp/keras_nlp/tests/doc_tests/docstring_test.py", "repo_id": "keras-nlp", "token_count": 2335 }
131
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import tensorflow as tf

from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers import tokenizer
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
from keras_nlp.utils.tensor_utils import convert_to_ragged_batch
from keras_nlp.utils.tensor_utils import is_int_dtype

try:
    import tensorflow_text as tf_text
except ImportError:
    tf_text = None


@keras_nlp_export("keras_nlp.tokenizers.ByteTokenizer")
class ByteTokenizer(tokenizer.Tokenizer):
    """Raw byte tokenizer.

    This tokenizer is a vocabulary-free tokenizer which will tokenize text as
    raw bytes from [0, 256).

    Tokenizer outputs can either be padded and truncated with a
    `sequence_length` argument, or left un-truncated. The exact output will
    depend on the rank of the input tensors.

    If input is a batch of strings:
    By default, the layer will output a `tf.RaggedTensor` where the last
    dimension of the output is ragged. If `sequence_length` is set, the layer
    will output a dense `tf.Tensor` where all inputs have been padded or
    truncated to `sequence_length`.

    If input is a scalar string:
    There are two cases here. If `sequence_length` is set, the output will be
    a dense `tf.Tensor` of shape `[sequence_length]`. Otherwise, the output
    will be a dense `tf.Tensor` of shape `[None]`.

    The output dtype can be controlled via the `dtype` argument, which should
    be an integer type ("int16", "int32", etc.).

    Args:
        lowercase: boolean. If True, the input text will be converted to
            lowercase before tokenization.
        sequence_length: int. If set, the output will be converted to a dense
            tensor and padded/trimmed so all outputs are of `sequence_length`.
        normalization_form: string. One of the following values: (None, "NFC",
            "NFKC", "NFD", "NFKD"). If set, every UTF-8 string in the input
            tensor text will be normalized to the given form before tokenizing.
        errors: One of ('strict', 'replace', 'ignore'). Specifies the
            `detokenize()` behavior when an invalid byte sequence is
            encountered. The value of `'strict'` will cause the operation to
            produce an `InvalidArgument` error on any invalid input formatting.
            A value of `'replace'` will cause the tokenizer to replace any
            invalid formatting in the input with the `replacement_char`
            codepoint. A value of `'ignore'` will cause the tokenizer to skip
            any invalid formatting in the input and produce no corresponding
            output character.
        replacement_char: int. The replacement character to use when an invalid
            byte sequence is encountered and when `errors` is set to
            "replace" (same behaviour as
            https://www.tensorflow.org/api_docs/python/tf/strings/unicode_transcode).
            The Unicode replacement character (U+FFFD) is `65533`. Defaults to
            `65533`.

    Examples:

    Basic usage.
    >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer()
    >>> outputs = tokenizer("hello")
    >>> np.array(outputs)
    array([104, 101, 108, 108, 111], dtype=int32)

    Ragged outputs.
>>> inputs = ["hello", "hi"] >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer() >>> seq1, seq2 = tokenizer(inputs) >>> np.array(seq1) array([104, 101, 108, 108, 111], dtype=int32) >>> np.array(seq2) array([104, 105], dtype=int32) Dense outputs. >>> inputs = ["hello", "hi"] >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer(sequence_length=8) >>> seq1, seq2 = tokenizer(inputs) >>> np.array(seq1) array([104, 101, 108, 108, 111, 0, 0, 0], dtype=int32) >>> np.array(seq2) array([104, 105, 0, 0, 0, 0, 0, 0], dtype=int32) Tokenize, then batch for ragged outputs. >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer() >>> ds = tf.data.Dataset.from_tensor_slices(["hello", "fun"]) >>> ds = ds.map(tokenizer) >>> ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2)) >>> ds.take(1).get_single_element() <tf.RaggedTensor [[104, 101, 108, 108, 111], [102, 117, 110]]> Batch, then tokenize for ragged outputs. >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer() >>> ds = tf.data.Dataset.from_tensor_slices(["hello", "fun"]) >>> ds = ds.batch(2).map(tokenizer) >>> ds.take(1).get_single_element() <tf.RaggedTensor [[104, 101, 108, 108, 111], [102, 117, 110]]> Tokenize, then batch for dense outputs (`sequence_length` provided). >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer(sequence_length=5) >>> ds = tf.data.Dataset.from_tensor_slices(["hello", "fun"]) >>> ds = ds.map(tokenizer) >>> ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2)) >>> ds.take(1).get_single_element() <tf.Tensor: shape=(2, 5), dtype=int32, numpy= array([[104, 101, 108, 108, 111], [102, 117, 110, 0, 0]], dtype=int32)> Batch, then tokenize for dense outputs. (`sequence_length` provided). >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer(sequence_length=5) >>> ds = tf.data.Dataset.from_tensor_slices(["hello", "fun"]) >>> ds = ds.batch(2).map(tokenizer) >>> ds.take(1).get_single_element() <tf.Tensor: shape=(2, 5), dtype=int32, numpy= array([[104, 101, 108, 108, 111], [102, 117, 110, 0, 0]], dtype=int32)> Detokenization. >>> inputs = [104, 101, 108, 108, 111] >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer() >>> outputs = tokenizer.detokenize(inputs) >>> np.array(outputs).astype("U") array('hello', dtype='<U5') Detokenization with invalid bytes. >>> # The 255 below is invalid utf-8. >>> inputs = [104, 101, 255, 108, 108, 111] >>> tokenizer = keras_nlp.tokenizers.ByteTokenizer( ... errors="replace", replacement_char=88) >>> outputs = tokenizer.detokenize(inputs) >>> np.array(outputs).astype("U") array('heXllo', dtype='<U6') """ def __init__( self, lowercase: bool = True, sequence_length: int = None, normalization_form: str = None, errors: str = "replace", replacement_char: int = 65533, dtype="int32", **kwargs, ): assert_tf_text_installed(self.__class__.__name__) if not is_int_dtype(dtype): raise ValueError( "Output dtype must be an integer type. " f"Received: dtype={dtype}" ) # Check normalization_form. if normalization_form not in (None, "NFC", "NFKC", "NFD", "NFKD"): raise ValueError( '`normalization_form` must be one of None, "NFC", "NFKC", ' '"NFD", "NFKD". Received: normalization_form=' f"{normalization_form}" ) # Check errors. 
if errors not in ("strict", "replace", "ignore"): raise ValueError( '`errors` must be one of "strict", "replace", "ignore" ' f"Received: errors={errors}" ) super().__init__(dtype=dtype, **kwargs) self.lowercase = lowercase self.sequence_length = sequence_length self.normalization_form = normalization_form self.errors = errors self.replacement_char = replacement_char self._char_lst = tf.constant( [i.tobytes() for i in np.arange(256, dtype=np.uint8)] ) def vocabulary_size(self) -> int: """Get the size of the tokenizer vocabulary.""" return 256 def tokenize(self, inputs): if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)): inputs = tf.convert_to_tensor(inputs) scalar_input = inputs.shape.rank == 0 if scalar_input: inputs = tf.expand_dims(inputs, 0) # Optional: Lowercase the input. if self.lowercase: inputs = tf_text.case_fold_utf8(inputs) # Optional: Normalize unicode. if self.normalization_form is not None: inputs = tf_text.normalize_utf8(inputs, self.normalization_form) # Tokenize input strings. tokens = tf.strings.bytes_split(inputs) tokens = tf.squeeze( tf.ragged.map_flat_values(tf.io.decode_raw, tokens, tf.uint8), -1 ) tokens = tf.cast(tokens, self.compute_dtype) # Convert to a dense output if `sequence_length` is set. if self.sequence_length: output_shape = tokens.shape.as_list() output_shape[-1] = self.sequence_length tokens = tokens.to_tensor(shape=output_shape) if scalar_input: tokens = tf.squeeze(tokens, 0) return tokens def detokenize(self, inputs): inputs, unbatched, _ = convert_to_ragged_batch(inputs) # Remove trailing padding tokens, so that trailing "\x00" bytes don't # show up in the detokenized output. inputs = tf.ragged.boolean_mask(inputs, tf.not_equal(inputs, 0)) outputs = tf.strings.reduce_join( tf.gather(self._char_lst, inputs), axis=-1 ) # Handle errors if an invalid byte sequence is encountered. outputs = tf.strings.unicode_transcode( outputs, "UTF-8", "UTF-8", errors=self.errors, replacement_char=self.replacement_char, ) if unbatched: outputs = tf.squeeze(outputs, 0) return outputs def get_config(self): config = super().get_config() config.update( { "lowercase": self.lowercase, "sequence_length": self.sequence_length, "normalization_form": self.normalization_form, "errors": self.errors, "replacement_char": self.replacement_char, } ) return config
keras-nlp/keras_nlp/tokenizers/byte_tokenizer.py/0
{ "file_path": "keras-nlp/keras_nlp/tokenizers/byte_tokenizer.py", "repo_id": "keras-nlp", "token_count": 4410 }
132
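As a plain-Python illustration of what the TensorFlow ops in `ByteTokenizer` compute: tokenization is simply the UTF-8 byte values of the (optionally lowercased) input, and detokenization decodes those bytes back using the configured error policy. The sketch below uses only the standard library; it is a stand-in for intuition, not the layer's actual implementation.

```python
def byte_tokenize(text, lowercase=True):
    if lowercase:
        text = text.lower()
    return list(text.encode("utf-8"))  # integer byte values in [0, 256)


def byte_detokenize(token_ids, errors="replace"):
    data = bytes(t for t in token_ids if t != 0)  # drop 0s, as the layer does
    return data.decode("utf-8", errors=errors)


print(byte_tokenize("Hello"))                      # [104, 101, 108, 108, 111]
print(byte_detokenize([104, 101, 108, 108, 111]))  # hello
```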
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_nlp.backend import keras from keras_nlp.tests.test_case import TestCase from keras_nlp.utils.keras_utils import clone_initializer from keras_nlp.utils.keras_utils import pack_x_y_sample_weight class CloneInitializerTest(TestCase): def test_config_equality(self): initializer = keras.initializers.VarianceScaling( scale=2.0, mode="fan_out", ) clone = clone_initializer(initializer) self.assertAllEqual(initializer.get_config(), clone.get_config()) def test_random_output(self): initializer = keras.initializers.VarianceScaling( scale=2.0, mode="fan_out", ) clone = clone_initializer(initializer) self.assertNotAllEqual(initializer(shape=(2, 2)), clone(shape=(2, 2))) def test_strings(self): initializer = "glorot_uniform" clone = clone_initializer(initializer) self.assertAllEqual(initializer, clone) class PackTest(TestCase): def test_pack_dict(self): tensor_dict = {"foo": tf.constant([1, 2])} data = pack_x_y_sample_weight(tensor_dict) self.assertAllEqual(data, tensor_dict) def test_pack_tuple(self): tensor_tuple = (tf.constant([1, 2]),) data = pack_x_y_sample_weight(tensor_tuple) self.assertAllEqual(data, (tensor_tuple,)) def test_pack_pair(self): x = tf.constant([1, 2]) y = tf.constant([3, 4]) data = pack_x_y_sample_weight(x, y) self.assertAllEqual(data, (x, y)) def test_pack_triplet(self): x = tf.constant([1, 2]) y = tf.constant([3, 4]) sw = tf.constant([5, 6]) data = pack_x_y_sample_weight(x, y, sw) self.assertAllEqual(data, (x, y, sw))
keras-nlp/keras_nlp/utils/keras_utils_test.py/0
{ "file_path": "keras-nlp/keras_nlp/utils/keras_utils_test.py", "repo_id": "keras-nlp", "token_count": 973 }
133
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import h5py import numpy as np import requests import tensorflow as tf from transformers import TFXLNetModel from transformers import XLNetTokenizer from keras_nlp.models import XLNetBackbone check_mems = False PRESET = "xlnet-base-cased" CKPT = f"https://huggingface.co/{PRESET}" SAVE_PATH = "./tf_weights.h5" # create HF model hf_model = TFXLNetModel.from_pretrained(PRESET) print(f"GPU Available or not : {tf.test.is_gpu_available()}") with open(SAVE_PATH, "wb") as p: response = requests.get(CKPT + "/resolve/main/tf_model.h5") p.write(response.content) tokenizer = XLNetTokenizer.from_pretrained(PRESET) string = "An input text string." tokens = tokenizer(string, return_tensors="tf", return_attention_mask=True) tokenized_hf = copy.deepcopy(tokens) tokenized_knlp = copy.deepcopy(tokens) tokenized_knlp["token_ids"] = tokenized_knlp["input_ids"] tokenized_knlp["padding_mask"] = tokenized_knlp["attention_mask"] tokenized_knlp["segment_ids"] = tokenized_knlp["token_type_ids"] del tokenized_knlp["attention_mask"] del tokenized_knlp["input_ids"] del tokenized_knlp["token_type_ids"] # create keras_nlp model knlp_model = XLNetBackbone( vocabulary_size=hf_model.config.vocab_size, num_layers=hf_model.config.n_layer, num_heads=hf_model.config.n_head, hidden_dim=hf_model.config.d_model, intermediate_dim=hf_model.config.d_inner, dropout=0.0, kernel_initializer_range=hf_model.config.initializer_range, ) # Load weights for keras_nlp model file_hf = h5py.File("./tf_weights.h5", "r") try: _ = file_hf["transformer"]["tfxl_net_lm_head_model"] member = "tfxl_net_lm_head_model" except: member = "tfxl_net_lm_head_model_1" # Content and Query Embeddings # mask emb mask_emb = np.array(file_hf["transformer"][member]["transformer"]["mask_emb:0"]) # word emb word_embed = np.array( file_hf["transformer"][member]["transformer"]["word_embedding"]["weight:0"] ) knlp_model.get_layer("content_query_embedding").word_embed.embeddings.assign( word_embed ) knlp_model.get_layer("encoder_block_attn_mask_layer").mask_emb.assign(mask_emb) # Encoders for i in range(hf_model.config.n_layer): # rel_attn # biases knlp_model.get_layer(f"xlnet_encoder_{i}").content_attention_bias.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["r_w_bias:0"] ) ) knlp_model.get_layer(f"xlnet_encoder_{i}").positional_attention_bias.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["r_r_bias:0"] ) ) knlp_model.get_layer(f"xlnet_encoder_{i}").segment_attention_bias.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["r_s_bias:0"] ) ) knlp_model.get_layer(f"xlnet_encoder_{i}").segment_encoding.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["seg_embed:0"] ) ) # layer-norm knlp_model.get_layer(f"xlnet_encoder_{i}").layer_norm.beta.assign( np.array( 
file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["layer_norm"]["beta:0"] ) ) knlp_model.get_layer(f"xlnet_encoder_{i}").layer_norm.gamma.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["layer_norm"]["gamma:0"] ) ) # rel_attn core knlp_model.get_layer( f"xlnet_encoder_{i}" ).relative_attention._query_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["q:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).relative_attention._key_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["k:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).relative_attention._value_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["v:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).relative_attention._output_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["o:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).relative_attention._encoding_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"][ "rel_attn" ]["r:0"] ) ) # FF # FF layer 1 knlp_model.get_layer( f"xlnet_encoder_{i}" ).feedforward_intermediate_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_1" ]["kernel:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).feedforward_intermediate_dense.bias.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_1" ]["bias:0"] ) ) # FF layer 2 knlp_model.get_layer( f"xlnet_encoder_{i}" ).feedforward_output_dense.kernel.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_2" ]["kernel:0"] ) ) knlp_model.get_layer( f"xlnet_encoder_{i}" ).feedforward_output_dense.bias.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_2" ]["bias:0"] ) ) # FF Layer Norm knlp_model.get_layer(f"xlnet_encoder_{i}").layer_norm_ff.beta.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_norm" ]["beta:0"] ) ) knlp_model.get_layer(f"xlnet_encoder_{i}").layer_norm_ff.gamma.assign( np.array( file_hf["transformer"][member]["transformer"][f"layer_._{i}"]["ff"][ "layer_norm" ]["gamma:0"] ) ) file_hf.close() print("Model Weights Loaded!") hf_preds = hf_model(tokenized_hf, training=False) print(hf_preds["last_hidden_state"]) knlp_preds = knlp_model(tokenized_knlp, training=False) print(knlp_preds, end="\n\n") print( "Outputs matching or not for Last Hidden State : ", np.allclose( hf_preds["last_hidden_state"] .numpy() .reshape(-1, hf_model.config.d_model), knlp_preds.numpy().reshape(-1, hf_model.config.d_model), atol=1e-3, ), ) # won't work since the recent version of the model doesn't return any mems! if check_mems: for i in range(hf_model.config.n_layer): print( f"Outputs matching or not for Mem {i} : ", np.allclose( hf_preds["mems"][i] .numpy() .reshape(-1, hf_model.config.d_model), knlp_preds["new_mems"][i] .numpy() .reshape(-1, hf_model.config.d_model), atol=1e-3, ), ) os.remove(SAVE_PATH)
keras-nlp/tools/checkpoint_conversion/convert_xlnet_checkpoints.py/0
{ "file_path": "keras-nlp/tools/checkpoint_conversion/convert_xlnet_checkpoints.py", "repo_id": "keras-nlp", "token_count": 4209 }
134
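The conversion script above manually renames the Hugging Face tokenizer outputs (`input_ids`, `attention_mask`, `token_type_ids`) to the input names `XLNetBackbone` expects (`token_ids`, `padding_mask`, `segment_ids`). The sketch below factors that renaming into a small helper; the helper name `to_keras_nlp_inputs` is an illustrative assumption and not part of the original script.

# Hypothetical helper mirroring the key renaming done above: map Hugging Face
# tokenizer outputs to the KerasNLP XLNetBackbone input names.
def to_keras_nlp_inputs(hf_inputs):
    key_map = {
        "input_ids": "token_ids",
        "attention_mask": "padding_mask",
        "token_type_ids": "segment_ids",
    }
    # Keep any other keys unchanged.
    return {key_map.get(name, name): value for name, value in hf_inputs.items()}


# Example usage on a dict-like tokenizer output:
# tokenized_knlp = to_keras_nlp_inputs(dict(tokens))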
"""Utilities for real-time data augmentation on image data. """ import warnings import numpy as np from .affine_transformations import (apply_affine_transform, apply_brightness_shift, apply_channel_shift, flip_axis) from .dataframe_iterator import DataFrameIterator from .directory_iterator import DirectoryIterator from .numpy_array_iterator import NumpyArrayIterator class ImageDataGenerator(object): """Generate batches of tensor image data with real-time data augmentation. The data will be looped over (in batches). # Arguments featurewise_center: Boolean. Set input mean to 0 over the dataset, feature-wise. samplewise_center: Boolean. Set each sample mean to 0. featurewise_std_normalization: Boolean. Divide inputs by std of the dataset, feature-wise. samplewise_std_normalization: Boolean. Divide each input by its std. zca_whitening: Boolean. Apply ZCA whitening. zca_epsilon: epsilon for ZCA whitening. Default is 1e-6. rotation_range: Int. Degree range for random rotations. width_shift_range: Float, 1-D array-like or int - float: fraction of total width, if < 1, or pixels if >= 1. - 1-D array-like: random elements from the array. - int: integer number of pixels from interval `(-width_shift_range, +width_shift_range)` - With `width_shift_range=2` possible values are integers `[-1, 0, +1]`, same as with `width_shift_range=[-1, 0, +1]`, while with `width_shift_range=1.0` possible values are floats in the interval `[-1.0, +1.0)`. height_shift_range: Float, 1-D array-like or int - float: fraction of total height, if < 1, or pixels if >= 1. - 1-D array-like: random elements from the array. - int: integer number of pixels from interval `(-height_shift_range, +height_shift_range)` - With `height_shift_range=2` possible values are integers `[-1, 0, +1]`, same as with `height_shift_range=[-1, 0, +1]`, while with `height_shift_range=1.0` possible values are floats in the interval `[-1.0, +1.0)`. brightness_range: Tuple or list of two floats. Range for picking a brightness shift value from. shear_range: Float. Shear Intensity (Shear angle in counter-clockwise direction in degrees) zoom_range: Float or [lower, upper]. Range for random zoom. If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`. channel_shift_range: Float. Range for random channel shifts. fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}. Default is 'nearest'. Points outside the boundaries of the input are filled according to the given mode: - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k) - 'nearest': aaaaaaaa|abcd|dddddddd - 'reflect': abcddcba|abcd|dcbaabcd - 'wrap': abcdabcd|abcd|abcdabcd cval: Float or Int. Value used for points outside the boundaries when `fill_mode = "constant"`. horizontal_flip: Boolean. Randomly flip inputs horizontally. vertical_flip: Boolean. Randomly flip inputs vertically. rescale: rescaling factor. Defaults to None. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (after applying all other transformations). preprocessing_function: function that will be applied on each input. The function will run after the image is resized and augmented. The function should take one argument: one image (NumPy tensor with rank 3), and should output a NumPy tensor with the same shape. data_format: Image data format, either "channels_first" or "channels_last". "channels_last" mode means that the images should have shape `(samples, height, width, channels)`, "channels_first" mode means that the images should have shape `(samples, channels, height, width)`. 
It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". validation_split: Float. Fraction of images reserved for validation (strictly between 0 and 1). interpolation_order: int, order to use for the spline interpolation. Higher is slower. dtype: Dtype to use for the generated arrays. # Examples Example of using `.flow(x, y)`: ```python (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) datagen = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True) # compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied) datagen.fit(x_train) # fits the model on batches with real-time data augmentation: model.fit_generator(datagen.flow(x_train, y_train, batch_size=32), steps_per_epoch=len(x_train) / 32, epochs=epochs) # here's a more "manual" example for e in range(epochs): print('Epoch', e) batches = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32): model.fit(x_batch, y_batch) batches += 1 if batches >= len(x_train) / 32: # we need to break the loop by hand because # the generator loops indefinitely break ``` Example of using `.flow_from_directory(directory)`: ```python train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( 'data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( 'data/validation', target_size=(150, 150), batch_size=32, class_mode='binary') model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` Example of transforming images and masks together. 
```python # we create two instances with the same arguments data_gen_args = dict(featurewise_center=True, featurewise_std_normalization=True, rotation_range=90, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) # Provide the same seed and keyword arguments to the fit and flow methods seed = 1 image_datagen.fit(images, augment=True, seed=seed) mask_datagen.fit(masks, augment=True, seed=seed) image_generator = image_datagen.flow_from_directory( 'data/images', class_mode=None, seed=seed) mask_generator = mask_datagen.flow_from_directory( 'data/masks', class_mode=None, seed=seed) # combine generators into one which yields image and masks train_generator = zip(image_generator, mask_generator) model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50) ``` Example of using ```.flow_from_dataframe(dataframe, directory, x_col, y_col)```: ```python train_df = pandas.read_csv("./train.csv") valid_df = pandas.read_csv("./valid.csv") train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_dataframe( dataframe=train_df, directory='data/train', x_col="filename", y_col="class", target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_dataframe( dataframe=valid_df, directory='data/validation', x_col="filename", y_col="class", target_size=(150, 150), batch_size=32, class_mode='binary') model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` """ def __init__(self, featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-6, rotation_range=0, width_shift_range=0., height_shift_range=0., brightness_range=None, shear_range=0., zoom_range=0., channel_shift_range=0., fill_mode='nearest', cval=0., horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format='channels_last', validation_split=0.0, interpolation_order=1, dtype='float32'): self.featurewise_center = featurewise_center self.samplewise_center = samplewise_center self.featurewise_std_normalization = featurewise_std_normalization self.samplewise_std_normalization = samplewise_std_normalization self.zca_whitening = zca_whitening self.zca_epsilon = zca_epsilon self.rotation_range = rotation_range self.width_shift_range = width_shift_range self.height_shift_range = height_shift_range self.shear_range = shear_range self.zoom_range = zoom_range self.channel_shift_range = channel_shift_range self.fill_mode = fill_mode self.cval = cval self.horizontal_flip = horizontal_flip self.vertical_flip = vertical_flip self.rescale = rescale self.preprocessing_function = preprocessing_function self.dtype = dtype self.interpolation_order = interpolation_order if data_format not in {'channels_last', 'channels_first'}: raise ValueError( '`data_format` should be `"channels_last"` ' '(channel after row and column) or ' '`"channels_first"` (channel before row and column). 
' 'Received: %s' % data_format) self.data_format = data_format if data_format == 'channels_first': self.channel_axis = 1 self.row_axis = 2 self.col_axis = 3 if data_format == 'channels_last': self.channel_axis = 3 self.row_axis = 1 self.col_axis = 2 if validation_split and not 0 < validation_split < 1: raise ValueError( '`validation_split` must be strictly between 0 and 1. ' ' Received: %s' % validation_split) self._validation_split = validation_split self.mean = None self.std = None self.zca_whitening_matrix = None if isinstance(zoom_range, (float, int)): self.zoom_range = [1 - zoom_range, 1 + zoom_range] elif (len(zoom_range) == 2 and all(isinstance(val, (float, int)) for val in zoom_range)): self.zoom_range = [zoom_range[0], zoom_range[1]] else: raise ValueError('`zoom_range` should be a float or ' 'a tuple or list of two floats. ' 'Received: %s' % (zoom_range,)) if zca_whitening: if not featurewise_center: self.featurewise_center = True warnings.warn('This ImageDataGenerator specifies ' '`zca_whitening`, which overrides ' 'setting of `featurewise_center`.') if featurewise_std_normalization: self.featurewise_std_normalization = False warnings.warn('This ImageDataGenerator specifies ' '`zca_whitening` ' 'which overrides setting of' '`featurewise_std_normalization`.') if featurewise_std_normalization: if not featurewise_center: self.featurewise_center = True warnings.warn('This ImageDataGenerator specifies ' '`featurewise_std_normalization`, ' 'which overrides setting of ' '`featurewise_center`.') if samplewise_std_normalization: if not samplewise_center: self.samplewise_center = True warnings.warn('This ImageDataGenerator specifies ' '`samplewise_std_normalization`, ' 'which overrides setting of ' '`samplewise_center`.') if brightness_range is not None: if (not isinstance(brightness_range, (tuple, list)) or len(brightness_range) != 2): raise ValueError( '`brightness_range should be tuple or list of two floats. ' 'Received: %s' % (brightness_range,)) self.brightness_range = brightness_range def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None, save_to_dir=None, save_prefix='', save_format='png', ignore_class_split=False, subset=None): """Takes data & label arrays, generates batches of augmented data. # Arguments x: Input data. NumPy array of rank 4 or a tuple. If tuple, the first element should contain the images and the second element another NumPy array or a list of NumPy arrays that gets passed to the output without any modifications. Can be used to feed the model miscellaneous data along with the images. In case of grayscale data, the channels axis of the image array should have value 1, in case of RGB data, it should have value 3, and in case of RGBA data, it should have value 4. y: Labels. batch_size: Int (default: 32). shuffle: Boolean (default: True). sample_weight: Sample weights. seed: Int (default: None). save_to_dir: None or str (default: None). This allows you to optionally specify a directory to which to save the augmented pictures being generated (useful for visualizing what you are doing). save_prefix: Str (default: `''`). Prefix to use for filenames of saved pictures (only relevant if `save_to_dir` is set). save_format: one of "png", "jpeg" (only relevant if `save_to_dir` is set). Default: "png". 
ignore_class_split: Boolean (default: False), ignore difference in number of classes in labels across train and validation split (useful for non-classification tasks) subset: Subset of data (`"training"` or `"validation"`) if `validation_split` is set in `ImageDataGenerator`. # Returns An `Iterator` yielding tuples of `(x, y)` where `x` is a NumPy array of image data (in the case of a single image input) or a list of NumPy arrays (in the case with additional inputs) and `y` is a NumPy array of corresponding labels. If 'sample_weight' is not None, the yielded tuples are of the form `(x, y, sample_weight)`. If `y` is None, only the NumPy array `x` is returned. """ return NumpyArrayIterator( x, y, self, batch_size=batch_size, shuffle=shuffle, sample_weight=sample_weight, seed=seed, data_format=self.data_format, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, ignore_class_split=ignore_class_split, subset=subset, dtype=self.dtype ) def flow_from_directory(self, directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', keep_aspect_ratio=False): """Takes the path to a directory & generates batches of augmented data. # Arguments directory: string, path to the target directory. It should contain one subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside each of the subdirectories directory tree will be included in the generator. See [this script]( https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d) for more details. target_size: Tuple of integers `(height, width)`, default: `(256, 256)`. The dimensions to which all images found will be resized. color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". Whether the images will be converted to have 1, 3, or 4 channels. classes: Optional list of class subdirectories (e.g. `['dogs', 'cats']`). Default: None. If not provided, the list of classes will be automatically inferred from the subdirectory names/structure under `directory`, where each subdirectory will be treated as a different class (and the order of the classes, which will map to the label indices, will be alphanumeric). The dictionary containing the mapping from class names to class indices can be obtained via the attribute `class_indices`. class_mode: One of "categorical", "binary", "sparse", "input", or None. Default: "categorical". Determines the type of label arrays that are returned: - "categorical" will be 2D one-hot encoded labels, - "binary" will be 1D binary labels, "sparse" will be 1D integer labels, - "input" will be images identical to input images (mainly used to work with autoencoders). - If None, no labels are returned (the generator will only yield batches of image data, which is useful to use with `model.predict_generator()`). Please note that in case of class_mode None, the data still needs to reside in a subdirectory of `directory` for it to work correctly. batch_size: Size of the batches of data (default: 32). shuffle: Whether to shuffle the data (default: True) If set to False, sorts the data in alphanumeric order. seed: Optional random seed for shuffling and transformations. save_to_dir: None or str (default: None). This allows you to optionally specify a directory to which to save the augmented pictures being generated (useful for visualizing what you are doing). save_prefix: Str. 
Prefix to use for filenames of saved pictures (only relevant if `save_to_dir` is set). save_format: One of "png", "jpeg" (only relevant if `save_to_dir` is set). Default: "png". follow_links: Whether to follow symlinks inside class subdirectories (default: False). subset: Subset of data (`"training"` or `"validation"`) if `validation_split` is set in `ImageDataGenerator`. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version 1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also supported. By default, `"nearest"` is used. keep_aspect_ratio: Boolean, whether to resize images to a target size without aspect ratio distortion. The image is cropped in the center with target aspect ratio before resizing. # Returns A `DirectoryIterator` yielding tuples of `(x, y)` where `x` is a NumPy array containing a batch of images with shape `(batch_size, *target_size, channels)` and `y` is a NumPy array of corresponding labels. """ return DirectoryIterator( directory, self, target_size=target_size, keep_aspect_ratio=keep_aspect_ratio, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, follow_links=follow_links, subset=subset, interpolation=interpolation, dtype=self.dtype ) def flow_from_dataframe(self, dataframe, directory=None, x_col="filename", y_col="class", weight_col=None, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', validate_filenames=True, **kwargs): """Takes the dataframe and the path to a directory and generates batches of augmented/normalized data. **A simple tutorial can be found **[here]( http://bit.ly/keras_flow_from_dataframe). # Arguments dataframe: Pandas dataframe containing the filepaths relative to `directory` (or absolute paths if `directory` is None) of the images in a string column. It should include other column/s depending on the `class_mode`: - if `class_mode` is `"categorical"` (default value) it must include the `y_col` column with the class/es of each image. Values in column can be string/list/tuple if a single class or list/tuple if multiple classes. - if `class_mode` is `"binary"` or `"sparse"` it must include the given `y_col` column with class values as strings. - if `class_mode` is `"raw"` or `"multi_output"` it should contain the columns specified in `y_col`. - if `class_mode` is `"input"` or `None` no extra column is needed. directory: string, path to the directory to read images from. If `None`, data in `x_col` column should be absolute paths. x_col: string, column in `dataframe` that contains the filenames (or absolute paths if `directory` is `None`). y_col: string or list, column/s in `dataframe` that has the target data. weight_col: string, column in `dataframe` that contains the sample weights. Default: `None`. target_size: tuple of integers `(height, width)`, default: `(256, 256)`. The dimensions to which all images found will be resized. color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb". Whether the images will be converted to have 1 or 3 color channels. classes: optional list of classes (e.g. 
`['dogs', 'cats']`). Default: None. If not provided, the list of classes will be automatically inferred from the `y_col`, which will map to the label indices, will be alphanumeric). The dictionary containing the mapping from class names to class indices can be obtained via the attribute `class_indices`. class_mode: one of "binary", "categorical", "input", "multi_output", "raw", sparse" or None. Default: "categorical". Mode for yielding the targets: - `"binary"`: 1D NumPy array of binary labels, - `"categorical"`: 2D NumPy array of one-hot encoded labels. Supports multi-label output. - `"input"`: images identical to input images (mainly used to work with autoencoders), - `"multi_output"`: list with the values of the different columns, - `"raw"`: NumPy array of values in `y_col` column(s), - `"sparse"`: 1D NumPy array of integer labels, - `None`, no targets are returned (the generator will only yield batches of image data, which is useful to use in `model.predict_generator()`). batch_size: size of the batches of data (default: 32). shuffle: whether to shuffle the data (default: True) seed: optional random seed for shuffling and transformations. save_to_dir: None or str (default: None). This allows you to optionally specify a directory to which to save the augmented pictures being generated (useful for visualizing what you are doing). save_prefix: str. Prefix to use for filenames of saved pictures (only relevant if `save_to_dir` is set). save_format: one of "png", "jpeg" (only relevant if `save_to_dir` is set). Default: "png". follow_links: whether to follow symlinks inside class subdirectories (default: False). subset: Subset of data (`"training"` or `"validation"`) if `validation_split` is set in `ImageDataGenerator`. interpolation: Interpolation method used to resample the image if the target size is different from that of the loaded image. Supported methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version 1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also supported. By default, `"nearest"` is used. validate_filenames: Boolean, whether to validate image filenames in `x_col`. If `True`, invalid images will be ignored. Disabling this option can lead to speed-up in the execution of this function. Default: `True`. # Returns A `DataFrameIterator` yielding tuples of `(x, y)` where `x` is a NumPy array containing a batch of images with shape `(batch_size, *target_size, channels)` and `y` is a NumPy array of corresponding labels. 
""" if 'has_ext' in kwargs: warnings.warn('has_ext is deprecated, filenames in the dataframe have ' 'to match the exact filenames in disk.', DeprecationWarning) if 'sort' in kwargs: warnings.warn('sort is deprecated, batches will be created in the' 'same order than the filenames provided if shuffle' 'is set to False.', DeprecationWarning) if class_mode == 'other': warnings.warn('`class_mode` "other" is deprecated, please use ' '`class_mode` "raw".', DeprecationWarning) class_mode = 'raw' if 'drop_duplicates' in kwargs: warnings.warn('drop_duplicates is deprecated, you can drop duplicates ' 'by using the pandas.DataFrame.drop_duplicates method.', DeprecationWarning) return DataFrameIterator( dataframe, directory, self, x_col=x_col, y_col=y_col, weight_col=weight_col, target_size=target_size, color_mode=color_mode, classes=classes, class_mode=class_mode, data_format=self.data_format, batch_size=batch_size, shuffle=shuffle, seed=seed, save_to_dir=save_to_dir, save_prefix=save_prefix, save_format=save_format, subset=subset, interpolation=interpolation, validate_filenames=validate_filenames, dtype=self.dtype ) def standardize(self, x): """Applies the normalization configuration in-place to a batch of inputs. `x` is changed in-place since the function is mainly used internally to standardize images and feed them to your network. If a copy of `x` would be created instead it would have a significant performance cost. If you want to apply this method without changing the input in-place you can call the method creating a copy before: standardize(np.copy(x)) # Arguments x: Batch of inputs to be normalized. # Returns The inputs, normalized. """ if self.preprocessing_function: x = self.preprocessing_function(x) if self.rescale: x *= self.rescale if self.samplewise_center: x -= np.mean(x, keepdims=True) if self.samplewise_std_normalization: x /= (np.std(x, keepdims=True) + 1e-6) if self.featurewise_center: if self.mean is not None: x -= self.mean else: warnings.warn('This ImageDataGenerator specifies ' '`featurewise_center`, but it hasn\'t ' 'been fit on any training data. Fit it ' 'first by calling `.fit(numpy_data)`.') if self.featurewise_std_normalization: if self.std is not None: x /= (self.std + 1e-6) else: warnings.warn('This ImageDataGenerator specifies ' '`featurewise_std_normalization`, ' 'but it hasn\'t ' 'been fit on any training data. Fit it ' 'first by calling `.fit(numpy_data)`.') if self.zca_whitening: if self.zca_whitening_matrix is not None: flat_x = x.reshape(-1, np.prod(x.shape[-3:])) white_x = flat_x @ self.zca_whitening_matrix x = np.reshape(white_x, x.shape) else: warnings.warn('This ImageDataGenerator specifies ' '`zca_whitening`, but it hasn\'t ' 'been fit on any training data. Fit it ' 'first by calling `.fit(numpy_data)`.') return x def get_random_transform(self, img_shape, seed=None): """Generates random parameters for a transformation. # Arguments seed: Random seed. img_shape: Tuple of integers. Shape of the image that is transformed. # Returns A dictionary containing randomly chosen parameters describing the transformation. 
""" img_row_axis = self.row_axis - 1 img_col_axis = self.col_axis - 1 if seed is not None: np.random.seed(seed) if self.rotation_range: theta = np.random.uniform( -self.rotation_range, self.rotation_range) else: theta = 0 if self.height_shift_range: try: # 1-D array-like or int tx = np.random.choice(self.height_shift_range) tx *= np.random.choice([-1, 1]) except ValueError: # floating point tx = np.random.uniform(-self.height_shift_range, self.height_shift_range) if np.max(self.height_shift_range) < 1: tx *= img_shape[img_row_axis] else: tx = 0 if self.width_shift_range: try: # 1-D array-like or int ty = np.random.choice(self.width_shift_range) ty *= np.random.choice([-1, 1]) except ValueError: # floating point ty = np.random.uniform(-self.width_shift_range, self.width_shift_range) if np.max(self.width_shift_range) < 1: ty *= img_shape[img_col_axis] else: ty = 0 if self.shear_range: shear = np.random.uniform( -self.shear_range, self.shear_range) else: shear = 0 if self.zoom_range[0] == 1 and self.zoom_range[1] == 1: zx, zy = 1, 1 else: zx, zy = np.random.uniform( self.zoom_range[0], self.zoom_range[1], 2) flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip flip_vertical = (np.random.random() < 0.5) * self.vertical_flip channel_shift_intensity = None if self.channel_shift_range != 0: channel_shift_intensity = np.random.uniform(-self.channel_shift_range, self.channel_shift_range) brightness = None if self.brightness_range is not None: brightness = np.random.uniform(self.brightness_range[0], self.brightness_range[1]) transform_parameters = {'theta': theta, 'tx': tx, 'ty': ty, 'shear': shear, 'zx': zx, 'zy': zy, 'flip_horizontal': flip_horizontal, 'flip_vertical': flip_vertical, 'channel_shift_intensity': channel_shift_intensity, 'brightness': brightness} return transform_parameters def apply_transform(self, x, transform_parameters): """Applies a transformation to an image according to given parameters. # Arguments x: 3D tensor, single image. transform_parameters: Dictionary with string - parameter pairs describing the transformation. Currently, the following parameters from the dictionary are used: - `'theta'`: Float. Rotation angle in degrees. - `'tx'`: Float. Shift in the x direction. - `'ty'`: Float. Shift in the y direction. - `'shear'`: Float. Shear angle in degrees. - `'zx'`: Float. Zoom in the x direction. - `'zy'`: Float. Zoom in the y direction. - `'flip_horizontal'`: Boolean. Horizontal flip. - `'flip_vertical'`: Boolean. Vertical flip. - `'channel_shift_intensity'`: Float. Channel shift intensity. - `'brightness'`: Float. Brightness shift intensity. # Returns A transformed version of the input (same shape). 
""" # x is a single image, so it doesn't have image number at index 0 img_row_axis = self.row_axis - 1 img_col_axis = self.col_axis - 1 img_channel_axis = self.channel_axis - 1 x = apply_affine_transform(x, transform_parameters.get('theta', 0), transform_parameters.get('tx', 0), transform_parameters.get('ty', 0), transform_parameters.get('shear', 0), transform_parameters.get('zx', 1), transform_parameters.get('zy', 1), row_axis=img_row_axis, col_axis=img_col_axis, channel_axis=img_channel_axis, fill_mode=self.fill_mode, cval=self.cval, order=self.interpolation_order) if transform_parameters.get('channel_shift_intensity') is not None: x = apply_channel_shift(x, transform_parameters['channel_shift_intensity'], img_channel_axis) if transform_parameters.get('flip_horizontal', False): x = flip_axis(x, img_col_axis) if transform_parameters.get('flip_vertical', False): x = flip_axis(x, img_row_axis) if transform_parameters.get('brightness') is not None: x = apply_brightness_shift(x, transform_parameters['brightness'], False) return x def random_transform(self, x, seed=None): """Applies a random transformation to an image. # Arguments x: 3D tensor, single image. seed: Random seed. # Returns A randomly transformed version of the input (same shape). """ params = self.get_random_transform(x.shape, seed) return self.apply_transform(x, params) def fit(self, x, augment=False, rounds=1, seed=None): """Fits the data generator to some sample data. This computes the internal data stats related to the data-dependent transformations, based on an array of sample data. Only required if `featurewise_center` or `featurewise_std_normalization` or `zca_whitening` are set to True. When `rescale` is set to a value, rescaling is applied to sample data before computing the internal data stats. # Arguments x: Sample data. Should have rank 4. In case of grayscale data, the channels axis should have value 1, in case of RGB data, it should have value 3, and in case of RGBA data, it should have value 4. augment: Boolean (default: False). Whether to fit on randomly augmented samples. rounds: Int (default: 1). If using data augmentation (`augment=True`), this is how many augmentation passes over the data to use. seed: Int (default: None). Random seed. """ x = np.asarray(x, dtype=self.dtype) if x.ndim != 4: raise ValueError('Input to `.fit()` should have rank 4. ' 'Got array with shape: ' + str(x.shape)) if x.shape[self.channel_axis] not in {1, 3, 4}: warnings.warn( 'Expected input to be images (as Numpy array) ' 'following the data format convention "' + self.data_format + '" (channels on axis ' + str(self.channel_axis) + '), i.e. expected ' 'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. 
' 'However, it was passed an array with shape ' + str(x.shape) + ' (' + str(x.shape[self.channel_axis]) + ' channels).') if seed is not None: np.random.seed(seed) x = np.copy(x) if self.rescale: x *= self.rescale if augment: ax = np.zeros( tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=self.dtype) for r in range(rounds): for i in range(x.shape[0]): ax[i + r * x.shape[0]] = self.random_transform(x[i]) x = ax if self.featurewise_center: self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis)) broadcast_shape = [1, 1, 1] broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] self.mean = np.reshape(self.mean, broadcast_shape) x -= self.mean if self.featurewise_std_normalization: self.std = np.std(x, axis=(0, self.row_axis, self.col_axis)) broadcast_shape = [1, 1, 1] broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis] self.std = np.reshape(self.std, broadcast_shape) x /= (self.std + 1e-6) if self.zca_whitening: n = len(x) flat_x = np.reshape(x, (n, -1)) u, s, _ = np.linalg.svd(flat_x.T, full_matrices=False) s_inv = np.sqrt(n) / (s + self.zca_epsilon) self.zca_whitening_matrix = (u * s_inv).dot(u.T)
keras-preprocessing/keras_preprocessing/image/image_data_generator.py/0
{ "file_path": "keras-preprocessing/keras_preprocessing/image/image_data_generator.py", "repo_id": "keras-preprocessing", "token_count": 21706 }
135
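The `ImageDataGenerator` methods documented above (`get_random_transform`, `apply_transform`, `standardize`) can also be used directly on a single image. Below is a minimal sketch under the assumption that `keras_preprocessing` and SciPy (needed for the affine transforms) are installed; the random input image is illustrative only.

import numpy as np
from keras_preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=20, horizontal_flip=True, rescale=1.0 / 255
)

# A fake RGB image stands in for real data.
image = np.random.randint(0, 256, size=(64, 64, 3)).astype("float32")

# Draw random parameters (theta, flips, ...) and apply them to the image.
params = datagen.get_random_transform(image.shape, seed=42)
augmented = datagen.apply_transform(image, params)

# standardize() works in place, so pass a copy if the original is still needed.
augmented = datagen.standardize(np.copy(augmented))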
from math import ceil import numpy as np import pytest from numpy.testing import assert_allclose, assert_equal, assert_raises from keras_preprocessing import sequence def test_pad_sequences(): a = [[1], [1, 2], [1, 2, 3]] # test padding b = sequence.pad_sequences(a, maxlen=3, padding='pre') assert_allclose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]]) b = sequence.pad_sequences(a, maxlen=3, padding='post') assert_allclose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]]) # test truncating b = sequence.pad_sequences(a, maxlen=2, truncating='pre') assert_allclose(b, [[0, 1], [1, 2], [2, 3]]) b = sequence.pad_sequences(a, maxlen=2, truncating='post') assert_allclose(b, [[0, 1], [1, 2], [1, 2]]) # test value b = sequence.pad_sequences(a, maxlen=3, value=1) assert_allclose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]]) def test_pad_sequences_str(): a = [['1'], ['1', '2'], ['1', '2', '3']] # test padding b = sequence.pad_sequences(a, maxlen=3, padding='pre', value='pad', dtype=object) assert_equal(b, [['pad', 'pad', '1'], ['pad', '1', '2'], ['1', '2', '3']]) b = sequence.pad_sequences(a, maxlen=3, padding='post', value='pad', dtype='<U3') assert_equal(b, [['1', 'pad', 'pad'], ['1', '2', 'pad'], ['1', '2', '3']]) # test truncating b = sequence.pad_sequences(a, maxlen=2, truncating='pre', value='pad', dtype=object) assert_equal(b, [['pad', '1'], ['1', '2'], ['2', '3']]) b = sequence.pad_sequences(a, maxlen=2, truncating='post', value='pad', dtype='<U3') assert_equal(b, [['pad', '1'], ['1', '2'], ['1', '2']]) with pytest.raises(ValueError, match="`dtype` int32 is not compatible with "): sequence.pad_sequences(a, maxlen=2, truncating='post', value='pad') def test_pad_sequences_vector(): a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]] # test padding b = sequence.pad_sequences(a, maxlen=3, padding='pre') assert_allclose(b, [[[0, 0], [0, 0], [1, 1]], [[0, 0], [2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]) b = sequence.pad_sequences(a, maxlen=3, padding='post') assert_allclose(b, [[[1, 1], [0, 0], [0, 0]], [[2, 1], [2, 2], [0, 0]], [[3, 1], [3, 2], [3, 3]]]) # test truncating b = sequence.pad_sequences(a, maxlen=2, truncating='pre') assert_allclose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3, 3]]]) b = sequence.pad_sequences(a, maxlen=2, truncating='post') assert_allclose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2]]]) # test value b = sequence.pad_sequences(a, maxlen=3, value=1) assert_allclose(b, [[[1, 1], [1, 1], [1, 1]], [[1, 1], [2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]) def test_make_sampling_table(): a = sequence.make_sampling_table(3) assert_allclose(a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=.1) def test_skipgrams(): # test with no window size and binary labels couples, labels = sequence.skipgrams(np.arange(3), vocabulary_size=3) for couple in couples: assert couple[0] in [0, 1, 2] and couple[1] in [0, 1, 2] # test window size and categorical labels couples, labels = sequence.skipgrams(np.arange(5), vocabulary_size=5, window_size=1, categorical=True) for couple in couples: assert couple[0] - couple[1] <= 3 for label in labels: assert len(label) == 2 def test_remove_long_seq(): maxlen = 5 seq = [ [1, 2, 3], [1, 2, 3, 4, 5, 6], ] label = ['a', 'b'] new_seq, new_label = sequence._remove_long_seq(maxlen, seq, label) assert new_seq == [[1, 2, 3]] assert new_label == ['a'] def test_TimeseriesGenerator_serde(): data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, 
batch_size=2) json_gen = data_gen.to_json() recovered_gen = sequence.timeseries_generator_from_json(json_gen) assert data_gen.batch_size == recovered_gen.batch_size assert data_gen.end_index == recovered_gen.end_index assert data_gen.length == recovered_gen.length assert data_gen.reverse == recovered_gen.reverse assert data_gen.sampling_rate == recovered_gen.sampling_rate assert data_gen.shuffle == recovered_gen.shuffle assert data_gen.start_index == data_gen.start_index assert data_gen.stride == data_gen.stride assert (data_gen.data == recovered_gen.data).all() assert (data_gen.targets == recovered_gen.targets).all() def test_TimeseriesGenerator(): data = np.array([[i] for i in range(50)]) targets = np.array([[i] for i in range(50)]) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, batch_size=2) assert len(data_gen) == 20 assert (np.allclose(data_gen[0][0], np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5], [7], [9]]]))) assert (np.allclose(data_gen[0][1], np.array([[10], [11]]))) assert (np.allclose(data_gen[1][0], np.array([[[2], [4], [6], [8], [10]], [[3], [5], [7], [9], [11]]]))) assert (np.allclose(data_gen[1][1], np.array([[12], [13]]))) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2) assert len(data_gen) == 20 assert (np.allclose(data_gen[0][0], np.array([[[8], [6], [4], [2], [0]], [[9], [7], [5], [3], [1]]]))) assert (np.allclose(data_gen[0][1], np.array([[10], [11]]))) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1) batch = data_gen[0] r = batch[1][0][0] assert (np.allclose(batch[0], np.array([[[r - 10], [r - 8], [r - 6], [r - 4], [r - 2]]]))) assert (np.allclose(batch[1], np.array([[r], ]))) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, stride=2, batch_size=2) assert len(data_gen) == 10 assert (np.allclose(data_gen[1][0], np.array([[[4], [6], [8], [10], [12]], [[6], [8], [10], [12], [14]]]))) assert (np.allclose(data_gen[1][1], np.array([[14], [16]]))) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2) assert len(data_gen) == 6 assert (np.allclose(data_gen[0][0], np.array([[[10], [12], [14], [16], [18]], [[11], [13], [15], [17], [19]]]))) assert (np.allclose(data_gen[0][1], np.array([[20], [21]]))) data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)]) targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)]) data_gen = sequence.TimeseriesGenerator(data, targets, length=10, sampling_rate=2, start_index=10, end_index=30, batch_size=2) assert len(data_gen) == 6 assert np.allclose(data_gen[0][0], np.array( [np.array(data[10:19:2]), np.array(data[11:20:2])])) assert (np.allclose(data_gen[0][1], np.array([targets[20], targets[21]]))) with assert_raises(ValueError) as context: sequence.TimeseriesGenerator(data, targets, length=50) error = str(context.exception) assert '`start_index+length=50 > end_index=49` is disallowed' in error def test_TimeSeriesGenerator_doesnt_miss_any_sample(): x = np.array([[i] for i in range(10)]) for length in range(3, 10): g = sequence.TimeseriesGenerator(x, x, length=length, batch_size=1) expected = max(0, len(x) - length) actual = len(g) assert expected == actual if len(g) > 0: # All elements in range(length, 10) should be used as current step expected = np.arange(length, 10).reshape(-1, 1) y = np.concatenate([g[ix][1] for ix in range(len(g))], 
axis=0) assert_allclose(y, expected) x = np.array([[i] for i in range(23)]) strides = (1, 1, 5, 7, 3, 5, 3) lengths = (3, 3, 4, 3, 1, 3, 7) batch_sizes = (6, 6, 6, 5, 6, 6, 6) shuffles = (False, True, True, False, False, False, False) for stride, length, batch_size, shuffle in zip(strides, lengths, batch_sizes, shuffles): g = sequence.TimeseriesGenerator(x, x, length=length, sampling_rate=1, stride=stride, start_index=0, end_index=None, shuffle=shuffle, reverse=False, batch_size=batch_size) if shuffle: # all batches have the same size when shuffle is True. expected_sequences = ceil( (23 - length) / float(batch_size * stride)) * batch_size else: # last batch will be different if `(samples - length) / stride` # is not a multiple of `batch_size`. expected_sequences = ceil((23 - length) / float(stride)) expected_batches = ceil(expected_sequences / float(batch_size)) y = [g[ix][1] for ix in range(len(g))] actual_sequences = sum(len(_y) for _y in y) actual_batches = len(y) assert expected_sequences == actual_sequences assert expected_batches == actual_batches if __name__ == '__main__': pytest.main([__file__])
keras-preprocessing/tests/sequence_test.py/0
{ "file_path": "keras-preprocessing/tests/sequence_test.py", "repo_id": "keras-preprocessing", "token_count": 6759 }
136
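For reference, a short usage sketch of the padding and truncation behavior exercised by the tests above; the example sequences are the same ones used in `test_pad_sequences`.

from keras_preprocessing import sequence

seqs = [[1], [1, 2], [1, 2, 3]]

# Pre-padding (the default) pads zeros at the front; post-padding at the back.
sequence.pad_sequences(seqs, maxlen=3, padding="pre")   # [[0 0 1] [0 1 2] [1 2 3]]
sequence.pad_sequences(seqs, maxlen=3, padding="post")  # [[1 0 0] [1 2 0] [1 2 3]]

# Truncation drops leading ("pre") or trailing ("post") timesteps.
sequence.pad_sequences(seqs, maxlen=2, truncating="post")  # [[0 1] [1 2] [1 2]]

# A custom fill value replaces the default 0.
sequence.pad_sequences(seqs, maxlen=3, value=9)  # [[9 9 1] [9 1 2] [1 2 3]]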
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/keras_tuner/hyperparameters/'" />
keras-tuner/docs/site/documentation/hyperparameters/index.html/0
{ "file_path": "keras-tuner/docs/site/documentation/hyperparameters/index.html", "repo_id": "keras-tuner", "token_count": 38 }
137
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_tuner.api_export import keras_tuner_export from keras_tuner.backend import keras from keras_tuner.backend import ops from keras_tuner.backend.keras import layers from keras_tuner.engine import hypermodel @keras_tuner_export("keras_tuner.applications.HyperXception") class HyperXception(hypermodel.HyperModel): """An Xception hypermodel. Models built by `HyperXception` take images with shape (height, width, channels) as input. The output are one-hot encoded with the length matching the number of classes specified by the `classes` argument. Args: include_top: Boolean, whether to include the fully-connected layer at the top of the network. input_shape: Optional shape tuple, e.g. `(256, 256, 3)`. One of `input_shape` or `input_tensor` must be specified. input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. One of `input_shape` or `input_tensor` must be specified. classes: Optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. **kwargs: Additional keyword arguments that apply to all hypermodels. See `keras_tuner.HyperModel`. """ def __init__( self, include_top=True, input_shape=None, input_tensor=None, classes=None, **kwargs, ): super().__init__(**kwargs) if include_top and classes is None: raise ValueError( "You must specify `classes` when `include_top=True`" ) if input_shape is None and input_tensor is None: raise ValueError( "You must specify either `input_shape` or `input_tensor`." ) self.include_top = include_top self.input_shape = input_shape self.input_tensor = input_tensor self.classes = classes def build(self, hp): activation = hp.Choice("activation", ["relu", "selu"]) # Model definition. if self.input_tensor is not None: inputs = keras.utils.get_source_inputs(self.input_tensor) x = self.input_tensor else: inputs = layers.Input(shape=self.input_shape) x = inputs # Initial conv2d. conv2d_num_filters = hp.Choice( "conv2d_num_filters", [32, 64, 128], default=64 ) kernel_size = hp.Choice("kernel_size", [3, 5]) initial_strides = hp.Choice("initial_strides", [2]) x = conv( x, conv2d_num_filters, kernel_size=kernel_size, activation=activation, strides=initial_strides, ) # Separable convs. sep_num_filters = hp.Int( "sep_num_filters", 128, 768, step=128, default=256 ) num_residual_blocks = hp.Int("num_residual_blocks", 2, 8, default=4) for _ in range(num_residual_blocks): x = residual( x, sep_num_filters, activation=activation, max_pooling=False ) # Exit flow. 
x = residual( x, 2 * sep_num_filters, activation=activation, max_pooling=True ) pooling = hp.Choice("pooling", ["avg", "flatten", "max"]) if pooling == "flatten": x = layers.Flatten()(x) elif pooling == "avg": x = layers.GlobalAveragePooling2D()(x) else: x = layers.GlobalMaxPooling2D()(x) if not self.include_top: return keras.Model(inputs, x, name="Xception") # Dense num_dense_layers = hp.Int("num_dense_layers", 1, 3) dropout_rate = hp.Float("dropout_rate", 0.0, 0.6, step=0.1, default=0.5) dense_use_bn = hp.Choice("dense_use_bn", [True, False]) for _ in range(num_dense_layers): x = dense( x, self.classes, activation=activation, batchnorm=dense_use_bn, dropout_rate=dropout_rate, ) output = layers.Dense(self.classes, activation="softmax")(x) model = keras.Model(inputs, output, name="Xception") model.compile( optimizer=keras.optimizers.Adam( hp.Choice("learning_rate", [1e-3, 1e-4, 1e-5]) ), loss="categorical_crossentropy", metrics=["accuracy"], ) return model def sep_conv(x, num_filters, kernel_size=(3, 3), activation="relu"): if activation == "selu": x = layers.SeparableConv2D( num_filters, kernel_size, activation="selu", padding="same", depthwise_initializer="lecun_normal", pointwise_initializer="lecun_normal", )(x) elif activation == "relu": x = layers.SeparableConv2D( num_filters, kernel_size, padding="same", use_bias=False )(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) return x def residual( x, num_filters, kernel_size=(3, 3), activation="relu", pool_strides=(2, 2), max_pooling=True, ): "Residual block." if max_pooling: res = layers.Conv2D( num_filters, kernel_size=(1, 1), strides=pool_strides, padding="same", )(x) elif num_filters != ops.shape(x)[-1]: res = layers.Conv2D(num_filters, kernel_size=(1, 1), padding="same")(x) else: res = x x = sep_conv(x, num_filters, kernel_size, activation) x = sep_conv(x, num_filters, kernel_size, activation) if max_pooling: x = layers.MaxPooling2D( kernel_size, strides=pool_strides, padding="same" )(x) x = layers.add([x, res]) return x def conv(x, num_filters, kernel_size=(3, 3), activation="relu", strides=(2, 2)): "2d convolution block." if activation == "selu": x = layers.Conv2D( num_filters, kernel_size, strides=strides, activation="selu", padding="same", kernel_initializer="lecun_normal", bias_initializer="zeros", )(x) elif activation == "relu": x = layers.Conv2D( num_filters, kernel_size, strides=strides, padding="same", use_bias=False, )(x) x = layers.BatchNormalization()(x) x = layers.Activation("relu")(x) return x def dense(x, dims, activation="relu", batchnorm=True, dropout_rate=0): if activation == "selu": x = layers.Dense( dims, activation="selu", kernel_initializer="lecun_normal", bias_initializer="zeros", )(x) if dropout_rate: x = layers.Dropout(dropout_rate)(x) elif activation == "relu": x = layers.Dense(dims, activation="relu")(x) if batchnorm: x = layers.BatchNormalization()(x) if dropout_rate: x = layers.Dropout(dropout_rate)(x) return x
keras-tuner/keras_tuner/applications/xception.py/0
{ "file_path": "keras-tuner/keras_tuner/applications/xception.py", "repo_id": "keras-tuner", "token_count": 3696 }
138
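A hedged sketch of plugging `HyperXception` into a tuner. The input shape, class count, and tuner settings below are illustrative assumptions, not values taken from the hypermodel itself.

import keras_tuner

hypermodel = keras_tuner.applications.HyperXception(
    input_shape=(128, 128, 3), classes=10
)
tuner = keras_tuner.RandomSearch(
    hypermodel,
    objective="val_accuracy",
    max_trials=3,
    overwrite=True,
    directory="hyper_xception_demo",
)
# With real image data available:
# tuner.search(x_train, y_train, validation_data=(x_val, y_val), epochs=2)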
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from unittest import mock

import pytest

from keras_tuner.distribute import utils
from keras_tuner.test_utils import mock_distribute


def test_no_port_error():
    with mock.patch.object(os, "environ", mock_distribute.MockEnvVars()):
        os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
        os.environ["KERASTUNER_TUNER_ID"] = "worker0"
        with pytest.raises(RuntimeError, match="KERASTUNER_ORACLE_PORT"):
            utils.has_chief_oracle()


def test_no_id_error():
    with mock.patch.object(os, "environ", mock_distribute.MockEnvVars()):
        os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
        os.environ["KERASTUNER_ORACLE_PORT"] = "80"
        with pytest.raises(RuntimeError, match="KERASTUNER_TUNER_ID"):
            utils.has_chief_oracle()
keras-tuner/keras_tuner/distribute/utils_test.py/0
{ "file_path": "keras-tuner/keras_tuner/distribute/utils_test.py", "repo_id": "keras-tuner", "token_count": 513 }
139
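The tests above pin down the environment contract for distributed tuning: a chief oracle is only considered configured when the IP, port, and tuner ID variables are all present. A small sketch of the success path (the addresses and IDs are placeholders):

import os

from keras_tuner.distribute import utils

# Setting all three variables makes has_chief_oracle() return True; leaving
# out the port or the tuner ID raises the RuntimeErrors tested above.
os.environ["KERASTUNER_ORACLE_IP"] = "127.0.0.1"
os.environ["KERASTUNER_ORACLE_PORT"] = "8000"
os.environ["KERASTUNER_TUNER_ID"] = "chief"

print(utils.has_chief_oracle())  # True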
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from keras_tuner import protos from keras_tuner.engine import hyperparameters as hp_module def test_float(): # Test with step arg linear = hp_module.Float( "linear", min_value=0.5, max_value=9.5, step=0.1, default=9.0 ) linear = hp_module.Float.from_config(linear.get_config()) assert linear.default == 9.0 assert 0.5 <= linear.random_sample() <= 9.5 assert isinstance(linear.random_sample(), float) assert linear.random_sample(123) == linear.random_sample(123) # Test without step arg linear = hp_module.Float( "linear", min_value=0.5, max_value=6.5, default=2.0 ) linear = hp_module.Float.from_config(linear.get_config()) assert linear.default == 2.0 assert 0.5 <= linear.random_sample() < 6.5 assert isinstance(linear.random_sample(), float) assert linear.random_sample(123) == linear.random_sample(123) # No default linear = hp_module.Float("linear", min_value=0.5, max_value=9.5, step=0.1) assert linear.default == 0.5 def test_float_linear_value_to_prob_no_step(): rg = hp_module.Float("rg", min_value=1.0, max_value=11.0) assert abs(rg.value_to_prob(4.5) - 0.35) < 1e-4 assert rg.prob_to_value(0.35) == 4.5 def test_float_log_with_step(): rg = hp_module.Float( "rg", min_value=0.01, max_value=100, step=10, sampling="log" ) for _ in range(10): assert rg.random_sample() in [0.01, 0.1, 1.0, 10.0, 100.0] assert abs(rg.value_to_prob(0.1) - 0.3) < 1e-4 assert rg.prob_to_value(0.3) == 0.1 def test_float_reverse_log_with_step(): rg = hp_module.Float( "rg", min_value=0.01, max_value=100, step=10, sampling="reverse_log" ) for _ in range(10): # print(rg.random_sample()) # assert rg.random_sample() in [0.01, 0.1, 1.0, 10.0, 100.0] # [0.09, 0.9, 9, 90] # [90, 9, 0.9, 0.09] sample = rg.random_sample() assert any( abs(sample - x) < 1e-4 for x in [0.01, 90.01, 99.01, 99.91, 100.0] ) assert abs(rg.value_to_prob(99.91) - 0.3) < 1e-4 assert abs(rg.prob_to_value(0.3) - 99.91) < 1e-4 def test_sampling_zero_length_intervals(): f = hp_module.Float("f", 2, 2) rand_sample = f.random_sample() assert rand_sample == 2 val = 2 prob = f.value_to_prob(val) assert prob == 0.5 def test_log_sampling_random_state(): f = hp_module.Float("f", 1e-3, 1e3, sampling="log") rand_sample = f.random_sample() assert rand_sample >= f.min_value assert rand_sample <= f.max_value val = 1e-3 prob = f.value_to_prob(val) assert prob == 0 new_val = f.prob_to_value(prob) assert np.isclose(val, new_val) val = 1 prob = f.value_to_prob(val) assert prob == 0.5 new_val = f.prob_to_value(prob) assert np.isclose(val, new_val) val = 1e3 prob = f.value_to_prob(val) assert prob == 1 new_val = f.prob_to_value(prob) assert np.isclose(val, new_val) def test_reverse_log_sampling_random_state(): f = hp_module.Float("f", 1e-3, 1e3, sampling="reverse_log") rand_sample = f.random_sample() assert rand_sample >= f.min_value assert rand_sample <= f.max_value val = 1e-3 prob = f.value_to_prob(val) assert prob == 0 new_val = f.prob_to_value(prob) assert np.isclose(val, new_val) val = 1 
prob = f.value_to_prob(val) assert prob > 0 and prob < 1 new_val = f.prob_to_value(prob) assert np.isclose(val, new_val) def test_float_sampling_arg(): f = hp_module.Float("f", 1e-20, 1e10, sampling="log") f = hp_module.Float.from_config(f.get_config()) assert f.sampling == "log" def test_float_proto(): hp = hp_module.Float("a", -10, 10, sampling="linear", default=3) proto = hp.to_proto() assert proto.name == "a" assert proto.min_value == -10.0 assert proto.max_value == 10.0 assert proto.sampling == protos.get_proto().Sampling.LINEAR assert proto.default == 3.0 # Zero is the default, gets converted to `None` in `from_proto`. assert proto.step == 0.0 new_hp = hp_module.Float.from_proto(proto) assert new_hp.get_config() == hp.get_config() def test_float_values_property_with_step(): assert list(hp_module.Float("float", 2, 8, 2).values) == [ 2.0, 4.0, 6.0, 8.0, ] assert isinstance(list(hp_module.Float("float", 2, 8, 2).values)[0], float) assert list( hp_module.Float("float", 0.1, 100.0, 10, sampling="log").values ) == [ 0.1, 1.0, 10.0, 100.0, ] def test_float_values_property_without_step(): assert len(list(hp_module.Float("float", 2, 4).values)) == 10 assert len(list(hp_module.Float("float", 2, 20).values)) == 10 assert ( len(list(hp_module.Float("float", 2, 1024, sampling="log").values)) == 10 )
keras-tuner/keras_tuner/engine/hyperparameters/hp_types/float_hp_test.py/0
{ "file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/float_hp_test.py", "repo_id": "keras-tuner", "token_count": 2367 }
140
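A brief usage sketch of the `Float` hyperparameter behavior covered by the tests above: stepped linear sampling and log sampling. The hyperparameter names are illustrative.

from keras_tuner.engine import hyperparameters as hp_module

linear = hp_module.Float(
    "width_scale", min_value=0.5, max_value=9.5, step=0.1, default=9.0
)
log = hp_module.Float(
    "learning_rate", min_value=1e-4, max_value=1e-2, sampling="log"
)

print(linear.random_sample(123))  # reproducible for a fixed seed
print(log.value_to_prob(1e-3))    # 0.5: midpoint of the log-scaled range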
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"Stateful base class."

from keras_tuner import utils


class Stateful:
    """The base class for saving and restoring the state.

    The functionality in this class lets the user resume a previously
    stopped program by running the same code again.

    Usually, the arguments in `__init__()` should not be added to the state.
    The `Stateful` objects can get the arguments again just by running the
    same code even without loading the previous states.

    The state of an object here is the results produced during running the
    program, which can save time for the user if restored.
    """

    def get_state(self):
        """Returns the current state of this object.

        This method is called during `save`.

        Returns:
            A dictionary of serializable objects as the state.
        """
        raise NotImplementedError

    def set_state(self, state):
        """Sets the current state of this object.

        This method is called during `reload`.

        Args:
            state: A dictionary of serialized objects as the state to
                restore.
        """
        raise NotImplementedError

    def save(self, fname):
        """Saves this object using `get_state`.

        Args:
            fname: A string, the file name to save to.

        Returns:
            String. The serialized state of the object.
        """
        return utils.save_json(fname, self.get_state())

    def reload(self, fname):
        """Reloads this object using `set_state`.

        Args:
            fname: A string, the file name to restore from.
        """
        self.set_state(utils.load_json(fname))
keras-tuner/keras_tuner/engine/stateful.py/0
{
    "file_path": "keras-tuner/keras_tuner/engine/stateful.py",
    "repo_id": "keras-tuner",
    "token_count": 756
}
141
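A minimal sketch of how the `Stateful` contract above might be implemented and used; the `Counter` class, its `count` field, and the file name are illustrative and not taken from the repository:

# Illustrative subclass of the Stateful base class defined above.
from keras_tuner.engine import stateful


class Counter(stateful.Stateful):
    def __init__(self):
        # Constructor arguments are not part of the state; only results
        # produced while running are.
        self.count = 0

    def increment(self):
        self.count += 1

    def get_state(self):
        # Called by `save`; must return JSON-serializable objects.
        return {"count": self.count}

    def set_state(self, state):
        # Called by `reload`; restores the previously saved results.
        self.count = state["count"]


counter = Counter()
counter.increment()
counter.save("counter.json")  # writes the state via utils.save_json

restored = Counter()
restored.reload("counter.json")  # restores count == 1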
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Basic exhaustive search tuner."

import collections
import copy

from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import oracle as oracle_module
from keras_tuner.engine import trial as trial_module
from keras_tuner.engine import tuner as tuner_module


class LinkedList:
    """A simplified linked list with limited supported operations.

    It doesn't copy any data passed to it but directly refers to it.
    """

    def __init__(self):
        # _memory is a list to store data.
        # Its index is the address for the linked list.
        # index to data
        self._memory = []
        self._data_to_index = {}
        # index to index
        self._next_index = collections.defaultdict(lambda: None)
        self._last_index = None

    def insert(self, data, data_pre=None):
        """Insert data after another data.

        `data` is inserted after `data_pre` in the linked list.

        Args:
            data: The data to insert.
            data_pre: Optional. The data marking the insertion location. If
                left unspecified, the data will be appended to the rear of
                the linked list.
        """
        self._memory.append(data)
        new_index = len(self._memory) - 1
        self._data_to_index[data] = new_index

        index = (
            self._last_index
            if data_pre is None
            else self._data_to_index[data_pre]
        )
        self._next_index[new_index] = self._next_index[index]
        self._next_index[index] = new_index

        # Update self._last_index.
        while self._next_index[self._last_index] is not None:
            self._last_index = self._next_index[self._last_index]

    def next(self, data):
        """Get the next data for a given data.

        Args:
            data: The data used to get its next data in the linked list.

        Returns:
            The next data if it exists. Otherwise, return None.
        """
        index = self._data_to_index[data]
        next_index = self._next_index[index]
        if next_index is None:
            return None
        return self._memory[next_index]


@keras_tuner_export("keras_tuner.oracles.GridSearchOracle")
class GridSearchOracle(oracle_module.Oracle):
    """Grid search oracle.

    Args:
        objective: A string, `keras_tuner.Objective` instance, or a list of
            `keras_tuner.Objective`s and strings. If a string, the direction
            of the optimization (min or max) will be inferred. If a list of
            `keras_tuner.Objective`, we will minimize the sum of all the
            objectives to minimize subtracting the sum of all the objectives
            to maximize. The `objective` argument is optional when
            `Tuner.run_trial()` or `HyperModel.fit()` returns a single float
            as the objective to minimize.
        max_trials: Optional integer, the total number of trials (model
            configurations) to test at most. Note that the oracle may
            interrupt the search before `max_trial` models have been tested
            if the search space has been exhausted. If left unspecified, it
            runs till the search space is exhausted.
        seed: Optional integer, the random seed.
        hyperparameters: Optional `HyperParameters` instance. Can be used to
            override (or register in advance) hyperparameters in the search
            space.
        tune_new_entries: Boolean, whether hyperparameter entries that are
            requested by the hypermodel but that were not specified in
            `hyperparameters` should be added to the search space, or not.
            If not, then the default value for these parameters will be
            used. Defaults to True.
        allow_new_entries: Boolean, whether the hypermodel is allowed to
            request hyperparameter entries not listed in `hyperparameters`.
            Defaults to True.
        max_retries_per_trial: Integer. Defaults to 0. The maximum number of
            times to retry a `Trial` if the trial crashed or the results are
            invalid.
        max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
            number of consecutive failed `Trial`s. When this number is
            reached, the search will be stopped. A `Trial` is marked as
            failed when none of the retries succeeded.
    """

    def __init__(
        self,
        objective=None,
        max_trials=None,
        seed=None,
        hyperparameters=None,
        allow_new_entries=True,
        tune_new_entries=True,
        max_retries_per_trial=0,
        max_consecutive_failed_trials=3,
    ):
        super().__init__(
            objective=objective,
            max_trials=max_trials,
            hyperparameters=hyperparameters,
            tune_new_entries=tune_new_entries,
            allow_new_entries=allow_new_entries,
            seed=seed,
            max_retries_per_trial=max_retries_per_trial,
            max_consecutive_failed_trials=max_consecutive_failed_trials,
        )
        # List of trial_ids sorted in ascending alphabetical order of their
        # hp values.
        self._ordered_ids = LinkedList()
        # Queue of trial_ids pending to find their next combinations.
        self._populate_next = []

    def populate_space(self, trial_id):
        """Fill the hyperparameter space with values.

        Args:
            trial_id: A string, the ID for this Trial.

        Returns:
            A dictionary with keys "values" and "status", where "values" is
            a mapping of parameter names to suggested values, and "status"
            is one of "RUNNING" (the trial can start normally), "IDLE" (the
            oracle is waiting on something and cannot create a trial), or
            "STOPPED" (the oracle has finished searching and no new trial
            should be created).
        """
        values = None

        # See if this is the first trial.
        if len(self.start_order) == 0:
            # Use all default values for the first trial.
            self._ordered_ids.insert(trial_id)
            hps = self.get_space()
            values = {
                hp.name: hp.default
                for hp in self.get_space().space
                if hps.is_active(hp)
            }
            # Although the trial is not finished, we still push it into
            # _populate_next to quickly generate values for the first few
            # trials for multiple workers. The same trial_id will be pushed
            # into _populate_next again when the trial is finished, just in
            # case new hps appeared during the trial.
            self._populate_next.append(trial_id)

        # Pick tried values to create its next combination if not tried.
        while len(self._populate_next) > 0 and values is None:
            old_trial_id = self._populate_next.pop(0)
            # Create its immediate next combination.
            old_values = self.trials[old_trial_id].hyperparameters.values
            new_values = self._get_next_combination(old_values)
            if new_values is None:
                continue
            # Skip if the next combination has already been tried.
            next_id = self._ordered_ids.next(old_trial_id)
            if next_id is not None:
                next_values = self.trials[next_id].hyperparameters.values
                if self._compare(new_values, next_values) >= 0:
                    continue
            self._ordered_ids.insert(trial_id, old_trial_id)
            values = new_values

        if values is not None:
            return {
                "status": trial_module.TrialStatus.RUNNING,
                "values": values,
            }

        # Wait for the ongoing trials to finish when the values queue is
        # empty, in case any new hp is discovered.
        if len(self.ongoing_trials) > 0:
            return {"status": trial_module.TrialStatus.IDLE, "values": None}

        # Reaching this point means ongoing_trials, values, and
        # _populate_next are all empty.
        return {"status": trial_module.TrialStatus.STOPPED, "values": None}

    def _compare(self, a, b):
        """Compare two `HyperParameters`' values.

        The smallest index where a differs from b decides which one is
        larger. In the values of one `HyperParameter`, the default value is
        the smallest. The rest are sorted according to their order in
        `HyperParameter.values`. If one value is the prefix of another, the
        longer one is larger.

        Args:
            a: Dict. HyperParameters values. Only active values are
                included.
            b: Dict. HyperParameters values. Only active values are
                included.

        Returns:
            -1 if a < b, 0 if a == b, 1 if a > b.
        """
        hps = self.get_space()
        for hp in hps.space:
            # The hp is not active in either a or b. Whether it is active
            # should be the same in a and b, or the loop would have stopped
            # at the parent values, which are different.
            if hp.name not in a:
                continue
            if a[hp.name] == b[hp.name]:
                continue
            # Get an ordered list of the values of the hp.
            value_list = list(hp.values)
            if hp.default in value_list:
                value_list.remove(hp.default)
            value_list.insert(0, hp.default)
            index_a = value_list.index(a[hp.name])
            index_b = value_list.index(b[hp.name])
            return -1 if index_a < index_b else 1
        return 0

    def _get_next_combination(self, values):
        """Get the next value combination to try.

        Given the last trial's values dictionary, this method retrieves the
        next hyperparameter values to try. As it requires the last trial's
        values as input, it should not be called on the first trial. The
        first trial will always use default hp values.

        This oracle iterates over the search space entirely
        deterministically. When a new hp appears in a trial, the first value
        tried for that hp will be its default value.

        Args:
            values: Dict. The keys are hp names. The values are the hp
                values from the last trial.

        Returns:
            Dict or None. The next possible value combination for the
            hyperparameters. If no next combination exists (values is the
            last combination), it returns None. The returned values only
            include the active ones.
        """
        hps = self.get_space()
        all_values = {}
        for hp in hps.space:
            value_list = list(hp.values)
            if hp.default in value_list:
                value_list.remove(hp.default)
            # Put the default value first.
            all_values[hp.name] = [hp.default] + value_list
        default_values = {hp.name: hp.default for hp in hps.space}
        hps.values = copy.deepcopy(values)

        bumped_value = False

        # Iterate in reverse order so that we can change the value under
        # conditional scope first instead of changing the condition value
        # first.
        for hp in reversed(hps.space):
            name = hp.name
            # Bump up the hp value if possible and active.
            if hps.is_active(hp):
                value = hps.values[name]
                if value != all_values[name][-1]:
                    index = all_values[name].index(value) + 1
                    hps.values[name] = all_values[name][index]
                    bumped_value = True
                    break
            # Otherwise, reset to its first value.
            hps.values[name] = default_values[name]

        hps.ensure_active_values()
        return hps.values if bumped_value else None

    @oracle_module.synchronized
    def end_trial(self, trial):
        super().end_trial(trial)
        # It is OK for a trial_id to be pushed into _populate_next multiple
        # times. It will be skipped during _populate_space if its next
        # combination has been tried.
        # To avoid blocking _populate_space, we push it regardless of the
        # status.
        self._populate_next.append(trial.trial_id)


@keras_tuner_export(["keras_tuner.GridSearch", "keras_tuner.tuners.GridSearch"])
class GridSearch(tuner_module.Tuner):
    """The grid search tuner.

    This tuner iterates over all possible hyperparameter combinations.

    For example, with:

    ```py
    optimizer = hp.Choice("model_name", values=["sgd", "adam"])
    learning_rate = hp.Choice("learning_rate", values=[0.01, 0.1])
    ```

    This tuner will cover the following combinations:
    `["sgd", 0.01], ["sgd", 0.1], ["adam", 0.01], ["adam", 0.1]`.

    For the following hyperparameter types, GridSearch will not exhaust all
    possible values:

    * `hp.Float()` when `step` is left unspecified.
    * `hp.Int()` with `sampling` set to `"log"` or `"reverse_log"`, and
      `step` is left unspecified.

    For these cases, KerasTuner will pick 10 samples in the range evenly by
    default. To configure the granularity of sampling for `hp.Float()` and
    `hp.Int()`, please use the `step` argument in their initializers.

    Args:
        hypermodel: Instance of `HyperModel` class (or callable that takes
            hyperparameters and returns a `Model` instance). It is optional
            when `Tuner.run_trial()` is overridden and does not use
            `self.hypermodel`.
        objective: A string, `keras_tuner.Objective` instance, or a list of
            `keras_tuner.Objective`s and strings. If a string, the direction
            of the optimization (min or max) will be inferred. If a list of
            `keras_tuner.Objective`, we will minimize the sum of all the
            objectives to minimize subtracting the sum of all the objectives
            to maximize. The `objective` argument is optional when
            `Tuner.run_trial()` or `HyperModel.fit()` returns a single float
            as the objective to minimize.
        max_trials: Optional integer, the total number of trials (model
            configurations) to test at most. Note that the oracle may
            interrupt the search before `max_trial` models have been tested
            if the search space has been exhausted. If left unspecified, it
            runs till the search space is exhausted.
        seed: Optional integer, the random seed.
        hyperparameters: Optional `HyperParameters` instance. Can be used to
            override (or register in advance) hyperparameters in the search
            space.
        tune_new_entries: Boolean, whether hyperparameter entries that are
            requested by the hypermodel but that were not specified in
            `hyperparameters` should be added to the search space, or not.
            If not, then the default value for these parameters will be
            used. Defaults to True.
        allow_new_entries: Boolean, whether the hypermodel is allowed to
            request hyperparameter entries not listed in `hyperparameters`.
            Defaults to True.
        max_retries_per_trial: Integer. Defaults to 0. The maximum number of
            times to retry a `Trial` if the trial crashed or the results are
            invalid.
        max_consecutive_failed_trials: Integer. Defaults to 3. The maximum
            number of consecutive failed `Trial`s. When this number is
            reached, the search will be stopped. A `Trial` is marked as
            failed when none of the retries succeeded.
        **kwargs: Keyword arguments relevant to all `Tuner` subclasses.
            Please see the docstring for `Tuner`.
    """

    def __init__(
        self,
        hypermodel=None,
        objective=None,
        max_trials=None,
        seed=None,
        hyperparameters=None,
        tune_new_entries=True,
        allow_new_entries=True,
        max_retries_per_trial=0,
        max_consecutive_failed_trials=3,
        **kwargs,
    ):
        self.seed = seed
        oracle = GridSearchOracle(
            objective=objective,
            max_trials=max_trials,
            seed=seed,
            hyperparameters=hyperparameters,
            tune_new_entries=tune_new_entries,
            allow_new_entries=allow_new_entries,
            max_retries_per_trial=max_retries_per_trial,
            max_consecutive_failed_trials=max_consecutive_failed_trials,
        )
        super().__init__(oracle, hypermodel, **kwargs)
keras-tuner/keras_tuner/tuners/gridsearch.py/0
{
    "file_path": "keras-tuner/keras_tuner/tuners/gridsearch.py",
    "repo_id": "keras-tuner",
    "token_count": 7058
}
142
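A short sketch of how the `GridSearch` tuner defined above might be driven end to end. The `build_model` function, the layer sizes, the directory name, and the random data are assumptions made for illustration, not taken from the file; the tuner arguments used are the ones documented in the docstring above plus the standard `Tuner` keyword arguments.

# Illustrative GridSearch usage; names and data below are assumptions.
import keras
import keras_tuner
import numpy as np


def build_model(hp):
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(hp.Choice("units", [8, 16]), activation="relu")
    )
    model.add(keras.layers.Dense(1))
    model.compile(
        optimizer=hp.Choice("optimizer", ["sgd", "adam"]),
        loss="mse",
    )
    return model


# With two hp.Choice values each, the oracle enumerates 2 x 2 = 4 trials.
tuner = keras_tuner.GridSearch(
    hypermodel=build_model,
    objective="val_loss",
    # max_trials is omitted; the search stops when the space is exhausted.
    overwrite=True,
    directory="grid_search_demo",
)

x = np.random.rand(64, 4)
y = np.random.rand(64, 1)
tuner.search(x, y, validation_split=0.25, epochs=1)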