Dataset columns: `text` (string, length 5–261k), `id` (string, length 16–106), `metadata` (dict), `__index_level_0__` (int64, 0–266)
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools

import numpy as np

from keras_cv import bounding_box
from keras_cv import utils
from keras_cv.api_export import keras_cv_export
from keras_cv.utils import assert_cv2_installed
from keras_cv.utils import assert_matplotlib_installed
from keras_cv.visualization.draw_bounding_boxes import draw_bounding_boxes
from keras_cv.visualization.plot_image_gallery import plot_image_gallery

try:
    from matplotlib import patches
except ImportError:
    patches = None


@keras_cv_export("keras_cv.visualization.plot_bounding_box_gallery")
def plot_bounding_box_gallery(
    images,
    value_range,
    bounding_box_format,
    y_true=None,
    y_pred=None,
    true_color=(0, 188, 212),
    pred_color=(255, 235, 59),
    line_thickness=2,
    font_scale=1.0,
    text_thickness=None,
    class_mapping=None,
    ground_truth_mapping=None,
    prediction_mapping=None,
    legend=False,
    legend_handles=None,
    rows=3,
    cols=3,
    **kwargs
):
    """Plots a gallery of images with corresponding bounding box annotations.

    Usage:
    ```python
    train_ds = tfds.load(
        "voc/2007", split="train", with_info=False, shuffle_files=True
    )

    def unpackage_tfds_inputs(inputs):
        image = inputs["image"]
        boxes = inputs["objects"]["bbox"]
        classes = inputs["objects"]["label"]
        bounding_boxes = {"classes": classes, "boxes": boxes}
        return image, bounding_boxes

    train_ds = train_ds.map(unpackage_tfds_inputs)
    train_ds = train_ds.apply(tf.data.experimental.dense_to_ragged_batch(16))
    images, boxes = next(iter(train_ds.take(1)))

    keras_cv.visualization.plot_bounding_box_gallery(
        images,
        value_range=(0, 255),
        bounding_box_format="xywh",
        y_true=boxes,
        scale=3,
        rows=2,
        cols=2,
        line_thickness=4,
        font_scale=1,
        legend=True,
    )
    ```

    ![Example bounding box gallery](https://i.imgur.com/tJpb8hZ.png)

    Args:
        images: a Tensor or NumPy array containing images to show in the
            gallery.
        value_range: value range of the images. Common examples include
            `(0, 255)` and `(0, 1)`.
        bounding_box_format: the bounding_box_format the provided bounding
            boxes are in.
        y_true: (Optional) a KerasCV bounding box dictionary representing the
            ground truth bounding boxes.
        y_pred: (Optional) a KerasCV bounding box dictionary representing the
            predicted bounding boxes.
        true_color: three element tuple representing the color to use for
            plotting true bounding boxes.
        pred_color: three element tuple representing the color to use for
            plotting predicted bounding boxes.
        class_mapping: (Optional) class mapping from class IDs to strings.
        ground_truth_mapping: (Optional) class mapping from class IDs to
            strings, defaults to `class_mapping`.
        prediction_mapping: (Optional) class mapping from class IDs to
            strings, defaults to `class_mapping`.
        line_thickness: (Optional) line thickness for the box and text labels.
            Defaults to 2.
        text_thickness: (Optional) the line thickness for the text, defaults
            to `1.0`.
        font_scale: (Optional) font size to draw bounding boxes in.
        legend: whether to create a legend with the specified colors for
            `y_true` and `y_pred`, defaults to False.
        legend_handles: (Optional) a list of pre-built `matplotlib` legend
            handles (e.g. `matplotlib.patches.Patch`) to use for the figure
            legend; mutually exclusive with `legend=True`.
        rows: number of rows in the gallery, passed through to
            `plot_image_gallery()`. Defaults to 3.
        cols: number of columns in the gallery, passed through to
            `plot_image_gallery()`. Defaults to 3.
        kwargs: keyword arguments to propagate to
            `keras_cv.visualization.plot_image_gallery()`.
    """
    assert_matplotlib_installed("plot_bounding_box_gallery")
    assert_cv2_installed("plot_bounding_box_gallery")

    prediction_mapping = prediction_mapping or class_mapping
    ground_truth_mapping = ground_truth_mapping or class_mapping

    plotted_images = utils.to_numpy(images)

    draw_fn = functools.partial(
        draw_bounding_boxes,
        bounding_box_format="xyxy",
        line_thickness=line_thickness,
        text_thickness=text_thickness,
        font_scale=font_scale,
    )

    if y_true is not None:
        y_true = y_true.copy()
        y_true["boxes"] = utils.to_numpy(y_true["boxes"])
        y_true["classes"] = utils.to_numpy(y_true["classes"])
        y_true = bounding_box.convert_format(
            y_true, images=images, source=bounding_box_format, target="xyxy"
        )
        plotted_images = draw_fn(
            plotted_images,
            y_true,
            true_color,
            class_mapping=ground_truth_mapping,
        )

    if y_pred is not None:
        y_pred = y_pred.copy()
        y_pred["boxes"] = utils.to_numpy(y_pred["boxes"])
        y_pred["classes"] = utils.to_numpy(y_pred["classes"])
        y_pred = bounding_box.convert_format(
            y_pred, images=images, source=bounding_box_format, target="xyxy"
        )
        plotted_images = draw_fn(
            plotted_images, y_pred, pred_color, class_mapping=prediction_mapping
        )

    if legend:
        if legend_handles:
            raise ValueError(
                "Only pass `legend` OR `legend_handles` to "
                "`keras_cv.visualization.plot_bounding_box_gallery()`."
            )
        legend_handles = [
            patches.Patch(
                color=np.array(true_color) / 255.0,
                label="Ground Truth",
            ),
            patches.Patch(
                color=np.array(pred_color) / 255.0,
                label="Prediction",
            ),
        ]

    return plot_image_gallery(
        plotted_images,
        value_range,
        legend_handles=legend_handles,
        rows=rows,
        cols=cols,
        **kwargs
    )
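# Usage sketch (hypothetical, not part of the library): overlaying predictions
# next to the ground truth. Assumes a trained KerasCV object detector bound to
# `model` and the `images`/`boxes` batch prepared as in the docstring example;
# the prediction dictionary and its box format are assumptions and should match
# whatever your detector actually returns.
#
#   y_pred = model.predict(images)
#   keras_cv.visualization.plot_bounding_box_gallery(
#       images,
#       value_range=(0, 255),
#       bounding_box_format="xywh",
#       y_true=boxes,   # drawn with `true_color`
#       y_pred=y_pred,  # drawn with `pred_color`
#       rows=2,
#       cols=2,
#       legend=True,
#   )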
keras-cv/keras_cv/visualization/plot_bounding_box_gallery.py/0
{ "file_path": "keras-cv/keras_cv/visualization/plot_bounding_box_gallery.py", "repo_id": "keras-cv", "token_count": 2665 }
82
#!/bin/bash
# Usage:
#   lint.sh can be used without arguments to lint the entire project:
#
#     ./lint.sh
#
#   or with arguments to lint a subset of files
#
#     ./lint.sh examples/*

files="."
if [ $# -ne 0 ]
  then
    files=$@
fi

isort -c $files
if ! [ $? -eq 0 ]
  then
    echo "Please run \"sh shell/format.sh\" to format the code."
    isort --version
    black --version
    exit 1
fi
[ $# -eq 0 ] && echo "no issues with isort"

flake8 $files
if ! [ $? -eq 0 ]
  then
    echo "Please fix the code style issue."
    exit 1
fi
[ $# -eq 0 ] && echo "no issues with flake8"

black --check $files
if ! [ $? -eq 0 ]
  then
    echo "Please run \"sh shell/format.sh\" to format the code."
    exit 1
fi
[ $# -eq 0 ] && echo "no issues with black"

for i in $(find keras_cv -name '*.py')  # or whatever other pattern...
do
  if ! grep -q Copyright $i
  then
    echo "Copyright not found in $i"
    exit 1
  fi
done
echo "linting success!"
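# Note: the error messages above point at `shell/format.sh`, which is not part
# of this row. A plausible minimal sketch of such a formatter (an assumption,
# not the actual script) would simply run the same tools in fix mode:
#
#   #!/bin/bash
#   files="."
#   if [ $# -ne 0 ]; then files=$@; fi
#   isort $files
#   black $files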
keras-cv/shell/lint.sh/0
{ "file_path": "keras-cv/shell/lint.sh", "repo_id": "keras-cv", "token_count": 348 }
83
site_name: Keras Documentation
theme: readthedocs
docs_dir: sources
repo_url: https://github.com/keras-team/keras-docs-ja
site_url: http://keras.io/ja/
# theme_dir: ../keras/docs/theme
site_description: 'Documentation for Keras, the Python Deep Learning library.'

dev_addr: '0.0.0.0:8000'
google_analytics: ['UA-61785484-1', 'keras.io']

pages:
- Home: index.md
- なぜKerasを使うか?: why-use-keras.md
- 初めに:
  - Sequentialモデルのガイド: getting-started/sequential-model-guide.md
  - Functional APIのガイド: getting-started/functional-api-guide.md
  - FAQ: getting-started/faq.md
- モデル:
  - モデルについて: models/about-keras-models.md
  - Sequentialモデル: models/sequential.md
  - Modelクラス (functional API): models/model.md
- レイヤー:
  - レイヤーについて: layers/about-keras-layers.md
  - Coreレイヤー: layers/core.md
  - Convolutionalレイヤー: layers/convolutional.md
  - Poolingレイヤー: layers/pooling.md
  - Locally-connectedレイヤー: layers/local.md
  - Recurrentレイヤー: layers/recurrent.md
  - Embeddingレイヤー: layers/embeddings.md
  - Mergeレイヤー: layers/merge.md
  - Advanced Activationsレイヤー: layers/advanced-activations.md
  - Normalizationレイヤー: layers/normalization.md
  - Noiseレイヤー: layers/noise.md
  - レイヤーラッパー: layers/wrappers.md
  - オリジナルのKerasレイヤーを作成する: layers/writing-your-own-keras-layers.md
- データの前処理:
  - シーケンスの前処理: preprocessing/sequence.md
  - テキストの前処理: preprocessing/text.md
  - 画像の前処理: preprocessing/image.md
- 損失関数: losses.md
- 評価関数: metrics.md
- 最適化: optimizers.md
- 活性化関数: activations.md
- コールバック: callbacks.md
- データセット: datasets.md
- Applications: applications.md
- バックエンド: backend.md
- 初期化: initializers.md
- 正則化: regularizers.md
- 制約: constraints.md
- 可視化: visualization.md
- Scikit-learn API: scikit-learn-api.md
- ユーティリティ: utils.md
- 貢献 : contributing.md
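# To preview this configuration locally, the standard MkDocs CLI can be used
# (the dev_addr above sets the default bind address for `mkdocs serve`):
#
#   pip install mkdocs
#   mkdocs serve   # live-reloading preview at http://0.0.0.0:8000
#   mkdocs build   # writes the rendered static site to ./site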
keras-docs-ja/mkdocs.yml/0
{ "file_path": "keras-docs-ja/mkdocs.yml", "repo_id": "keras-docs-ja", "token_count": 827 }
84
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L743)</span> ### Dense ```python keras.layers.Dense(units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None) ``` 通常の全結合ニューラルネットワークレイヤー. `Dense`が実行する操作:`output = activation(dot(input, kernel) + bias)`ただし,`activation`は`activation`引数として渡される要素単位の活性化関数で,`kernel`はレイヤーによって作成された重み行列であり,`bias`はレイヤーによって作成されたバイアスベクトルです.(`use_bias`が`True`の場合にのみ適用されます). - 注意:レイヤーへの入力のランクが2より大きい場合は,`kernel`を使用した最初のドット積の前に平坦化されます. __例__ ```python # as first layer in a sequential model: model = Sequential() model.add(Dense(32, input_shape=(16,))) # now the model will take as input arrays of shape (*, 16) # and output arrays of shape (*, 32) # after the first layer, you don't need to specify # the size of the input anymore: model.add(Dense(32)) ``` __引数__ - __units__:正の整数,出力空間の次元数 - __activation__: 使用する活性化関数名 ([activations](../activations.md)を参照) もしあなたが何も指定しなければ,活性化は適用されない. (すなわち,"線形"活性化: `a(x) = x`). - __use_bias__: 真理値,レイヤーがバイアスベクトルを使用するかどうか. - __kernel_initializer__: `kernel`重み行列の初期化([initializations](../initializers.md)を参照) - __bias_initializer__: バイアスベクトルの初期化([initializations](../initializers.md)を参照) - __kernel_regularizer__: `kernel`重み行列に適用される正則化関数([regularizers](../regularizers.md)を参照) - __bias_regularizer__: バイアスベクトルに適用される正則化関数([regularizers](../regularizers.md)を参照) - __activity_regularizer__: レイヤーの出力に適用される正則化関数("activation")([regularizers](../regularizers.md)を参照) - __kernel_constraint__: `kernel`重み行列に適用される制約関数([constraints](../constraints.md)を参照) - __bias_constraint__: バイアスベクトルに適用される制約関数([constraints](../constraints.md)を参照) __入力のshape__ 以下のshapeを持つn階テンソル: `(batch_size, ..., input_dim)`. 最も一般的なのは以下のshapeを持つ2階テンソル: `(batch_size, input_dim)`. __出力のshape__ 以下のshapeを持つn階テンソル: `(batch_size, ..., units)`. 例えば,以下のshapeを持つ2階テンソル `(batch_size, input_dim)`の入力に対して,アウトプットは以下のshapeを持つ`(batch_size, units)`. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L280)</span> ### Activation ```python keras.layers.Activation(activation) ``` 出力に活性化関数を適用する. __引数__ - __activation__: 使用する活性化関数名 ([activations](../activations.md)を参照), もしくは,TheanoかTensorFlowオペレーション. __入力のshape__ 任意.モデルの最初のレイヤーとしてこのレイヤーを使う時,キーワード引数`input_shape`(整数のタプルはサンプルの軸(axis)を含まない.)を使う. __出力のshape__ 入力と同じshape. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L78)</span> ### Dropout ```python keras.layers.Dropout(rate, noise_shape=None, seed=None) ``` 入力にドロップアウトを適用する. 訓練時の更新においてランダムに入力ユニットを0とする割合であり,過学習の防止に役立ちます. __引数__ - __rate__: 0と1の間の浮動小数点数.入力ユニットをドロップする割合. - __noise_shape__: 入力と乗算されるバイナリドロップアウトマスクのshapeは1階の整数テンソルで表す.例えば入力のshapeを`(batch_size, timesteps, features)`とし,ドロップアウトマスクをすべてのタイムステップで同じにしたい場合,`noise_shape=(batch_size, 1, features)`を使うことができる. - __seed__: random seedとして使うPythonの整数. __参考文献__ - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L465)</span> ### Flatten ```python keras.layers.Flatten() ``` 入力を平坦化する.バッチサイズに影響を与えません. 
__例__ ```python model = Sequential() model.add(Conv2D(64, 3, 3, border_mode='same', input_shape=(3, 32, 32))) # now: model.output_shape == (None, 64, 32, 32) model.add(Flatten()) # now: model.output_shape == (None, 65536) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/engine/topology.py#L1393)</span> ### Input ```python keras.engine.topology.Input() ``` `Input()`はKerasテンソルのインスタンス化に使われます. Kerasテンソルは下位のバックエンド(TheanoやTensorFlow,あるいはCNTK)からなるテンソルオブジェクトです. モデルの入出力がわかっていれば,Kerasのモデルを構築するためにいくつかの属性を拡張できます. 例えばa, b, cがKerasのテンソルの場合,次のようにできます: `model = Model(input=[a, b], output=c)` 追加されたKerasの属性: - `_keras_shape`: Keras側の推論から伝達された整数のshapeのタプル. - `_keras_history`: テンソルに適用される最後のレイヤー.全体のレイヤーグラフはこのレイヤーから再帰的に取り出せます. __引数__ - __shape__: shapeのタプル(整数)で,バッチサイズを含みません. 例えば,`shape=(32,)`は期待される入力が32次元ベクトルのバッチであることを示します. - __batch_shape__: shapeのタプル(整数)で,バッチサイズを含みます. 例えば,`batch_shape=(10, 32)`は期待される入力が10個の32次元ベクトルのバッチであることを示します. `batch_shape=(None, 32)`は任意の数の32次元ベクトルのバッチを示します. - __name__: オプションとなるレイヤーの名前の文字列. モデルの中でユニークな値である必要があります(同じ名前は二回使えません). 指定しなければ自動生成されます. - __dtype__: 入力から期待されるデータの型で,文字列で指定します(`float32`, `float64`, `int32`...). - __sparse__: 生成されるプレースホルダをスパースにするか指定する真理値. - __tensor__: `Input`レイヤーをラップする既存のテンソル. 設定した場合,レイヤーはプレースホルダとなるテンソルを生成しません. __戻り値__ テンソル. __例__ ```python # this is a logistic regression in Keras x = Input(shape=(32,)) y = Dense(16, activation='softmax')(x) model = Model(x, y) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L314)</span> ### Reshape ```python keras.layers.Reshape(target_shape) ``` あるshapeに出力を変形する. __引数__ - __target_shape__: ターゲットのshape.整数のタプル,サンプルの次元を含まない(バッチサイズ). __入力のshape__ 入力のshapeのすべての次元は固定されなければならないが,任意. モデルの最初のレイヤーとしてこのレイヤーを使うとき,キーワード引数`input_shape`(整数のタプルはサンプルの軸を含まない.)を使う. __出力のshape__ `(batch_size,) + target_shape` __例__ ```python # as first layer in a Sequential model model = Sequential() model.add(Reshape((3, 4), input_shape=(12,))) # now: model.output_shape == (None, 3, 4) # note: `None` is the batch dimension # as intermediate layer in a Sequential model model.add(Reshape((6, 2))) # now: model.output_shape == (None, 6, 2) # also supports shape inference using `-1` as dimension model.add(Reshape((-1, 2, 2))) # now: model.output_shape == (None, 3, 2, 2) ``` ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L413)</span> ### Permute ```python keras.layers.Permute(dims) ``` 与えられたパターンにより入力の次元を入れ替える. 例えば,RNNsやconvnetsの連結に対して役立ちます. __例__ ```python model = Sequential() model.add(Permute((2, 1), input_shape=(10, 64))) # now: model.output_shape == (None, 64, 10) # note: `None` is the batch dimension ``` __引数__ - __dims__: 整数のタプル.配列パターン,サンプルの次元を含まない.添字は1で始まる.例えば,`(2, 1)`は入力の1番目と2番目の次元を入れ替える. __入力のshape__ 任意. モデルの最初のレイヤーとしてこのレイヤーを使う時,キーワード引数`input_shape`(整数のタプルはサンプルの軸を含まない)を使う. __出力のshape__ 入力のshapeと同じだが,特定のパターンにより並べ替えられた次元を持つ. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L500)</span> ### RepeatVector ```python keras.layers.RepeatVector(n) ``` n回入力を繰り返す. __例__ ```python model = Sequential() model.add(Dense(32, input_dim=32)) # now: model.output_shape == (None, 32) # note: `None` is the batch dimension model.add(RepeatVector(3)) # now: model.output_shape == (None, 3, 32) ``` __引数__ - __n__: 整数,繰り返し因数. __入力のshape__ `(num_samples, features)`のshapeを持つ2階テンソル. __出力のshape__ `(num_samples, n, features)`のshapeを持つ3階テンソル. 
---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L542)</span> ### Lambda ```python keras.layers.Lambda(function, output_shape=None, mask=None, arguments=None) ``` `Layer`オブジェクトのように,任意の式をラップする. __例__ ```python # add a x -> x^2 layer model.add(Lambda(lambda x: x ** 2)) ``` ```python # add a layer that returns the concatenation # of the positive part of the input and # the opposite of the negative part def antirectifier(x): x -= K.mean(x, axis=1, keepdims=True) x = K.l2_normalize(x, axis=1) pos = K.relu(x) neg = K.relu(-x) return K.concatenate([pos, neg], axis=1) def antirectifier_output_shape(input_shape): shape = list(input_shape) assert len(shape) == 2 # only valid for 2D tensors shape[-1] *= 2 return tuple(shape) model.add(Lambda(antirectifier, output_shape=antirectifier_output_shape)) ``` __引数__ - __function__: 評価される関数.第1引数として入力テンソルを取る - __output_shape__: 関数からの期待される出力のshape.Theanoを使用する場合のみ関連します.タプルもしくは関数. タプルなら,入力に近いほうの次元だけを指定する,データサンプルの次元は入力と同じ: `output_shape = (input_shape[0], ) + output_shape` か入力が `None` でかつサンプル次元も`None`: `output_shape = (None, ) + output_shape` のどちらかが推測される. 関数なら,入力のshapeの関数としてshape全体を指定する: `output_shape = f(input_shape)` - __arguments__: 関数に通されるキーワード引数の追加辞書 __入力のshape__ 任意.モデルの最初のレイヤーとしてこのレイヤーを使う時,キーワード引数`input_shape`(整数のタプル,サンプルの軸(axis)を含まない)を使う. __出力のshape__ `output_shape`引数によって特定される(TensorFlowを使用していると自動推論される). ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L886)</span> ### ActivityRegularization ```python keras.layers.ActivityRegularization(l1=0.0, l2=0.0) ``` コスト関数に基づく入力アクティビティに更新を適用するレイヤー __引数__ - __l1__: L1正則化係数(正の浮動小数点数). - __l2__: L2正則化係数(正の浮動小数点数). __入力のshape__ 任意.モデルの最初のレイヤーとしてこのレイヤーを使う時,キーワード引数`input_shape`(整数のタプル,サンプルの軸(axis)を含まない)を使う. __出力のshape__ 入力と同じshape. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/core.py#L28)</span> ### Masking ```python keras.layers.Masking(mask_value=0.0) ``` タイプステップをスキップするためのマスク値を用いてシーケンスをマスクします. 入力テンソル(テンソルの次元 #1)のそれぞれのタイムステップに対して, もしそのタイムステップの入力テンソルのすべての値が`mask_value`に等しいなら,そのときそのタイムステップはすべての下流レイヤー(それらがマスキングをサポートしている限り)でマスク(スキップ)されるでしょう. 下流レイヤーがマスキングをサポートしていないのにそのような入力マスクを受け取ると例外が発生します. __例__ LSTMレイヤーに与えるための`(samples, timesteps, features)`のshapeを持つのNumpy 配列`x`を考えてみましょう. あなたが#3と#5のタイムステップに関してデータを欠損しているので,これらのタイムステップをマスクしたい場合,あなたは以下のようにできます: - `x[:, 3, :] = 0.` と `x[:, 5, :] = 0.`をセットする. - LSTMレイヤーの前に`mask_value=0.`の`Masking`レイヤーを追加する: ```python model = Sequential() model.add(Masking(mask_value=0., input_shape=(timesteps, features))) model.add(LSTM(32)) ```
keras-docs-ja/sources/layers/core.md/0
{ "file_path": "keras-docs-ja/sources/layers/core.md", "repo_id": "keras-docs-ja", "token_count": 6932 }
85
## ImageDataGenerator ### ImageDataGeneratorクラス ```python keras.preprocessing.image.ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, zca_epsilon=1e-06, rotation_range=0.0, width_shift_range=0.0, height_shift_range=0.0, brightness_range=None, shear_range=0.0, zoom_range=0.0, channel_shift_range=0.0, fill_mode='nearest', cval=0.0, horizontal_flip=False, vertical_flip=False, rescale=None, preprocessing_function=None, data_format=None, validation_split=0.0) ``` リアルタイムにデータ拡張しながら,テンソル画像データのバッチを生成します.また,このジェネレータは,データを無限にループするので,無限にバッチを生成します. __引数__ - __featurewise_center__: 真理値.データセット全体で,入力の平均を0にします. - __samplewise_center__: 真理値.各サンプルの平均を0にします. - __featurewise_std_normalization__: 真理値.入力をデータセットの標準偏差で正規化します. - __samplewise_std_normalization__: 真理値.各入力をその標準偏差で正規化します. - __zca_epsilon__: ZCA白色化のイプシロン.デフォルトは1e-6. - __zca_whitening__: 真理値.ZCA白色化を適用します. - __rotation_range__: 整数.画像をランダムに回転する回転範囲. - __width_shift_range__: 浮動小数点数(横幅に対する割合).ランダムに水平シフトする範囲. - __height_shift_range__: 浮動小数点数(縦幅に対する割合).ランダムに垂直シフトする範囲. - __shear_range__: 浮動小数点数.シアー強度(反時計回りのシアー角度). - __zoom_range__: 浮動小数点数または[lower,upper].ランダムにズームする範囲.浮動小数点数が与えられた場合,`[lower, upper] = [1-zoom_range, 1+zoom_range]`です. - __channel_shift_range__: 浮動小数点数.ランダムにチャンネルをシフトする範囲. - __fill_mode__: {"constant", "nearest", "reflect", "wrap"}のいずれか.デフォルトは 'nearest'です.指定されたモードに応じて,入力画像の境界周りを埋めます. * "constant": `kkkkkkkk|abcd|kkkkkkkk` (`cval=k`) * "nearest": `aaaaaaaa|abcd|dddddddd` * "reflect": `abcddcba|abcd|dcbaabcd` * "wrap": `abcdabcd|abcd|abcdabcd` - __cval__: 浮動小数点数または整数.`fill_mode = "constant"`のときに境界周辺で利用される値. - __horizontal_flip__: 真理値.水平方向に入力をランダムに反転します. - __vertical_flip__: 真理値.垂直方向に入力をランダムに反転します. - __rescale__: 画素値のリスケーリング係数.デフォルトはNone.Noneか0ならば,適用しない.それ以外であれば,(他の変換を行う前に) 与えられた値をデータに積算する. - __preprocessing_function__: 各入力に適用される関数です.この関数は他の変更が行われる前に実行されます.この関数は3次元のNumpyテンソルを引数にとり,同じshapeのテンソルを出力するように定義する必要があります. - __data_format__: {"channels_first", "channels_last"}のどちらか.`"channels_last"`の場合,入力のshapeは`(samples, height, width, channels)`となり,"channels_first"の場合は`(samples, channels, height, width)`となります.デフォルトはKerasの設定ファイル`~/.keras/keras.json`の`image_data_format`の値です.一度も値を変更していなければ,"channels_last"になります. - __validation_split__: 浮動小数点数.検証のために予約しておく画像の割合(厳密には0から1の間)です. 
__例__ `.flow(x, y)`の使用例: ```python (x_train, y_train), (x_test, y_test) = cifar10.load_data() y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) datagen = ImageDataGenerator( featurewise_center=True, featurewise_std_normalization=True, rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True) # compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied) datagen.fit(x_train) # fits the model on batches with real-time data augmentation: model.fit_generator(datagen.flow(x_train, y_train, batch_size=32), steps_per_epoch=len(x_train) / 32, epochs=epochs) # here's a more "manual" example for e in range(epochs): print('Epoch', e) batches = 0 for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32): model.fit(x_batch, y_batch) batches += 1 if batches >= len(x_train) / 32: # we need to break the loop by hand because # the generator loops indefinitely break ``` `.flow_from_directory(directory)`の使用例: ```python train_datagen = ImageDataGenerator( rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True) test_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( 'data/train', target_size=(150, 150), batch_size=32, class_mode='binary') validation_generator = test_datagen.flow_from_directory( 'data/validation', target_size=(150, 150), batch_size=32, class_mode='binary') model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50, validation_data=validation_generator, validation_steps=800) ``` 画像とマスクに対して,同時に変更を加える例. ```python # we create two instances with the same arguments data_gen_args = dict(featurewise_center=True, featurewise_std_normalization=True, rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2) image_datagen = ImageDataGenerator(**data_gen_args) mask_datagen = ImageDataGenerator(**data_gen_args) # Provide the same seed and keyword arguments to the fit and flow methods seed = 1 image_datagen.fit(images, augment=True, seed=seed) mask_datagen.fit(masks, augment=True, seed=seed) image_generator = image_datagen.flow_from_directory( 'data/images', class_mode=None, seed=seed) mask_generator = mask_datagen.flow_from_directory( 'data/masks', class_mode=None, seed=seed) # combine generators into one which yields image and masks train_generator = zip(image_generator, mask_generator) model.fit_generator( train_generator, steps_per_epoch=2000, epochs=50) ``` --- ### ImageDataGeneratorメソッド #### fit ```python fit(x, augment=False, rounds=1, seed=None) ``` 与えられたサンプルデータに基づいて,データに依存する統計量を計算します. featurewise_center,featurewise_std_normalization,または,zca_whiteningが指定されたときに必要です. __引数__ - __x__: サンプルデータ.4次元データである必要があります.グレースケールデータではチャネルを1に,RGBデータではチャネルを3にしてください. - __augment__: 真理値(デフォルト: False).ランダムにサンプルを拡張するかどうか. - __rounds__: 整数(デフォルト: 1).augumentが与えられたときに,利用するデータに対して何回データ拡張を行うか. - __seed__: 整数(デフォルト: None).乱数シード. --- #### flow ```python flow(x, y=None, batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None) ``` numpyデータとラベルの配列を受け取り,拡張/正規化したデータのバッチを生成します. __引数__ - __x__: データ.4次元データである必要があります.グレースケールデータではチャネルを1に,RGBデータではチャネルを3にしてください. - __y__: ラベル. - __batch_size__: 整数(デフォルト: 32). - __shuffle__: 真理値(デフォルト: True). - __seed__: 整数(デフォルト: None). - __save_to_dir__: Noneまたは文字列(デフォルト: None).生成された拡張画像を保存するディレクトリを指定できます(行ったことの可視化に有用です). 
- __save_prefix__: 文字列(デフォルト`''`).画像を保存する際にファイル名に付けるプリフィックス(`set_to_dir`に引数が与えられた時のみ有効). - __save_format__: "png"または"jpeg"(`set_to_dir`に引数が与えられた時のみ有効).デフォルトは"png". __戻り値__ `x`が画像データのNumpy配列で`y`がそれに対応したラベルのNumpy配列である`(x, y)`から生成されるイテレータです. --- #### flow_from_dataframe ```python flow_from_dataframe(dataframe, directory=None, x_col='filename', y_col='class', target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None, interpolation='nearest', drop_duplicates=True) ``` Pandasデータフレームとディレクトリのパスを受け取り,拡張/正規化されたデータのバッチを生成します. チュートリアルは[こちら](http://bit.ly/keras_flow_from_dataframe) __引数__ - __dataframe__: ディレクトリから画像への相対パス(`directory`がNoneの場合絶対パス)を含むPandasデータフレーム. `class_mode`によって単一,または複数の列が必要です. "categorical"(デフォルト)の場合,画像のクラスを指定する`y_col`列が必要です. 列の値は,単一のクラスの場合文字列かリストかタプル,複数クラスの場合はリストかタプルです. "binary"か"sparse"の場合,クラスを文字列で指定する`y_col`が必要です. "other"の場合は任意の値を含む単一、又は複数の`y_col`が必要です. "input"またはNoneの場合は特に必要ありません. - __directory__: 文字列. 画像を読み込むディレクトリのパス. Noneの場合,`x_col`に含まれるデータは絶対パスでなくてはなりません. - __x_col__: 文字列. ファイル名(`directory`がNoneの場合は絶対パス)を含む`dataframe`の列名 - __y_col__: 文字列かリスト. ターゲットとなるデータを含む`dataframe`の列名 - __target_size__: 整数のタプル(height, width).デフォルトは(256, 256).この値に全画像はリサイズされます. - __color_mode__: "grayscale"か"rbg"の一方.デフォルトは"rgb".画像を1か3チャンネルの画像に変換するかどうか. - __classes__: クラスサブディレクトリのリスト.(例えば,`['dogs', 'cats']`).デフォルトはNone.与えられなければ,クラスのリスト自動的に推論されます(そして,ラベルのインデックスと対応づいたクラスの順序はalphanumericになります).クラス名からクラスインデックスへのマッピングを含む辞書は`class_indices`属性を用いて取得できます. - __class_mode__: "categorical"か"binary"か"sparse"か"input"か"other"か"None"のいずれか1つ.デフォルトは"categorical".返すラベルの配列のshapeを決定します:"binary"は1次元の2値ラベル,"categorical"は2次元のone-hotにエンコード化されたラベル,"sparse"は1次元の整数ラベル,"input"は入力画像と同じ画像(主にオートエンコーダで用いられます),"other"は`y_col`のnumpy配列になります.Noneであれば,ラベルを返しません(ジェネレーターは画像のバッチのみ生成するため,`model.predict_generator()`や`model.evaluate_generator()`などを使う際に有用). - __batch_size__: データのバッチのサイズ(デフォルト: 32). - __shuffle__: データをシャッフルするかどうか(デフォルト: True). - __seed__: シャッフルや変換のためのオプションの乱数シード. - __save_to_dir__: Noneまたは文字列(デフォルト: None).生成された拡張画像を保存するディレクトリを指定できます(行ったことの可視化に有用です). - __save_prefix__: 文字列.画像を保存する際にファイル名に付けるプリフィックス(`set_to_dir`に引数が与えられた時のみ有効). - __save_format__: "png"または"jpeg"(`set_to_dir`に引数が与えられた時のみ有効).デフォルトは"png". - __follow_links__: サブディレクトリ内のシンボリックリンクに従うかどうか.デフォルトはFalse. - __subset__: データのサブセット(`"training"`か`"validation"`). `ImageDataGenerator`で`validation_split`が与えられた場合に指定します. - __interpolation__: `target_size`が読み込まれた画像のサイズと異なっていた場合の補間方法."nearest"か"bilinear"か"bicubic"が対応.バージョン1.1.3以上のPILがインストールされていれば"lanczos"も対応します.バージョン3.4.0以上のPILがインストールされていれば"box"と"hamming"も対応します.デフォルトは"nearst". - __drop_duplicates__: 真理値.ファイル名が重複した行を削除するかどうか. __戻り値__ `x`が画像データのNumpy配列で`y`がそれに対応したラベルのNumpy配列である`(x, y)`から生成されるDirectoryIteratorです. --- #### flow_from_directory ```python flow_from_directory(directory, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest') ``` ディレクトリへのパスを受け取り,拡張/正規化したデータのバッチを生成します. __引数__ - __directory__: ディレクトリへのパス.クラスごとに1つのサブディレクトリを含み,サブディレクトリはPNGかJPGかBMPかPPMかTIF形式の画像を含まなければいけません.詳細は[このスクリプト](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)を見てください. - __target_size__: 整数のタプル`(height, width)`.デフォルトは`(256, 256)`.この値に全画像はリサイズされます. - __color_mode__: "grayscale"か"rbg"の一方.デフォルトは"rgb".画像を1か3チャンネルの画像に変換するかどうか. 
- __classes__: クラスサブディレクトリのリスト.(例えば,`['dogs', 'cats']`).デフォルトはNone.与えられなければ,クラスのリスト自動的に推論されます(そして,ラベルのインデックスと対応づいたクラスの順序はalphanumericになります).クラス名からクラスインデックスへのマッピングを含む辞書は`class_indices`属性を用いて取得できます. - __class_mode__: "categorical"か"binary"か"sparse"か"input"か"None"のいずれか1つ.デフォルトは"categorical".返すラベルの配列のshapeを決定します:"categorical"は2次元のone-hotにエンコード化されたラベル,"binary"は1次元の2値ラベル,"sparse"は1次元の整数ラベル,"input"は入力画像と同じ画像になります(主にオートエンコーダで用いられます).Noneであれば,ラベルを返しません(ジェネレーターは画像のバッチのみ生成するため,`model.predict_generator()`や`model.evaluate_generator()`などを使う際に有用).class_modeがNoneの場合,正常に動作させるためには`directory`のサブディレクトリにデータが存在する必要があることに注意してください. - __batch_size__: データのバッチのサイズ(デフォルト: 32). - __shuffle__: データをシャッフルするかどうか(デフォルト: True). - __seed__: シャッフルや変換のためのオプションの乱数シード. - __save_to_dir__: Noneまたは文字列(デフォルト: None).生成された拡張画像を保存するディレクトリを指定できます(行ったことの可視化に有用です). - __save_prefix__: 文字列.画像を保存する際にファイル名に付けるプリフィックス(`set_to_dir`に引数が与えられた時のみ有効). - __save_format__: "png"または"jpeg"(`set_to_dir`に引数が与えられた時のみ有効).デフォルトは"png". - __follow_links__: サブディレクトリ内のシンボリックリンクに従うかどうか.デフォルトはFalse. __戻り値__ `x`が画像データのNumpy配列で`y`がそれに対応したラベルのNumpy配列である`(x, y)`から生成されるDirectoryIteratorです. --- #### random_transform ```python random_transform(x, seed=None) ``` 単一の画像のテンソルをランダムに拡張します. __引数__ - __x__: 単一の画像である3次元テンソル. - __seed__: ランダムシード. __戻り値__ 入力画像と同じshapeの入力画像をランダムに変換したもの. --- #### standardize ```python standardize(x) ``` 入力のバッチに対して正規化を適用します. __引数__ - __x__: 正規化対象の入力のバッチ. __戻り値__ 正規化された入力.
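The two per-sample methods documented above (`random_transform` and `standardize`) can also be chained manually. A minimal sketch, assuming `x_train` is an already-loaded 4D image array (an assumption for illustration only):

```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(featurewise_center=True,
                             featurewise_std_normalization=True,
                             rotation_range=20)
datagen.fit(x_train)  # x_train: (num_samples, height, width, channels), assumed loaded elsewhere

img = x_train[0]                                      # a single 3D image tensor
augmented = datagen.random_transform(img, seed=42)    # random augmentation only
normalized = datagen.standardize(np.copy(augmented))  # normalization; copy because standardize may modify its input in place
```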
keras-docs-ja/sources/preprocessing/image.md/0
{ "file_path": "keras-docs-ja/sources/preprocessing/image.md", "repo_id": "keras-docs-ja", "token_count": 8473 }
86
## 초기화 함수 사용법<sub>Usage of initializers</sub> 초기화 함수<sub>initializer</sub>는 케라스 층<sub>layer</sub>의 파라미터 초기값을 어떤 방식으로 생성할 것인지를 결정합니다. 모든 층에서 다 똑같은 것은 아니지만, 대부분의 경우 `kernel_initializer`와 `bias_initializer` 인자<sub>argument</sub>를 사용해서 가중치<sub>weight</sub>와 편향<sub>bias</sub>의 초기화 함수를 지정합니다. ```python model.add(Dense(64, kernel_initializer='random_uniform', bias_initializer='zeros')) ``` ## 케라스가 제공하는 초기화 함수<sub>Available initializers</sub> 아래의 함수들은 `keras.initializers` 모듈에 내장되어 있습니다. <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L14)</span> ### Initializer ```python keras.initializers.Initializer() ``` 초기화 함수의 기본 클래스로 모든 초기화 함수는 이 클래스를 상속받습니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L33)</span> ### Zeros ```python keras.initializers.Zeros() ``` 지정한 파라미터 값을 모두 `0`으로 생성합니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L41)</span> ### Ones ```python keras.initializers.Ones() ``` 지정한 파라미터 값을 모두 `1`로 생성합니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L49)</span> ### Constant ```python keras.initializers.Constant(value=0) ``` 지정한 파라미터를 특정한 상수(`value`)값으로 생성합니다. __인자__ - __value__: `float`. 생성할 파라미터의 값입니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L66)</span> ### RandomNormal ```python keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None) ``` 지정한 평균 및 표준편차를 따르는 정규분포<sub>normal distribution</sub>로부터 대상 파라미터 값을 무작위로 생성합니다. __인자__ - __mean__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 파라미터 생성에 사용할 정규분포의 평균을 정합니다. 기본값은 `0`입니다. - __stddev__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 파라미터 생성에 사용할 정규분포의 표준편차를 정합니다. 기본값은 `0.05`입니다. - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L94)</span> ### RandomUniform ```python keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None) ``` 지정한 범위의 균등분포<sub>uniform distribution</sub>로부터 대상 파라미터 값을 무작위로 생성합니다. __인자__ - __minval__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 무작위 생성에 사용할 수치 범위의 최솟값입니다. 기본값은 `-0.05`입니다. - __maxval__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 무작위 생성에 사용할 수치 범위의 최댓값입니다. 기본값은 `0.05`입니다. - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L122)</span> ### TruncatedNormal ```python keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None) ``` 절단 정규분포<sub>truncated normal distribution</sub>로부터 파라미터 값을 무작위로 생성합니다. 정규분포로부터 무작위 값을 선택한다는 점에서 `RandomNormal`과 비슷하지만, 선택 범위를 평균으로부터 `2` 표준편차 안쪽으로 제한하기 때문에 전체 분포는 `+-2` 표준편차 바깥이 잘려나간 형태가 됩니다. 신경망의 가중치 또는 필터 초기값 생성에 사용할 방식으로 권장합니다. __인자__ - __mean__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 파라미터 생성에 사용할 정규분포의 평균을 정합니다. 기본값은 `0`입니다. - __stddev__: `int` 혹은 `float`형식의 스칼라 값 또는 같은 형식의 스칼라 텐서. 파라미터 생성에 사용할 정규분포의 표준편차를 정합니다. 기본값은 `0.05`입니다. - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L155)</span> ### VarianceScaling ```python keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None) ``` 가중치 텐서의 규모에 따라 초기값 생성에 사용할 분포를 조정합니다. 
먼저 정규분포(`distribution='normal'`)를 선택할 경우, `0`을 평균으로 `sqrt(scale / n)`의 표준편차를 가진 절단 정규분포의 범위 안에서 값을 선택합니다. 이때 `n`값은 `mode`인자에 따라 다음과 같이 달라집니다. - `'fan_in'`: 가중치 텐서의 입력 차원 크기. - `'fan_out'`: 가중치 텐서의 출력 차원 크기. - `'fan_avg'`: 입력 차원, 출력 차원 크기의 평균값. 또는 균등분포(`distribution="uniform"`)를 선택할 경우 `[-limit, limit]`의 범위를 가진 균등분포로부터 값이 선택됩니다. 여기서 `limit`은 `sqrt(3 * scale / n)`으로 정의됩니다. 위와 마찬가지로 `n`값은 `mode`인자에 따라 정해집니다. __인자__ - __scale__: 스케일링 값. 양의 `float`를 입력해야 하며 기본값은 `1.0`입니다. - __mode__: `'fan_in'`, `'fan_out'`, `'fan_avg'`. 기본값은 `'fan_in'`입니다. - __distribution__: `'normal'`, `'uniform'`. 기본값은 `'normal'`입니다. - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __오류__ - __ValueError__: `scale`, `mode` 혹은 `distribution` 인자에 잘못된 값을 전달한 경우 발생합니다. ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L229)</span> ### Orthogonal ```python keras.initializers.Orthogonal(gain=1.0, seed=None) ``` 무작위 직교행렬<sub>orthogonal matrix</sub>을 파라미터의 초기값으로 생성합니다. 먼저 평균 `0` 표준편차 `1`의 정규분포로부터 무작위 텐서를 생성한 뒤, 해당 텐서를 특이값분해<sub>singular value decomposition</sub>하여 얻은 직교행렬을 원하는 파라미터의 형태에 맞게 변형하여 사용합니다. 직교행렬의 가중치 초기값 활용에 대해서는 다음의 [논문](http://arxiv.org/abs/1312.6120)을 참고하십시오. __인자__ - __gain__: 생성될 직교행렬에 곱할 배수입니다. 기본값은 `1.`입니다. - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __참조__ - [Exact solutions to the nonlinear dynamics of learning in deep linear neural networks](http://arxiv.org/abs/1312.6120) ---- <span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L267)</span> ### Identity ```python keras.initializers.Identity(gain=1.0) ``` 지정한 파라미터를 단위행렬로 생성합니다. 단위행렬의 특성상 2D 행렬에만 사용할 수 있습니다. 만약 사용하고자 하는 행렬이 정사각형이 아니라면 `0`을 채운 행 또는 열을 추가하여 형태를 맞춥니다. __인자__ - __gain__: 생성할 단위행렬에 곱할 배수입니다. 기본값은 `1.`입니다. ---- ### glorot_normal ```python keras.initializers.glorot_normal(seed=None) ``` Glorot 정규분포 방식으로 파라미터의 초기값을 생성합니다. Xavier 정규분포 방식이라고도 불리며, 가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. 가중치 텐서의 입력 차원 크기를 `fan_in`, 출력 차원 크기를 `fan_out`이라고 할 때, 평균 `0`과 `sqrt(2 / (fan_in + fan_out))`의 표준편차를 가진 정규분포의 `+-2`표준편차 범위 안에서 무작위로 값을 추출합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ Glorot 정규분포 방식을 따르는 초기화 함수. __참조__ - [Understanding the difficulty of training deep feedforward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf) ---- ### glorot_uniform ```python keras.initializers.glorot_uniform(seed=None) ``` Glorot 균등분포 방식으로 파라미터의 초기값을 생성합니다. Xavier 균등분포 방식이라고도 불리며, 가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. `[-limit, limit]`의 범위를 가진 균등분포로부터 값이 선택됩니다. 가중치 텐서의 입력 차원 크기를 `fan_in`, 출력 차원 크기를 `fan_out`이라고 할 때, `limit`은 `sqrt(6 / (fan_in + fan_out))`으로 구합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ Glorot 균등분포 방식을 따르는 초기화 함수. __참조__ - [Understanding the difficulty of training deep feedforward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf) ---- ### he_normal ```python keras.initializers.he_normal(seed=None) ``` He 정규분포 방식으로 파라미터의 초기값을 생성합니다. 가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. 가중치 텐서의 입력 차원 크기를 `fan_in`이라고 할 때, 평균 `0`과 `sqrt(2 / fan_in)`의 표준편차를 가진 정규분포의 `+-2`표준편차 범위 안에서 무작위로 값을 추출합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ He 정규분포 방식을 따르는 초기화 함수. __참조__ - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/abs/1502.01852) ---- ### he_uniform ```python keras.initializers.he_uniform(seed=None) ``` He 정규분포 방식으로 파라미터의 초기값을 생성합니다. 
가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. `[-limit, limit]`의 범위를 가진 균등분포로부터 값이 선택됩니다. 가중치 텐서의 입력 차원 크기를 `fan_in`이라고 할 때, `limit`은 `sqrt(6 / fan_in)`으로 구합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ He 균등분포 방식을 따르는 초기화 함수. __참조__ - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/abs/1502.01852) ---- ### lecun_normal ```python keras.initializers.lecun_normal(seed=None) ``` LeCun 정규분포 방식으로 파라미터의 초기값을 생성합니다. 가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. 가중치 텐서의 입력 차원 크기를 `fan_in`이라고 할 때, 평균 `0`과 `sqrt(1 / fan_in)`의 표준편차를 가진 정규분포의 `+-2`표준편차 범위 안에서 무작위로 값을 추출합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ LeCun 정규분포 방식을 따르는 초기화 함수. __참조__ - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) - [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) ---- ### lecun_uniform ```python keras.initializers.lecun_uniform(seed=None) ``` LeCun 정규분포 방식으로 파라미터의 초기값을 생성합니다. 가중치 텐서의 크기에 따라 값을 조절하는 방식의 하나입니다. `[-limit, limit]`의 범위를 가진 균등분포로부터 값이 선택됩니다. 가중치 텐서의 입력 차원 크기를 `fan_in`이라고 할 때, `limit`은 `sqrt(3 / fan_in)`으로 구합니다. __인자__ - __seed__: `int`. 무작위 생성에 사용할 시드를 정합니다. 기본값은 `None`입니다. __반환값__ LeCun 균등분포 방식을 따르는 초기화 함수. __참조__ - [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) ---- 케라스 모델의 각 층에 초기화 함수를 적용하는 방법은 크게 두 가지입니다. 먼저 케라스가 제공하는 함수인 경우 `(kernel|bias)_initializer` 인자에 초기화 함수의 이름과 동일한 문자열<sub>string</sub>을 지정하는 방법이 있습니다. 그리고 문자열 대신 호출 가능한 함수<sub>callable</sub>의 형태로 직접 지정하는 방법이 있습니다. ```python from keras import initializers model.add(Dense(64, kernel_initializer=initializers.random_normal(stddev=0.01))) # 함수 호출로 지정합니다. # also works; will use the default parameters. model.add(Dense(64, kernel_initializer='random_normal')) # 문자열로 지정합니다. ``` ---- ## 사용자 정의 초기화 함수<sub>Using custom initializers</sub> 사용자 정의 함수를 만들어 사용하는 경우, 인자로 `shape`(초기값을 생성할 파라미터의 형태)와 `dtype`(생성할 값의 자료형)을 전달받아야 합니다. ```python from keras import backend as K def my_init(shape, dtype=None): # 형태와 자료형 인자를 명시합니다. return K.random_normal(shape, dtype=dtype) model.add(Dense(64, kernel_initializer=my_init)) ```
keras-docs-ko/sources/initializers.md/0
{ "file_path": "keras-docs-ko/sources/initializers.md", "repo_id": "keras-docs-ko", "token_count": 9790 }
87
# Keras 모델에 관하여 Keras가 제공하는 모델에는 [Sequential 모델](./models/sequential.md)과 [함수형 API와 함께 사용되는 Model 클래스](/models/model) 두 가지 종류가 있습니다. 이 모델들은 아래의 메소드들과 속성들을 공통적으로 가지고 있습니다. - `model.layers`: 모델을 구성하는 층들이 저장된 1차원 리스트 입니다. - `model.inputs`: 모델의 입력 텐서들이 저장된 1차원 리스트 입니다. - `model.outputs`: 모델의 출력 텐서들이 저장된 1차원 리스트 입니다. - `model.summary()`: 모델의 구조를 요약해 출력해 줍니다. [`utils.print_summary(model)`](/utils/#print_summary)로도 동일한 출력을 얻을 수 있습니다. - `model.get_config()`: 모델의 설정이 저장된 딕셔너리를 반환합니다. 모든 모델은 다음과 같이 설정 내용으로부터 다시 인스턴스화 될 수 있습니다. ```python config = model.get_config() model = Model.from_config(config) # 또는, Sequential 모델의 경우: model = Sequential.from_config(config) ``` - `model.get_weights()`: 모델의 가중치 텐서들이 NumPy 배열로 저장된 1차원 리스트 입니다. - `model.set_weights(weights)`: 모델의 가중치 값을 NumPy 배열의 리스트로부터 설정합니다. 리스트에 있는 배열들의 크기는 `get_wieghts()`로부터 반환된 것과 동일해야 합니다. - `model.to_json()`: 모델의 구조를 JSON 문자열로 반환합니다. 이때, 모델의 가중치는 제외되고 오로지 구조만이 포함됩니다. 이 JSON 문자열로부터 다음과 같이 동일한 모델을 (다시 초기화된 가중치와 함께)다시 인스턴스화 할 수 있습니다. ```python from keras.models import model_from_json json_string = model.to_json() model = model_from_json(json_string) ``` - `model.to_yaml()`: 모델의 구조를 YAML 문자열로 반환합니다. 이때, 모델의 가중치는 제외되고 오로지 구조만이 포함됩니다. 이 YAML 문자열로부터 다음과 같이 동일한 모델을 (다시 초기화된 가중치와 함께)다시 인스턴스화 할 수 있습니다. ```python from keras.models import model_from_yaml yaml_string = model.to_yaml() model = model_from_yaml(yaml_string) ``` - `model.save_weights(filepath)`: 모델의 가중치를 HDF5 파일로 저장 합니다. - `model.load_weights(filepath, by_name=False)`: 모델의 가중치를 (`save_weights`에 의해 생성된)HDF5 파일로부터 불러옵니다. 기본 설정인 `by_name=False`는 모델과 가중치 파일의 네트워크 구조가 동일하다 가정합니다. 만약 구조가 다르다면, `by_name=True`를 사용해 동일한 이름을 가진 층들에 대해서만 가중치를 불러올 수도 있습니다. **Note**: `h5py`의 설치는 FAQ의 [Keras 모델을 저장하기 위한 HDF5 또는 h5py는 어떻게 설치할 수 있나요?](/getting-started/faq/#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras)를 참조하시길 바랍니다. ## Model 하위 클래스 만들기 위에 언급된 두 가지 방법 외에도, Keras 2.2.0 버전부터 `Model` 클래스의 하위 클래스를 만들어 모델을 구현할 수 있는 API가 추가되었습니다. 해당 API를 사용해 `Model`의 하위 클래스를 만들고, `call` 메소드에 사용자의 요구에 맞는 포워드 패스 과정을 구현하여 모델을 만들 수도 있습니다. 다음은 `Model`의 하위 클래스로서 만들어진 간단한 다층 퍼셉트론의 예제입니다. ```python import keras class SimpleMLP(keras.Model): def __init__(self, use_bn=False, use_dp=False, num_classes=10): super(SimpleMLP, self).__init__(name='mlp') self.use_bn = use_bn self.use_dp = use_dp self.num_classes = num_classes self.dense1 = keras.layers.Dense(32, activation='relu') self.dense2 = keras.layers.Dense(num_classes, activation='softmax') if self.use_dp: self.dp = keras.layers.Dropout(0.5) if self.use_bn: self.bn = keras.layers.BatchNormalization(axis=-1) def call(self, inputs): x = self.dense1(inputs) if self.use_dp: x = self.dp(x) if self.use_bn: x = self.bn(x) return self.dense2(x) model = SimpleMLP() model.compile(...) model.fit(...) ``` 모델에 사용되는 각 층은 `__init__(self, ...)`에 정의되어 있고, 포워드 패스는 `call(self, inputs)`에 구현되어 있습니다. 임의의 층을 지정해 주는 것과 같이 사용자는 `call` 내부에서 `self.add_loss(loss_tensor)`를 호출함으로써 임의의 손실 함수를 지정해줄 수도 있습니다. 하지만, 위와 같이 모델을 구현할 때 모델의 구조는 정적 그래프가 아닌 Python 코드로서 정의되기 때문에, 모델의 구조를 확인하거나 저장할 수 없습니다. 결과적으로 해당 API를 사용하여 모델을 구현한 경우 다음의 메소드들과 속성들을 **사용할 수 없습니다**. - `model.inputs` 그리고 `model.outputs`. - `model.to_yaml()` 그리고 `model.to_json()` - `model.get_config()` 그리고 `model.save()`. **중요한 점:** 작업에 적합한 API를 사용해야 합니다. `Model` 클래스의 하위 클래스를 만드는 API는 복잡한 모델을 구현하는데 있어서 높은 유연성을 제공해 줄 수는 있지만, 코드가 좀 더 길고 복잡해지며, 사용자 오류의 소지가 높아진다는 단점이 있습니다. 가능하면 사용자 친화적인 함수형 API를 사용하는 것이 좋습니다.
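Because `model.save()` and `get_config()` are unavailable for subclassed models such as the `SimpleMLP` above, a common workaround is to persist only the weights and keep the architecture in code. A rough sketch (the data arrays `x_train`/`y_train` are assumed, not shown here):

```python
model = SimpleMLP()
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(x_train, y_train, epochs=5)
model.save_weights('simple_mlp.h5')      # weights only; the architecture lives in Python code

restored = SimpleMLP()
restored.predict(x_train[:1])            # one forward pass builds the layer variables
restored.load_weights('simple_mlp.h5')   # now the saved weights can be restored
```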
keras-docs-ko/sources/models/about-keras-models.md/0
{ "file_path": "keras-docs-ko/sources/models/about-keras-models.md", "repo_id": "keras-docs-ko", "token_count": 4027 }
88
# 实现一个用来执行加法的序列到序列学习模型 输入: "535+61" 输出: "596" 使用重复的标记字符(空格)处理填充。 输入可以选择性地反转,它被认为可以提高许多任务的性能,例如: [Learning to Execute](http://arxiv.org/abs/1410.4615) 以及 [Sequence to Sequence Learning with Neural Networks](http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf)。 从理论上讲,它引入了源和目标之间的短期依赖关系。 两个反转的数字 + 一个 LSTM 层(128个隐藏单元),在 55 个 epochs 后,5k 的训练样本取得了 99% 的训练/测试准确率。 三个反转的数字 + 一个 LSTM 层(128个隐藏单元),在 100 个 epochs 后,50k 的训练样本取得了 99% 的训练/测试准确率。 四个反转的数字 + 一个 LSTM 层(128个隐藏单元),在 20 个 epochs 后,400k 的训练样本取得了 99% 的训练/测试准确率。 五个反转的数字 + 一个 LSTM 层(128个隐藏单元),在 30 个 epochs 后,550k 的训练样本取得了 99% 的训练/测试准确率。 ```python from __future__ import print_function from keras.models import Sequential from keras import layers import numpy as np from six.moves import range class CharacterTable(object): """给定一组字符: + 将它们编码为 one-hot 整数表示 + 将 one-hot 或整数表示解码为字符输出 + 将一个概率向量解码为字符输出 """ def __init__(self, chars): """初始化字符表。 # 参数: chars: 可以出现在输入中的字符。 """ self.chars = sorted(set(chars)) self.char_indices = dict((c, i) for i, c in enumerate(self.chars)) self.indices_char = dict((i, c) for i, c in enumerate(self.chars)) def encode(self, C, num_rows): """给定字符串 C 的 one-hot 编码。 # 参数 C: 需要编码的字符串。 num_rows: 返回的 one-hot 编码的行数。 这用来保证每个数据的行数相同。 """ x = np.zeros((num_rows, len(self.chars))) for i, c in enumerate(C): x[i, self.char_indices[c]] = 1 return x def decode(self, x, calc_argmax=True): """将给定的向量或 2D array 解码为它们的字符输出。 # 参数 x: 一个向量或 2D 概率数组或 one-hot 表示, 或 字符索引的向量(如果 `calc_argmax=False`)。 calc_argmax: 是否根据最大概率来找到字符,默认为 `True`。 """ if calc_argmax: x = x.argmax(axis=-1) return ''.join(self.indices_char[x] for x in x) class colors: ok = '\033[92m' fail = '\033[91m' close = '\033[0m' # 模型和数据的参数 TRAINING_SIZE = 50000 DIGITS = 3 REVERSE = True # 输入的最大长度是 'int+int' (例如, '345+678'). int 的最大长度为 DIGITS。 MAXLEN = DIGITS + 1 + DIGITS # 所有的数字,加上符号,以及用于填充的空格。 chars = '0123456789+ ' ctable = CharacterTable(chars) questions = [] expected = [] seen = set() print('Generating data...') while len(questions) < TRAINING_SIZE: f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in range(np.random.randint(1, DIGITS + 1)))) a, b = f(), f() # 跳过任何已有的加法问题 # 同事跳过任何 x+Y == Y+x 的情况(即排序)。 key = tuple(sorted((a, b))) if key in seen: continue seen.add(key) # 利用空格填充,是的长度始终为 MAXLEN。 q = '{}+{}'.format(a, b) query = q + ' ' * (MAXLEN - len(q)) ans = str(a + b) # 答案可能的最长长度为 DIGITS + 1。 ans += ' ' * (DIGITS + 1 - len(ans)) if REVERSE: # 反转查询,例如,'12+345 ' 变成 ' 543+21'. 
# (注意用于填充的空格) query = query[::-1] questions.append(query) expected.append(ans) print('Total addition questions:', len(questions)) print('Vectorization...') x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool) y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool) for i, sentence in enumerate(questions): x[i] = ctable.encode(sentence, MAXLEN) for i, sentence in enumerate(expected): y[i] = ctable.encode(sentence, DIGITS + 1) # 混洗 (x, y),因为 x 的后半段几乎都是比较大的数字。 indices = np.arange(len(y)) np.random.shuffle(indices) x = x[indices] y = y[indices] # 显式地分离出 10% 的训练数据作为验证集。 split_at = len(x) - len(x) // 10 (x_train, x_val) = x[:split_at], x[split_at:] (y_train, y_val) = y[:split_at], y[split_at:] print('Training Data:') print(x_train.shape) print(y_train.shape) print('Validation Data:') print(x_val.shape) print(y_val.shape) # 可以尝试更改为 GRU, 或 SimpleRNN。 RNN = layers.LSTM HIDDEN_SIZE = 128 BATCH_SIZE = 128 LAYERS = 1 print('Build model...') model = Sequential() # 利用 RNN 将输入序列「编码」为一个 HIDDEN_SIZE 长度的输出向量。 # 注意:在输入序列具有可变长度的情况下, # 使用 input_shape=(None, num_feature). model.add(RNN(HIDDEN_SIZE, input_shape=(MAXLEN, len(chars)))) # 作为解码器 RNN 的输入,为每个时间步重复地提供 RNN 的最后输出。 # 重复 'DIGITS + 1' 次,因为它是最大输出长度。 # 例如,当 DIGITS=3, 最大输出为 999+999=1998。 model.add(layers.RepeatVector(DIGITS + 1)) # 解码器 RNN 可以是多个堆叠的层,或一个单独的层。 for _ in range(LAYERS): # 通过设置 return_sequences 为 True, 将不仅返回最后一个输出,而是返回目前的所有输出,形式为(num_samples, timesteps, output_dim)。 # 这是必须的,因为后面的 TimeDistributed 需要第一个维度是时间步。 model.add(RNN(HIDDEN_SIZE, return_sequences=True)) # 将全连接层应用于输入的每个时间片。 # 对于输出序列的每一步,决定应选哪个字符。 model.add(layers.TimeDistributed(layers.Dense(len(chars), activation='softmax'))) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() # 训练模型,并在每一代显示验证数据的预测。 for iteration in range(1, 200): print() print('-' * 50) print('Iteration', iteration) model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=1, validation_data=(x_val, y_val)) # 从随机验证集中选择 10 个样本,以便我们可以看到错误。 for i in range(10): ind = np.random.randint(0, len(x_val)) rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])] preds = model.predict_classes(rowx, verbose=0) q = ctable.decode(rowx[0]) correct = ctable.decode(rowy[0]) guess = ctable.decode(preds[0], calc_argmax=False) print('Q', q[::-1] if REVERSE else q, end=' ') print('T', correct, end=' ') if correct == guess: print(colors.ok + '☑' + colors.close, end=' ') else: print(colors.fail + '☒' + colors.close, end=' ') print(guess) ```
keras-docs-zh/sources/examples/addition_rnn.md/0
{ "file_path": "keras-docs-zh/sources/examples/addition_rnn.md", "repo_id": "keras-docs-zh", "token_count": 4177 }
89
# Train an LSTM model on the IMDB sentiment classification task.

The dataset is actually too small for an LSTM to offer any advantage over simpler, much faster methods such as TF-IDF + LogReg.

**Notes**

- RNNs are tricky. Choice of batch size is important; choice of loss and optimizer is critical, etc. Some configurations won't converge.
- The LSTM loss-decrease pattern during training can be quite different from what you see with CNNs/MLPs/etc.

```python
from __future__ import print_function

from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb

max_features = 20000
# cut texts after this number of words (among the max_features most common words)
maxlen = 80
batch_size = 32

print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=15,
          validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test,
                            batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
```
keras-docs-zh/sources/examples/imdb_lstm.md/0
{ "file_path": "keras-docs-zh/sources/examples/imdb_lstm.md", "repo_id": "keras-docs-zh", "token_count": 879 }
90
# Keras 神经涂鸦 # 脚本用法 ## 参数 ``` --nlabels: 遮罩图像中的区域(颜色)数 --style-image: 从中学习风格的图像 --style-mask: 样式图像的语义标签 --target-mask: 目标图像的语义标签(你的 Doodle) --content-image: optional image to learn content from --target-image-prefix: path prefix for generated target images ``` ## 示例 1: 使用样式图像、样式蒙版和目标蒙版涂鸦。 ``` python neural_doodle.py --nlabels 4 --style-image Monet/style.png --style-mask Monet/style_mask.png --target-mask Monet/target_mask.png --target-image-prefix generated/monet ``` ## 示例二: 使用样式图像、样式蒙版、目标蒙版和可选的内容图像涂鸦。 ``` python neural_doodle.py --nlabels 4 --style-image Renoir/style.png --style-mask Renoir/style_mask.png --target-mask Renoir/target_mask.png --content-image Renoir/creek.jpg --target-image-prefix generated/renoir ``` # 参考文献 - [Dmitry Ulyanov's blog on fast-neural-doodle](http://dmitryulyanov.github.io/feed-forward-neural-doodle/) - [Torch code for fast-neural-doodle](https://github.com/DmitryUlyanov/fast-neural-doodle) - [Torch code for online-neural-doodle](https://github.com/DmitryUlyanov/online-neural-doodle) - [Paper Texture Networks: Feed-forward Synthesis of Textures and Stylized Images](http://arxiv.org/abs/1603.03417) - [Discussion on parameter tuning](https://github.com/keras-team/keras/issues/3705) # 资源 示例图像可以从此下载 https://github.com/DmitryUlyanov/fast-neural-doodle/tree/master/data ```python from __future__ import print_function import time import argparse import numpy as np from scipy.optimize import fmin_l_bfgs_b from keras import backend as K from keras.layers import Input, AveragePooling2D from keras.models import Model from keras.preprocessing.image import load_img, save_img, img_to_array from keras.applications import vgg19 # 命令行参数 parser = argparse.ArgumentParser(description='Keras neural doodle example') parser.add_argument('--nlabels', type=int, help='number of semantic labels' ' (regions in differnet colors)' ' in style_mask/target_mask') parser.add_argument('--style-image', type=str, help='path to image to learn style from') parser.add_argument('--style-mask', type=str, help='path to semantic mask of style image') parser.add_argument('--target-mask', type=str, help='path to semantic mask of target image') parser.add_argument('--content-image', type=str, default=None, help='path to optional content image') parser.add_argument('--target-image-prefix', type=str, help='path prefix for generated results') args = parser.parse_args() style_img_path = args.style_image style_mask_path = args.style_mask target_mask_path = args.target_mask content_img_path = args.content_image target_img_prefix = args.target_image_prefix use_content_img = content_img_path is not None num_labels = args.nlabels num_colors = 3 # RGB # 根据 target_mask 确定图像大小 ref_img = img_to_array(load_img(target_mask_path)) img_nrows, img_ncols = ref_img.shape[:2] num_iterations = 50 total_variation_weight = 50. style_weight = 1. 
content_weight = 0.1 if use_content_img else 0 content_feature_layers = ['block5_conv2'] # 为了获得更好的生成质量,请为样式特征使用更多的转换层 style_feature_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1'] # 读取/处理图像的辅助功能 def preprocess_image(image_path): img = load_img(image_path, target_size=(img_nrows, img_ncols)) img = img_to_array(img) img = np.expand_dims(img, axis=0) img = vgg19.preprocess_input(img) return img def deprocess_image(x): if K.image_data_format() == 'channels_first': x = x.reshape((3, img_nrows, img_ncols)) x = x.transpose((1, 2, 0)) else: x = x.reshape((img_nrows, img_ncols, 3)) # 通过平均像素去除零中心 x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 # 'BGR'->'RGB' x = x[:, :, ::-1] x = np.clip(x, 0, 255).astype('uint8') return x def kmeans(xs, k): assert xs.ndim == 2 try: from sklearn.cluster import k_means _, labels, _ = k_means(xs.astype('float64'), k) except ImportError: from scipy.cluster.vq import kmeans2 _, labels = kmeans2(xs, k, missing='raise') return labels def load_mask_labels(): '''加载目标和样式蒙版。 具有 m 个标签/颜色的蒙版图像 (nr x nc) 将作为 4D 布尔张量加载: (1, m, nr, nc) 对于 'channels_first' 或 (1, nr, nc, m) 对于 'channels_last' ''' target_mask_img = load_img(target_mask_path, target_size=(img_nrows, img_ncols)) target_mask_img = img_to_array(target_mask_img) style_mask_img = load_img(style_mask_path, target_size=(img_nrows, img_ncols)) style_mask_img = img_to_array(style_mask_img) if K.image_data_format() == 'channels_first': mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T, target_mask_img.reshape((3, -1)).T]) else: mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)), target_mask_img.reshape((-1, 3))]) labels = kmeans(mask_vecs, num_labels) style_mask_label = labels[:img_nrows * img_ncols].reshape((img_nrows, img_ncols)) target_mask_label = labels[img_nrows * img_ncols:].reshape((img_nrows, img_ncols)) stack_axis = 0 if K.image_data_format() == 'channels_first' else -1 style_mask = np.stack([style_mask_label == r for r in range(num_labels)], axis=stack_axis) target_mask = np.stack([target_mask_label == r for r in range(num_labels)], axis=stack_axis) return (np.expand_dims(style_mask, axis=0), np.expand_dims(target_mask, axis=0)) # 为图像创建张量变量 if K.image_data_format() == 'channels_first': shape = (1, num_colors, img_nrows, img_ncols) else: shape = (1, img_nrows, img_ncols, num_colors) style_image = K.variable(preprocess_image(style_img_path)) target_image = K.placeholder(shape=shape) if use_content_img: content_image = K.variable(preprocess_image(content_img_path)) else: content_image = K.zeros(shape=shape) images = K.concatenate([style_image, target_image, content_image], axis=0) # 为遮罩创建张量变量 raw_style_mask, raw_target_mask = load_mask_labels() style_mask = K.variable(raw_style_mask.astype('float32')) target_mask = K.variable(raw_target_mask.astype('float32')) masks = K.concatenate([style_mask, target_mask], axis=0) # 图像和任务变量的索引常量 STYLE, TARGET, CONTENT = 0, 1, 2 # 建立图像模型,遮罩模型,并将层输出用作特征图像模型(如 VGG19) image_model = vgg19.VGG19(include_top=False, input_tensor=images) # 蒙版模型作为一系列合并 mask_input = Input(tensor=masks, shape=(None, None, None), name='mask_input') x = mask_input for layer in image_model.layers[1:]: name = 'mask_%s' % layer.name if 'conv' in layer.name: x = AveragePooling2D((3, 3), padding='same', strides=( 1, 1), name=name)(x) elif 'pool' in layer.name: x = AveragePooling2D((2, 2), name=name)(x) mask_model = Model(mask_input, x) # 从 image_model 和 task_model 收集特征 image_features = {} mask_features = {} for img_layer, mask_layer in 
zip(image_model.layers, mask_model.layers): if 'conv' in img_layer.name: assert 'mask_' + img_layer.name == mask_layer.name layer_name = img_layer.name img_feat, mask_feat = img_layer.output, mask_layer.output image_features[layer_name] = img_feat mask_features[layer_name] = mask_feat # 定义损失函数 def gram_matrix(x): assert K.ndim(x) == 3 features = K.batch_flatten(x) gram = K.dot(features, K.transpose(features)) return gram def region_style_loss(style_image, target_image, style_mask, target_mask): '''计算由其(布尔)掩码指定的一个公共区域的 style_image 和 target_image 之间的样式损失 ''' assert 3 == K.ndim(style_image) == K.ndim(target_image) assert 2 == K.ndim(style_mask) == K.ndim(target_mask) if K.image_data_format() == 'channels_first': masked_style = style_image * style_mask masked_target = target_image * target_mask num_channels = K.shape(style_image)[0] else: masked_style = K.permute_dimensions( style_image, (2, 0, 1)) * style_mask masked_target = K.permute_dimensions( target_image, (2, 0, 1)) * target_mask num_channels = K.shape(style_image)[-1] num_channels = K.cast(num_channels, dtype='float32') s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels return K.mean(K.square(s - c)) def style_loss(style_image, target_image, style_masks, target_masks): '''计算所有区域中 style_image 和 target_image 之间的样式损失。 ''' assert 3 == K.ndim(style_image) == K.ndim(target_image) assert 3 == K.ndim(style_masks) == K.ndim(target_masks) loss = K.variable(0) for i in range(num_labels): if K.image_data_format() == 'channels_first': style_mask = style_masks[i, :, :] target_mask = target_masks[i, :, :] else: style_mask = style_masks[:, :, i] target_mask = target_masks[:, :, i] loss = loss + region_style_loss(style_image, target_image, style_mask, target_mask) return loss def content_loss(content_image, target_image): return K.sum(K.square(target_image - content_image)) def total_variation_loss(x): assert 4 == K.ndim(x) if K.image_data_format() == 'channels_first': a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, 1:, :img_ncols - 1]) b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] - x[:, :, :img_nrows - 1, 1:]) else: a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, 1:, :img_ncols - 1, :]) b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] - x[:, :img_nrows - 1, 1:, :]) return K.sum(K.pow(a + b, 1.25)) # 总损失是 content_loss, style_loss 和 tv_loss 的加权总和 # 每个损失都使用图像/蒙版模型中的特征。 loss = K.variable(0) for layer in content_feature_layers: content_feat = image_features[layer][CONTENT, :, :, :] target_feat = image_features[layer][TARGET, :, :, :] loss = loss + content_weight * content_loss(content_feat, target_feat) for layer in style_feature_layers: style_feat = image_features[layer][STYLE, :, :, :] target_feat = image_features[layer][TARGET, :, :, :] style_masks = mask_features[layer][STYLE, :, :, :] target_masks = mask_features[layer][TARGET, :, :, :] sl = style_loss(style_feat, target_feat, style_masks, target_masks) loss = loss + (style_weight / len(style_feature_layers)) * sl loss = loss + total_variation_weight * total_variation_loss(target_image) loss_grads = K.gradients(loss, target_image) # 计算效率评估器类 outputs = [loss] if isinstance(loss_grads, (list, tuple)): outputs += loss_grads else: outputs.append(loss_grads) f_outputs = K.function([target_image], outputs) def eval_loss_and_grads(x): if K.image_data_format() == 'channels_first': x = x.reshape((1, 3, img_nrows, img_ncols)) else: x = x.reshape((1, img_nrows, img_ncols, 3)) outs = 
f_outputs([x]) loss_value = outs[0] if len(outs[1:]) == 1: grad_values = outs[1].flatten().astype('float64') else: grad_values = np.array(outs[1:]).flatten().astype('float64') return loss_value, grad_values class Evaluator(object): def __init__(self): self.loss_value = None self.grads_values = None def loss(self, x): assert self.loss_value is None loss_value, grad_values = eval_loss_and_grads(x) self.loss_value = loss_value self.grad_values = grad_values return self.loss_value def grads(self, x): assert self.loss_value is not None grad_values = np.copy(self.grad_values) self.loss_value = None self.grad_values = None return grad_values evaluator = Evaluator() # 通过迭代优化生成图像 if K.image_data_format() == 'channels_first': x = np.random.uniform(0, 255, (1, 3, img_nrows, img_ncols)) - 128. else: x = np.random.uniform(0, 255, (1, img_nrows, img_ncols, 3)) - 128. for i in range(num_iterations): print('Start of iteration', i, '/', num_iterations) start_time = time.time() x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20) print('Current loss value:', min_val) # 保存当前生成的图像 img = deprocess_image(x.copy()) fname = target_img_prefix + '_at_iteration_%d.png' % i save_img(fname, img) end_time = time.time() print('Image saved as', fname) print('Iteration %d completed in %ds' % (i, end_time - start_time)) ```
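The `Evaluator` class above exists because `scipy.optimize.fmin_l_bfgs_b` expects two separate callables (one returning the loss, one returning the gradients), while the Keras function computes both in a single pass. The standalone sketch below illustrates the same caching trick on a toy quadratic objective; it is only an illustration added here for clarity and is not part of the original example.

```python
import numpy as np
from scipy.optimize import fmin_l_bfgs_b


def loss_and_grads(x):
    # Toy objective ||x - 3||^2 and its analytic gradient, computed together.
    loss = float(np.sum((x - 3.0) ** 2))
    grads = (2.0 * (x - 3.0)).astype("float64")
    return loss, grads


class Evaluator:
    """Caches the gradients computed alongside the loss, so the separate
    `loss` and `grads` callbacks required by L-BFGS do not trigger two
    evaluations of the objective."""

    def __init__(self):
        self.grad_values = None

    def loss(self, x):
        loss_value, self.grad_values = loss_and_grads(x)
        return loss_value

    def grads(self, x):
        grad_values = np.copy(self.grad_values)
        self.grad_values = None
        return grad_values


evaluator = Evaluator()
x0 = np.zeros(5)
x_opt, min_val, info = fmin_l_bfgs_b(
    evaluator.loss, x0, fprime=evaluator.grads, maxfun=20
)
print(x_opt, min_val)
```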
keras-docs-zh/sources/examples/neural_doodle.md/0
{ "file_path": "keras-docs-zh/sources/examples/neural_doodle.md", "repo_id": "keras-docs-zh", "token_count": 6567 }
91
# Writing your own Keras layers

For simple, stateless custom operations, you are probably better off using a `layers.core.Lambda` layer. But for custom layers that contain trainable weights, you should implement your own layer.

Here is the skeleton of a Keras layer as of **Keras 2.0** (if you are using an older version, please upgrade). You only need to implement three methods:

- `build(input_shape)`: this is where you define your weights. This method must set `self.built = True`, which can be done by calling `super([Layer], self).build()`.
- `call(x)`: this is where the layer's logic lives. Unless you want your layer to support masking, you only need to care about the first argument passed to `call`: the input tensor.
- `compute_output_shape(input_shape)`: in case your layer modifies the shape of its input, you should declare the shape transformation logic here. This allows Keras to perform automatic shape inference.

```python
from keras import backend as K
from keras.engine.topology import Layer

class MyLayer(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
```

It is also possible to define Keras layers that have multiple input tensors and multiple output tensors. To do this, you should assume that the inputs and outputs of the methods `build(input_shape)`, `call(x)` and `compute_output_shape(input_shape)` are lists. Here is an example, similar to the one above:

```python
from keras import backend as K
from keras.engine.topology import Layer

class MyLayer(Layer):

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(MyLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        assert isinstance(input_shape, list)
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[0][1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(MyLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        assert isinstance(x, list)
        a, b = x
        return [K.dot(a, self.kernel) + b, K.mean(b, axis=-1)]

    def compute_output_shape(self, input_shape):
        assert isinstance(input_shape, list)
        shape_a, shape_b = input_shape
        return [(shape_a[0], self.output_dim), shape_b[:-1]]
```

The existing Keras layers provide good examples of how to implement almost any layer. Don't hesitate to read the source code!
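To make the skeleton concrete, here is a minimal usage sketch (not part of the original guide) that plugs the single-input `MyLayer` defined above into a small functional model. The input and output sizes are arbitrary placeholders chosen for illustration:

```python
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model

# Assumes the single-input `MyLayer` from the first example above is in scope.
inputs = Input(shape=(16,))
x = MyLayer(output_dim=32)(inputs)   # custom layer with a trainable (16, 32) kernel
outputs = Dense(1, activation="sigmoid")(x)

model = Model(inputs, outputs)
model.compile(optimizer="adam", loss="binary_crossentropy")

# The custom kernel appears among the trainable weights in the summary.
model.summary()
model.predict(np.random.random((4, 16)))
```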
keras-docs-zh/sources/layers/writing-your-own-keras-layers.md/0
{ "file_path": "keras-docs-zh/sources/layers/writing-your-own-keras-layers.md", "repo_id": "keras-docs-zh", "token_count": 1743 }
92
<jupyter_start><jupyter_text>GPT2 Text Generation with KerasNLP
**Author:** Chen Qian
**Date created:** 04/17/2023
**Last modified:** 04/17/2023
**Description:** Use KerasNLP GPT2 model and `samplers` to do text generation.

In this tutorial, you will learn to use [KerasNLP](https://keras.io/keras_nlp/) to load a pre-trained Large Language Model (LLM) - [GPT-2 model](https://openai.com/research/better-language-models) (originally invented by OpenAI), finetune it to a specific text style, and generate text based on users' input (also known as a prompt). You will also learn how GPT2 adapts quickly to non-English languages, such as Chinese.

Before we begin

Colab offers different kinds of runtimes. Make sure to go to **Runtime -> Change runtime type** and choose the GPU Hardware Accelerator runtime (which should have >12G host RAM and ~15G GPU RAM) since you will finetune the GPT-2 model. Running this tutorial on a CPU runtime will take hours.

Install KerasNLP, Choose Backend and Import Dependencies

This example uses [Keras Core](https://keras.io/keras_core/) to work in any of `"tensorflow"`, `"jax"` or `"torch"`. Support for Keras Core is baked into KerasNLP: simply change the `"KERAS_BACKEND"` environment variable to select the backend of your choice. We select the JAX backend below.<jupyter_code>!pip install git+https://github.com/keras-team/keras-nlp.git -q

import os

os.environ["KERAS_BACKEND"] = "jax"  # or "tensorflow" or "torch"

import keras_nlp
import keras
import tensorflow as tf
import time

keras.mixed_precision.set_global_policy("mixed_float16")<jupyter_output><empty_output><jupyter_text>Introduction to Generative Large Language Models (LLMs)

Large language models (LLMs) are machine learning models that are trained on a large corpus of text data to generate outputs for various natural language processing (NLP) tasks, such as text generation, question answering, and machine translation.

Generative LLMs are typically based on deep learning neural networks, such as the [Transformer architecture](https://arxiv.org/abs/1706.03762) invented by Google researchers in 2017, and are trained on massive amounts of text data, often involving billions of words. These models, such as Google [LaMDA](https://blog.google/technology/ai/lamda/) and [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html), are trained on large datasets from a variety of data sources, which allows them to generate output for many tasks. The core of a generative LLM is predicting the next word in a sentence, often referred to as **Causal LM Pretraining**. In this way LLMs can generate coherent text based on user prompts. For a more pedagogical discussion of language models, you can refer to the [Stanford CS324 LLM class](https://stanford-cs324.github.io/winter2022/lectures/introduction/).

Introduction to KerasNLP

Large Language Models are complex to build and expensive to train from scratch. Luckily there are pretrained LLMs available for use right away. [KerasNLP](https://keras.io/keras_nlp/) provides a large number of pre-trained checkpoints that allow you to experiment with SOTA models without needing to train them yourself.

KerasNLP is a natural language processing library that supports users through their entire development cycle.
KerasNLP offers both pretrained models andmodularized building blocks, so developers could easily reuse pretrained modelsor stack their own LLM.In a nutshell, for generative LLM, KerasNLP offers:- Pretrained models with `generate()` method, e.g., `keras_nlp.models.GPT2CausalLM` and `keras_nlp.models.OPTCausalLM`.- Sampler class that implements generation algorithms such as Top-K, Beam and contrastive search. These samplers can be used to generate text with custom models. Load a pre-trained GPT-2 model and generate some textKerasNLP provides a number of pre-trained models, such as [GoogleBert](https://ai.googleblog.com/2018/11/open-sourcing-bert-state-of-art-pre.html)and [GPT-2](https://openai.com/research/better-language-models). You can seethe list of models available in the [KerasNLP repository](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/models).It's very easy to load the GPT-2 model as you can see below:<jupyter_code># To speed up training and generation, we use preprocessor of length 128 # instead of full length 1024. preprocessor = keras_nlp.models.GPT2CausalLMPreprocessor.from_preset( "gpt2_base_en", sequence_length=128, ) gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset( "gpt2_base_en", preprocessor=preprocessor )<jupyter_output><empty_output><jupyter_text>Once the model is loaded, you can use it to generate some text right away. Runthe cells below to give it a try. It's as simple as calling a single function*generate()*:<jupyter_code>start = time.time() output = gpt2_lm.generate("My trip to Yosemite was", max_length=200) print("\nGPT-2 output:") print(output) end = time.time() print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")<jupyter_output><empty_output><jupyter_text>Try another one:<jupyter_code>start = time.time() output = gpt2_lm.generate("That Italian restaurant is", max_length=200) print("\nGPT-2 output:") print(output) end = time.time() print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")<jupyter_output><empty_output><jupyter_text>Notice how much faster the second call is. This is because the computationalgraph is [XLA compiled](https://www.tensorflow.org/xla) in the 1st run andre-used in the 2nd behind the scenes.The quality of the generated text looks OK, but we can improve it viafine-tuning. More on the GPT-2 model from KerasNLPNext up, we will actually fine-tune the model to update its parameters, butbefore we do, let's take a look at the full set of tools we have to for workingwith for GPT2.The code of GPT2 can be found[here](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/gpt2/).Conceptually the `GPT2CausalLM` can be hierarchically broken down into severalmodules in KerasNLP, all of which have a *from_preset()* function that loads apretrained model:- `keras_nlp.models.GPT2Tokenizer`: The tokenizer used by GPT2 model, which is a [byte-pair encoder](https://huggingface.co/course/chapter6/5?fw=pt).- `keras_nlp.models.GPT2CausalLMPreprocessor`: the preprocessor used by GPT2 causal LM training. It does the tokenization along with other preprocessing works such as creating the label and appending the end token.- `keras_nlp.models.GPT2Backbone`: the GPT2 model, which is a stack of `keras_nlp.layers.TransformerDecoder`. This is usually just referred as `GPT2`.- `keras_nlp.models.GPT2CausalLM`: wraps `GPT2Backbone`, it multiplies the output of `GPT2Backbone` by embedding matrix to generate logits over vocab tokens. 
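These modules are easy to poke at individually. As a small optional sanity check (an addition to this notebook, not part of the original, relying only on the standard `from_preset` tokenizer API), the cell below loads just the tokenizer and round-trips a toy sentence through it to see the token IDs that `GPT2CausalLM` consumes:<jupyter_code># Optional: inspect the byte-pair tokenizer on its own.
tokenizer = keras_nlp.models.GPT2Tokenizer.from_preset("gpt2_base_en")
token_ids = tokenizer("The quick brown fox")
print(token_ids)
# Detokenizing should recover (roughly) the original string.
print(tokenizer.detokenize(token_ids))<jupyter_output><empty_output><jupyter_text>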
Finetune on Reddit datasetNow you have the knowledge of the GPT-2 model from KerasNLP, you can take onestep further to finetune the model so that it generates text in a specificstyle, short or long, strict or casual. In this tutorial, we will use redditdataset for example.<jupyter_code>import tensorflow_datasets as tfds reddit_ds = tfds.load("reddit_tifu", split="train", as_supervised=True)<jupyter_output><empty_output><jupyter_text>Let's take a look inside sample data from the reddit TensorFlow Dataset. Thereare two features:- **__document__**: text of the post.- **__title__**: the title.<jupyter_code>for document, title in reddit_ds: print(document.numpy()) print(title.numpy()) break<jupyter_output><empty_output><jupyter_text>In our case, we are performing next word prediction in a language model, so weonly need the 'document' feature.<jupyter_code>train_ds = ( reddit_ds.map(lambda document, _: document) .batch(32) .cache() .prefetch(tf.data.AUTOTUNE) )<jupyter_output><empty_output><jupyter_text>Now you can finetune the model using the familiar *fit()* function. Note that`preprocessor` will be automatically called inside `fit` method since`GPT2CausalLM` is a `keras_nlp.models.Task` instance.This step takes quite a bit of GPU memory and a long time if we were to trainit all the way to a fully trained state. Here we just use part of the datasetfor demo purposes.<jupyter_code>train_ds = train_ds.take(500) num_epochs = 1 # Linearly decaying learning rate. learning_rate = keras.optimizers.schedules.PolynomialDecay( 5e-5, decay_steps=train_ds.cardinality() * num_epochs, end_learning_rate=0.0, ) loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) gpt2_lm.compile( optimizer=keras.optimizers.Adam(learning_rate), loss=loss, weighted_metrics=["accuracy"], ) gpt2_lm.fit(train_ds, epochs=num_epochs)<jupyter_output><empty_output><jupyter_text>After fine-tuning is finished, you can again generate text using the same*generate()* function. This time, the text will be closer to Reddit writingstyle, and the generated length will be close to our preset length in thetraining set.<jupyter_code>start = time.time() output = gpt2_lm.generate("I like basketball", max_length=200) print("\nGPT-2 output:") print(output) end = time.time() print(f"TOTAL TIME ELAPSED: {end - start:.2f}s")<jupyter_output><empty_output><jupyter_text>Into the Sampling MethodIn KerasNLP, we offer a few sampling methods, e.g., contrastive search,Top-K and beam sampling. By default, our `GPT2CausalLM` uses Top-k search, butyou can choose your own sampling method.Much like optimizer and activations, there are two ways to specify your customsampler:- Use a string identifier, such as "greedy", you are using the defaultconfiguration via this way.- Pass a `keras_nlp.samplers.Sampler` instance, you can use custom configurationvia this way.<jupyter_code># Use a string identifier. gpt2_lm.compile(sampler="top_k") output = gpt2_lm.generate("I like basketball", max_length=200) print("\nGPT-2 output:") print(output) # Use a `Sampler` instance. `GreedySampler` tends to repeat itself, greedy_sampler = keras_nlp.samplers.GreedySampler() gpt2_lm.compile(sampler=greedy_sampler) output = gpt2_lm.generate("I like basketball", max_length=200) print("\nGPT-2 output:") print(output)<jupyter_output><empty_output><jupyter_text>For more details on KerasNLP `Sampler` class, you can check the code[here](https://github.com/keras-team/keras-nlp/tree/master/keras_nlp/samplers). 
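Note that the string identifier always uses the sampler's default configuration. If you want custom settings (for example, a smaller `k` for Top-K search), pass a configured `Sampler` instance instead, as in the short sketch below (added here for illustration; the parameter value is arbitrary):<jupyter_code># Top-K search restricted to the 5 most likely tokens at each step.
top_k_sampler = keras_nlp.samplers.TopKSampler(k=5)
gpt2_lm.compile(sampler=top_k_sampler)
output = gpt2_lm.generate("I like basketball", max_length=200)
print("\nGPT-2 output:")
print(output)<jupyter_output><empty_output><jupyter_text>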
Finetune on Chinese Poem Dataset

We can also finetune GPT2 on non-English datasets. For readers who know Chinese, this part illustrates how to fine-tune GPT2 on a Chinese poem dataset to teach our model to become a poet!

Because GPT2 uses a byte-pair encoder, and the original pretraining dataset contains some Chinese characters, we can use the original vocab to finetune on the Chinese dataset.<jupyter_code>!# Load chinese poetry dataset.
!git clone https://github.com/chinese-poetry/chinese-poetry.git<jupyter_output><empty_output><jupyter_text>Load the text from the json files. We only use《全唐诗》(the Complete Tang Poems) for demo purposes.<jupyter_code>import os
import json

poem_collection = []
for file in os.listdir("chinese-poetry/全唐诗"):
    if ".json" not in file or "poet" not in file:
        continue
    full_filename = "%s/%s" % ("chinese-poetry/全唐诗", file)
    with open(full_filename, "r") as f:
        content = json.load(f)
    poem_collection.extend(content)

paragraphs = ["".join(data["paragraphs"]) for data in poem_collection]<jupyter_output><empty_output><jupyter_text>Let's take a look at sample data.<jupyter_code>print(paragraphs[0])<jupyter_output><empty_output><jupyter_text>Similarly to the Reddit example, we convert the text to a TF dataset, and only use part of the data for training.<jupyter_code>train_ds = (
    tf.data.Dataset.from_tensor_slices(paragraphs)
    .batch(16)
    .cache()
    .prefetch(tf.data.AUTOTUNE)
)

# Running through the whole dataset takes long, so we only take 500 batches
# and run 1 epoch for demo purposes.
train_ds = train_ds.take(500)
num_epochs = 1

learning_rate = keras.optimizers.schedules.PolynomialDecay(
    5e-4,
    decay_steps=train_ds.cardinality() * num_epochs,
    end_learning_rate=0.0,
)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
gpt2_lm.compile(
    optimizer=keras.optimizers.Adam(learning_rate),
    loss=loss,
    weighted_metrics=["accuracy"],
)

gpt2_lm.fit(train_ds, epochs=num_epochs)<jupyter_output><empty_output><jupyter_text>Let's check the result!<jupyter_code>output = gpt2_lm.generate("昨夜雨疏风骤", max_length=200)
print(output)<jupyter_output><empty_output>
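<jupyter_text>If you want to keep the poetry model around, you can save the finetuned weights and load them back into a fresh `GPT2CausalLM` later. This closing cell is an optional addition to the original notebook: the file name is just a placeholder, and `save_weights`/`load_weights` are the generic Keras `Model` methods, so check the filename convention your Keras version expects.<jupyter_code># Persist the finetuned weights (path is an arbitrary placeholder).
gpt2_lm.save_weights("finetuned_gpt2_poetry.weights.h5")

# Later / elsewhere: rebuild the same preset and restore the weights.
restored_lm = keras_nlp.models.GPT2CausalLM.from_preset(
    "gpt2_base_en", preprocessor=preprocessor
)
restored_lm.load_weights("finetuned_gpt2_poetry.weights.h5")
print(restored_lm.generate("昨夜雨疏风骤", max_length=200))<jupyter_output><empty_output>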
keras-io/examples/generative/ipynb/gpt2_text_generation_with_kerasnlp.ipynb/0
{ "file_path": "keras-io/examples/generative/ipynb/gpt2_text_generation_with_kerasnlp.ipynb", "repo_id": "keras-io", "token_count": 4063 }
93
# Neural Style Transfer with AdaIN **Author:** [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha)<br> **Date created:** 2021/11/08<br> **Last modified:** 2021/11/08<br> <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/adain.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/adain.py) **Description:** Neural Style Transfer with Adaptive Instance Normalization. # Introduction [Neural Style Transfer](https://www.tensorflow.org/tutorials/generative/style_transfer) is the process of transferring the style of one image onto the content of another. This was first introduced in the seminal paper ["A Neural Algorithm of Artistic Style"](https://arxiv.org/abs/1508.06576) by Gatys et al. A major limitation of the technique proposed in this work is in its runtime, as the algorithm uses a slow iterative optimization process. Follow-up papers that introduced [Batch Normalization](https://arxiv.org/abs/1502.03167), [Instance Normalization](https://arxiv.org/abs/1701.02096) and [Conditional Instance Normalization](https://arxiv.org/abs/1610.07629) allowed Style Transfer to be performed in new ways, no longer requiring a slow iterative process. Following these papers, the authors Xun Huang and Serge Belongie propose [Adaptive Instance Normalization](https://arxiv.org/abs/1703.06868) (AdaIN), which allows arbitrary style transfer in real time. In this example we implement Adaptive Instance Normalization for Neural Style Transfer. We show in the below figure the output of our AdaIN model trained for only **30 epochs**. ![Style transfer sample gallery](https://i.imgur.com/zDjDuea.png) You can also try out the model with your own images with this [Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst). # Setup We begin with importing the necessary packages. We also set the seed for reproducibility. The global variables are hyperparameters which we can change as we like. ```python import os import numpy as np import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import tensorflow_datasets as tfds from tensorflow.keras import layers # Defining the global variables. IMAGE_SIZE = (224, 224) BATCH_SIZE = 64 # Training for single epoch for time constraint. # Please use atleast 30 epochs to see good results. EPOCHS = 1 AUTOTUNE = tf.data.AUTOTUNE ``` --- ## Style transfer sample gallery For Neural Style Transfer we need style images and content images. In this example we will use the [Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time) as our style dataset and [Pascal VOC](https://www.tensorflow.org/datasets/catalog/voc) as our content dataset. This is a deviation from the original paper implementation by the authors, where they use [WIKI-Art](https://paperswithcode.com/dataset/wikiart) as style and [MSCOCO](https://cocodataset.org/#home) as content datasets respectively. We do this to create a minimal yet reproducible example. 
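If you want a quick sense of how much content data Pascal VOC provides before wiring up the full pipeline, `tfds` can report the split sizes without iterating over the images. This is a small optional check added here for convenience, not part of the training code below:

```python
import tensorflow_datasets as tfds

# Load the VOC metadata alongside the dataset object.
_, voc_info = tfds.load("voc", split="train", with_info=True)
print(voc_info.splits["train"].num_examples)
print(voc_info.splits["validation"].num_examples)
```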
--- ## Downloading the dataset from Kaggle The [Best Artworks of All Time](https://www.kaggle.com/ikarus777/best-artworks-of-all-time) dataset is hosted on Kaggle and one can easily download it in Colab by following these steps: - Follow the instructions [here](https://github.com/Kaggle/kaggle-api) in order to obtain your Kaggle API keys in case you don't have them. - Use the following command to upload the Kaggle API keys. ```python from google.colab import files files.upload() ``` - Use the following commands to move the API keys to the proper directory and download the dataset. ```shell $ mkdir ~/.kaggle $ cp kaggle.json ~/.kaggle/ $ chmod 600 ~/.kaggle/kaggle.json $ kaggle datasets download ikarus777/best-artworks-of-all-time $ unzip -qq best-artworks-of-all-time.zip $ rm -rf images $ mv resized artwork $ rm best-artworks-of-all-time.zip artists.csv ``` --- ## `tf.data` pipeline In this section, we will build the `tf.data` pipeline for the project. For the style dataset, we decode, convert and resize the images from the folder. For the content images we are already presented with a `tf.data` dataset as we use the `tfds` module. After we have our style and content data pipeline ready, we zip the two together to obtain the data pipeline that our model will consume. ```python def decode_and_resize(image_path): """Decodes and resizes an image from the image file path. Args: image_path: The image file path. Returns: A resized image. """ image = tf.io.read_file(image_path) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.convert_image_dtype(image, dtype="float32") image = tf.image.resize(image, IMAGE_SIZE) return image def extract_image_from_voc(element): """Extracts image from the PascalVOC dataset. Args: element: A dictionary of data. Returns: A resized image. """ image = element["image"] image = tf.image.convert_image_dtype(image, dtype="float32") image = tf.image.resize(image, IMAGE_SIZE) return image # Get the image file paths for the style images. style_images = os.listdir("/content/artwork/resized") style_images = [os.path.join("/content/artwork/resized", path) for path in style_images] # split the style images in train, val and test total_style_images = len(style_images) train_style = style_images[: int(0.8 * total_style_images)] val_style = style_images[int(0.8 * total_style_images) : int(0.9 * total_style_images)] test_style = style_images[int(0.9 * total_style_images) :] # Build the style and content tf.data datasets. train_style_ds = ( tf.data.Dataset.from_tensor_slices(train_style) .map(decode_and_resize, num_parallel_calls=AUTOTUNE) .repeat() ) train_content_ds = tfds.load("voc", split="train").map(extract_image_from_voc).repeat() val_style_ds = ( tf.data.Dataset.from_tensor_slices(val_style) .map(decode_and_resize, num_parallel_calls=AUTOTUNE) .repeat() ) val_content_ds = ( tfds.load("voc", split="validation").map(extract_image_from_voc).repeat() ) test_style_ds = ( tf.data.Dataset.from_tensor_slices(test_style) .map(decode_and_resize, num_parallel_calls=AUTOTUNE) .repeat() ) test_content_ds = ( tfds.load("voc", split="test") .map(extract_image_from_voc, num_parallel_calls=AUTOTUNE) .repeat() ) # Zipping the style and content datasets. 
train_ds = ( tf.data.Dataset.zip((train_style_ds, train_content_ds)) .shuffle(BATCH_SIZE * 2) .batch(BATCH_SIZE) .prefetch(AUTOTUNE) ) val_ds = ( tf.data.Dataset.zip((val_style_ds, val_content_ds)) .shuffle(BATCH_SIZE * 2) .batch(BATCH_SIZE) .prefetch(AUTOTUNE) ) test_ds = ( tf.data.Dataset.zip((test_style_ds, test_content_ds)) .shuffle(BATCH_SIZE * 2) .batch(BATCH_SIZE) .prefetch(AUTOTUNE) ) ``` <div class="k-default-codeblock"> ``` Downloading and preparing dataset voc/2007/4.0.0 (download: 868.85 MiB, generated: Unknown size, total: 868.85 MiB) to /root/tensorflow_datasets/voc/2007/4.0.0... Dl Completed...: 0 url [00:00, ? url/s] Dl Size...: 0 MiB [00:00, ? MiB/s] Extraction completed...: 0 file [00:00, ? file/s] ``` </div> <div class="k-default-codeblock"> ``` 0 examples [00:00, ? examples/s] Shuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteP16YU5/voc-test.tfrecord 0%| | 0/4952 [00:00<?, ? examples/s] 0 examples [00:00, ? examples/s] Shuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteP16YU5/voc-train.tfrecord 0%| | 0/2501 [00:00<?, ? examples/s] 0 examples [00:00, ? examples/s] Shuffling and writing examples to /root/tensorflow_datasets/voc/2007/4.0.0.incompleteP16YU5/voc-validation.tfrecord 0%| | 0/2510 [00:00<?, ? examples/s] Dataset voc downloaded and prepared to /root/tensorflow_datasets/voc/2007/4.0.0. Subsequent calls will reuse this data. ``` </div> --- ## Visualizing the data It is always better to visualize the data before training. To ensure the correctness of our preprocessing pipeline, we visualize 10 samples from our dataset. ```python style, content = next(iter(train_ds)) fig, axes = plt.subplots(nrows=10, ncols=2, figsize=(5, 30)) [ax.axis("off") for ax in np.ravel(axes)] for (axis, style_image, content_image) in zip(axes, style[0:10], content[0:10]): (ax_style, ax_content) = axis ax_style.imshow(style_image) ax_style.set_title("Style Image") ax_content.imshow(content_image) ax_content.set_title("Content Image") ``` ![png](/img/examples/generative/adain/adain_8_0.png) --- ## Architecture The style transfer network takes a content image and a style image as inputs and outputs the style transferred image. The authors of AdaIN propose a simple encoder-decoder structure for achieving this. ![AdaIN architecture](https://i.imgur.com/JbIfoyE.png) The content image (`C`) and the style image (`S`) are both fed to the encoder networks. The output from these encoder networks (feature maps) are then fed to the AdaIN layer. The AdaIN layer computes a combined feature map. This feature map is then fed into a randomly initialized decoder network that serves as the generator for the neural style transferred image. ![AdaIn equation](https://i.imgur.com/hqhcBQS.png) The style feature map (`fs`) and the content feature map (`fc`) are fed to the AdaIN layer. This layer produced the combined feature map `t`. The function `g` represents the decoder (generator) network. ### Encoder The encoder is a part of the pretrained (pretrained on [imagenet](https://www.image-net.org/)) VGG19 model. We slice the model from the `block4-conv1` layer. The output layer is as suggested by the authors in their paper. 
```python def get_encoder(): vgg19 = keras.applications.VGG19( include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3), ) vgg19.trainable = False mini_vgg19 = keras.Model(vgg19.input, vgg19.get_layer("block4_conv1").output) inputs = layers.Input([*IMAGE_SIZE, 3]) mini_vgg19_out = mini_vgg19(inputs) return keras.Model(inputs, mini_vgg19_out, name="mini_vgg19") ``` ### Adaptive Instance Normalization The AdaIN layer takes in the features of the content and style image. The layer can be defined via the following equation: ![AdaIn formula](https://i.imgur.com/tWq3VKP.png) where `sigma` is the standard deviation and `mu` is the mean for the concerned variable. In the above equation the mean and variance of the content feature map `fc` is aligned with the mean and variance of the style feature maps `fs`. It is important to note that the AdaIN layer proposed by the authors uses no other parameters apart from mean and variance. The layer also does not have any trainable parameters. This is why we use a *Python function* instead of using a *Keras layer*. The function takes style and content feature maps, computes the mean and standard deviation of the images and returns the adaptive instance normalized feature map. ```python def get_mean_std(x, epsilon=1e-5): axes = [1, 2] # Compute the mean and standard deviation of a tensor. mean, variance = tf.nn.moments(x, axes=axes, keepdims=True) standard_deviation = tf.sqrt(variance + epsilon) return mean, standard_deviation def ada_in(style, content): """Computes the AdaIn feature map. Args: style: The style feature map. content: The content feature map. Returns: The AdaIN feature map. """ content_mean, content_std = get_mean_std(content) style_mean, style_std = get_mean_std(style) t = style_std * (content - content_mean) / content_std + style_mean return t ``` ### Decoder The authors specify that the decoder network must mirror the encoder network. We have symmetrically inverted the encoder to build our decoder. We have used `UpSampling2D` layers to increase the spatial resolution of the feature maps. Note that the authors warn against using any normalization layer in the decoder network, and do indeed go on to show that including batch normalization or instance normalization hurts the performance of the overall network. This is the only portion of the entire architecture that is trainable. ```python def get_decoder(): config = {"kernel_size": 3, "strides": 1, "padding": "same", "activation": "relu"} decoder = keras.Sequential( [ layers.InputLayer((None, None, 512)), layers.Conv2D(filters=512, **config), layers.UpSampling2D(), layers.Conv2D(filters=256, **config), layers.Conv2D(filters=256, **config), layers.Conv2D(filters=256, **config), layers.Conv2D(filters=256, **config), layers.UpSampling2D(), layers.Conv2D(filters=128, **config), layers.Conv2D(filters=128, **config), layers.UpSampling2D(), layers.Conv2D(filters=64, **config), layers.Conv2D( filters=3, kernel_size=3, strides=1, padding="same", activation="sigmoid", ), ] ) return decoder ``` ### Loss functions Here we build the loss functions for the neural style transfer model. The authors propose to use a pretrained VGG-19 to compute the loss function of the network. It is important to keep in mind that this will be used for training only the decoder network. The total loss (`Lt`) is a weighted combination of content loss (`Lc`) and style loss (`Ls`). The `lambda` term is used to vary the amount of style transferred. 
![The total loss](https://i.imgur.com/Q5y1jUM.png) ### Content Loss This is the Euclidean distance between the content image features and the features of the neural style transferred image. ![The content loss](https://i.imgur.com/dZ0uD0N.png) Here the authors propose to use the output from the AdaIn layer `t` as the content target rather than using features of the original image as target. This is done to speed up convergence. ### Style Loss Rather than using the more commonly used [Gram Matrix](https://mathworld.wolfram.com/GramMatrix.html), the authors propose to compute the difference between the statistical features (mean and variance) which makes it conceptually cleaner. This can be easily visualized via the following equation: ![The style loss](https://i.imgur.com/Ctclhn3.png) where `theta` denotes the layers in VGG-19 used to compute the loss. In this case this corresponds to: - `block1_conv1` - `block1_conv2` - `block1_conv3` - `block1_conv4` ```python def get_loss_net(): vgg19 = keras.applications.VGG19( include_top=False, weights="imagenet", input_shape=(*IMAGE_SIZE, 3) ) vgg19.trainable = False layer_names = ["block1_conv1", "block2_conv1", "block3_conv1", "block4_conv1"] outputs = [vgg19.get_layer(name).output for name in layer_names] mini_vgg19 = keras.Model(vgg19.input, outputs) inputs = layers.Input([*IMAGE_SIZE, 3]) mini_vgg19_out = mini_vgg19(inputs) return keras.Model(inputs, mini_vgg19_out, name="loss_net") ``` --- ## Neural Style Transfer This is the trainer module. We wrap the encoder and decoder inside a `tf.keras.Model` subclass. This allows us to customize what happens in the `model.fit()` loop. ```python class NeuralStyleTransfer(tf.keras.Model): def __init__(self, encoder, decoder, loss_net, style_weight, **kwargs): super().__init__(**kwargs) self.encoder = encoder self.decoder = decoder self.loss_net = loss_net self.style_weight = style_weight def compile(self, optimizer, loss_fn): super().compile() self.optimizer = optimizer self.loss_fn = loss_fn self.style_loss_tracker = keras.metrics.Mean(name="style_loss") self.content_loss_tracker = keras.metrics.Mean(name="content_loss") self.total_loss_tracker = keras.metrics.Mean(name="total_loss") def train_step(self, inputs): style, content = inputs # Initialize the content and style loss. loss_content = 0.0 loss_style = 0.0 with tf.GradientTape() as tape: # Encode the style and content image. style_encoded = self.encoder(style) content_encoded = self.encoder(content) # Compute the AdaIN target feature maps. t = ada_in(style=style_encoded, content=content_encoded) # Generate the neural style transferred image. reconstructed_image = self.decoder(t) # Compute the losses. reconstructed_vgg_features = self.loss_net(reconstructed_image) style_vgg_features = self.loss_net(style) loss_content = self.loss_fn(t, reconstructed_vgg_features[-1]) for inp, out in zip(style_vgg_features, reconstructed_vgg_features): mean_inp, std_inp = get_mean_std(inp) mean_out, std_out = get_mean_std(out) loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn( std_inp, std_out ) loss_style = self.style_weight * loss_style total_loss = loss_content + loss_style # Compute gradients and optimize the decoder. trainable_vars = self.decoder.trainable_variables gradients = tape.gradient(total_loss, trainable_vars) self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update the trackers. 
self.style_loss_tracker.update_state(loss_style) self.content_loss_tracker.update_state(loss_content) self.total_loss_tracker.update_state(total_loss) return { "style_loss": self.style_loss_tracker.result(), "content_loss": self.content_loss_tracker.result(), "total_loss": self.total_loss_tracker.result(), } def test_step(self, inputs): style, content = inputs # Initialize the content and style loss. loss_content = 0.0 loss_style = 0.0 # Encode the style and content image. style_encoded = self.encoder(style) content_encoded = self.encoder(content) # Compute the AdaIN target feature maps. t = ada_in(style=style_encoded, content=content_encoded) # Generate the neural style transferred image. reconstructed_image = self.decoder(t) # Compute the losses. recons_vgg_features = self.loss_net(reconstructed_image) style_vgg_features = self.loss_net(style) loss_content = self.loss_fn(t, recons_vgg_features[-1]) for inp, out in zip(style_vgg_features, recons_vgg_features): mean_inp, std_inp = get_mean_std(inp) mean_out, std_out = get_mean_std(out) loss_style += self.loss_fn(mean_inp, mean_out) + self.loss_fn( std_inp, std_out ) loss_style = self.style_weight * loss_style total_loss = loss_content + loss_style # Update the trackers. self.style_loss_tracker.update_state(loss_style) self.content_loss_tracker.update_state(loss_content) self.total_loss_tracker.update_state(total_loss) return { "style_loss": self.style_loss_tracker.result(), "content_loss": self.content_loss_tracker.result(), "total_loss": self.total_loss_tracker.result(), } @property def metrics(self): return [ self.style_loss_tracker, self.content_loss_tracker, self.total_loss_tracker, ] ``` --- ## Train Monitor callback This callback is used to visualize the style transfer output of the model at the end of each epoch. The objective of style transfer cannot be quantified properly, and is to be subjectively evaluated by an audience. For this reason, visualization is a key aspect of evaluating the model. ```python test_style, test_content = next(iter(test_ds)) class TrainMonitor(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): # Encode the style and content image. test_style_encoded = self.model.encoder(test_style) test_content_encoded = self.model.encoder(test_content) # Compute the AdaIN features. test_t = ada_in(style=test_style_encoded, content=test_content_encoded) test_reconstructed_image = self.model.decoder(test_t) # Plot the Style, Content and the NST image. fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5)) ax[0].imshow(tf.keras.utils.array_to_img(test_style[0])) ax[0].set_title(f"Style: {epoch:03d}") ax[1].imshow(tf.keras.utils.array_to_img(test_content[0])) ax[1].set_title(f"Content: {epoch:03d}") ax[2].imshow( tf.keras.utils.array_to_img(test_reconstructed_image[0]) ) ax[2].set_title(f"NST: {epoch:03d}") plt.show() plt.close() ``` --- ## Train the model In this section, we define the optimizer, the loss function, and the trainer module. We compile the trainer module with the optimizer and the loss function and then train it. *Note*: We train the model for a single epoch for time constraints, but we will need to train is for atleast 30 epochs to see good results. 
```python optimizer = keras.optimizers.Adam(learning_rate=1e-5) loss_fn = keras.losses.MeanSquaredError() encoder = get_encoder() loss_net = get_loss_net() decoder = get_decoder() model = NeuralStyleTransfer( encoder=encoder, decoder=decoder, loss_net=loss_net, style_weight=4.0 ) model.compile(optimizer=optimizer, loss_fn=loss_fn) history = model.fit( train_ds, epochs=EPOCHS, steps_per_epoch=50, validation_data=val_ds, validation_steps=50, callbacks=[TrainMonitor()], ) ``` <div class="k-default-codeblock"> ``` Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg19/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5 80142336/80134624 [==============================] - 1s 0us/step 80150528/80134624 [==============================] - 1s 0us/step 50/50 [==============================] - ETA: 0s - style_loss: 213.1439 - content_loss: 141.1564 - total_loss: 354.3002 ``` </div> ![png](/img/examples/generative/adain/adain_23_1.png) <div class="k-default-codeblock"> ``` 50/50 [==============================] - 124s 2s/step - style_loss: 213.1439 - content_loss: 141.1564 - total_loss: 354.3002 - val_style_loss: 167.0819 - val_content_loss: 129.0497 - val_total_loss: 296.1316 ``` </div> --- ## Inference After we train the model, we now need to run inference with it. We will pass arbitrary content and style images from the test dataset and take a look at the output images. *NOTE*: To try out the model on your own images, you can use this [Hugging Face demo](https://huggingface.co/spaces/ariG23498/nst). ```python for style, content in test_ds.take(1): style_encoded = model.encoder(style) content_encoded = model.encoder(content) t = ada_in(style=style_encoded, content=content_encoded) reconstructed_image = model.decoder(t) fig, axes = plt.subplots(nrows=10, ncols=3, figsize=(10, 30)) [ax.axis("off") for ax in np.ravel(axes)] for axis, style_image, content_image, reconstructed_image in zip( axes, style[0:10], content[0:10], reconstructed_image[0:10] ): (ax_style, ax_content, ax_reconstructed) = axis ax_style.imshow(style_image) ax_style.set_title("Style Image") ax_content.imshow(content_image) ax_content.set_title("Content Image") ax_reconstructed.imshow(reconstructed_image) ax_reconstructed.set_title("NST Image") ``` ![png](/img/examples/generative/adain/adain_25_0.png) --- ## Conclusion Adaptive Instance Normalization allows arbitrary style transfer in real time. It is also important to note that the novel proposition of the authors is to achieve this only by aligning the statistical features (mean and standard deviation) of the style and the content images. *Note*: AdaIN also serves as the base for [Style-GANs](https://arxiv.org/abs/1812.04948). --- ## Reference - [TF implementation](https://github.com/ftokarev/tf-adain) --- ## Acknowledgement We thank [Luke Wood](https://lukewood.xyz) for his detailed review.
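As a closing pointer (an addition to the original example): if you would rather run the trained model on a pair of your own files locally instead of the Hugging Face demo, you can reuse the `decode_and_resize` and `ada_in` helpers defined above. The file paths below are placeholders.

```python
# Stylize a single local content/style pair with the trained decoder.
# Paths are placeholders; any RGB images will do.
content_img = decode_and_resize("my_content.jpg")[tf.newaxis, ...]
style_img = decode_and_resize("my_style.jpg")[tf.newaxis, ...]

style_encoded = model.encoder(style_img)
content_encoded = model.encoder(content_img)
stylized = model.decoder(ada_in(style=style_encoded, content=content_encoded))

plt.imshow(tf.keras.utils.array_to_img(stylized[0]))
plt.axis("off")
plt.show()
```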
keras-io/examples/generative/md/adain.md/0
{ "file_path": "keras-io/examples/generative/md/adain.md", "repo_id": "keras-io", "token_count": 9379 }
94
# PixelCNN **Author:** [ADMoreau](https://github.com/ADMoreau)<br> **Date created:** 2020/05/17<br> **Last modified:** 2020/05/23<br> **Description:** PixelCNN implemented in Keras. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/pixelcnn.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/pixelcnn.py) --- ## Introduction PixelCNN is a generative model proposed in 2016 by van den Oord et al. (reference: [Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)). It is designed to generate images (or other data types) iteratively from an input vector where the probability distribution of prior elements dictates the probability distribution of later elements. In the following example, images are generated in this fashion, pixel-by-pixel, via a masked convolution kernel that only looks at data from previously generated pixels (origin at the top left) to generate later pixels. During inference, the output of the network is used as a probability ditribution from which new pixel values are sampled to generate a new image (here, with MNIST, the pixels values are either black or white). ```python import numpy as np import keras from keras import layers from keras import ops from tqdm import tqdm ``` --- ## Getting the Data ```python # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) n_residual_blocks = 5 # The data, split between train and test sets (x, _), (y, _) = keras.datasets.mnist.load_data() # Concatenate all the images together data = np.concatenate((x, y), axis=0) # Round all pixel values less than 33% of the max 256 value to 0 # anything above this value gets rounded up to 1 so that all values are either # 0 or 1 data = np.where(data < (0.33 * 256), 0, 1) data = data.astype(np.float32) ``` --- ## Create two classes for the requisite Layers for the model ```python # The first layer is the PixelCNN layer. This layer simply # builds on the 2D convolutional layer, but includes masking. class PixelConvLayer(layers.Layer): def __init__(self, mask_type, **kwargs): super().__init__() self.mask_type = mask_type self.conv = layers.Conv2D(**kwargs) def build(self, input_shape): # Build the conv2d layer to initialize kernel variables self.conv.build(input_shape) # Use the initialized kernel to create the mask kernel_shape = ops.shape(self.conv.kernel) self.mask = np.zeros(shape=kernel_shape) self.mask[: kernel_shape[0] // 2, ...] = 1.0 self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0 if self.mask_type == "B": self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0 def call(self, inputs): self.conv.kernel.assign(self.conv.kernel * self.mask) return self.conv(inputs) # Next, we build our residual block layer. # This is just a normal residual block, but based on the PixelConvLayer. 
class ResidualBlock(keras.layers.Layer): def __init__(self, filters, **kwargs): super().__init__(**kwargs) self.conv1 = keras.layers.Conv2D( filters=filters, kernel_size=1, activation="relu" ) self.pixel_conv = PixelConvLayer( mask_type="B", filters=filters // 2, kernel_size=3, activation="relu", padding="same", ) self.conv2 = keras.layers.Conv2D( filters=filters, kernel_size=1, activation="relu" ) def call(self, inputs): x = self.conv1(inputs) x = self.pixel_conv(x) x = self.conv2(x) return keras.layers.add([inputs, x]) ``` --- ## Build the model based on the original paper ```python inputs = keras.Input(shape=input_shape, batch_size=128) x = PixelConvLayer( mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same" )(inputs) for _ in range(n_residual_blocks): x = ResidualBlock(filters=128)(x) for _ in range(2): x = PixelConvLayer( mask_type="B", filters=128, kernel_size=1, strides=1, activation="relu", padding="valid", )(x) out = keras.layers.Conv2D( filters=1, kernel_size=1, strides=1, activation="sigmoid", padding="valid" )(x) pixel_cnn = keras.Model(inputs, out) adam = keras.optimizers.Adam(learning_rate=0.0005) pixel_cnn.compile(optimizer=adam, loss="binary_crossentropy") pixel_cnn.summary() pixel_cnn.fit( x=data, y=data, batch_size=128, epochs=50, validation_split=0.1, verbose=2 ) ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_1"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ pixel_conv_layer │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">6,400</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">PixelConvLayer</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ residual_block (<span style="color: #0087ff; text-decoration-color: #0087ff">ResidualBlock</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,624</span> │ 
├─────────────────────────────────┼───────────────────────────┼────────────┤ │ residual_block_1 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,624</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">ResidualBlock</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ residual_block_2 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,624</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">ResidualBlock</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ residual_block_3 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,624</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">ResidualBlock</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ residual_block_4 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">98,624</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">ResidualBlock</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ pixel_conv_layer_6 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,512</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">PixelConvLayer</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ pixel_conv_layer_7 │ (<span style="color: #00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,512</span> │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">PixelConvLayer</span>) │ │ │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_18 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: 
#00af00; text-decoration-color: #00af00">128</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">129</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">532,673</span> (2.03 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">532,673</span> (2.03 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> <div class="k-default-codeblock"> ``` Epoch 1/50 493/493 - 26s - 53ms/step - loss: 0.1137 - val_loss: 0.0933 Epoch 2/50 493/493 - 14s - 29ms/step - loss: 0.0915 - val_loss: 0.0901 Epoch 3/50 493/493 - 14s - 29ms/step - loss: 0.0893 - val_loss: 0.0888 Epoch 4/50 493/493 - 14s - 29ms/step - loss: 0.0882 - val_loss: 0.0880 Epoch 5/50 493/493 - 14s - 29ms/step - loss: 0.0874 - val_loss: 0.0870 Epoch 6/50 493/493 - 14s - 29ms/step - loss: 0.0867 - val_loss: 0.0867 Epoch 7/50 493/493 - 14s - 29ms/step - loss: 0.0863 - val_loss: 0.0867 Epoch 8/50 493/493 - 14s - 29ms/step - loss: 0.0859 - val_loss: 0.0860 Epoch 9/50 493/493 - 14s - 29ms/step - loss: 0.0855 - val_loss: 0.0856 Epoch 10/50 493/493 - 14s - 29ms/step - loss: 0.0853 - val_loss: 0.0861 Epoch 11/50 493/493 - 14s - 29ms/step - loss: 0.0850 - val_loss: 0.0860 Epoch 12/50 493/493 - 14s - 29ms/step - loss: 0.0847 - val_loss: 0.0873 Epoch 13/50 493/493 - 14s - 29ms/step - loss: 0.0846 - val_loss: 0.0852 Epoch 14/50 493/493 - 14s - 29ms/step - loss: 0.0844 - val_loss: 0.0846 Epoch 15/50 493/493 - 14s - 29ms/step - loss: 0.0842 - val_loss: 0.0848 Epoch 16/50 493/493 - 14s - 29ms/step - loss: 0.0840 - val_loss: 0.0843 Epoch 17/50 493/493 - 14s - 29ms/step - loss: 0.0838 - val_loss: 0.0847 Epoch 18/50 493/493 - 14s - 29ms/step - loss: 0.0837 - val_loss: 0.0841 Epoch 19/50 493/493 - 14s - 29ms/step - loss: 0.0835 - val_loss: 0.0842 Epoch 20/50 493/493 - 14s - 29ms/step - loss: 0.0834 - val_loss: 0.0844 Epoch 21/50 493/493 - 14s - 29ms/step - loss: 0.0834 - val_loss: 0.0843 Epoch 22/50 493/493 - 14s - 29ms/step - loss: 0.0832 - val_loss: 0.0838 Epoch 23/50 493/493 - 14s - 29ms/step - loss: 0.0831 - val_loss: 0.0840 Epoch 24/50 493/493 - 14s - 29ms/step - loss: 0.0830 - val_loss: 0.0841 Epoch 25/50 493/493 - 14s - 29ms/step - loss: 0.0829 - val_loss: 0.0837 Epoch 26/50 493/493 - 14s - 29ms/step - loss: 0.0828 - val_loss: 0.0837 Epoch 27/50 493/493 - 14s - 29ms/step - loss: 0.0827 - val_loss: 0.0836 Epoch 28/50 493/493 - 14s - 29ms/step - loss: 0.0827 - val_loss: 0.0836 Epoch 29/50 493/493 - 14s - 29ms/step - loss: 0.0825 - val_loss: 0.0838 Epoch 30/50 493/493 - 14s - 29ms/step - loss: 0.0825 - val_loss: 0.0834 Epoch 31/50 493/493 - 14s - 29ms/step - loss: 0.0824 - val_loss: 0.0832 Epoch 32/50 
493/493 - 14s - 29ms/step - loss: 0.0823 - val_loss: 0.0833 Epoch 33/50 493/493 - 14s - 29ms/step - loss: 0.0822 - val_loss: 0.0836 Epoch 34/50 493/493 - 14s - 29ms/step - loss: 0.0822 - val_loss: 0.0832 Epoch 35/50 493/493 - 14s - 29ms/step - loss: 0.0821 - val_loss: 0.0832 Epoch 36/50 493/493 - 14s - 29ms/step - loss: 0.0820 - val_loss: 0.0835 Epoch 37/50 493/493 - 14s - 29ms/step - loss: 0.0820 - val_loss: 0.0834 Epoch 38/50 493/493 - 14s - 29ms/step - loss: 0.0819 - val_loss: 0.0833 Epoch 39/50 493/493 - 14s - 29ms/step - loss: 0.0818 - val_loss: 0.0832 Epoch 40/50 493/493 - 14s - 29ms/step - loss: 0.0818 - val_loss: 0.0834 Epoch 41/50 493/493 - 14s - 29ms/step - loss: 0.0817 - val_loss: 0.0832 Epoch 42/50 493/493 - 14s - 29ms/step - loss: 0.0816 - val_loss: 0.0834 Epoch 43/50 493/493 - 14s - 29ms/step - loss: 0.0816 - val_loss: 0.0839 Epoch 44/50 493/493 - 14s - 29ms/step - loss: 0.0815 - val_loss: 0.0831 Epoch 45/50 493/493 - 14s - 29ms/step - loss: 0.0815 - val_loss: 0.0832 Epoch 46/50 493/493 - 14s - 29ms/step - loss: 0.0814 - val_loss: 0.0835 Epoch 47/50 493/493 - 14s - 29ms/step - loss: 0.0814 - val_loss: 0.0830 Epoch 48/50 493/493 - 14s - 29ms/step - loss: 0.0813 - val_loss: 0.0832 Epoch 49/50 493/493 - 14s - 29ms/step - loss: 0.0812 - val_loss: 0.0833 Epoch 50/50 493/493 - 14s - 29ms/step - loss: 0.0812 - val_loss: 0.0831 <keras.src.callbacks.history.History at 0x7f45e6d78760> ``` </div> --- ## Demonstration The PixelCNN cannot generate the full image at once. Instead, it must generate each pixel in order, append the last generated pixel to the current image, and feed the image back into the model to repeat the process. ```python from IPython.display import Image, display # Create an empty array of pixels. batch = 4 pixels = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:]) batch, rows, cols, channels = pixels.shape # Iterate over the pixels because generation has to be done sequentially pixel by pixel. for row in tqdm(range(rows)): for col in range(cols): for channel in range(channels): # Feed the whole array and retrieving the pixel value probabilities for the next # pixel. probs = pixel_cnn.predict(pixels)[:, row, col, channel] # Use the probabilities to pick pixel values and append the values to the image # frame. pixels[:, row, col, channel] = ops.ceil( probs - keras.random.uniform(probs.shape) ) def deprocess_image(x): # Stack the single channeled black and white image to rgb values. x = np.stack((x, x, x), 2) # Undo preprocessing x *= 255.0 # Convert to uint8 and clip to the valid range [0, 255] x = np.clip(x, 0, 255).astype("uint8") return x # Iterate over the generated images and plot them with matplotlib. for i, pic in enumerate(pixels): keras.utils.save_img( "generated_image_{}.png".format(i), deprocess_image(np.squeeze(pic, -1)) ) display(Image("generated_image_0.png")) display(Image("generated_image_1.png")) display(Image("generated_image_2.png")) display(Image("generated_image_3.png")) ``` <div class="k-default-codeblock"> ``` 100%|███████████████████████████████████████████████████████████████████████████| 28/28 [00:06<00:00, 4.51it/s] ``` </div> ![png](/img/examples/generative/pixelcnn/pixelcnn_10_57.png) ![png](/img/examples/generative/pixelcnn/pixelcnn_10_58.png) ![png](/img/examples/generative/pixelcnn/pixelcnn_10_59.png) ![png](/img/examples/generative/pixelcnn/pixelcnn_10_60.png)
keras-io/examples/generative/md/pixelcnn.md/0
{ "file_path": "keras-io/examples/generative/md/pixelcnn.md", "repo_id": "keras-io", "token_count": 7804 }
95
""" Title: Face image generation with StyleGAN Author: [Soon-Yau Cheong](https://www.linkedin.com/in/soonyau/) Date created: 2021/07/01 Last modified: 2021/07/01 Description: Implementation of StyleGAN for image generation. Accelerator: GPU """ """ ## Introduction The key idea of StyleGAN is to progressively increase the resolution of the generated images and to incorporate style features in the generative process.This [StyleGAN](https://arxiv.org/abs/1812.04948) implementation is based on the book [Hands-on Image Generation with TensorFlow](https://www.amazon.com/dp/1838826785). The code from the book's [GitHub repository](https://github.com/PacktPublishing/Hands-On-Image-Generation-with-TensorFlow-2.0/tree/master/Chapter07) was refactored to leverage a custom `train_step()` to enable faster training time via compilation and distribution. """ """ ## Setup """ """ ### Install latest TFA """ """shell pip install tensorflow_addons """ import os import numpy as np import matplotlib.pyplot as plt from functools import partial import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.models import Sequential from tensorflow_addons.layers import InstanceNormalization import gdown from zipfile import ZipFile """ ## Prepare the dataset In this example, we will train using the CelebA from the project GDrive. """ def log2(x): return int(np.log2(x)) # we use different batch size for different resolution, so larger image size # could fit into GPU memory. The keys is image resolution in log2 batch_sizes = {2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 8, 8: 4, 9: 2, 10: 1} # We adjust the train step accordingly train_step_ratio = {k: batch_sizes[2] / v for k, v in batch_sizes.items()} os.makedirs("celeba_gan") url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684" output = "celeba_gan/data.zip" gdown.download(url, output, quiet=True) with ZipFile("celeba_gan/data.zip", "r") as zipobj: zipobj.extractall("celeba_gan") # Create a dataset from our folder, and rescale the images to the [0-1] range: ds_train = keras.utils.image_dataset_from_directory( "celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32 ) def resize_image(res, image): # only downsampling, so use nearest neighbor that is faster to run image = tf.image.resize( image, (res, res), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR ) image = tf.cast(image, tf.float32) / 127.5 - 1.0 return image def create_dataloader(res): batch_size = batch_sizes[log2(res)] # NOTE: we unbatch the dataset so we can `batch()` it again with the `drop_remainder=True` option # since the model only supports a single batch size dl = ds_train.map( partial(resize_image, res), num_parallel_calls=tf.data.AUTOTUNE ).unbatch() dl = dl.shuffle(200).batch(batch_size, drop_remainder=True).prefetch(1).repeat() return dl """ ## Utility function to display images after each epoch """ def plot_images(images, log2_res, fname=""): scales = {2: 0.5, 3: 1, 4: 2, 5: 3, 6: 4, 7: 5, 8: 6, 9: 7, 10: 8} scale = scales[log2_res] grid_col = min(images.shape[0], int(32 // scale)) grid_row = 1 f, axarr = plt.subplots( grid_row, grid_col, figsize=(grid_col * scale, grid_row * scale) ) for row in range(grid_row): ax = axarr if grid_row == 1 else axarr[row] for col in range(grid_col): ax[col].imshow(images[row * grid_col + col]) ax[col].axis("off") plt.show() if fname: f.savefig(fname) """ ## Custom Layers The following are building blocks that will be used to construct the generators and discriminators of the StyleGAN model. 
""" def fade_in(alpha, a, b): return alpha * a + (1.0 - alpha) * b def wasserstein_loss(y_true, y_pred): return -tf.reduce_mean(y_true * y_pred) def pixel_norm(x, epsilon=1e-8): return x / tf.math.sqrt(tf.reduce_mean(x**2, axis=-1, keepdims=True) + epsilon) def minibatch_std(input_tensor, epsilon=1e-8): n, h, w, c = tf.shape(input_tensor) group_size = tf.minimum(4, n) x = tf.reshape(input_tensor, [group_size, -1, h, w, c]) group_mean, group_var = tf.nn.moments(x, axes=(0), keepdims=False) group_std = tf.sqrt(group_var + epsilon) avg_std = tf.reduce_mean(group_std, axis=[1, 2, 3], keepdims=True) x = tf.tile(avg_std, [group_size, h, w, 1]) return tf.concat([input_tensor, x], axis=-1) class EqualizedConv(layers.Layer): def __init__(self, out_channels, kernel=3, gain=2, **kwargs): super().__init__(**kwargs) self.kernel = kernel self.out_channels = out_channels self.gain = gain self.pad = kernel != 1 def build(self, input_shape): self.in_channels = input_shape[-1] initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0) self.w = self.add_weight( shape=[self.kernel, self.kernel, self.in_channels, self.out_channels], initializer=initializer, trainable=True, name="kernel", ) self.b = self.add_weight( shape=(self.out_channels,), initializer="zeros", trainable=True, name="bias" ) fan_in = self.kernel * self.kernel * self.in_channels self.scale = tf.sqrt(self.gain / fan_in) def call(self, inputs): if self.pad: x = tf.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]], mode="REFLECT") else: x = inputs output = ( tf.nn.conv2d(x, self.scale * self.w, strides=1, padding="VALID") + self.b ) return output class EqualizedDense(layers.Layer): def __init__(self, units, gain=2, learning_rate_multiplier=1, **kwargs): super().__init__(**kwargs) self.units = units self.gain = gain self.learning_rate_multiplier = learning_rate_multiplier def build(self, input_shape): self.in_channels = input_shape[-1] initializer = keras.initializers.RandomNormal( mean=0.0, stddev=1.0 / self.learning_rate_multiplier ) self.w = self.add_weight( shape=[self.in_channels, self.units], initializer=initializer, trainable=True, name="kernel", ) self.b = self.add_weight( shape=(self.units,), initializer="zeros", trainable=True, name="bias" ) fan_in = self.in_channels self.scale = tf.sqrt(self.gain / fan_in) def call(self, inputs): output = tf.add(tf.matmul(inputs, self.scale * self.w), self.b) return output * self.learning_rate_multiplier class AddNoise(layers.Layer): def build(self, input_shape): n, h, w, c = input_shape[0] initializer = keras.initializers.RandomNormal(mean=0.0, stddev=1.0) self.b = self.add_weight( shape=[1, 1, 1, c], initializer=initializer, trainable=True, name="kernel" ) def call(self, inputs): x, noise = inputs output = x + self.b * noise return output class AdaIN(layers.Layer): def __init__(self, gain=1, **kwargs): super().__init__(**kwargs) self.gain = gain def build(self, input_shapes): x_shape = input_shapes[0] w_shape = input_shapes[1] self.w_channels = w_shape[-1] self.x_channels = x_shape[-1] self.dense_1 = EqualizedDense(self.x_channels, gain=1) self.dense_2 = EqualizedDense(self.x_channels, gain=1) def call(self, inputs): x, w = inputs ys = tf.reshape(self.dense_1(w), (-1, 1, 1, self.x_channels)) yb = tf.reshape(self.dense_2(w), (-1, 1, 1, self.x_channels)) return ys * x + yb """ Next we build the following: - A model mapping to map the random noise into style code - The generator - The discriminator For the generator, we build generator blocks at multiple resolutions, e.g. 
4x4, 8x8, ...up to 1024x1024. We only use 4x4 in the beginning and we use progressively larger-resolution blocks as the training proceeds. Same for the discriminator. """ def Mapping(num_stages, input_shape=512): z = layers.Input(shape=(input_shape)) w = pixel_norm(z) for i in range(8): w = EqualizedDense(512, learning_rate_multiplier=0.01)(w) w = layers.LeakyReLU(0.2)(w) w = tf.tile(tf.expand_dims(w, 1), (1, num_stages, 1)) return keras.Model(z, w, name="mapping") class Generator: def __init__(self, start_res_log2, target_res_log2): self.start_res_log2 = start_res_log2 self.target_res_log2 = target_res_log2 self.num_stages = target_res_log2 - start_res_log2 + 1 # list of generator blocks at increasing resolution self.g_blocks = [] # list of layers to convert g_block activation to RGB self.to_rgb = [] # list of noise input of different resolutions into g_blocks self.noise_inputs = [] # filter size to use at each stage, keys are log2(resolution) self.filter_nums = { 0: 512, 1: 512, 2: 512, # 4x4 3: 512, # 8x8 4: 512, # 16x16 5: 512, # 32x32 6: 256, # 64x64 7: 128, # 128x128 8: 64, # 256x256 9: 32, # 512x512 10: 16, } # 1024x1024 start_res = 2**start_res_log2 self.input_shape = (start_res, start_res, self.filter_nums[start_res_log2]) self.g_input = layers.Input(self.input_shape, name="generator_input") for i in range(start_res_log2, target_res_log2 + 1): filter_num = self.filter_nums[i] res = 2**i self.noise_inputs.append( layers.Input(shape=(res, res, 1), name=f"noise_{res}x{res}") ) to_rgb = Sequential( [ layers.InputLayer(input_shape=(res, res, filter_num)), EqualizedConv(3, 1, gain=1), ], name=f"to_rgb_{res}x{res}", ) self.to_rgb.append(to_rgb) is_base = i == self.start_res_log2 if is_base: input_shape = (res, res, self.filter_nums[i - 1]) else: input_shape = (2 ** (i - 1), 2 ** (i - 1), self.filter_nums[i - 1]) g_block = self.build_block( filter_num, res=res, input_shape=input_shape, is_base=is_base ) self.g_blocks.append(g_block) def build_block(self, filter_num, res, input_shape, is_base): input_tensor = layers.Input(shape=input_shape, name=f"g_{res}") noise = layers.Input(shape=(res, res, 1), name=f"noise_{res}") w = layers.Input(shape=512) x = input_tensor if not is_base: x = layers.UpSampling2D((2, 2))(x) x = EqualizedConv(filter_num, 3)(x) x = AddNoise()([x, noise]) x = layers.LeakyReLU(0.2)(x) x = InstanceNormalization()(x) x = AdaIN()([x, w]) x = EqualizedConv(filter_num, 3)(x) x = AddNoise()([x, noise]) x = layers.LeakyReLU(0.2)(x) x = InstanceNormalization()(x) x = AdaIN()([x, w]) return keras.Model([input_tensor, w, noise], x, name=f"genblock_{res}x{res}") def grow(self, res_log2): res = 2**res_log2 num_stages = res_log2 - self.start_res_log2 + 1 w = layers.Input(shape=(self.num_stages, 512), name="w") alpha = layers.Input(shape=(1), name="g_alpha") x = self.g_blocks[0]([self.g_input, w[:, 0], self.noise_inputs[0]]) if num_stages == 1: rgb = self.to_rgb[0](x) else: for i in range(1, num_stages - 1): x = self.g_blocks[i]([x, w[:, i], self.noise_inputs[i]]) old_rgb = self.to_rgb[num_stages - 2](x) old_rgb = layers.UpSampling2D((2, 2))(old_rgb) i = num_stages - 1 x = self.g_blocks[i]([x, w[:, i], self.noise_inputs[i]]) new_rgb = self.to_rgb[i](x) rgb = fade_in(alpha[0], new_rgb, old_rgb) return keras.Model( [self.g_input, w, self.noise_inputs, alpha], rgb, name=f"generator_{res}_x_{res}", ) class Discriminator: def __init__(self, start_res_log2, target_res_log2): self.start_res_log2 = start_res_log2 self.target_res_log2 = target_res_log2 self.num_stages = target_res_log2 - 
start_res_log2 + 1 # filter size to use at each stage, keys are log2(resolution) self.filter_nums = { 0: 512, 1: 512, 2: 512, # 4x4 3: 512, # 8x8 4: 512, # 16x16 5: 512, # 32x32 6: 256, # 64x64 7: 128, # 128x128 8: 64, # 256x256 9: 32, # 512x512 10: 16, } # 1024x1024 # list of discriminator blocks at increasing resolution self.d_blocks = [] # list of layers to convert RGB into activation for d_blocks inputs self.from_rgb = [] for res_log2 in range(self.start_res_log2, self.target_res_log2 + 1): res = 2**res_log2 filter_num = self.filter_nums[res_log2] from_rgb = Sequential( [ layers.InputLayer( input_shape=(res, res, 3), name=f"from_rgb_input_{res}" ), EqualizedConv(filter_num, 1), layers.LeakyReLU(0.2), ], name=f"from_rgb_{res}", ) self.from_rgb.append(from_rgb) input_shape = (res, res, filter_num) if len(self.d_blocks) == 0: d_block = self.build_base(filter_num, res) else: d_block = self.build_block( filter_num, self.filter_nums[res_log2 - 1], res ) self.d_blocks.append(d_block) def build_base(self, filter_num, res): input_tensor = layers.Input(shape=(res, res, filter_num), name=f"d_{res}") x = minibatch_std(input_tensor) x = EqualizedConv(filter_num, 3)(x) x = layers.LeakyReLU(0.2)(x) x = layers.Flatten()(x) x = EqualizedDense(filter_num)(x) x = layers.LeakyReLU(0.2)(x) x = EqualizedDense(1)(x) return keras.Model(input_tensor, x, name=f"d_{res}") def build_block(self, filter_num_1, filter_num_2, res): input_tensor = layers.Input(shape=(res, res, filter_num_1), name=f"d_{res}") x = EqualizedConv(filter_num_1, 3)(input_tensor) x = layers.LeakyReLU(0.2)(x) x = EqualizedConv(filter_num_2)(x) x = layers.LeakyReLU(0.2)(x) x = layers.AveragePooling2D((2, 2))(x) return keras.Model(input_tensor, x, name=f"d_{res}") def grow(self, res_log2): res = 2**res_log2 idx = res_log2 - self.start_res_log2 alpha = layers.Input(shape=(1), name="d_alpha") input_image = layers.Input(shape=(res, res, 3), name="input_image") x = self.from_rgb[idx](input_image) x = self.d_blocks[idx](x) if idx > 0: idx -= 1 downsized_image = layers.AveragePooling2D((2, 2))(input_image) y = self.from_rgb[idx](downsized_image) x = fade_in(alpha[0], x, y) for i in range(idx, -1, -1): x = self.d_blocks[i](x) return keras.Model([input_image, alpha], x, name=f"discriminator_{res}_x_{res}") """ ## Build StyleGAN with custom train step """ class StyleGAN(tf.keras.Model): def __init__(self, z_dim=512, target_res=64, start_res=4): super().__init__() self.z_dim = z_dim self.target_res_log2 = log2(target_res) self.start_res_log2 = log2(start_res) self.current_res_log2 = self.target_res_log2 self.num_stages = self.target_res_log2 - self.start_res_log2 + 1 self.alpha = tf.Variable(1.0, dtype=tf.float32, trainable=False, name="alpha") self.mapping = Mapping(num_stages=self.num_stages) self.d_builder = Discriminator(self.start_res_log2, self.target_res_log2) self.g_builder = Generator(self.start_res_log2, self.target_res_log2) self.g_input_shape = self.g_builder.input_shape self.phase = None self.train_step_counter = tf.Variable(0, dtype=tf.int32, trainable=False) self.loss_weights = {"gradient_penalty": 10, "drift": 0.001} def grow_model(self, res): tf.keras.backend.clear_session() res_log2 = log2(res) self.generator = self.g_builder.grow(res_log2) self.discriminator = self.d_builder.grow(res_log2) self.current_res_log2 = res_log2 print(f"\nModel resolution:{res}x{res}") def compile( self, steps_per_epoch, phase, res, d_optimizer, g_optimizer, *args, **kwargs ): self.loss_weights = kwargs.pop("loss_weights", self.loss_weights) 
self.steps_per_epoch = steps_per_epoch if res != 2**self.current_res_log2: self.grow_model(res) self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.train_step_counter.assign(0) self.phase = phase self.d_loss_metric = keras.metrics.Mean(name="d_loss") self.g_loss_metric = keras.metrics.Mean(name="g_loss") super().compile(*args, **kwargs) @property def metrics(self): return [self.d_loss_metric, self.g_loss_metric] def generate_noise(self, batch_size): noise = [ tf.random.normal((batch_size, 2**res, 2**res, 1)) for res in range(self.start_res_log2, self.target_res_log2 + 1) ] return noise def gradient_loss(self, grad): loss = tf.square(grad) loss = tf.reduce_sum(loss, axis=tf.range(1, tf.size(tf.shape(loss)))) loss = tf.sqrt(loss) loss = tf.reduce_mean(tf.square(loss - 1)) return loss def train_step(self, real_images): self.train_step_counter.assign_add(1) if self.phase == "TRANSITION": self.alpha.assign( tf.cast(self.train_step_counter / self.steps_per_epoch, tf.float32) ) elif self.phase == "STABLE": self.alpha.assign(1.0) else: raise NotImplementedError alpha = tf.expand_dims(self.alpha, 0) batch_size = tf.shape(real_images)[0] real_labels = tf.ones(batch_size) fake_labels = -tf.ones(batch_size) z = tf.random.normal((batch_size, self.z_dim)) const_input = tf.ones(tuple([batch_size] + list(self.g_input_shape))) noise = self.generate_noise(batch_size) # generator with tf.GradientTape() as g_tape: w = self.mapping(z) fake_images = self.generator([const_input, w, noise, alpha]) pred_fake = self.discriminator([fake_images, alpha]) g_loss = wasserstein_loss(real_labels, pred_fake) trainable_weights = ( self.mapping.trainable_weights + self.generator.trainable_weights ) gradients = g_tape.gradient(g_loss, trainable_weights) self.g_optimizer.apply_gradients(zip(gradients, trainable_weights)) # discriminator with tf.GradientTape() as gradient_tape, tf.GradientTape() as total_tape: # forward pass pred_fake = self.discriminator([fake_images, alpha]) pred_real = self.discriminator([real_images, alpha]) epsilon = tf.random.uniform((batch_size, 1, 1, 1)) interpolates = epsilon * real_images + (1 - epsilon) * fake_images gradient_tape.watch(interpolates) pred_fake_grad = self.discriminator([interpolates, alpha]) # calculate losses loss_fake = wasserstein_loss(fake_labels, pred_fake) loss_real = wasserstein_loss(real_labels, pred_real) loss_fake_grad = wasserstein_loss(fake_labels, pred_fake_grad) # gradient penalty gradients_fake = gradient_tape.gradient(loss_fake_grad, [interpolates]) gradient_penalty = self.loss_weights[ "gradient_penalty" ] * self.gradient_loss(gradients_fake) # drift loss all_pred = tf.concat([pred_fake, pred_real], axis=0) drift_loss = self.loss_weights["drift"] * tf.reduce_mean(all_pred**2) d_loss = loss_fake + loss_real + gradient_penalty + drift_loss gradients = total_tape.gradient( d_loss, self.discriminator.trainable_weights ) self.d_optimizer.apply_gradients( zip(gradients, self.discriminator.trainable_weights) ) # Update metrics self.d_loss_metric.update_state(d_loss) self.g_loss_metric.update_state(g_loss) return { "d_loss": self.d_loss_metric.result(), "g_loss": self.g_loss_metric.result(), } def call(self, inputs: dict()): style_code = inputs.get("style_code", None) z = inputs.get("z", None) noise = inputs.get("noise", None) batch_size = inputs.get("batch_size", 1) alpha = inputs.get("alpha", 1.0) alpha = tf.expand_dims(alpha, 0) if style_code is None: if z is None: z = tf.random.normal((batch_size, self.z_dim)) style_code = self.mapping(z) if noise is None: 
noise = self.generate_noise(batch_size) # self.alpha.assign(alpha) const_input = tf.ones(tuple([batch_size] + list(self.g_input_shape))) images = self.generator([const_input, style_code, noise, alpha]) images = np.clip((images * 0.5 + 0.5) * 255, 0, 255).astype(np.uint8) return images """ ## Training We first build the StyleGAN at smallest resolution, such as 4x4 or 8x8. Then we progressively grow the model to higher resolution by appending new generator and discriminator blocks. """ START_RES = 4 TARGET_RES = 128 style_gan = StyleGAN(start_res=START_RES, target_res=TARGET_RES) """ The training for each new resolution happens in two phases - "transition" and "stable". In the transition phase, the features from the previous resolution are mixed with the current resolution. This allows for a smoother transition when scaling up. We use each epoch in `model.fit()` as a phase. """ def train( start_res=START_RES, target_res=TARGET_RES, steps_per_epoch=5000, display_images=True, ): opt_cfg = {"learning_rate": 1e-3, "beta_1": 0.0, "beta_2": 0.99, "epsilon": 1e-8} val_batch_size = 16 val_z = tf.random.normal((val_batch_size, style_gan.z_dim)) val_noise = style_gan.generate_noise(val_batch_size) start_res_log2 = int(np.log2(start_res)) target_res_log2 = int(np.log2(target_res)) for res_log2 in range(start_res_log2, target_res_log2 + 1): res = 2**res_log2 for phase in ["TRANSITION", "STABLE"]: if res == start_res and phase == "TRANSITION": continue train_dl = create_dataloader(res) steps = int(train_step_ratio[res_log2] * steps_per_epoch) style_gan.compile( d_optimizer=tf.keras.optimizers.legacy.Adam(**opt_cfg), g_optimizer=tf.keras.optimizers.legacy.Adam(**opt_cfg), loss_weights={"gradient_penalty": 10, "drift": 0.001}, steps_per_epoch=steps, res=res, phase=phase, run_eagerly=False, ) prefix = f"res_{res}x{res}_{style_gan.phase}" ckpt_cb = keras.callbacks.ModelCheckpoint( f"checkpoints/stylegan_{res}x{res}.ckpt", save_weights_only=True, verbose=0, ) print(phase) style_gan.fit( train_dl, epochs=1, steps_per_epoch=steps, callbacks=[ckpt_cb] ) if display_images: images = style_gan({"z": val_z, "noise": val_noise, "alpha": 1.0}) plot_images(images, res_log2) """ StyleGAN can take a long time to train, in the code below, a small `steps_per_epoch` value of 1 is used to sanity-check the code is working alright. In practice, a larger `steps_per_epoch` value (over 10000) is required to get decent results. """ train(start_res=4, target_res=16, steps_per_epoch=1, display_images=False) """ ## Results We can now run some inference using pre-trained 64x64 checkpoints. In general, the image fidelity increases with the resolution. You can try to train this StyleGAN to resolutions above 128x128 with the CelebA HQ dataset. """ url = "https://github.com/soon-yau/stylegan_keras/releases/download/keras_example_v1.0/stylegan_128x128.ckpt.zip" weights_path = keras.utils.get_file( "stylegan_128x128.ckpt.zip", url, extract=True, cache_dir=os.path.abspath("."), cache_subdir="pretrained", ) style_gan.grow_model(128) style_gan.load_weights(os.path.join("pretrained/stylegan_128x128.ckpt")) tf.random.set_seed(196) batch_size = 2 z = tf.random.normal((batch_size, style_gan.z_dim)) w = style_gan.mapping(z) noise = style_gan.generate_noise(batch_size=batch_size) images = style_gan({"style_code": w, "noise": noise, "alpha": 1.0}) plot_images(images, 5) """ ## Style Mixing We can also mix styles from two images to create a new image. 
""" alpha = 0.4 w_mix = np.expand_dims(alpha * w[0] + (1 - alpha) * w[1], 0) noise_a = [np.expand_dims(n[0], 0) for n in noise] mix_images = style_gan({"style_code": w_mix, "noise": noise_a}) image_row = np.hstack([images[0], images[1], mix_images[0]]) plt.figure(figsize=(9, 3)) plt.imshow(image_row) plt.axis("off")
keras-io/examples/generative/stylegan.py/0
{ "file_path": "keras-io/examples/generative/stylegan.py", "repo_id": "keras-io", "token_count": 12300 }
96
""" Title: Probabilistic Bayesian Neural Networks Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) Date created: 2021/01/15 Last modified: 2021/01/15 Description: Building probabilistic Bayesian neural network models with TensorFlow Probability. Accelerator: GPU """ """ ## Introduction Taking a probabilistic approach to deep learning allows to account for *uncertainty*, so that models can assign less levels of confidence to incorrect predictions. Sources of uncertainty can be found in the data, due to measurement error or noise in the labels, or the model, due to insufficient data availability for the model to learn effectively. This example demonstrates how to build basic probabilistic Bayesian neural networks to account for these two types of uncertainty. We use [TensorFlow Probability](https://www.tensorflow.org/probability) library, which is compatible with Keras API. This example requires TensorFlow 2.3 or higher. You can install Tensorflow Probability using the following command: ```python pip install tensorflow-probability ``` """ """ ## The dataset We use the [Wine Quality](https://archive.ics.uci.edu/ml/datasets/wine+quality) dataset, which is available in the [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/wine_quality). We use the red wine subset, which contains 4,898 examples. The dataset has 11numerical physicochemical features of the wine, and the task is to predict the wine quality, which is a score between 0 and 10. In this example, we treat this as a regression task. You can install TensorFlow Datasets using the following command: ```python pip install tensorflow-datasets ``` """ """ ## Setup """ import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import tensorflow_datasets as tfds import tensorflow_probability as tfp """ ## Create training and evaluation datasets Here, we load the `wine_quality` dataset using `tfds.load()`, and we convert the target feature to float. Then, we shuffle the dataset and split it into training and test sets. We take the first `train_size` examples as the train split, and the rest as the test split. """ def get_train_and_test_splits(train_size, batch_size=1): # We prefetch with a buffer the same size as the dataset because th dataset # is very small and fits into memory. dataset = ( tfds.load(name="wine_quality", as_supervised=True, split="train") .map(lambda x, y: (x, tf.cast(y, tf.float32))) .prefetch(buffer_size=dataset_size) .cache() ) # We shuffle with a buffer the same size as the dataset. 
train_dataset = ( dataset.take(train_size).shuffle(buffer_size=train_size).batch(batch_size) ) test_dataset = dataset.skip(train_size).batch(batch_size) return train_dataset, test_dataset """ ## Compile, train, and evaluate the model """ hidden_units = [8, 8] learning_rate = 0.001 def run_experiment(model, loss, train_dataset, test_dataset): model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=learning_rate), loss=loss, metrics=[keras.metrics.RootMeanSquaredError()], ) print("Start training the model...") model.fit(train_dataset, epochs=num_epochs, validation_data=test_dataset) print("Model training finished.") _, rmse = model.evaluate(train_dataset, verbose=0) print(f"Train RMSE: {round(rmse, 3)}") print("Evaluating model performance...") _, rmse = model.evaluate(test_dataset, verbose=0) print(f"Test RMSE: {round(rmse, 3)}") """ ## Create model inputs """ FEATURE_NAMES = [ "fixed acidity", "volatile acidity", "citric acid", "residual sugar", "chlorides", "free sulfur dioxide", "total sulfur dioxide", "density", "pH", "sulphates", "alcohol", ] def create_model_inputs(): inputs = {} for feature_name in FEATURE_NAMES: inputs[feature_name] = layers.Input( name=feature_name, shape=(1,), dtype=tf.float32 ) return inputs """ ## Experiment 1: standard neural network We create a standard deterministic neural network model as a baseline. """ def create_baseline_model(): inputs = create_model_inputs() input_values = [value for _, value in sorted(inputs.items())] features = keras.layers.concatenate(input_values) features = layers.BatchNormalization()(features) # Create hidden layers with deterministic weights using the Dense layer. for units in hidden_units: features = layers.Dense(units, activation="sigmoid")(features) # The output is deterministic: a single point estimate. outputs = layers.Dense(units=1)(features) model = keras.Model(inputs=inputs, outputs=outputs) return model """ Let's split the wine dataset into training and test sets, with 85% and 15% of the examples, respectively. """ dataset_size = 4898 batch_size = 256 train_size = int(dataset_size * 0.85) train_dataset, test_dataset = get_train_and_test_splits(train_size, batch_size) """ Now let's train the baseline model. We use the `MeanSquaredError` as the loss function. """ num_epochs = 100 mse_loss = keras.losses.MeanSquaredError() baseline_model = create_baseline_model() run_experiment(baseline_model, mse_loss, train_dataset, test_dataset) """ We take a sample from the test set use the model to obtain predictions for them. Note that since the baseline model is deterministic, we get a single a *point estimate* prediction for each test example, with no information about the uncertainty of the model nor the prediction. """ sample = 10 examples, targets = list(test_dataset.unbatch().shuffle(batch_size * 10).batch(sample))[ 0 ] predicted = baseline_model(examples).numpy() for idx in range(sample): print(f"Predicted: {round(float(predicted[idx][0]), 1)} - Actual: {targets[idx]}") """ ## Experiment 2: Bayesian neural network (BNN) The object of the Bayesian approach for modeling neural networks is to capture the *epistemic uncertainty*, which is uncertainty about the model fitness, due to limited training data. The idea is that, instead of learning specific weight (and bias) *values* in the neural network, the Bayesian approach learns weight *distributions* - from which we can sample to produce an output for a given input - to encode weight uncertainty. 
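As a quick illustration of this idea (it is not part of the model built below), a
single Bayesian weight can be pictured as a probability distribution from which a
value is drawn every time the network is called:

```python
# Illustration only: one weight as a distribution rather than a fixed value.
weight_posterior = tfp.distributions.Normal(loc=0.1, scale=0.3)
print(weight_posterior.sample(5))  # five plausible values for the same weight
```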
Thus, we need to define prior and the posterior distributions of these weights, and the training process is to learn the parameters of these distributions. """ # Define the prior weight distribution as Normal of mean=0 and stddev=1. # Note that, in this example, the we prior distribution is not trainable, # as we fix its parameters. def prior(kernel_size, bias_size, dtype=None): n = kernel_size + bias_size prior_model = keras.Sequential( [ tfp.layers.DistributionLambda( lambda t: tfp.distributions.MultivariateNormalDiag( loc=tf.zeros(n), scale_diag=tf.ones(n) ) ) ] ) return prior_model # Define variational posterior weight distribution as multivariate Gaussian. # Note that the learnable parameters for this distribution are the means, # variances, and covariances. def posterior(kernel_size, bias_size, dtype=None): n = kernel_size + bias_size posterior_model = keras.Sequential( [ tfp.layers.VariableLayer( tfp.layers.MultivariateNormalTriL.params_size(n), dtype=dtype ), tfp.layers.MultivariateNormalTriL(n), ] ) return posterior_model """ We use the `tfp.layers.DenseVariational` layer instead of the standard `keras.layers.Dense` layer in the neural network model. """ def create_bnn_model(train_size): inputs = create_model_inputs() features = keras.layers.concatenate(list(inputs.values())) features = layers.BatchNormalization()(features) # Create hidden layers with weight uncertainty using the DenseVariational layer. for units in hidden_units: features = tfp.layers.DenseVariational( units=units, make_prior_fn=prior, make_posterior_fn=posterior, kl_weight=1 / train_size, activation="sigmoid", )(features) # The output is deterministic: a single point estimate. outputs = layers.Dense(units=1)(features) model = keras.Model(inputs=inputs, outputs=outputs) return model """ The epistemic uncertainty can be reduced as we increase the size of the training data. That is, the more data the BNN model sees, the more it is certain about its estimates for the weights (distribution parameters). Let's test this behaviour by training the BNN model on a small subset of the training set, and then on the full training set, to compare the output variances. """ """ ### Train BNN with a small training subset. """ num_epochs = 500 train_sample_size = int(train_size * 0.3) small_train_dataset = train_dataset.unbatch().take(train_sample_size).batch(batch_size) bnn_model_small = create_bnn_model(train_sample_size) run_experiment(bnn_model_small, mse_loss, small_train_dataset, test_dataset) """ Since we have trained a BNN model, the model produces a different output each time we call it with the same input, since each time a new set of weights are sampled from the distributions to construct the network and produce an output. The less certain the mode weights are, the more variability (wider range) we will see in the outputs of the same inputs. 
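For instance (a quick check, not part of the original flow), two forward passes of the
small BNN over the same batch give two different sets of predictions:

```python
print(bnn_model_small(examples).numpy()[:3, 0])
print(bnn_model_small(examples).numpy()[:3, 0])  # different values for the same inputs
```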
""" def compute_predictions(model, iterations=100): predicted = [] for _ in range(iterations): predicted.append(model(examples).numpy()) predicted = np.concatenate(predicted, axis=1) prediction_mean = np.mean(predicted, axis=1).tolist() prediction_min = np.min(predicted, axis=1).tolist() prediction_max = np.max(predicted, axis=1).tolist() prediction_range = (np.max(predicted, axis=1) - np.min(predicted, axis=1)).tolist() for idx in range(sample): print( f"Predictions mean: {round(prediction_mean[idx], 2)}, " f"min: {round(prediction_min[idx], 2)}, " f"max: {round(prediction_max[idx], 2)}, " f"range: {round(prediction_range[idx], 2)} - " f"Actual: {targets[idx]}" ) compute_predictions(bnn_model_small) """ ### Train BNN with the whole training set. """ num_epochs = 500 bnn_model_full = create_bnn_model(train_size) run_experiment(bnn_model_full, mse_loss, train_dataset, test_dataset) compute_predictions(bnn_model_full) """ Notice that the model trained with the full training dataset shows smaller range (uncertainty) in the prediction values for the same inputs, compared to the model trained with a subset of the training dataset. """ """ ## Experiment 3: probabilistic Bayesian neural network So far, the output of the standard and the Bayesian NN models that we built is deterministic, that is, produces a point estimate as a prediction for a given example. We can create a probabilistic NN by letting the model output a distribution. In this case, the model captures the *aleatoric uncertainty* as well, which is due to irreducible noise in the data, or to the stochastic nature of the process generating the data. In this example, we model the output as a `IndependentNormal` distribution, with learnable mean and variance parameters. If the task was classification, we would have used `IndependentBernoulli` with binary classes, and `OneHotCategorical` with multiple classes, to model distribution of the model output. """ def create_probablistic_bnn_model(train_size): inputs = create_model_inputs() features = keras.layers.concatenate(list(inputs.values())) features = layers.BatchNormalization()(features) # Create hidden layers with weight uncertainty using the DenseVariational layer. for units in hidden_units: features = tfp.layers.DenseVariational( units=units, make_prior_fn=prior, make_posterior_fn=posterior, kl_weight=1 / train_size, activation="sigmoid", )(features) # Create a probabilisticå output (Normal distribution), and use the `Dense` layer # to produce the parameters of the distribution. # We set units=2 to learn both the mean and the variance of the Normal distribution. distribution_params = layers.Dense(units=2)(features) outputs = tfp.layers.IndependentNormal(1)(distribution_params) model = keras.Model(inputs=inputs, outputs=outputs) return model """ Since the output of the model is a distribution, rather than a point estimate, we use the [negative loglikelihood](https://en.wikipedia.org/wiki/Likelihood_function) as our loss function to compute how likely to see the true data (targets) from the estimated distribution produced by the model. """ def negative_loglikelihood(targets, estimated_distribution): return -estimated_distribution.log_prob(targets) num_epochs = 1000 prob_bnn_model = create_probablistic_bnn_model(train_size) run_experiment(prob_bnn_model, negative_loglikelihood, train_dataset, test_dataset) """ Now let's produce an output from the model given the test examples. 
The output is now a distribution, and we can use its mean and variance to compute the confidence intervals (CI) of the prediction. """ prediction_distribution = prob_bnn_model(examples) prediction_mean = prediction_distribution.mean().numpy().tolist() prediction_stdv = prediction_distribution.stddev().numpy() # The 95% CI is computed as mean ± (1.96 * stdv) upper = (prediction_mean + (1.96 * prediction_stdv)).tolist() lower = (prediction_mean - (1.96 * prediction_stdv)).tolist() prediction_stdv = prediction_stdv.tolist() for idx in range(sample): print( f"Prediction mean: {round(prediction_mean[idx][0], 2)}, " f"stddev: {round(prediction_stdv[idx][0], 2)}, " f"95% CI: [{round(upper[idx][0], 2)} - {round(lower[idx][0], 2)}]" f" - Actual: {targets[idx]}" )
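"""
As an optional last step (a sketch that is not part of the original example), we can
visualize these predictions: the predicted means with their 95% confidence intervals,
next to the actual wine quality scores. It only uses the values computed above.
"""

import matplotlib.pyplot as plt

means = [m[0] for m in prediction_mean]
half_widths = [(u[0] - l[0]) / 2 for u, l in zip(upper, lower)]

plt.figure(figsize=(8, 4))
plt.errorbar(
    range(sample),
    means,
    yerr=half_widths,
    fmt="o",
    capsize=4,
    label="predicted mean with 95% CI",
)
plt.plot(range(sample), targets[:sample].numpy(), "x", color="red", label="actual")
plt.xlabel("test example")
plt.ylabel("wine quality score")
plt.legend()
plt.show()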
keras-io/examples/keras_recipes/bayesian_neural_networks.py/0
{ "file_path": "keras-io/examples/keras_recipes/bayesian_neural_networks.py", "repo_id": "keras-io", "token_count": 4815 }
97
<jupyter_start><jupyter_text>Packaging Keras models for wide distribution using Functional Subclassing**Author:** Martin Görner**Date created:** 2023-12-13**Last modified:** 2023-12-13**Description:** When sharing your deep learning models, package them using the Functional Subclassing pattern. IntroductionKeras is the ideal framework for sharing your cutting-edge deep learning models, in alibrary of pre-trained (or not) models. Millions of ML engineers are fluent in thefamiliar Keras API, making your models accessible to a global community, whatever theirpreferred backend (Jax, PyTorch or TensorFlow).One of the benefits of the Keras API is that it lets users programmatically inspect oredit a model, a feature that is necessary when creating new architectures or workflowsbased on a pre-trained model.When distributing models, the Keras team recommends packaging them using the **FunctionalSubclassing** pattern. Models implemented in this way combine two benefits:* They can be instantiated in the normal pythonic way:`model = model_collection_xyz.AmazingModel()`* They are Keras functional models which means that they have a programmaticallyaccessible graph of layers, for introspection or model surgery.This guide explains [how to use](functional-subclassing-model) the FunctionalSubclassing pattern, and showcases its benefits for [programmatic modelintrospection](model-introspection) and [model surgery](model-surgery). It also showstwo other best practices for sharable Keras models: [configuringmodels](unconstrained-inputs) for the widest range of supported inputs, for exampleimages of various sizes, and [using dictionary inputs](model-with-dictionary-inputs) forclarity in more complex models. Setup<jupyter_code>import keras import tensorflow as tf # only for tf.data print("Keras version", keras.version()) print("Keras is running on", keras.config.backend())<jupyter_output><empty_output><jupyter_text>DatasetLet's load an MNIST dataset so that we have something to train with.<jupyter_code># tf.data is a great API for putting together a data stream. # It works wether you use the TensorFlow, PyTorch or Jax backend, # as long as you use it in the data stream only and not inside of a model. BATCH_SIZE = 256 (x_train, train_labels), (x_test, test_labels) = keras.datasets.mnist.load_data() train_data = tf.data.Dataset.from_tensor_slices((x_train, train_labels)) train_data = train_data.map( lambda x, y: (tf.expand_dims(x, axis=-1), y) ) # 1-channel monochrome train_data = train_data.batch(BATCH_SIZE) train_data = train_data.cache() train_data = train_data.shuffle(5000, reshuffle_each_iteration=True) train_data = train_data.repeat() test_data = tf.data.Dataset.from_tensor_slices((x_test, test_labels)) test_data = test_data.map( lambda x, y: (tf.expand_dims(x, axis=-1), y) ) # 1-channel monochrome test_data = test_data.batch(10000) test_data = test_data.cache() STEPS_PER_EPOCH = len(train_labels) // BATCH_SIZE EPOCHS = 5<jupyter_output><empty_output><jupyter_text>Functional Subclassing ModelThe model is wrapped in a class so that end users can instantiate it normally by callingthe constructor `MnistModel()` rather than calling a factory function.<jupyter_code>class MnistModel(keras.Model): def __init__(self, **kwargs): # Keras Functional model definition. This could have used Sequential as # well. Sequential is just syntactic sugar for simple functional models. 
# 1-channel monochrome input inputs = keras.layers.Input(shape=(None, None, 1), dtype="uint8") # pixel format conversion from uint8 to float32 y = keras.layers.Rescaling(1 / 255.0)(inputs) # 3 convolutional layers y = keras.layers.Conv2D( filters=16, kernel_size=3, padding="same", activation="relu" )(y) y = keras.layers.Conv2D( filters=32, kernel_size=6, padding="same", activation="relu", strides=2 )(y) y = keras.layers.Conv2D( filters=48, kernel_size=6, padding="same", activation="relu", strides=2 )(y) # 2 dense layers y = keras.layers.GlobalAveragePooling2D()(y) y = keras.layers.Dense(48, activation="relu")(y) y = keras.layers.Dropout(0.4)(y) outputs = keras.layers.Dense( 10, activation="softmax", name="classification_head" # 10 classes )(y) # A Keras Functional model is created by calling keras.Model(inputs, outputs) super().__init__(inputs=inputs, outputs=outputs, **kwargs)<jupyter_output><empty_output><jupyter_text>Let's instantiate and train this model.<jupyter_code>model = MnistModel() model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) history = model.fit( train_data, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=test_data, )<jupyter_output><empty_output><jupyter_text>Unconstrained inputsNotice, in the model definition above, that the input is specified with undefineddimensions: `Input(shape=(None, None, 1)`This allows the model to accept any image size as an input. However, thisonly works if the loosely defined shape can be propagated through all the layers andstill determine the size of all weights.* So if you have a model architecture that can handle different input sizeswith the same weights (like here), then your users will be able to instantiate it withoutparameters: `model = MnistModel()`* If on the other hand, the model must provision different weights for different inputsizes, you will have to ask your users to specify the size in the constructor:`model = ModelXYZ(input_size=...)` Model introspectionKeras maintains a programmatically accessible graph of layers for every model. It can beused for introspection and is accessed through the `model.layers` or `layer.layers`attribute. The utility function `model.summary()` also uses this mechanism internally.<jupyter_code>model = MnistModel() # Model summary works model.summary() # Recursively walking the layer graph works as well def walk_layers(layer): if hasattr(layer, "layers"): for layer in layer.layers: walk_layers(layer) else: print(layer.name) print("\nWalking model layers:\n") walk_layers(model)<jupyter_output><empty_output><jupyter_text>Model surgeryEnd users might want to instantiate the model from your library but modify it before use.Functional models have a programmatically accessible graph of layers. 
Edits are possibleby slicing and splicing the graph and creating a new functional model.The alternative is to fork the model code and make the modifications but that forcesusers to then maintain their fork indefinitely.Example: instantiate the model but change the classification head to do a binaryclassification, "0" or "not 0", instead of the original 10-way digits classification.<jupyter_code>model = MnistModel() input = model.input # cut before the classification head y = model.get_layer("classification_head").input # add a new classification head output = keras.layers.Dense( 1, # single class for binary classification activation="sigmoid", name="binary_classification_head", )(y) # create a new functional model binary_model = keras.Model(input, output) binary_model.summary()<jupyter_output><empty_output><jupyter_text>We can now train the new model as a binary classifier.<jupyter_code># new dataset with 0 / 1 labels (1 = digit '0', 0 = all other digits) bin_train_data = train_data.map( lambda x, y: (x, tf.cast(tf.math.equal(y, tf.zeros_like(y)), dtype=tf.uint8)) ) bin_test_data = test_data.map( lambda x, y: (x, tf.cast(tf.math.equal(y, tf.zeros_like(y)), dtype=tf.uint8)) ) # appropriate loss and metric for binary classification binary_model.compile( optimizer="adam", loss="binary_crossentropy", metrics=["binary_accuracy"] ) history = binary_model.fit( bin_train_data, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=bin_test_data, )<jupyter_output><empty_output><jupyter_text>Model with dictionary inputsIn more complex models, with multiple inputs, structuring the inputs as a dictionary canimprove readability and usability. This is straightforward to do with a functional model:<jupyter_code>class MnistDictModel(keras.Model): def __init__(self, **kwargs): # # The input is a dictionary # inputs = { "image": keras.layers.Input( shape=(None, None, 1), # 1-channel monochrome dtype="uint8", name="image", ) } # pixel format conversion from uint8 to float32 y = keras.layers.Rescaling(1 / 255.0)(inputs["image"]) # 3 conv layers y = keras.layers.Conv2D( filters=16, kernel_size=3, padding="same", activation="relu" )(y) y = keras.layers.Conv2D( filters=32, kernel_size=6, padding="same", activation="relu", strides=2 )(y) y = keras.layers.Conv2D( filters=48, kernel_size=6, padding="same", activation="relu", strides=2 )(y) # 2 dense layers y = keras.layers.GlobalAveragePooling2D()(y) y = keras.layers.Dense(48, activation="relu")(y) y = keras.layers.Dropout(0.4)(y) outputs = keras.layers.Dense( 10, activation="softmax", name="classification_head" # 10 classes )(y) # A Keras Functional model is created by calling keras.Model(inputs, outputs) super().__init__(inputs=inputs, outputs=outputs, **kwargs)<jupyter_output><empty_output><jupyter_text>We can now train the model on inputs structured as a dictionary.<jupyter_code>model = MnistDictModel() # reformat the dataset as a dictionary dict_train_data = train_data.map(lambda x, y: ({"image": x}, y)) dict_test_data = test_data.map(lambda x, y: ({"image": x}, y)) model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"], ) history = model.fit( dict_train_data, steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS, validation_data=dict_test_data, )<jupyter_output><empty_output>
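<jupyter_text>Finally (a sketch that goes beyond the original guide): a model packaged this way is typically shared as code plus a weights file. End users re-instantiate the class from your library and load the weights; the file name below is only an illustrative placeholder.<jupyter_code># save only the weights of the trained model
model.save_weights("mnist_dict_model.weights.h5")

# a fresh instance of the same class can reload them
restored = MnistDictModel()
restored.load_weights("mnist_dict_model.weights.h5")<jupyter_output><empty_output>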
keras-io/examples/keras_recipes/ipynb/packaging_keras_models_for_wide_distribution.ipynb/0
{ "file_path": "keras-io/examples/keras_recipes/ipynb/packaging_keras_models_for_wide_distribution.ipynb", "repo_id": "keras-io", "token_count": 3576 }
98
# Memory-efficient embeddings for recommendation systems **Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)<br> **Date created:** 2021/02/15<br> **Last modified:** 2023/11/15<br> **Description:** Using compositional & mixed-dimension embeddings for memory-efficient recommendation models. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/keras_recipes/ipynb/memory_efficient_embeddings.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/keras_recipes/memory_efficient_embeddings.py) --- ## Introduction This example demonstrates two techniques for building memory-efficient recommendation models by reducing the size of the embedding tables, without sacrificing model effectiveness: 1. [Quotient-remainder trick](https://arxiv.org/abs/1909.02107), by Hao-Jun Michael Shi et al., which reduces the number of embedding vectors to store, yet produces unique embedding vector for each item without explicit definition. 2. [Mixed Dimension embeddings](https://arxiv.org/abs/1909.11810), by Antonio Ginart et al., which stores embedding vectors with mixed dimensions, where less popular items have reduced dimension embeddings. We use the [1M version of the Movielens dataset](https://grouplens.org/datasets/movielens/1m/). The dataset includes around 1 million ratings from 6,000 users on 4,000 movies. --- ## Setup ```python import os os.environ["KERAS_BACKEND"] = "tensorflow" from zipfile import ZipFile from urllib.request import urlretrieve import numpy as np import pandas as pd import tensorflow as tf import keras from keras import layers from keras.layers import StringLookup import matplotlib.pyplot as plt ``` --- ## Prepare the data --- ## Download and process data ```python urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip") ZipFile("movielens.zip", "r").extractall() ratings_data = pd.read_csv( "ml-1m/ratings.dat", sep="::", names=["user_id", "movie_id", "rating", "unix_timestamp"], ) ratings_data["movie_id"] = ratings_data["movie_id"].apply(lambda x: f"movie_{x}") ratings_data["user_id"] = ratings_data["user_id"].apply(lambda x: f"user_{x}") ratings_data["rating"] = ratings_data["rating"].apply(lambda x: float(x)) del ratings_data["unix_timestamp"] print(f"Number of users: {len(ratings_data.user_id.unique())}") print(f"Number of movies: {len(ratings_data.movie_id.unique())}") print(f"Number of ratings: {len(ratings_data.index)}") ``` <div class="k-default-codeblock"> ``` /var/folders/8n/8w8cqnvj01xd4ghznl11nyn000_93_/T/ipykernel_33554/2288473197.py:4: ParserWarning: Falling back to the 'python' engine because the 'c' engine does not support regex separators (separators > 1 char and different from '\s+' are interpreted as regex); you can avoid this warning by specifying engine='python'. 
ratings_data = pd.read_csv( Number of users: 6040 Number of movies: 3706 Number of ratings: 1000209 ``` </div> --- ## Create train and eval data splits ```python random_selection = np.random.rand(len(ratings_data.index)) <= 0.85 train_data = ratings_data[random_selection] eval_data = ratings_data[~random_selection] train_data.to_csv("train_data.csv", index=False, sep="|", header=False) eval_data.to_csv("eval_data.csv", index=False, sep="|", header=False) print(f"Train data split: {len(train_data.index)}") print(f"Eval data split: {len(eval_data.index)}") print("Train and eval data files are saved.") ``` <div class="k-default-codeblock"> ``` Train data split: 850573 Eval data split: 149636 Train and eval data files are saved. ``` </div> --- ## Define dataset metadata and hyperparameters ```python csv_header = list(ratings_data.columns) user_vocabulary = list(ratings_data.user_id.unique()) movie_vocabulary = list(ratings_data.movie_id.unique()) target_feature_name = "rating" learning_rate = 0.001 batch_size = 128 num_epochs = 3 base_embedding_dim = 64 ``` --- ## Train and evaluate the model ```python def get_dataset_from_csv(csv_file_path, batch_size=128, shuffle=True): return tf.data.experimental.make_csv_dataset( csv_file_path, batch_size=batch_size, column_names=csv_header, label_name=target_feature_name, num_epochs=1, header=False, field_delim="|", shuffle=shuffle, ) def run_experiment(model): # Compile the model. model.compile( optimizer=keras.optimizers.Adam(learning_rate), loss=keras.losses.MeanSquaredError(), metrics=[keras.metrics.MeanAbsoluteError(name="mae")], ) # Read the training data. train_dataset = get_dataset_from_csv("train_data.csv", batch_size) # Read the test data. eval_dataset = get_dataset_from_csv("eval_data.csv", batch_size, shuffle=False) # Fit the model with the training data. history = model.fit( train_dataset, epochs=num_epochs, validation_data=eval_dataset, ) return history ``` --- ## Experiment 1: baseline collaborative filtering model ### Implement embedding encoder ```python def embedding_encoder(vocabulary, embedding_dim, num_oov_indices=0, name=None): return keras.Sequential( [ StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=num_oov_indices ), layers.Embedding( input_dim=len(vocabulary) + num_oov_indices, output_dim=embedding_dim ), ], name=f"{name}_embedding" if name else None, ) ``` ### Implement the baseline model ```python def create_baseline_model(): # Receive the user as an input. user_input = layers.Input(name="user_id", shape=(), dtype=tf.string) # Get user embedding. user_embedding = embedding_encoder( vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, name="user" )(user_input) # Receive the movie as an input. movie_input = layers.Input(name="movie_id", shape=(), dtype=tf.string) # Get embedding. movie_embedding = embedding_encoder( vocabulary=movie_vocabulary, embedding_dim=base_embedding_dim, name="movie" )(movie_input) # Compute dot product similarity between user and movie embeddings. logits = layers.Dot(axes=1, name="dot_similarity")( [user_embedding, movie_embedding] ) # Convert to rating scale. prediction = keras.activations.sigmoid(logits) * 5 # Create the model. 
model = keras.Model( inputs=[user_input, movie_input], outputs=prediction, name="baseline_model" ) return model baseline_model = create_baseline_model() baseline_model.summary() ``` <div class="k-default-codeblock"> ``` /Users/fchollet/Library/Python/3.10/lib/python/site-packages/numpy/core/numeric.py:2468: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison return bool(asarray(a1 == a2).all()) ``` </div> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "baseline_model"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩ │ user_id │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ movie_id │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ user_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">386,560</span> │ user_id[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Sequential</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ movie_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">237,184</span> │ movie_id[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Sequential</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ dot_similarity │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ user_embedding[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Dot</span>) │ │ │ movie_embedding[<span style="color: #00af00; text-decoration-color: 
#00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ sigmoid (<span style="color: #0087ff; text-decoration-color: #0087ff">Sigmoid</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ dot_similarity[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ multiply (<span style="color: #0087ff; text-decoration-color: #0087ff">Multiply</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ sigmoid[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ └─────────────────────┴───────────────────┴─────────┴──────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">623,744</span> (2.38 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">623,744</span> (2.38 MB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Notice that the number of trainable parameters is 623,744 ```python history = run_experiment(baseline_model) plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "eval"], loc="upper left") plt.show() ``` <div class="k-default-codeblock"> ``` Epoch 1/3 6629/Unknown 17s 3ms/step - loss: 1.4095 - mae: 0.9668 /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/contextlib.py:153: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset. self.gen.throw(typ, value, traceback) 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 18s 3ms/step - loss: 1.4087 - mae: 0.9665 - val_loss: 0.9032 - val_mae: 0.7438 Epoch 2/3 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 17s 3ms/step - loss: 0.8296 - mae: 0.7193 - val_loss: 0.7807 - val_mae: 0.6976 Epoch 3/3 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 17s 3ms/step - loss: 0.7305 - mae: 0.6744 - val_loss: 0.7446 - val_mae: 0.6808 ``` </div> ![png](/img/examples/keras_recipes/memory_efficient_embeddings/memory_efficient_embeddings_17_3.png) --- ## Experiment 2: memory-efficient model ### Implement Quotient-Remainder embedding as a layer The Quotient-Remainder technique works as follows. 
For a set of vocabulary and embedding size `embedding_dim`, instead of creating a `vocabulary_size X embedding_dim` embedding table, we create *two* `num_buckets X embedding_dim` embedding tables, where `num_buckets` is much smaller than `vocabulary_size`. An embedding for a given item `index` is generated via the following steps: 1. Compute the `quotient_index` as `index // num_buckets`. 2. Compute the `remainder_index` as `index % num_buckets`. 3. Lookup `quotient_embedding` from the first embedding table using `quotient_index`. 4. Lookup `remainder_embedding` from the second embedding table using `remainder_index`. 5. Return `quotient_embedding` * `remainder_embedding`. This technique not only reduces the number of embedding vectors needs to be stored and trained, but also generates a *unique* embedding vector for each item of size `embedding_dim`. Note that `q_embedding` and `r_embedding` can be combined using other operations, like `Add` and `Concatenate`. ```python class QREmbedding(keras.layers.Layer): def __init__(self, vocabulary, embedding_dim, num_buckets, name=None): super().__init__(name=name) self.num_buckets = num_buckets self.index_lookup = StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=0 ) self.q_embeddings = layers.Embedding( num_buckets, embedding_dim, ) self.r_embeddings = layers.Embedding( num_buckets, embedding_dim, ) def call(self, inputs): # Get the item index. embedding_index = self.index_lookup(inputs) # Get the quotient index. quotient_index = tf.math.floordiv(embedding_index, self.num_buckets) # Get the reminder index. remainder_index = tf.math.floormod(embedding_index, self.num_buckets) # Lookup the quotient_embedding using the quotient_index. quotient_embedding = self.q_embeddings(quotient_index) # Lookup the remainder_embedding using the remainder_index. remainder_embedding = self.r_embeddings(remainder_index) # Use multiplication as a combiner operation return quotient_embedding * remainder_embedding ``` ### Implement Mixed Dimension embedding as a layer In the mixed dimension embedding technique, we train embedding vectors with full dimensions for the frequently queried items, while train embedding vectors with *reduced dimensions* for less frequent items, plus a *projection weights matrix* to bring low dimension embeddings to the full dimensions. More precisely, we define *blocks* of items of similar frequencies. For each block, a `block_vocab_size X block_embedding_dim` embedding table and `block_embedding_dim X full_embedding_dim` projection weights matrix are created. Note that, if `block_embedding_dim` equals `full_embedding_dim`, the projection weights matrix becomes an *identity* matrix. Embeddings for a given batch of item `indices` are generated via the following steps: 1. For each block, lookup the `block_embedding_dim` embedding vectors using `indices`, and project them to the `full_embedding_dim`. 2. If an item index does not belong to a given block, an out-of-vocabulary embedding is returned. Each block will return a `batch_size X full_embedding_dim` tensor. 3. A mask is applied to the embeddings returned from each block in order to convert the out-of-vocabulary embeddings to vector of zeros. That is, for each item in the batch, a single non-zero embedding vector is returned from the all block embeddings. 4. Embeddings retrieved from the blocks are combined using *sum* to produce the final `batch_size X full_embedding_dim` tensor. 
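To get a rough sense of the savings before looking at the implementation, here is a
back-of-the-envelope parameter count for the movie embeddings. It is only a sketch that
uses the block sizes and dimensions chosen later in this example, and it ignores biases
and out-of-vocabulary rows:

```python
full_table = 3706 * 64                          # single full-dimension table: 237,184
md_tables = 400 * 64 + 1300 * 32 + 2006 * 16    # three mixed-dimension tables: 99,296
projections = 32 * 64 + 16 * 64                 # projections back to 64 dims: 3,072
print(full_table, md_tables + projections)      # 237184 vs. 102368
```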
```python class MDEmbedding(keras.layers.Layer): def __init__( self, blocks_vocabulary, blocks_embedding_dims, base_embedding_dim, name=None ): super().__init__(name=name) self.num_blocks = len(blocks_vocabulary) # Create vocab to block lookup. keys = [] values = [] for block_idx, block_vocab in enumerate(blocks_vocabulary): keys.extend(block_vocab) values.extend([block_idx] * len(block_vocab)) self.vocab_to_block = tf.lookup.StaticHashTable( tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1 ) self.block_embedding_encoders = [] self.block_embedding_projectors = [] # Create block embedding encoders and projectors. for idx in range(self.num_blocks): vocabulary = blocks_vocabulary[idx] embedding_dim = blocks_embedding_dims[idx] block_embedding_encoder = embedding_encoder( vocabulary, embedding_dim, num_oov_indices=1 ) self.block_embedding_encoders.append(block_embedding_encoder) if embedding_dim == base_embedding_dim: self.block_embedding_projectors.append(layers.Lambda(lambda x: x)) else: self.block_embedding_projectors.append( layers.Dense(units=base_embedding_dim) ) def call(self, inputs): # Get block index for each input item. block_indices = self.vocab_to_block.lookup(inputs) # Initialize output embeddings to zeros. embeddings = tf.zeros(shape=(tf.shape(inputs)[0], base_embedding_dim)) # Generate embeddings from blocks. for idx in range(self.num_blocks): # Lookup embeddings from the current block. block_embeddings = self.block_embedding_encoders[idx](inputs) # Project embeddings to base_embedding_dim. block_embeddings = self.block_embedding_projectors[idx](block_embeddings) # Create a mask to filter out embeddings of items that do not belong to the current block. mask = tf.expand_dims(tf.cast(block_indices == idx, tf.dtypes.float32), 1) # Set the embeddings for the items not belonging to the current block to zeros. block_embeddings = block_embeddings * mask # Add the block embeddings to the final embeddings. embeddings += block_embeddings return embeddings ``` ### Implement the memory-efficient model In this experiment, we are going to use the **Quotient-Remainder** technique to reduce the size of the user embeddings, and the **Mixed Dimension** technique to reduce the size of the movie embeddings. While in the [paper](https://arxiv.org/abs/1909.11810) an alpha-power rule is used to determine the dimensions of the embedding of each block, we simply set the number of blocks and the embedding dimensions of each block based on the histogram visualization of movie popularity. ```python movie_frequencies = ratings_data["movie_id"].value_counts() movie_frequencies.hist(bins=10) ``` <div class="k-default-codeblock"> ``` <Axes: > ``` </div> ![png](/img/examples/keras_recipes/memory_efficient_embeddings/memory_efficient_embeddings_24_1.png) You can see that we can group the movies into three blocks, and assign them 64, 32, and 16 embedding dimensions, respectively. Feel free to experiment with a different number of blocks and different dimensions. ```python sorted_movie_vocabulary = list(movie_frequencies.keys()) movie_blocks_vocabulary = [ sorted_movie_vocabulary[:400], # high popularity movies block sorted_movie_vocabulary[400:1700], # normal popularity movies block sorted_movie_vocabulary[1700:], # low popularity movies block ] movie_blocks_embedding_dims = [64, 32, 16] user_embedding_num_buckets = len(user_vocabulary) // 50 def create_memory_efficient_model(): # Take the user as an input. user_input = layers.Input(name="user_id", shape=(), dtype="string") # Get user embedding.
user_embedding = QREmbedding( vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, num_buckets=user_embedding_num_buckets, name="user_embedding", )(user_input) # Take the movie as an input. movie_input = layers.Input(name="movie_id", shape=(), dtype="string") # Get embedding. movie_embedding = MDEmbedding( blocks_vocabulary=movie_blocks_vocabulary, blocks_embedding_dims=movie_blocks_embedding_dims, base_embedding_dim=base_embedding_dim, name="movie_embedding", )(movie_input) # Compute dot product similarity between user and movie embeddings. logits = layers.Dot(axes=1, name="dot_similarity")( [user_embedding, movie_embedding] ) # Convert to rating scale. prediction = keras.activations.sigmoid(logits) * 5 # Create the model. model = keras.Model( inputs=[user_input, movie_input], outputs=prediction, name="baseline_model" ) return model memory_efficient_model = create_memory_efficient_model() memory_efficient_model.summary() ``` <div class="k-default-codeblock"> ``` /Users/fchollet/Library/Python/3.10/lib/python/site-packages/numpy/core/numeric.py:2468: FutureWarning: elementwise comparison failed; returning scalar instead, but in the future will perform elementwise comparison return bool(asarray(a1 == a2).all()) ``` </div> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "baseline_model"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩ │ user_id │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ movie_id │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ user_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">15,360</span> │ user_id[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">QREmbedding</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ movie_embedding │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">102,608</span> │ movie_id[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; 
text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">MDEmbedding</span>) │ │ │ │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ dot_similarity │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ user_embedding[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>… │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Dot</span>) │ │ │ movie_embedding[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">…</span> │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ sigmoid_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Sigmoid</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ dot_similarity[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ ├─────────────────────┼───────────────────┼─────────┼──────────────────────┤ │ multiply_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ sigmoid_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │ │ (<span style="color: #0087ff; text-decoration-color: #0087ff">Multiply</span>) │ │ │ │ └─────────────────────┴───────────────────┴─────────┴──────────────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">117,968</span> (460.81 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">117,968</span> (460.81 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Notice that the number of trainable parameters is 117,968, which is more than 5x less than the number of parameters in the baseline model. ```python history = run_experiment(memory_efficient_model) plt.plot(history.history["loss"]) plt.plot(history.history["val_loss"]) plt.title("model loss") plt.ylabel("loss") plt.xlabel("epoch") plt.legend(["train", "eval"], loc="upper left") plt.show() ``` <div class="k-default-codeblock"> ``` Epoch 1/3 6622/Unknown 6s 891us/step - loss: 1.1938 - mae: 0.8780 /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/contextlib.py:153: UserWarning: Your input ran out of data; interrupting training. 
Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset. self.gen.throw(typ, value, traceback) 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 7s 992us/step - loss: 1.1931 - mae: 0.8777 - val_loss: 1.1027 - val_mae: 0.8179 Epoch 2/3 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 7s 1ms/step - loss: 0.8908 - mae: 0.7488 - val_loss: 0.9144 - val_mae: 0.7549 Epoch 3/3 6646/6646 ━━━━━━━━━━━━━━━━━━━━ 7s 980us/step - loss: 0.8419 - mae: 0.7278 - val_loss: 0.8806 - val_mae: 0.7419 ``` </div> ![png](/img/examples/keras_recipes/memory_efficient_embeddings/memory_efficient_embeddings_28_3.png)
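As a closing sanity check, the parameter counts reported by the two model summaries above can be reproduced with a few lines of arithmetic. The vocabulary sizes below are *assumptions* (they correspond to the MovieLens 1M counts of roughly 6,040 users and 3,706 rated movies); everything else follows from the block sizes, bucket rule, and projector layers defined in the code above.

```python
# Back-of-the-envelope check of the parameter counts shown in the summaries.
# NOTE: num_users and num_movies are assumed values, not restated from the code.
num_users, num_movies, base_dim = 6040, 3706, 64

baseline_params = (num_users + num_movies) * base_dim  # 623,744

user_buckets = num_users // 50  # 120, as in user_embedding_num_buckets
qr_user_params = 2 * user_buckets * base_dim  # 15,360 (two bucket tables)

md_movie_params = 0
block_sizes = [400, 1300, num_movies - 1700]  # each block also gets 1 OOV index
block_dims = [64, 32, 16]
for size, dim in zip(block_sizes, block_dims):
    md_movie_params += (size + 1) * dim  # block embedding table (with OOV row)
    if dim != base_dim:
        md_movie_params += dim * base_dim + base_dim  # Dense projector to base_dim

print(baseline_params, qr_user_params + md_movie_params)  # 623744 117968
```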
keras-io/examples/keras_recipes/md/memory_efficient_embeddings.md/0
{ "file_path": "keras-io/examples/keras_recipes/md/memory_efficient_embeddings.md", "repo_id": "keras-io", "token_count": 12344 }
99
""" Title: Writing Keras Models With TensorFlow NumPy Author: [lukewood](https://lukewood.xyz) Date created: 2021/08/28 Last modified: 2021/08/28 Description: Overview of how to use the TensorFlow NumPy API to write Keras models. Accelerator: GPU """ """ ## Introduction [NumPy](https://numpy.org/) is a hugely successful Python linear algebra library. TensorFlow recently launched [tf_numpy](https://www.tensorflow.org/guide/tf_numpy), a TensorFlow implementation of a large subset of the NumPy API. Thanks to `tf_numpy`, you can write Keras layers or models in the NumPy style! The TensorFlow NumPy API has full integration with the TensorFlow ecosystem. Features such as automatic differentiation, TensorBoard, Keras model callbacks, TPU distribution and model exporting are all supported. Let's run through a few examples. """ """ ## Setup """ import os os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import tensorflow.experimental.numpy as tnp import keras from keras import layers """ To test our models we will use the Boston housing prices regression dataset. """ (x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data( path="boston_housing.npz", test_split=0.2, seed=113 ) input_dim = x_train.shape[1] def evaluate_model(model: keras.Model): loss, percent_error = model.evaluate(x_test, y_test, verbose=0) print("Mean absolute percent error before training: ", percent_error) model.fit(x_train, y_train, epochs=200, verbose=0) loss, percent_error = model.evaluate(x_test, y_test, verbose=0) print("Mean absolute percent error after training:", percent_error) """ ## Subclassing keras.Model with TNP The most flexible way to make use of the Keras API is to subclass the [`keras.Model`](https://keras.io/api/models/model/) class. Subclassing the Model class gives you the ability to fully customize what occurs in the training loop. This makes subclassing Model a popular option for researchers. In this example, we will implement a `Model` subclass that performs regression over the boston housing dataset using the TNP API. Note that differentiation and gradient descent is handled automatically when using the TNP API alongside keras. First let's define a simple `TNPForwardFeedRegressionNetwork` class. """ class TNPForwardFeedRegressionNetwork(keras.Model): def __init__(self, blocks=None, **kwargs): super().__init__(**kwargs) if not isinstance(blocks, list): raise ValueError(f"blocks must be a list, got blocks={blocks}") self.blocks = blocks self.block_weights = None self.biases = None def build(self, input_shape): current_shape = input_shape[1] self.block_weights = [] self.biases = [] for i, block in enumerate(self.blocks): self.block_weights.append( self.add_weight( shape=(current_shape, block), trainable=True, name=f"block-{i}", initializer="glorot_normal", ) ) self.biases.append( self.add_weight( shape=(block,), trainable=True, name=f"bias-{i}", initializer="zeros", ) ) current_shape = block self.linear_layer = self.add_weight( shape=(current_shape, 1), name="linear_projector", trainable=True, initializer="glorot_normal", ) def call(self, inputs): activations = inputs for w, b in zip(self.block_weights, self.biases): activations = tnp.matmul(activations, w) + b # ReLu activation function activations = tnp.maximum(activations, 0.0) return tnp.matmul(activations, self.linear_layer) """ Just like with any other Keras model we can utilize any supported optimizer, loss, metrics or callbacks that we want. Let's see how the model performs! 
""" model = TNPForwardFeedRegressionNetwork(blocks=[3, 3]) model.compile( optimizer="adam", loss="mean_squared_error", metrics=[keras.metrics.MeanAbsolutePercentageError()], ) evaluate_model(model) """ Great! Our model seems to be effectively learning to solve the problem at hand. We can also write our own custom loss function using TNP. """ def tnp_mse(y_true, y_pred): return tnp.mean(tnp.square(y_true - y_pred), axis=0) keras.backend.clear_session() model = TNPForwardFeedRegressionNetwork(blocks=[3, 3]) model.compile( optimizer="adam", loss=tnp_mse, metrics=[keras.metrics.MeanAbsolutePercentageError()], ) evaluate_model(model) """ ## Implementing a Keras Layer Based Model with TNP If desired, TNP can also be used in layer oriented Keras code structure. Let's implement the same model, but using a layered approach! """ def tnp_relu(x): return tnp.maximum(x, 0) class TNPDense(keras.layers.Layer): def __init__(self, units, activation=None): super().__init__() self.units = units self.activation = activation def build(self, input_shape): self.w = self.add_weight( name="weights", shape=(input_shape[1], self.units), initializer="random_normal", trainable=True, ) self.bias = self.add_weight( name="bias", shape=(self.units,), initializer="zeros", trainable=True, ) def call(self, inputs): outputs = tnp.matmul(inputs, self.w) + self.bias if self.activation: return self.activation(outputs) return outputs def create_layered_tnp_model(): return keras.Sequential( [ TNPDense(3, activation=tnp_relu), TNPDense(3, activation=tnp_relu), TNPDense(1), ] ) model = create_layered_tnp_model() model.compile( optimizer="adam", loss="mean_squared_error", metrics=[keras.metrics.MeanAbsolutePercentageError()], ) model.build((None, input_dim)) model.summary() evaluate_model(model) """ You can also seamlessly switch between TNP layers and native Keras layers! """ def create_mixed_model(): return keras.Sequential( [ TNPDense(3, activation=tnp_relu), # The model will have no issue using a normal Dense layer layers.Dense(3, activation="relu"), # ... or switching back to tnp layers! TNPDense(1), ] ) model = create_mixed_model() model.compile( optimizer="adam", loss="mean_squared_error", metrics=[keras.metrics.MeanAbsolutePercentageError()], ) model.build((None, input_dim)) model.summary() evaluate_model(model) """ The Keras API offers a wide variety of layers. The ability to use them alongside NumPy code can be a huge time saver in projects. """ """ ## Distribution Strategy TensorFlow NumPy and Keras integrate with [TensorFlow Distribution Strategies](https://www.tensorflow.org/guide/distributed_training). This makes it simple to perform distributed training across multiple GPUs, or even an entire TPU Pod. """ gpus = tf.config.list_logical_devices("GPU") if gpus: strategy = tf.distribute.MirroredStrategy(gpus) else: # We can fallback to a no-op CPU strategy. strategy = tf.distribute.get_strategy() print("Running with strategy:", str(strategy.__class__.__name__)) with strategy.scope(): model = create_layered_tnp_model() model.compile( optimizer="adam", loss="mean_squared_error", metrics=[keras.metrics.MeanAbsolutePercentageError()], ) model.build((None, input_dim)) model.summary() evaluate_model(model) """ ## TensorBoard Integration One of the many benefits of using the Keras API is the ability to monitor training through TensorBoard. Using the TensorFlow NumPy API alongside Keras allows you to easily leverage TensorBoard. 
""" keras.backend.clear_session() """ To load the TensorBoard from a Jupyter notebook, you can run the following magic: ``` %load_ext tensorboard ``` """ models = [ ( TNPForwardFeedRegressionNetwork(blocks=[3, 3]), "TNPForwardFeedRegressionNetwork", ), (create_layered_tnp_model(), "layered_tnp_model"), (create_mixed_model(), "mixed_model"), ] for model, model_name in models: model.compile( optimizer="adam", loss="mean_squared_error", metrics=[keras.metrics.MeanAbsolutePercentageError()], ) model.fit( x_train, y_train, epochs=200, verbose=0, callbacks=[keras.callbacks.TensorBoard(log_dir=f"logs/{model_name}")], ) """ To load the TensorBoard from a Jupyter notebook you can use the `%tensorboard` magic: ``` %tensorboard --logdir logs ``` The TensorBoard monitor metrics and examine the training curve. ![Tensorboard training graph](https://i.imgur.com/wsOuFnz.png) The TensorBoard also allows you to explore the computation graph used in your models. ![Tensorboard graph exploration](https://i.imgur.com/tOrezDL.png) The ability to introspect into your models can be valuable during debugging. """ """ ## Conclusion Porting existing NumPy code to Keras models using the `tensorflow_numpy` API is easy! By integrating with Keras you gain the ability to use existing Keras callbacks, metrics and optimizers, easily distribute your training and use Tensorboard. Migrating a more complex model, such as a ResNet, to the TensorFlow NumPy API would be a great follow up learning exercise. Several open source NumPy ResNet implementations are available online. """
keras-io/examples/keras_recipes/tensorflow_numpy_models.py/0
{ "file_path": "keras-io/examples/keras_recipes/tensorflow_numpy_models.py", "repo_id": "keras-io", "token_count": 3743 }
100
<jupyter_start><jupyter_text>End-to-end Masked Language Modeling with BERT**Author:** [Ankur Singh](https://twitter.com/ankur310794)**Date created:** 2020/09/18**Last modified:** 2020/09/18**Description:** Implement a Masked Language Model (MLM) with BERT and fine-tune it on the IMDB Reviews dataset. IntroductionMasked Language Modeling is a fill-in-the-blank task,where a model uses the context words surrounding a mask token to try to predict what themasked word should be.For an input that contains one or more mask tokens,the model will generate the most likely substitution for each.Example:- Input: "I have watched this [MASK] and it was awesome."- Output: "I have watched this movie and it was awesome."Masked language modeling is a great way to train a languagemodel in a self-supervised setting (without human-annotated labels).Such a model can then be fine-tuned to accomplish various supervisedNLP tasks.This example teaches you how to build a BERT model from scratch,train it with the masked language modeling task,and then fine-tune this model on a sentiment classification task.We will use the Keras `TextVectorization` and `MultiHeadAttention` layersto create a BERT Transformer-Encoder network architecture.Note: This example should be run with `tf-nightly`. SetupInstall `tf-nightly` via `pip install tf-nightly`.<jupyter_code>import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers from tensorflow.keras.layers import TextVectorization from dataclasses import dataclass import pandas as pd import numpy as np import glob import re from pprint import pprint<jupyter_output><empty_output><jupyter_text>Set-up Configuration<jupyter_code>@dataclass class Config: MAX_LEN = 256 BATCH_SIZE = 32 LR = 0.001 VOCAB_SIZE = 30000 EMBED_DIM = 128 NUM_HEAD = 8 # used in bert model FF_DIM = 128 # used in bert model NUM_LAYERS = 1 config = Config()<jupyter_output><empty_output><jupyter_text>Load the dataWe will first download the IMDB data and load into a Pandas dataframe.<jupyter_code>!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -xf aclImdb_v1.tar.gz def get_text_list_from_files(files): text_list = [] for name in files: with open(name) as f: for line in f: text_list.append(line) return text_list def get_data_from_text_files(folder_name): pos_files = glob.glob("aclImdb/" + folder_name + "/pos/*.txt") pos_texts = get_text_list_from_files(pos_files) neg_files = glob.glob("aclImdb/" + folder_name + "/neg/*.txt") neg_texts = get_text_list_from_files(neg_files) df = pd.DataFrame( { "review": pos_texts + neg_texts, "sentiment": [0] * len(pos_texts) + [1] * len(neg_texts), } ) df = df.sample(len(df)).reset_index(drop=True) return df train_df = get_data_from_text_files("train") test_df = get_data_from_text_files("test") all_data = train_df.append(test_df)<jupyter_output><empty_output><jupyter_text>Dataset preparationWe will use the `TextVectorization` layer to vectorize the text into integer token ids.It transforms a batch of strings into eithera sequence of token indices (one sample = 1D array of integer token indices, in order)or a dense representation (one sample = 1D array of float values encoding an unordered set of tokens).Below, we define 3 preprocessing functions.1. The `get_vectorize_layer` function builds the `TextVectorization` layer.2. The `encode` function encodes raw text into integer token ids.3. 
The `get_masked_input_and_labels` function will mask input token ids.It masks 15% of all input tokens in each sequence at random.<jupyter_code>def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ") return tf.strings.regex_replace( stripped_html, "[%s]" % re.escape("!#$%&'()*+,-./:;<=>?@\^_`{|}~"), "" ) def get_vectorize_layer(texts, vocab_size, max_seq, special_tokens=["[MASK]"]): """Build Text vectorization layer Args: texts (list): List of string i.e input texts vocab_size (int): vocab size max_seq (int): Maximum sequence lenght. special_tokens (list, optional): List of special tokens. Defaults to ['[MASK]']. Returns: layers.Layer: Return TextVectorization Keras Layer """ vectorize_layer = TextVectorization( max_tokens=vocab_size, output_mode="int", standardize=custom_standardization, output_sequence_length=max_seq, ) vectorize_layer.adapt(texts) # Insert mask token in vocabulary vocab = vectorize_layer.get_vocabulary() vocab = vocab[2 : vocab_size - len(special_tokens)] + ["[mask]"] vectorize_layer.set_vocabulary(vocab) return vectorize_layer vectorize_layer = get_vectorize_layer( all_data.review.values.tolist(), config.VOCAB_SIZE, config.MAX_LEN, special_tokens=["[mask]"], ) # Get mask token id for masked language model mask_token_id = vectorize_layer(["[mask]"]).numpy()[0][0] def encode(texts): encoded_texts = vectorize_layer(texts) return encoded_texts.numpy() def get_masked_input_and_labels(encoded_texts): # 15% BERT masking inp_mask = np.random.rand(*encoded_texts.shape) < 0.15 # Do not mask special tokens inp_mask[encoded_texts <= 2] = False # Set targets to -1 by default, it means ignore labels = -1 * np.ones(encoded_texts.shape, dtype=int) # Set labels for masked tokens labels[inp_mask] = encoded_texts[inp_mask] # Prepare input encoded_texts_masked = np.copy(encoded_texts) # Set input to [MASK] which is the last token for the 90% of tokens # This means leaving 10% unchanged inp_mask_2mask = inp_mask & (np.random.rand(*encoded_texts.shape) < 0.90) encoded_texts_masked[ inp_mask_2mask ] = mask_token_id # mask token is the last in the dict # Set 10% to a random token inp_mask_2random = inp_mask_2mask & (np.random.rand(*encoded_texts.shape) < 1 / 9) encoded_texts_masked[inp_mask_2random] = np.random.randint( 3, mask_token_id, inp_mask_2random.sum() ) # Prepare sample_weights to pass to .fit() method sample_weights = np.ones(labels.shape) sample_weights[labels == -1] = 0 # y_labels would be same as encoded_texts i.e input tokens y_labels = np.copy(encoded_texts) return encoded_texts_masked, y_labels, sample_weights # We have 25000 examples for training x_train = encode(train_df.review.values) # encode reviews with vectorizer y_train = train_df.sentiment.values train_classifier_ds = ( tf.data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(1000) .batch(config.BATCH_SIZE) ) # We have 25000 examples for testing x_test = encode(test_df.review.values) y_test = test_df.sentiment.values test_classifier_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch( config.BATCH_SIZE ) # Build dataset for end to end model input (will be used at the end) test_raw_classifier_ds = tf.data.Dataset.from_tensor_slices( (test_df.review.values, y_test) ).batch(config.BATCH_SIZE) # Prepare data for masked language model x_all_review = encode(all_data.review.values) x_masked_train, y_masked_labels, sample_weights = get_masked_input_and_labels( x_all_review ) mlm_ds = 
tf.data.Dataset.from_tensor_slices( (x_masked_train, y_masked_labels, sample_weights) ) mlm_ds = mlm_ds.shuffle(1000).batch(config.BATCH_SIZE)<jupyter_output><empty_output><jupyter_text>Create BERT model (Pretraining Model) for masked language modelingWe will create a BERT-like pretraining model architectureusing the `MultiHeadAttention` layer.It will take token ids as inputs (including masked tokens)and it will predict the correct ids for the masked input tokens.<jupyter_code>def bert_module(query, key, value, i): # Multi headed self-attention attention_output = layers.MultiHeadAttention( num_heads=config.NUM_HEAD, key_dim=config.EMBED_DIM // config.NUM_HEAD, name="encoder_{}/multiheadattention".format(i), )(query, key, value) attention_output = layers.Dropout(0.1, name="encoder_{}/att_dropout".format(i))( attention_output ) attention_output = layers.LayerNormalization( epsilon=1e-6, name="encoder_{}/att_layernormalization".format(i) )(query + attention_output) # Feed-forward layer ffn = keras.Sequential( [ layers.Dense(config.FF_DIM, activation="relu"), layers.Dense(config.EMBED_DIM), ], name="encoder_{}/ffn".format(i), ) ffn_output = ffn(attention_output) ffn_output = layers.Dropout(0.1, name="encoder_{}/ffn_dropout".format(i))( ffn_output ) sequence_output = layers.LayerNormalization( epsilon=1e-6, name="encoder_{}/ffn_layernormalization".format(i) )(attention_output + ffn_output) return sequence_output def get_pos_encoding_matrix(max_len, d_emb): pos_enc = np.array( [ [pos / np.power(10000, 2 * (j // 2) / d_emb) for j in range(d_emb)] if pos != 0 else np.zeros(d_emb) for pos in range(max_len) ] ) pos_enc[1:, 0::2] = np.sin(pos_enc[1:, 0::2]) # dim 2i pos_enc[1:, 1::2] = np.cos(pos_enc[1:, 1::2]) # dim 2i+1 return pos_enc loss_fn = keras.losses.SparseCategoricalCrossentropy( reduction=tf.keras.losses.Reduction.NONE ) loss_tracker = tf.keras.metrics.Mean(name="loss") class MaskedLanguageModel(tf.keras.Model): def train_step(self, inputs): if len(inputs) == 3: features, labels, sample_weight = inputs else: features, labels = inputs sample_weight = None with tf.GradientTape() as tape: predictions = self(features, training=True) loss = loss_fn(labels, predictions, sample_weight=sample_weight) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Compute our own metrics loss_tracker.update_state(loss, sample_weight=sample_weight) # Return a dict mapping metric names to current value return {"loss": loss_tracker.result()} @property def metrics(self): # We list our `Metric` objects here so that `reset_states()` can be # called automatically at the start of each epoch # or at the start of `evaluate()`. # If you don't implement this property, you have to call # `reset_states()` yourself at the time of your choosing. 
return [loss_tracker] def create_masked_language_bert_model(): inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64) word_embeddings = layers.Embedding( config.VOCAB_SIZE, config.EMBED_DIM, name="word_embedding" )(inputs) position_embeddings = layers.Embedding( input_dim=config.MAX_LEN, output_dim=config.EMBED_DIM, weights=[get_pos_encoding_matrix(config.MAX_LEN, config.EMBED_DIM)], name="position_embedding", )(tf.range(start=0, limit=config.MAX_LEN, delta=1)) embeddings = word_embeddings + position_embeddings encoder_output = embeddings for i in range(config.NUM_LAYERS): encoder_output = bert_module(encoder_output, encoder_output, encoder_output, i) mlm_output = layers.Dense(config.VOCAB_SIZE, name="mlm_cls", activation="softmax")( encoder_output ) mlm_model = MaskedLanguageModel(inputs, mlm_output, name="masked_bert_model") optimizer = keras.optimizers.Adam(learning_rate=config.LR) mlm_model.compile(optimizer=optimizer) return mlm_model id2token = dict(enumerate(vectorize_layer.get_vocabulary())) token2id = {y: x for x, y in id2token.items()} class MaskedTextGenerator(keras.callbacks.Callback): def __init__(self, sample_tokens, top_k=5): self.sample_tokens = sample_tokens self.k = top_k def decode(self, tokens): return " ".join([id2token[t] for t in tokens if t != 0]) def convert_ids_to_tokens(self, id): return id2token[id] def on_epoch_end(self, epoch, logs=None): prediction = self.model.predict(self.sample_tokens) masked_index = np.where(self.sample_tokens == mask_token_id) masked_index = masked_index[1] mask_prediction = prediction[0][masked_index] top_indices = mask_prediction[0].argsort()[-self.k :][::-1] values = mask_prediction[0][top_indices] for i in range(len(top_indices)): p = top_indices[i] v = values[i] tokens = np.copy(sample_tokens[0]) tokens[masked_index[0]] = p result = { "input_text": self.decode(sample_tokens[0].numpy()), "prediction": self.decode(tokens), "probability": v, "predicted mask token": self.convert_ids_to_tokens(p), } pprint(result) sample_tokens = vectorize_layer(["I have watched this [mask] and it was awesome"]) generator_callback = MaskedTextGenerator(sample_tokens.numpy()) bert_masked_model = create_masked_language_bert_model() bert_masked_model.summary()<jupyter_output><empty_output><jupyter_text>Train and Save<jupyter_code>bert_masked_model.fit(mlm_ds, epochs=5, callbacks=[generator_callback]) bert_masked_model.save("bert_mlm_imdb.h5")<jupyter_output><empty_output><jupyter_text>Fine-tune a sentiment classification modelWe will fine-tune our self-supervised model on a downstream task of sentiment classification.To do this, let's create a classifier by adding a pooling layer and a `Dense` layer on top of thepretrained BERT features.<jupyter_code># Load pretrained bert model mlm_model = keras.models.load_model( "bert_mlm_imdb.h5", custom_objects={"MaskedLanguageModel": MaskedLanguageModel} ) pretrained_bert_model = tf.keras.Model( mlm_model.input, mlm_model.get_layer("encoder_0/ffn_layernormalization").output ) # Freeze it pretrained_bert_model.trainable = False def create_classifier_bert_model(): inputs = layers.Input((config.MAX_LEN,), dtype=tf.int64) sequence_output = pretrained_bert_model(inputs) pooled_output = layers.GlobalMaxPooling1D()(sequence_output) hidden_layer = layers.Dense(64, activation="relu")(pooled_output) outputs = layers.Dense(1, activation="sigmoid")(hidden_layer) classifer_model = keras.Model(inputs, outputs, name="classification") optimizer = keras.optimizers.Adam() classifer_model.compile( optimizer=optimizer, 
loss="binary_crossentropy", metrics=["accuracy"] ) return classifer_model classifer_model = create_classifier_bert_model() classifer_model.summary() # Train the classifier with frozen BERT stage classifer_model.fit( train_classifier_ds, epochs=5, validation_data=test_classifier_ds, ) # Unfreeze the BERT model for fine-tuning pretrained_bert_model.trainable = True optimizer = keras.optimizers.Adam() classifer_model.compile( optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"] ) classifer_model.fit( train_classifier_ds, epochs=5, validation_data=test_classifier_ds, )<jupyter_output><empty_output><jupyter_text>Create an end-to-end model and evaluate itWhen you want to deploy a model, it's best if it already includes its preprocessingpipeline, so that you don't have to reimplement the preprocessing logic in yourproduction environment. Let's create an end-to-end model that incorporatesthe `TextVectorization` layer, and let's evaluate. Our model will accept raw stringsas input.<jupyter_code>def get_end_to_end(model): inputs_string = keras.Input(shape=(1,), dtype="string") indices = vectorize_layer(inputs_string) outputs = model(indices) end_to_end_model = keras.Model(inputs_string, outputs, name="end_to_end_model") optimizer = keras.optimizers.Adam(learning_rate=config.LR) end_to_end_model.compile( optimizer=optimizer, loss="binary_crossentropy", metrics=["accuracy"] ) return end_to_end_model end_to_end_classification_model = get_end_to_end(classifer_model) end_to_end_classification_model.evaluate(test_raw_classifier_ds)<jupyter_output><empty_output>
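<jupyter_text>As an optional, illustrative extra (not part of the original example), the end-to-end model can be called directly on raw strings. The reviews below are made up for demonstration; recall that in this example the label `1` corresponds to negative reviews and `0` to positive ones.<jupyter_code># Hedged sketch: score a couple of hand-written reviews with the end-to-end model.
sample_reviews = tf.constant(
    [
        ["This movie was a complete waste of time."],
        ["An absolutely wonderful film, I loved every minute of it."],
    ]
)
# Sigmoid outputs in [0, 1]; higher values lean towards the negative class (label 1).
print(end_to_end_classification_model.predict(sample_reviews))<jupyter_output><empty_output>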
keras-io/examples/nlp/ipynb/masked_language_modeling.ipynb/0
{ "file_path": "keras-io/examples/nlp/ipynb/masked_language_modeling.ipynb", "repo_id": "keras-io", "token_count": 6419 }
101
<jupyter_start><jupyter_text>Text classification from scratch**Authors:** Mark Omernick, Francois Chollet**Date created:** 2019/11/06**Last modified:** 2020/05/17**Description:** Text sentiment classification starting from raw text files. IntroductionThis example shows how to do text classification starting from raw text (asa set of text files on disk). We demonstrate the workflow on the IMDB sentimentclassification dataset (unprocessed version). We use the `TextVectorization` layer for word splitting & indexing. Setup<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" import keras import tensorflow as tf import numpy as np from keras import layers<jupyter_output><empty_output><jupyter_text>Load the data: IMDB movie review sentiment classificationLet's download the data and inspect its structure.<jupyter_code>!curl -O https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -xf aclImdb_v1.tar.gz<jupyter_output><empty_output><jupyter_text>The `aclImdb` folder contains a `train` and `test` subfolder:<jupyter_code>!ls aclImdb !ls aclImdb/test !ls aclImdb/train<jupyter_output><empty_output><jupyter_text>The `aclImdb/train/pos` and `aclImdb/train/neg` folders contain text files, each of which represents one review (either positive or negative):<jupyter_code>!cat aclImdb/train/pos/6248_7.txt<jupyter_output><empty_output><jupyter_text>We are only interested in the `pos` and `neg` subfolders, so let's delete the other subfolder that has text files in it:<jupyter_code>!rm -r aclImdb/train/unsup<jupyter_output><empty_output><jupyter_text>You can use the utility `keras.utils.text_dataset_from_directory` togenerate a labeled `tf.data.Dataset` object from a set of text files on disk filed into class-specific folders.Let's use it to generate the training, validation, and test datasets. The validationand training datasets are generated from two subsets of the `train` directory, with 20%of samples going to the validation dataset and 80% going to the training dataset.Having a validation dataset in addition to the test dataset is useful for tuninghyperparameters, such as the model architecture, for which the test dataset should notbe used.Before putting the model out into the real world however, it should be retrained using allavailable training data (without creating a validation dataset), so its performance is maximized.When using the `validation_split` & `subset` arguments, make sure to either specify arandom seed, or to pass `shuffle=False`, so that the validation & training splits youget have no overlap.<jupyter_code>batch_size = 32 raw_train_ds = keras.utils.text_dataset_from_directory( "aclImdb/train", batch_size=batch_size, validation_split=0.2, subset="training", seed=1337, ) raw_val_ds = keras.utils.text_dataset_from_directory( "aclImdb/train", batch_size=batch_size, validation_split=0.2, subset="validation", seed=1337, ) raw_test_ds = keras.utils.text_dataset_from_directory( "aclImdb/test", batch_size=batch_size ) print(f"Number of batches in raw_train_ds: {raw_train_ds.cardinality()}") print(f"Number of batches in raw_val_ds: {raw_val_ds.cardinality()}") print(f"Number of batches in raw_test_ds: {raw_test_ds.cardinality()}")<jupyter_output><empty_output><jupyter_text>Let's preview a few samples:<jupyter_code># It's important to take a look at your raw data to ensure your normalization # and tokenization will work as expected. We can do that by taking a few # examples from the training set and looking at them. 
# This is one of the places where eager execution shines: # we can just evaluate these tensors using .numpy() # instead of needing to evaluate them in a Session/Graph context. for text_batch, label_batch in raw_train_ds.take(1): for i in range(5): print(text_batch.numpy()[i]) print(label_batch.numpy()[i])<jupyter_output><empty_output><jupyter_text>Prepare the dataIn particular, we remove `` tags.<jupyter_code>import string import re # Having looked at our data above, we see that the raw text contains HTML break # tags of the form '<br />'. These tags will not be removed by the default # standardizer (which doesn't strip HTML). Because of this, we will need to # create a custom standardization function. def custom_standardization(input_data): lowercase = tf.strings.lower(input_data) stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ") return tf.strings.regex_replace( stripped_html, f"[{re.escape(string.punctuation)}]", "" ) # Model constants. max_features = 20000 embedding_dim = 128 sequence_length = 500 # Now that we have our custom standardization, we can instantiate our text # vectorization layer. We are using this layer to normalize, split, and map # strings to integers, so we set our 'output_mode' to 'int'. # Note that we're using the default split function, # and the custom standardization defined above. # We also set an explicit maximum sequence length, since the CNNs later in our # model won't support ragged sequences. vectorize_layer = keras.layers.TextVectorization( standardize=custom_standardization, max_tokens=max_features, output_mode="int", output_sequence_length=sequence_length, ) # Now that the vectorize_layer has been created, call `adapt` on a text-only # dataset to create the vocabulary. You don't have to batch, but for very large # datasets this means you're not keeping spare copies of the dataset in memory. # Let's make a text-only dataset (no labels): text_ds = raw_train_ds.map(lambda x, y: x) # Let's call `adapt`: vectorize_layer.adapt(text_ds)<jupyter_output><empty_output><jupyter_text>Two options to vectorize the dataThere are 2 ways we can use our text vectorization layer:**Option 1: Make it part of the model**, so as to obtain a model that processes raw strings, like this: ```pythontext_input = keras.Input(shape=(1,), dtype=tf.string, name='text')x = vectorize_layer(text_input)x = layers.Embedding(max_features + 1, embedding_dim)(x)...```**Option 2: Apply it to the text dataset** to obtain a dataset of word indices, then feed it into a model that expects integer sequences as inputs.An important difference between the two is that option 2 enables you to do**asynchronous CPU processing and buffering** of your data when training on GPU.So if you're training the model on GPU, you probably want to go with this option to get the best performance. This is what we will do below.If we were to export our model to production, we'd ship a model that accepts rawstrings as input, like in the code snippet for option 1 above. This can be done after training. We do this in the last section.<jupyter_code>def vectorize_text(text, label): text = tf.expand_dims(text, -1) return vectorize_layer(text), label # Vectorize the data. train_ds = raw_train_ds.map(vectorize_text) val_ds = raw_val_ds.map(vectorize_text) test_ds = raw_test_ds.map(vectorize_text) # Do async prefetching / buffering of the data for best performance on GPU. 
train_ds = train_ds.cache().prefetch(buffer_size=10) val_ds = val_ds.cache().prefetch(buffer_size=10) test_ds = test_ds.cache().prefetch(buffer_size=10)<jupyter_output><empty_output><jupyter_text>Build a modelWe choose a simple 1D convnet starting with an `Embedding` layer.<jupyter_code># A integer input for vocab indices. inputs = keras.Input(shape=(None,), dtype="int64") # Next, we add a layer to map those vocab indices into a space of dimensionality # 'embedding_dim'. x = layers.Embedding(max_features, embedding_dim)(inputs) x = layers.Dropout(0.5)(x) # Conv1D + global max pooling x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x) x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x) x = layers.GlobalMaxPooling1D()(x) # We add a vanilla hidden layer: x = layers.Dense(128, activation="relu")(x) x = layers.Dropout(0.5)(x) # We project onto a single unit output layer, and squash it with a sigmoid: predictions = layers.Dense(1, activation="sigmoid", name="predictions")(x) model = keras.Model(inputs, predictions) # Compile the model with binary crossentropy loss and an adam optimizer. model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>epochs = 3 # Fit the model using the train and test datasets. model.fit(train_ds, validation_data=val_ds, epochs=epochs)<jupyter_output><empty_output><jupyter_text>Evaluate the model on the test set<jupyter_code>model.evaluate(test_ds)<jupyter_output><empty_output><jupyter_text>Make an end-to-end modelIf you want to obtain a model capable of processing raw strings, you can simplycreate a new model (using the weights we just trained):<jupyter_code># A string input inputs = keras.Input(shape=(1,), dtype="string") # Turn strings into vocab indices indices = vectorize_layer(inputs) # Turn vocab indices into predictions outputs = model(indices) # Our end to end model end_to_end_model = keras.Model(inputs, outputs) end_to_end_model.compile( loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"] ) # Test it with `raw_test_ds`, which yields raw strings end_to_end_model.evaluate(raw_test_ds)<jupyter_output><empty_output>
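<jupyter_text>As a small illustrative addition (not part of the original example), here is a quick sketch of scoring a couple of made-up reviews with the end-to-end model. With `text_dataset_from_directory`, class indices follow the alphabetical folder order, so `neg` maps to 0 and `pos` maps to 1; the sigmoid output can therefore be read as the estimated probability that a review is positive.<jupyter_code># Hedged sketch: predict on raw strings using the end-to-end model defined above.
examples = tf.constant(
    [
        ["The plot was dull and the acting was even worse."],
        ["A delightful film with terrific performances."],
    ]
)
print(end_to_end_model.predict(examples))  # values close to 1.0 suggest a positive review<jupyter_output><empty_output>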
keras-io/examples/nlp/ipynb/text_classification_from_scratch.ipynb/0
{ "file_path": "keras-io/examples/nlp/ipynb/text_classification_from_scratch.ipynb", "repo_id": "keras-io", "token_count": 2932 }
102
# Text classification with Transformer **Author:** [Apoorv Nandan](https://twitter.com/NandanApoorv)<br> **Date created:** 2020/05/10<br> **Last modified:** 2024/01/18<br> **Description:** Implement a Transformer block as a Keras layer and use it for text classification. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/text_classification_with_transformer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/text_classification_with_transformer.py) --- ## Setup ```python import keras from keras import ops from keras import layers ``` --- ## Implement a Transformer block as a layer ```python class TransformerBlock(layers.Layer): def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1): super().__init__() self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim) self.ffn = keras.Sequential( [layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim),] ) self.layernorm1 = layers.LayerNormalization(epsilon=1e-6) self.layernorm2 = layers.LayerNormalization(epsilon=1e-6) self.dropout1 = layers.Dropout(rate) self.dropout2 = layers.Dropout(rate) def call(self, inputs): attn_output = self.att(inputs, inputs) attn_output = self.dropout1(attn_output) out1 = self.layernorm1(inputs + attn_output) ffn_output = self.ffn(out1) ffn_output = self.dropout2(ffn_output) return self.layernorm2(out1 + ffn_output) ``` --- ## Implement embedding layer Two seperate embedding layers, one for tokens, one for token index (positions). ```python class TokenAndPositionEmbedding(layers.Layer): def __init__(self, maxlen, vocab_size, embed_dim): super().__init__() self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim) self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim) def call(self, x): maxlen = ops.shape(x)[-1] positions = ops.arange(start=0, stop=maxlen, step=1) positions = self.pos_emb(positions) x = self.token_emb(x) return x + positions ``` --- ## Download and prepare dataset ```python vocab_size = 20000 # Only consider the top 20k words maxlen = 200 # Only consider the first 200 words of each movie review (x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size) print(len(x_train), "Training sequences") print(len(x_val), "Validation sequences") x_train = keras.utils.pad_sequences(x_train, maxlen=maxlen) x_val = keras.utils.pad_sequences(x_val, maxlen=maxlen) ``` <div class="k-default-codeblock"> ``` Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz 17465344/17464789 [==============================] - 0s 0us/step <string>:6: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/datasets/imdb.py:159: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. 
If you meant to do this, you must specify 'dtype=object' when creating the ndarray x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx]) /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/datasets/imdb.py:160: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:]) 25000 Training sequences 25000 Validation sequences ``` </div> --- ## Create classifier model using transformer layer Transformer layer outputs one vector for each time step of our input sequence. Here, we take the mean across all time steps and use a feed forward network on top of it to classify text. ```python embed_dim = 32 # Embedding size for each token num_heads = 2 # Number of attention heads ff_dim = 32 # Hidden layer size in feed forward network inside transformer inputs = layers.Input(shape=(maxlen,)) embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim) x = embedding_layer(inputs) transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim) x = transformer_block(x) x = layers.GlobalAveragePooling1D()(x) x = layers.Dropout(0.1)(x) x = layers.Dense(20, activation="relu")(x) x = layers.Dropout(0.1)(x) outputs = layers.Dense(2, activation="softmax")(x) model = keras.Model(inputs=inputs, outputs=outputs) ``` --- ## Train and Evaluate ```python model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]) history = model.fit( x_train, y_train, batch_size=32, epochs=2, validation_data=(x_val, y_val) ) ``` <div class="k-default-codeblock"> ``` Epoch 1/2 782/782 [==============================] - 15s 18ms/step - loss: 0.5112 - accuracy: 0.7070 - val_loss: 0.3598 - val_accuracy: 0.8444 Epoch 2/2 782/782 [==============================] - 13s 17ms/step - loss: 0.1942 - accuracy: 0.9297 - val_loss: 0.2977 - val_accuracy: 0.8745 ``` </div>
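As a small, optional follow-up (not part of the original example), you can inspect the trained classifier's outputs directly. Since the final layer is a 2-way softmax, taking the argmax over the last axis gives the predicted class for each review; the slice of four validation samples below is arbitrary.

```python
import numpy as np

# Illustrative sketch: probabilities and predicted classes for a few validation reviews.
probs = model.predict(x_val[:4])
print(probs)  # shape (4, 2): softmax over the two sentiment classes
print("predicted:", np.argmax(probs, axis=-1), "actual:", y_val[:4])
```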
keras-io/examples/nlp/md/text_classification_with_transformer.md/0
{ "file_path": "keras-io/examples/nlp/md/text_classification_with_transformer.md", "repo_id": "keras-io", "token_count": 2131 }
103
# Deep Deterministic Policy Gradient (DDPG) **Author:** [amifunny](https://github.com/amifunny)<br> **Date created:** 2020/06/04<br> **Last modified:** 2020/09/21<br> **Description:** Implementing the DDPG algorithm on the Inverted Pendulum problem. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/rl/ipynb/ddpg_pendulum.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/rl/ddpg_pendulum.py) --- ## Introduction **Deep Deterministic Policy Gradient (DDPG)** is a model-free off-policy algorithm for learning continuous actions. It combines ideas from DPG (Deterministic Policy Gradient) and DQN (Deep Q-Network). It uses Experience Replay and slow-learning target networks from DQN, and it is based on DPG, which can operate over continuous action spaces. This tutorial closely follows this paper: [Continuous control with deep reinforcement learning](https://arxiv.org/pdf/1509.02971.pdf) --- ## Problem We are trying to solve the classic **Inverted Pendulum** control problem. In this setting, we can take only two actions: swing left or swing right. What makes this problem challenging for Q-Learning algorithms is that the actions are **continuous** instead of being **discrete**. That is, instead of using two discrete actions like `-1` or `+1`, we have to select from an infinite range of actions between `-2` and `+2`. --- ## Quick theory Just like the Actor-Critic method, we have two networks: 1. Actor - It proposes an action given a state. 2. Critic - It predicts if the action is good (positive value) or bad (negative value) given a state and an action. DDPG uses two more techniques not present in the original DQN: **First, it uses two Target networks.** **Why?** Because it adds stability to training. In short, we are learning from estimated targets, and Target networks are updated slowly, hence keeping our estimated targets stable. Conceptually, this is like saying, "I have an idea of how to play this well, I'm going to try it out for a bit until I find something better", as opposed to saying "I'm going to re-learn how to play this entire game after every move". See this [StackOverflow answer](https://stackoverflow.com/a/54238556/13475679). **Second, it uses Experience Replay.** We store a list of tuples `(state, action, reward, next_state)`, and instead of learning only from recent experience, we learn from sampling all of our experience accumulated so far. Now, let's see how it is implemented. ```python import gym import tensorflow as tf from tensorflow.keras import layers import numpy as np import matplotlib.pyplot as plt ``` We use [OpenAI Gym](http://gym.openai.com/docs) to create the environment. We will use the `upper_bound` parameter to scale our actions later.
```python problem = "Pendulum-v1" env = gym.make(problem) num_states = env.observation_space.shape[0] print("Size of State Space -> {}".format(num_states)) num_actions = env.action_space.shape[0] print("Size of Action Space -> {}".format(num_actions)) upper_bound = env.action_space.high[0] lower_bound = env.action_space.low[0] print("Max Value of Action -> {}".format(upper_bound)) print("Min Value of Action -> {}".format(lower_bound)) ``` <div class="k-default-codeblock"> ``` Size of State Space -> 3 Size of Action Space -> 1 Max Value of Action -> 2.0 Min Value of Action -> -2.0 ``` </div> To implement better exploration by the Actor network, we use noisy perturbations, specifically an **Ornstein-Uhlenbeck process** for generating noise, as described in the paper. It samples noise from a correlated normal distribution. ```python class OUActionNoise: def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None): self.theta = theta self.mean = mean self.std_dev = std_deviation self.dt = dt self.x_initial = x_initial self.reset() def __call__(self): # Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process. x = ( self.x_prev + self.theta * (self.mean - self.x_prev) * self.dt + self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape) ) # Store x into x_prev # Makes next noise dependent on current one self.x_prev = x return x def reset(self): if self.x_initial is not None: self.x_prev = self.x_initial else: self.x_prev = np.zeros_like(self.mean) ``` The `Buffer` class implements Experience Replay. --- ![Algorithm](https://i.imgur.com/mS6iGyJ.jpg) --- **Critic loss** - Mean Squared Error of `y - Q(s, a)` where `y` is the expected return as seen by the Target network, and `Q(s, a)` is action value predicted by the Critic network. `y` is a moving target that the critic model tries to achieve; we make this target stable by updating the Target model slowly. **Actor loss** - This is computed using the mean of the value given by the Critic network for the actions taken by the Actor network. We seek to maximize this quantity. Hence we update the Actor network so that it produces actions that get the maximum predicted value as seen by the Critic, for a given state. ```python class Buffer: def __init__(self, buffer_capacity=100000, batch_size=64): # Number of "experiences" to store at max self.buffer_capacity = buffer_capacity # Num of tuples to train on. self.batch_size = batch_size # Its tells us num of times record() was called. self.buffer_counter = 0 # Instead of list of tuples as the exp.replay concept go # We use different np.arrays for each tuple element self.state_buffer = np.zeros((self.buffer_capacity, num_states)) self.action_buffer = np.zeros((self.buffer_capacity, num_actions)) self.reward_buffer = np.zeros((self.buffer_capacity, 1)) self.next_state_buffer = np.zeros((self.buffer_capacity, num_states)) # Takes (s,a,r,s') obervation tuple as input def record(self, obs_tuple): # Set index to zero if buffer_capacity is exceeded, # replacing old records index = self.buffer_counter % self.buffer_capacity self.state_buffer[index] = obs_tuple[0] self.action_buffer[index] = obs_tuple[1] self.reward_buffer[index] = obs_tuple[2] self.next_state_buffer[index] = obs_tuple[3] self.buffer_counter += 1 # Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows # TensorFlow to build a static graph out of the logic and computations in our function. 
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one. @tf.function def update( self, state_batch, action_batch, reward_batch, next_state_batch, ): # Training and updating Actor & Critic networks. # See Pseudo Code. with tf.GradientTape() as tape: target_actions = target_actor(next_state_batch, training=True) y = reward_batch + gamma * target_critic( [next_state_batch, target_actions], training=True ) critic_value = critic_model([state_batch, action_batch], training=True) critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value)) critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables) critic_optimizer.apply_gradients( zip(critic_grad, critic_model.trainable_variables) ) with tf.GradientTape() as tape: actions = actor_model(state_batch, training=True) critic_value = critic_model([state_batch, actions], training=True) # Used `-value` as we want to maximize the value given # by the critic for our actions actor_loss = -tf.math.reduce_mean(critic_value) actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables) actor_optimizer.apply_gradients( zip(actor_grad, actor_model.trainable_variables) ) # We compute the loss and update parameters def learn(self): # Get sampling range record_range = min(self.buffer_counter, self.buffer_capacity) # Randomly sample indices batch_indices = np.random.choice(record_range, self.batch_size) # Convert to tensors state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices]) action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices]) reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices]) reward_batch = tf.cast(reward_batch, dtype=tf.float32) next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices]) self.update(state_batch, action_batch, reward_batch, next_state_batch) # This update target parameters slowly # Based on rate `tau`, which is much less than one. @tf.function def update_target(target_weights, weights, tau): for (a, b) in zip(target_weights, weights): a.assign(b * tau + a * (1 - tau)) ``` Here we define the Actor and Critic networks. These are basic Dense models with `ReLU` activation. Note: We need the initialization for last layer of the Actor to be between `-0.003` and `0.003` as this prevents us from getting `1` or `-1` output values in the initial stages, which would squash our gradients to zero, as we use the `tanh` activation. ```python def get_actor(): # Initialize weights between -3e-3 and 3-e3 last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003) inputs = layers.Input(shape=(num_states,)) out = layers.Dense(256, activation="relu")(inputs) out = layers.Dense(256, activation="relu")(out) outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out) # Our upper bound is 2.0 for Pendulum. 
outputs = outputs * upper_bound model = tf.keras.Model(inputs, outputs) return model def get_critic(): # State as input state_input = layers.Input(shape=(num_states)) state_out = layers.Dense(16, activation="relu")(state_input) state_out = layers.Dense(32, activation="relu")(state_out) # Action as input action_input = layers.Input(shape=(num_actions)) action_out = layers.Dense(32, activation="relu")(action_input) # Both are passed through seperate layer before concatenating concat = layers.Concatenate()([state_out, action_out]) out = layers.Dense(256, activation="relu")(concat) out = layers.Dense(256, activation="relu")(out) outputs = layers.Dense(1)(out) # Outputs single value for give state-action model = tf.keras.Model([state_input, action_input], outputs) return model ``` `policy()` returns an action sampled from our Actor network plus some noise for exploration. ```python def policy(state, noise_object): sampled_actions = tf.squeeze(actor_model(state)) noise = noise_object() # Adding noise to action sampled_actions = sampled_actions.numpy() + noise # We make sure action is within bounds legal_action = np.clip(sampled_actions, lower_bound, upper_bound) return [np.squeeze(legal_action)] ``` --- ## Training hyperparameters ```python std_dev = 0.2 ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1)) actor_model = get_actor() critic_model = get_critic() target_actor = get_actor() target_critic = get_critic() # Making the weights equal initially target_actor.set_weights(actor_model.get_weights()) target_critic.set_weights(critic_model.get_weights()) # Learning rate for actor-critic models critic_lr = 0.002 actor_lr = 0.001 critic_optimizer = tf.keras.optimizers.Adam(critic_lr) actor_optimizer = tf.keras.optimizers.Adam(actor_lr) total_episodes = 100 # Discount factor for future rewards gamma = 0.99 # Used to update target networks tau = 0.005 buffer = Buffer(50000, 64) ``` Now we implement our main training loop, and iterate over episodes. We sample actions using `policy()` and train with `learn()` at each time step, along with updating the Target networks at a rate `tau`. ```python # To store reward history of each episode ep_reward_list = [] # To store average reward history of last few episodes avg_reward_list = [] # Takes about 4 min to train for ep in range(total_episodes): prev_state = env.reset() episodic_reward = 0 while True: # Uncomment this to see the Actor in action # But not in a python notebook. # env.render() tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0) action = policy(tf_prev_state, ou_noise) # Recieve state and reward from environment. state, reward, done, info = env.step(action) buffer.record((prev_state, action, reward, state)) episodic_reward += reward buffer.learn() update_target(target_actor.variables, actor_model.variables, tau) update_target(target_critic.variables, critic_model.variables, tau) # End this episode when `done` is True if done: break prev_state = state ep_reward_list.append(episodic_reward) # Mean of last 40 episodes avg_reward = np.mean(ep_reward_list[-40:]) print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward)) avg_reward_list.append(avg_reward) # Plotting graph # Episodes versus Avg. Rewards plt.plot(avg_reward_list) plt.xlabel("Episode") plt.ylabel("Avg. 
Epsiodic Reward") plt.show() ``` <div class="k-default-codeblock"> ``` Episode * 0 * Avg Reward is ==> -1269.3278950595395 Episode * 1 * Avg Reward is ==> -1528.3008939716287 Episode * 2 * Avg Reward is ==> -1511.1737868279706 Episode * 3 * Avg Reward is ==> -1512.8568141261057 Episode * 4 * Avg Reward is ==> -1386.054573343386 Episode * 5 * Avg Reward is ==> -1411.4818856846339 Episode * 6 * Avg Reward is ==> -1431.6790621961388 Episode * 7 * Avg Reward is ==> -1427.9515009474867 Episode * 8 * Avg Reward is ==> -1392.9313930075857 Episode * 9 * Avg Reward is ==> -1346.6839043846012 Episode * 10 * Avg Reward is ==> -1325.5818224096574 Episode * 11 * Avg Reward is ==> -1271.778361283553 Episode * 12 * Avg Reward is ==> -1194.0784354001732 Episode * 13 * Avg Reward is ==> -1137.1096928093427 Episode * 14 * Avg Reward is ==> -1087.2426176918214 Episode * 15 * Avg Reward is ==> -1043.5265287176114 Episode * 16 * Avg Reward is ==> -990.0857409180443 Episode * 17 * Avg Reward is ==> -949.0661362879348 Episode * 18 * Avg Reward is ==> -906.1744575963231 Episode * 19 * Avg Reward is ==> -914.0098344966382 Episode * 20 * Avg Reward is ==> -886.8905055354011 Episode * 21 * Avg Reward is ==> -859.3416389004793 Episode * 22 * Avg Reward is ==> -827.5405203616622 Episode * 23 * Avg Reward is ==> -798.3875178404127 Episode * 24 * Avg Reward is ==> -771.289491103158 Episode * 25 * Avg Reward is ==> -741.6622445749622 Episode * 26 * Avg Reward is ==> -727.7080867854874 Episode * 27 * Avg Reward is ==> -710.485046117201 Episode * 28 * Avg Reward is ==> -690.3850022530833 Episode * 29 * Avg Reward is ==> -671.3205042911178 Episode * 30 * Avg Reward is ==> -653.4475135842247 Episode * 31 * Avg Reward is ==> -637.0057392119055 Episode * 32 * Avg Reward is ==> -629.2474166794424 Episode * 33 * Avg Reward is ==> -614.4655398230501 Episode * 34 * Avg Reward is ==> -603.3854873345723 Episode * 35 * Avg Reward is ==> -589.86534490467 Episode * 36 * Avg Reward is ==> -577.1806480684269 Episode * 37 * Avg Reward is ==> -565.1365286280546 Episode * 38 * Avg Reward is ==> -550.6647028563134 Episode * 39 * Avg Reward is ==> -540.0095147571197 Episode * 40 * Avg Reward is ==> -517.3861294233157 Episode * 41 * Avg Reward is ==> -478.705352005952 Episode * 42 * Avg Reward is ==> -444.8350788756713 Episode * 43 * Avg Reward is ==> -409.85293165991334 Episode * 44 * Avg Reward is ==> -390.83984710631546 Episode * 45 * Avg Reward is ==> -360.88156865913675 Episode * 46 * Avg Reward is ==> -325.26685315168595 Episode * 47 * Avg Reward is ==> -290.2315644399411 Episode * 48 * Avg Reward is ==> -268.0351126010609 Episode * 49 * Avg Reward is ==> -247.8952699063706 Episode * 50 * Avg Reward is ==> -222.99123461788048 Episode * 51 * Avg Reward is ==> -209.0830401020491 Episode * 52 * Avg Reward is ==> -205.65143423678765 Episode * 53 * Avg Reward is ==> -201.8910585767988 Episode * 54 * Avg Reward is ==> -192.18560466037357 Episode * 55 * Avg Reward is ==> -189.43475813660137 Episode * 56 * Avg Reward is ==> -191.92700535454787 Episode * 57 * Avg Reward is ==> -188.5196218645745 Episode * 58 * Avg Reward is ==> -188.17872234729674 Episode * 59 * Avg Reward is ==> -167.33043921566485 Episode * 60 * Avg Reward is ==> -165.01361185173954 Episode * 61 * Avg Reward is ==> -164.5316658073024 Episode * 62 * Avg Reward is ==> -164.4025677076815 Episode * 63 * Avg Reward is ==> -167.27842005634784 Episode * 64 * Avg Reward is ==> -167.12049955654845 Episode * 65 * Avg Reward is ==> -170.02761731078783 Episode * 66 * Avg Reward is ==> 
-167.56039601863873 Episode * 67 * Avg Reward is ==> -164.60482495249738 Episode * 68 * Avg Reward is ==> -167.45278232469394 Episode * 69 * Avg Reward is ==> -167.42407364484592 Episode * 70 * Avg Reward is ==> -167.57794933965346 Episode * 71 * Avg Reward is ==> -170.6408611483338 Episode * 72 * Avg Reward is ==> -163.96954092530822 Episode * 73 * Avg Reward is ==> -160.82007525469245 Episode * 74 * Avg Reward is ==> -158.38239222565778 Episode * 75 * Avg Reward is ==> -158.3554729720654 Episode * 76 * Avg Reward is ==> -158.51036948298994 Episode * 77 * Avg Reward is ==> -158.68906473090686 Episode * 78 * Avg Reward is ==> -164.60260866654318 Episode * 79 * Avg Reward is ==> -161.5493472156026 Episode * 80 * Avg Reward is ==> -152.48077012719403 Episode * 81 * Avg Reward is ==> -149.52532010375975 Episode * 82 * Avg Reward is ==> -149.61942419730423 Episode * 83 * Avg Reward is ==> -149.82443455067468 Episode * 84 * Avg Reward is ==> -149.80009937226978 Episode * 85 * Avg Reward is ==> -144.51659331262107 Episode * 86 * Avg Reward is ==> -150.7545561142967 Episode * 87 * Avg Reward is ==> -153.84772667131307 Episode * 88 * Avg Reward is ==> -151.35200443047225 Episode * 89 * Avg Reward is ==> -148.30392250041828 Episode * 90 * Avg Reward is ==> -151.33886235855053 Episode * 91 * Avg Reward is ==> -151.153096135589 Episode * 92 * Avg Reward is ==> -151.19626034791332 Episode * 93 * Avg Reward is ==> -151.15870791946685 Episode * 94 * Avg Reward is ==> -154.2673372216281 Episode * 95 * Avg Reward is ==> -150.40737651480134 Episode * 96 * Avg Reward is ==> -147.7969116731913 Episode * 97 * Avg Reward is ==> -147.88640802454557 Episode * 98 * Avg Reward is ==> -144.88997165191319 Episode * 99 * Avg Reward is ==> -142.22158276699662 ``` </div> ![png](/img/examples/rl/ddpg_pendulum/ddpg_pendulum_16_1.png) If training proceeds correctly, the average episodic reward will increase with time. Feel free to try different learning rates, `tau` values, and architectures for the Actor and Critic networks. The Inverted Pendulum problem has low complexity, but DDPG work great on many other problems. Another great environment to try this on is `LunarLandingContinuous-v2`, but it will take more episodes to obtain good results. ```python # Save the weights actor_model.save_weights("pendulum_actor.h5") critic_model.save_weights("pendulum_critic.h5") target_actor.save_weights("pendulum_target_actor.h5") target_critic.save_weights("pendulum_target_critic.h5") ``` Before Training: ![before_img](https://i.imgur.com/ox6b9rC.gif) After 100 episodes: ![after_img](https://i.imgur.com/eEH8Cz6.gif)
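As a quick sanity check (not part of the original example), you can reload the saved actor weights and roll out a single evaluation episode without exploration noise. The sketch below assumes the environment, action bounds, and `get_actor()` defined above are still in scope, and uses the same Gym API as the training loop:

```python
# Rebuild the actor architecture and load the trained weights.
eval_actor = get_actor()
eval_actor.load_weights("pendulum_actor.h5")

prev_state = env.reset()
done, total_reward = False, 0.0
while not done:
    tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
    # Greedy action: take the actor output directly, clipped to the action bounds.
    action = np.clip(eval_actor(tf_prev_state)[0].numpy(), lower_bound, upper_bound)
    prev_state, reward, done, info = env.step(action)
    total_reward += reward
print("Greedy episode return:", total_reward)
```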
keras-io/examples/rl/md/ddpg_pendulum.md/0
{ "file_path": "keras-io/examples/rl/md/ddpg_pendulum.md", "repo_id": "keras-io", "token_count": 7210 }
104
<jupyter_start><jupyter_text>Structured data learning with Wide, Deep, and Cross networks**Author:** [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)**Date created:** 2020/12/31**Last modified:** 2021/05/05**Description:** Using Wide & Deep and Deep & Cross networks for structured data classification. IntroductionThis example demonstrates how to do structured data classification using the two modelingtechniques:1. [Wide & Deep](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) models2. [Deep & Cross](https://arxiv.org/abs/1708.05123) modelsNote that this example should be run with TensorFlow 2.5 or higher. The datasetThis example uses the [Covertype](https://archive.ics.uci.edu/ml/datasets/covertype) dataset from the UCIMachine Learning Repository. The task is to predict forest cover type from cartographic variables.The dataset includes 506,011 instances with 12 input features: 10 numerical features and 2categorical features. Each instance is categorized into 1 of 7 classes. Setup<jupyter_code>import os # Only the TensorFlow backend supports string inputs. os.environ["KERAS_BACKEND"] = "tensorflow" import math import numpy as np import pandas as pd from tensorflow import data as tf_data import keras from keras import layers<jupyter_output><empty_output><jupyter_text>Prepare the dataFirst, let's load the dataset from the UCI Machine Learning Repository into a PandasDataFrame:<jupyter_code>data_url = ( "https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz" ) raw_data = pd.read_csv(data_url, header=None) print(f"Dataset shape: {raw_data.shape}") raw_data.head()<jupyter_output><empty_output><jupyter_text>The two categorical features in the dataset are binary-encoded.We will convert this dataset representation to the typical representation, where eachcategorical feature is represented as a single integer value.<jupyter_code>soil_type_values = [f"soil_type_{idx+1}" for idx in range(40)] wilderness_area_values = [f"area_type_{idx+1}" for idx in range(4)] soil_type = raw_data.loc[:, 14:53].apply( lambda x: soil_type_values[0::1][x.to_numpy().nonzero()[0][0]], axis=1 ) wilderness_area = raw_data.loc[:, 10:13].apply( lambda x: wilderness_area_values[0::1][x.to_numpy().nonzero()[0][0]], axis=1 ) CSV_HEADER = [ "Elevation", "Aspect", "Slope", "Horizontal_Distance_To_Hydrology", "Vertical_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm", "Horizontal_Distance_To_Fire_Points", "Wilderness_Area", "Soil_Type", "Cover_Type", ] data = pd.concat( [raw_data.loc[:, 0:9], wilderness_area, soil_type, raw_data.loc[:, 54]], axis=1, ignore_index=True, ) data.columns = CSV_HEADER # Convert the target label indices into a range from 0 to 6 (there are 7 labels in total). 
data["Cover_Type"] = data["Cover_Type"] - 1 print(f"Dataset shape: {data.shape}") data.head().T<jupyter_output><empty_output><jupyter_text>The shape of the DataFrame shows there are 13 columns per sample(12 for the features and 1 for the target label).Let's split the data into training (85%) and test (15%) sets.<jupyter_code>train_splits = [] test_splits = [] for _, group_data in data.groupby("Cover_Type"): random_selection = np.random.rand(len(group_data.index)) <= 0.85 train_splits.append(group_data[random_selection]) test_splits.append(group_data[~random_selection]) train_data = pd.concat(train_splits).sample(frac=1).reset_index(drop=True) test_data = pd.concat(test_splits).sample(frac=1).reset_index(drop=True) print(f"Train split size: {len(train_data.index)}") print(f"Test split size: {len(test_data.index)}")<jupyter_output><empty_output><jupyter_text>Next, store the training and test data in separate CSV files.<jupyter_code>train_data_file = "train_data.csv" test_data_file = "test_data.csv" train_data.to_csv(train_data_file, index=False) test_data.to_csv(test_data_file, index=False)<jupyter_output><empty_output><jupyter_text>Define dataset metadataHere, we define the metadata of the dataset that will be useful for reading and parsingthe data into input features, and encoding the input features with respect to their types.<jupyter_code>TARGET_FEATURE_NAME = "Cover_Type" TARGET_FEATURE_LABELS = ["0", "1", "2", "3", "4", "5", "6"] NUMERIC_FEATURE_NAMES = [ "Aspect", "Elevation", "Hillshade_3pm", "Hillshade_9am", "Hillshade_Noon", "Horizontal_Distance_To_Fire_Points", "Horizontal_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Slope", "Vertical_Distance_To_Hydrology", ] CATEGORICAL_FEATURES_WITH_VOCABULARY = { "Soil_Type": list(data["Soil_Type"].unique()), "Wilderness_Area": list(data["Wilderness_Area"].unique()), } CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()) FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES COLUMN_DEFAULTS = [ [0] if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME] else ["NA"] for feature_name in CSV_HEADER ] NUM_CLASSES = len(TARGET_FEATURE_LABELS)<jupyter_output><empty_output><jupyter_text>Experiment setupNext, let's define an input function that reads and parses the file, then converts featuresand labels into a[`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets)for training or evaluation.<jupyter_code>def get_dataset_from_csv(csv_file_path, batch_size, shuffle=False): dataset = tf_data.experimental.make_csv_dataset( csv_file_path, batch_size=batch_size, column_names=CSV_HEADER, column_defaults=COLUMN_DEFAULTS, label_name=TARGET_FEATURE_NAME, num_epochs=1, header=True, shuffle=shuffle, ) return dataset.cache()<jupyter_output><empty_output><jupyter_text>Here we configure the parameters and implement the procedure for running a training andevaluation experiment given a model.<jupyter_code>learning_rate = 0.001 dropout_rate = 0.1 batch_size = 265 num_epochs = 50 hidden_units = [32, 32] def run_experiment(model): model.compile( optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) train_dataset = get_dataset_from_csv(train_data_file, batch_size, shuffle=True) test_dataset = get_dataset_from_csv(test_data_file, batch_size) print("Start training the model...") history = model.fit(train_dataset, epochs=num_epochs) print("Model training finished") _, accuracy = 
model.evaluate(test_dataset, verbose=0) print(f"Test accuracy: {round(accuracy * 100, 2)}%")<jupyter_output><empty_output><jupyter_text>Create model inputsNow, define the inputs for the models as a dictionary, where the key is the feature name,and the value is a `keras.layers.Input` tensor with the corresponding feature shapeand data type.<jupyter_code>def create_model_inputs(): inputs = {} for feature_name in FEATURE_NAMES: if feature_name in NUMERIC_FEATURE_NAMES: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype="float32" ) else: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype="string" ) return inputs<jupyter_output><empty_output><jupyter_text>Encode featuresWe create two representations of our input features: sparse and dense:1. In the **sparse** representation, the categorical features are encoded with one-hotencoding using the `CategoryEncoding` layer. This representation can be useful for themodel to *memorize* particular feature values to make certain predictions.2. In the **dense** representation, the categorical features are encoded withlow-dimensional embeddings using the `Embedding` layer. This representation helpsthe model to *generalize* well to unseen feature combinations.<jupyter_code>def encode_inputs(inputs, use_embedding=False): encoded_features = [] for feature_name in inputs: if feature_name in CATEGORICAL_FEATURE_NAMES: vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name] # Create a lookup to convert string values to an integer indices. # Since we are not using a mask token nor expecting any out of vocabulary # (oov) token, we set mask_token to None and num_oov_indices to 0. lookup = layers.StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=0, output_mode="int" if use_embedding else "binary", ) if use_embedding: # Convert the string input values into integer indices. encoded_feature = lookup(inputs[feature_name]) embedding_dims = int(math.sqrt(len(vocabulary))) # Create an embedding layer with the specified dimensions. embedding = layers.Embedding( input_dim=len(vocabulary), output_dim=embedding_dims ) # Convert the index values to embedding representations. encoded_feature = embedding(encoded_feature) else: # Convert the string input values into a one hot encoding. encoded_feature = lookup( keras.ops.expand_dims(inputs[feature_name], -1) ) else: # Use the numerical features as-is. 
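            # expand_dims adds a trailing feature axis so the scalar numeric inputs can be
            # concatenated with the encoded categorical features later on.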
encoded_feature = keras.ops.expand_dims(inputs[feature_name], -1) encoded_features.append(encoded_feature) all_features = layers.concatenate(encoded_features) return all_features<jupyter_output><empty_output><jupyter_text>Experiment 1: a baseline model In the first experiment, let's create a multi-layer feed-forward network, where the categorical features are one-hot encoded.<jupyter_code>def create_baseline_model(): inputs = create_model_inputs() features = encode_inputs(inputs) for units in hidden_units: features = layers.Dense(units)(features) features = layers.BatchNormalization()(features) features = layers.ReLU()(features) features = layers.Dropout(dropout_rate)(features) outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(features) model = keras.Model(inputs=inputs, outputs=outputs) return model baseline_model = create_baseline_model() keras.utils.plot_model(baseline_model, show_shapes=True, rankdir="LR")<jupyter_output><empty_output><jupyter_text>Let's run it:<jupyter_code>run_experiment(baseline_model)<jupyter_output><empty_output><jupyter_text>The baseline model achieves ~76% test accuracy. Experiment 2: Wide & Deep model In the second experiment, we create a Wide & Deep model. The wide part of the model is a linear model, while the deep part of the model is a multi-layer feed-forward network. Use the sparse representation of the input features in the wide part of the model and the dense representation of the input features for the deep part of the model. Note that every input feature contributes to both parts of the model with different representations.
The key idea of the cross part is to apply explicit feature crossing in an efficient way, where the degree of cross features grows with layer depth.<jupyter_code>def create_deep_and_cross_model(): inputs = create_model_inputs() x0 = encode_inputs(inputs, use_embedding=True) cross = x0 for _ in hidden_units: units = cross.shape[-1] x = layers.Dense(units)(cross) cross = x0 * x + cross cross = layers.BatchNormalization()(cross) deep = x0 for units in hidden_units: deep = layers.Dense(units)(deep) deep = layers.BatchNormalization()(deep) deep = layers.ReLU()(deep) deep = layers.Dropout(dropout_rate)(deep) merged = layers.concatenate([cross, deep]) outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(merged) model = keras.Model(inputs=inputs, outputs=outputs) return model deep_and_cross_model = create_deep_and_cross_model() keras.utils.plot_model(deep_and_cross_model, show_shapes=True, rankdir="LR")<jupyter_output><empty_output><jupyter_text>Let's run it:<jupyter_code>run_experiment(deep_and_cross_model)<jupyter_output><empty_output>
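<jupyter_text>To make the feature-crossing step above concrete, here is a tiny standalone sketch (not part of the original notebook) of the update `cross = x0 * Dense(cross) + cross` using plain NumPy on a made-up 4-dimensional feature vector; the element-wise product with `x0` is what creates explicit feature interactions at each layer.<jupyter_code>import numpy as np

rng = np.random.default_rng(0)
dim = 4                            # toy feature width (hypothetical)
x0 = rng.normal(size=(1, dim))     # stands in for the encoded input features
w = rng.normal(size=(dim, dim))    # stands in for the Dense layer's kernel (bias omitted)

cross = x0
for _ in range(2):                 # two cross layers, mirroring hidden_units = [32, 32]
    x = cross @ w                  # Dense(units)(cross) without bias or activation
    cross = x0 * x + cross         # element-wise interaction with the original features
print(cross.shape)                 # (1, 4)<jupyter_output><empty_output>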
keras-io/examples/structured_data/ipynb/wide_deep_cross_networks.ipynb/0
{ "file_path": "keras-io/examples/structured_data/ipynb/wide_deep_cross_networks.ipynb", "repo_id": "keras-io", "token_count": 5034 }
105
""" Title: Structured data learning with Wide, Deep, and Cross networks Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/) Date created: 2020/12/31 Last modified: 2021/05/05 Description: Using Wide & Deep and Deep & Cross networks for structured data classification. Accelerator: GPU """ """ ## Introduction This example demonstrates how to do structured data classification using the two modeling techniques: 1. [Wide & Deep](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) models 2. [Deep & Cross](https://arxiv.org/abs/1708.05123) models Note that this example should be run with TensorFlow 2.5 or higher. """ """ ## The dataset This example uses the [Covertype](https://archive.ics.uci.edu/ml/datasets/covertype) dataset from the UCI Machine Learning Repository. The task is to predict forest cover type from cartographic variables. The dataset includes 506,011 instances with 12 input features: 10 numerical features and 2 categorical features. Each instance is categorized into 1 of 7 classes. """ """ ## Setup """ import os # Only the TensorFlow backend supports string inputs. os.environ["KERAS_BACKEND"] = "tensorflow" import math import numpy as np import pandas as pd from tensorflow import data as tf_data import keras from keras import layers """ ## Prepare the data First, let's load the dataset from the UCI Machine Learning Repository into a Pandas DataFrame: """ data_url = ( "https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz" ) raw_data = pd.read_csv(data_url, header=None) print(f"Dataset shape: {raw_data.shape}") raw_data.head() """ The two categorical features in the dataset are binary-encoded. We will convert this dataset representation to the typical representation, where each categorical feature is represented as a single integer value. """ soil_type_values = [f"soil_type_{idx+1}" for idx in range(40)] wilderness_area_values = [f"area_type_{idx+1}" for idx in range(4)] soil_type = raw_data.loc[:, 14:53].apply( lambda x: soil_type_values[0::1][x.to_numpy().nonzero()[0][0]], axis=1 ) wilderness_area = raw_data.loc[:, 10:13].apply( lambda x: wilderness_area_values[0::1][x.to_numpy().nonzero()[0][0]], axis=1 ) CSV_HEADER = [ "Elevation", "Aspect", "Slope", "Horizontal_Distance_To_Hydrology", "Vertical_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Hillshade_9am", "Hillshade_Noon", "Hillshade_3pm", "Horizontal_Distance_To_Fire_Points", "Wilderness_Area", "Soil_Type", "Cover_Type", ] data = pd.concat( [raw_data.loc[:, 0:9], wilderness_area, soil_type, raw_data.loc[:, 54]], axis=1, ignore_index=True, ) data.columns = CSV_HEADER # Convert the target label indices into a range from 0 to 6 (there are 7 labels in total). data["Cover_Type"] = data["Cover_Type"] - 1 print(f"Dataset shape: {data.shape}") data.head().T """ The shape of the DataFrame shows there are 13 columns per sample (12 for the features and 1 for the target label). Let's split the data into training (85%) and test (15%) sets. 
""" train_splits = [] test_splits = [] for _, group_data in data.groupby("Cover_Type"): random_selection = np.random.rand(len(group_data.index)) <= 0.85 train_splits.append(group_data[random_selection]) test_splits.append(group_data[~random_selection]) train_data = pd.concat(train_splits).sample(frac=1).reset_index(drop=True) test_data = pd.concat(test_splits).sample(frac=1).reset_index(drop=True) print(f"Train split size: {len(train_data.index)}") print(f"Test split size: {len(test_data.index)}") """ Next, store the training and test data in separate CSV files. """ train_data_file = "train_data.csv" test_data_file = "test_data.csv" train_data.to_csv(train_data_file, index=False) test_data.to_csv(test_data_file, index=False) """ ## Define dataset metadata Here, we define the metadata of the dataset that will be useful for reading and parsing the data into input features, and encoding the input features with respect to their types. """ TARGET_FEATURE_NAME = "Cover_Type" TARGET_FEATURE_LABELS = ["0", "1", "2", "3", "4", "5", "6"] NUMERIC_FEATURE_NAMES = [ "Aspect", "Elevation", "Hillshade_3pm", "Hillshade_9am", "Hillshade_Noon", "Horizontal_Distance_To_Fire_Points", "Horizontal_Distance_To_Hydrology", "Horizontal_Distance_To_Roadways", "Slope", "Vertical_Distance_To_Hydrology", ] CATEGORICAL_FEATURES_WITH_VOCABULARY = { "Soil_Type": list(data["Soil_Type"].unique()), "Wilderness_Area": list(data["Wilderness_Area"].unique()), } CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys()) FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES COLUMN_DEFAULTS = [ [0] if feature_name in NUMERIC_FEATURE_NAMES + [TARGET_FEATURE_NAME] else ["NA"] for feature_name in CSV_HEADER ] NUM_CLASSES = len(TARGET_FEATURE_LABELS) """ ## Experiment setup Next, let's define an input function that reads and parses the file, then converts features and labels into a[`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets) for training or evaluation. """ def get_dataset_from_csv(csv_file_path, batch_size, shuffle=False): dataset = tf_data.experimental.make_csv_dataset( csv_file_path, batch_size=batch_size, column_names=CSV_HEADER, column_defaults=COLUMN_DEFAULTS, label_name=TARGET_FEATURE_NAME, num_epochs=1, header=True, shuffle=shuffle, ) return dataset.cache() """ Here we configure the parameters and implement the procedure for running a training and evaluation experiment given a model. """ learning_rate = 0.001 dropout_rate = 0.1 batch_size = 265 num_epochs = 50 hidden_units = [32, 32] def run_experiment(model): model.compile( optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) train_dataset = get_dataset_from_csv(train_data_file, batch_size, shuffle=True) test_dataset = get_dataset_from_csv(test_data_file, batch_size) print("Start training the model...") history = model.fit(train_dataset, epochs=num_epochs) print("Model training finished") _, accuracy = model.evaluate(test_dataset, verbose=0) print(f"Test accuracy: {round(accuracy * 100, 2)}%") """ ## Create model inputs Now, define the inputs for the models as a dictionary, where the key is the feature name, and the value is a `keras.layers.Input` tensor with the corresponding feature shape and data type. 
""" def create_model_inputs(): inputs = {} for feature_name in FEATURE_NAMES: if feature_name in NUMERIC_FEATURE_NAMES: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype="float32" ) else: inputs[feature_name] = layers.Input( name=feature_name, shape=(), dtype="string" ) return inputs """ ## Encode features We create two representations of our input features: sparse and dense: 1. In the **sparse** representation, the categorical features are encoded with one-hot encoding using the `CategoryEncoding` layer. This representation can be useful for the model to *memorize* particular feature values to make certain predictions. 2. In the **dense** representation, the categorical features are encoded with low-dimensional embeddings using the `Embedding` layer. This representation helps the model to *generalize* well to unseen feature combinations. """ def encode_inputs(inputs, use_embedding=False): encoded_features = [] for feature_name in inputs: if feature_name in CATEGORICAL_FEATURE_NAMES: vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name] # Create a lookup to convert string values to an integer indices. # Since we are not using a mask token nor expecting any out of vocabulary # (oov) token, we set mask_token to None and num_oov_indices to 0. lookup = layers.StringLookup( vocabulary=vocabulary, mask_token=None, num_oov_indices=0, output_mode="int" if use_embedding else "binary", ) if use_embedding: # Convert the string input values into integer indices. encoded_feature = lookup(inputs[feature_name]) embedding_dims = int(math.sqrt(len(vocabulary))) # Create an embedding layer with the specified dimensions. embedding = layers.Embedding( input_dim=len(vocabulary), output_dim=embedding_dims ) # Convert the index values to embedding representations. encoded_feature = embedding(encoded_feature) else: # Convert the string input values into a one hot encoding. encoded_feature = lookup( keras.ops.expand_dims(inputs[feature_name], -1) ) else: # Use the numerical features as-is. encoded_feature = keras.ops.expand_dims(inputs[feature_name], -1) encoded_features.append(encoded_feature) all_features = layers.concatenate(encoded_features) return all_features """ ## Experiment 1: a baseline model In the first experiment, let's create a multi-layer feed-forward network, where the categorical features are one-hot encoded. """ def create_baseline_model(): inputs = create_model_inputs() features = encode_inputs(inputs) for units in hidden_units: features = layers.Dense(units)(features) features = layers.BatchNormalization()(features) features = layers.ReLU()(features) features = layers.Dropout(dropout_rate)(features) outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(features) model = keras.Model(inputs=inputs, outputs=outputs) return model baseline_model = create_baseline_model() keras.utils.plot_model(baseline_model, show_shapes=True, rankdir="LR") """ Let's run it: """ run_experiment(baseline_model) """ The baseline linear model achieves ~76% test accuracy. """ """ ## Experiment 2: Wide & Deep model In the second experiment, we create a Wide & Deep model. The wide part of the model a linear model, while the deep part of the model is a multi-layer feed-forward network. Use the sparse representation of the input features in the wide part of the model and the dense representation of the input features for the deep part of the model. Note that every input features contributes to both parts of the model with different representations. 
""" def create_wide_and_deep_model(): inputs = create_model_inputs() wide = encode_inputs(inputs) wide = layers.BatchNormalization()(wide) deep = encode_inputs(inputs, use_embedding=True) for units in hidden_units: deep = layers.Dense(units)(deep) deep = layers.BatchNormalization()(deep) deep = layers.ReLU()(deep) deep = layers.Dropout(dropout_rate)(deep) merged = layers.concatenate([wide, deep]) outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(merged) model = keras.Model(inputs=inputs, outputs=outputs) return model wide_and_deep_model = create_wide_and_deep_model() keras.utils.plot_model(wide_and_deep_model, show_shapes=True, rankdir="LR") """ Let's run it: """ run_experiment(wide_and_deep_model) """ The wide and deep model achieves ~79% test accuracy. """ """ ## Experiment 3: Deep & Cross model In the third experiment, we create a Deep & Cross model. The deep part of this model is the same as the deep part created in the previous experiment. The key idea of the cross part is to apply explicit feature crossing in an efficient way, where the degree of cross features grows with layer depth. """ def create_deep_and_cross_model(): inputs = create_model_inputs() x0 = encode_inputs(inputs, use_embedding=True) cross = x0 for _ in hidden_units: units = cross.shape[-1] x = layers.Dense(units)(cross) cross = x0 * x + cross cross = layers.BatchNormalization()(cross) deep = x0 for units in hidden_units: deep = layers.Dense(units)(deep) deep = layers.BatchNormalization()(deep) deep = layers.ReLU()(deep) deep = layers.Dropout(dropout_rate)(deep) merged = layers.concatenate([cross, deep]) outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(merged) model = keras.Model(inputs=inputs, outputs=outputs) return model deep_and_cross_model = create_deep_and_cross_model() keras.utils.plot_model(deep_and_cross_model, show_shapes=True, rankdir="LR") """ Let's run it: """ run_experiment(deep_and_cross_model) """ The deep and cross model achieves ~81% test accuracy. """ """ ## Conclusion You can use Keras Preprocessing Layers to easily handle categorical features with different encoding mechanisms, including one-hot encoding and feature embedding. In addition, different model architectures — like wide, deep, and cross networks — have different advantages, with respect to different dataset properties. You can explore using them independently or combining them to achieve the best result for your dataset. """
keras-io/examples/structured_data/wide_deep_cross_networks.py/0
{ "file_path": "keras-io/examples/structured_data/wide_deep_cross_networks.py", "repo_id": "keras-io", "token_count": 4933 }
106
<jupyter_start><jupyter_text>Traffic forecasting using graph neural networks and LSTM**Author:** [Arash Khodadadi](https://www.linkedin.com/in/arash-khodadadi-08a02490/)**Date created:** 2021/12/28**Last modified:** 2023/11/22**Description:** This example demonstrates how to do timeseries forecasting over graphs. IntroductionThis example shows how to forecast traffic condition using graph neural networks and LSTM.Specifically, we are interested in predicting the future values of the traffic speed givena history of the traffic speed for a collection of road segments.One popular method tosolve this problem is to consider each road segment's traffic speed as a separatetimeseries and predict the future values of each timeseriesusing the past values of the same timeseries.This method, however, ignores the dependency of the traffic speed of one road segment onthe neighboring segments. To be able to take into account the complex interactions betweenthe traffic speed on a collection of neighboring roads, we can define the traffic networkas a graph and consider the traffic speed as a signal on this graph. In this example,we implement a neural network architecture which can process timeseries data over a graph.We first show how to process the data and create a[tf.data.Dataset](https://www.tensorflow.org/api_docs/python/tf/data/Dataset) forforecasting over graphs. Then, we implement a model which uses graph convolution andLSTM layers to perform forecasting over a graph.The data processing and the model architecture are inspired by this paper:Yu, Bing, Haoteng Yin, and Zhanxing Zhu. "Spatio-temporal graph convolutional networks:a deep learning framework for traffic forecasting." Proceedings of the 27th InternationalJoint Conference on Artificial Intelligence, 2018.([github](https://github.com/VeritasYin/STGCN_IJCAI-18)) Setup<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" import pandas as pd import numpy as np import typing import matplotlib.pyplot as plt import tensorflow as tf import keras from keras import layers from keras import ops<jupyter_output><empty_output><jupyter_text>Data preparation Data descriptionWe use a real-world traffic speed dataset named `PeMSD7`. We use the versioncollected and prepared by [Yu et al., 2018](https://arxiv.org/abs/1709.04875)and available[here](https://github.com/VeritasYin/STGCN_IJCAI-18/tree/master/dataset).The data consists of two files:- `PeMSD7_W_228.csv` contains the distances between 228stations across the District 7 of California.- `PeMSD7_V_228.csv` contains trafficspeed collected for those stations in the weekdays of May and June of 2012.The full description of the dataset can be found in[Yu et al., 2018](https://arxiv.org/abs/1709.04875). 
Loading data<jupyter_code>url = "https://github.com/VeritasYin/STGCN_IJCAI-18/raw/master/dataset/PeMSD7_Full.zip" data_dir = keras.utils.get_file(origin=url, extract=True, archive_format="zip") data_dir = data_dir.rstrip("PeMSD7_Full.zip") route_distances = pd.read_csv( os.path.join(data_dir, "PeMSD7_W_228.csv"), header=None ).to_numpy() speeds_array = pd.read_csv( os.path.join(data_dir, "PeMSD7_V_228.csv"), header=None ).to_numpy() print(f"route_distances shape={route_distances.shape}") print(f"speeds_array shape={speeds_array.shape}")<jupyter_output><empty_output><jupyter_text>sub-sampling roadsTo reduce the problem size and make the training faster, we will onlywork with a sample of 26 roads out of the 228 roads in the dataset.We have chosen the roads by starting from road 0, choosing the 5 closestroads to it, and continuing this process until we get 25 roads. You can chooseany other subset of the roads. We chose the roads in this way to increase the likelihoodof having roads with correlated speed timeseries.`sample_routes` contains the IDs of the selected roads.<jupyter_code>sample_routes = [ 0, 1, 4, 7, 8, 11, 15, 108, 109, 114, 115, 118, 120, 123, 124, 126, 127, 129, 130, 132, 133, 136, 139, 144, 147, 216, ] route_distances = route_distances[np.ix_(sample_routes, sample_routes)] speeds_array = speeds_array[:, sample_routes] print(f"route_distances shape={route_distances.shape}") print(f"speeds_array shape={speeds_array.shape}")<jupyter_output><empty_output><jupyter_text>Data visualizationHere are the timeseries of the traffic speed for two of the routes:<jupyter_code>plt.figure(figsize=(18, 6)) plt.plot(speeds_array[:, [0, -1]]) plt.legend(["route_0", "route_25"])<jupyter_output><empty_output><jupyter_text>We can also visualize the correlation between the timeseries in different routes.<jupyter_code>plt.figure(figsize=(8, 8)) plt.matshow(np.corrcoef(speeds_array.T), 0) plt.xlabel("road number") plt.ylabel("road number")<jupyter_output><empty_output><jupyter_text>Using this correlation heatmap, we can see that for example the speed inroutes 4, 5, 6 are highly correlated. Splitting and normalizing dataNext, we split the speed values array into train/validation/test sets,and normalize the resulting arrays:<jupyter_code>train_size, val_size = 0.5, 0.2 def preprocess(data_array: np.ndarray, train_size: float, val_size: float): """Splits data into train/val/test sets and normalizes the data. Args: data_array: ndarray of shape `(num_time_steps, num_routes)` train_size: A float value between 0.0 and 1.0 that represent the proportion of the dataset to include in the train split. val_size: A float value between 0.0 and 1.0 that represent the proportion of the dataset to include in the validation split. 
Returns: `train_array`, `val_array`, `test_array` """ num_time_steps = data_array.shape[0] num_train, num_val = ( int(num_time_steps * train_size), int(num_time_steps * val_size), ) train_array = data_array[:num_train] mean, std = train_array.mean(axis=0), train_array.std(axis=0) train_array = (train_array - mean) / std val_array = (data_array[num_train : (num_train + num_val)] - mean) / std test_array = (data_array[(num_train + num_val) :] - mean) / std return train_array, val_array, test_array train_array, val_array, test_array = preprocess(speeds_array, train_size, val_size) print(f"train set size: {train_array.shape}") print(f"validation set size: {val_array.shape}") print(f"test set size: {test_array.shape}")<jupyter_output><empty_output><jupyter_text>Creating TensorFlow DatasetsNext, we create the datasets for our forecasting problem. The forecasting problemcan be stated as follows: given a sequence of theroad speed values at times `t+1, t+2, ..., t+T`, we want to predict the future values ofthe roads speed for times `t+T+1, ..., t+T+h`. So for each time `t` the inputs to ourmodel are `T` vectors each of size `N` and the targets are `h` vectors each of size `N`,where `N` is the number of roads. We use the Keras built-in function`keras.utils.timeseries_dataset_from_array`.The function `create_tf_dataset()` below takes as input a `numpy.ndarray` and returns a`tf.data.Dataset`. In this function `input_sequence_length=T` and `forecast_horizon=h`.The argument `multi_horizon` needs more explanation. Assume `forecast_horizon=3`.If `multi_horizon=True` then the model will make a forecast for time steps`t+T+1, t+T+2, t+T+3`. So the target will have shape `(T,3)`. But if`multi_horizon=False`, the model will make a forecast only for time step `t+T+3` andso the target will have shape `(T, 1)`.You may notice that the input tensor in each batch has shape`(batch_size, input_sequence_length, num_routes, 1)`. The last dimension is added tomake the model more general: at each time step, the input features for each raod maycontain multiple timeseries. For instance, one might want to use temperature timeseriesin addition to historical values of the speed as input features. In this example,however, the last dimension of the input is always 1.We use the last 12 values of the speed in each road to forecast the speed for 3 timesteps ahead:<jupyter_code>batch_size = 64 input_sequence_length = 12 forecast_horizon = 3 multi_horizon = False def create_tf_dataset( data_array: np.ndarray, input_sequence_length: int, forecast_horizon: int, batch_size: int = 128, shuffle=True, multi_horizon=True, ): """Creates tensorflow dataset from numpy array. This function creates a dataset where each element is a tuple `(inputs, targets)`. `inputs` is a Tensor of shape `(batch_size, input_sequence_length, num_routes, 1)` containing the `input_sequence_length` past values of the timeseries for each node. `targets` is a Tensor of shape `(batch_size, forecast_horizon, num_routes)` containing the `forecast_horizon` future values of the timeseries for each node. Args: data_array: np.ndarray with shape `(num_time_steps, num_routes)` input_sequence_length: Length of the input sequence (in number of timesteps). forecast_horizon: If `multi_horizon=True`, the target will be the values of the timeseries for 1 to `forecast_horizon` timesteps ahead. If `multi_horizon=False`, the target will be the value of the timeseries `forecast_horizon` steps ahead (only one value). batch_size: Number of timeseries samples in each batch. 
shuffle: Whether to shuffle output samples, or instead draw them in chronological order. multi_horizon: See `forecast_horizon`. Returns: A tf.data.Dataset instance. """ inputs = keras.utils.timeseries_dataset_from_array( np.expand_dims(data_array[:-forecast_horizon], axis=-1), None, sequence_length=input_sequence_length, shuffle=False, batch_size=batch_size, ) target_offset = ( input_sequence_length if multi_horizon else input_sequence_length + forecast_horizon - 1 ) target_seq_length = forecast_horizon if multi_horizon else 1 targets = keras.utils.timeseries_dataset_from_array( data_array[target_offset:], None, sequence_length=target_seq_length, shuffle=False, batch_size=batch_size, ) dataset = tf.data.Dataset.zip((inputs, targets)) if shuffle: dataset = dataset.shuffle(100) return dataset.prefetch(16).cache() train_dataset, val_dataset = ( create_tf_dataset(data_array, input_sequence_length, forecast_horizon, batch_size) for data_array in [train_array, val_array] ) test_dataset = create_tf_dataset( test_array, input_sequence_length, forecast_horizon, batch_size=test_array.shape[0], shuffle=False, multi_horizon=multi_horizon, )<jupyter_output><empty_output><jupyter_text>Roads GraphAs mentioned before, we assume that the road segments form a graph.The `PeMSD7` dataset has the road segments distance. The next stepis to create the graph adjacency matrix from these distances. Following[Yu et al., 2018](https://arxiv.org/abs/1709.04875) (equation 10) we assume thereis an edge between two nodes in the graph if the distance between the corresponding roadsis less than a threshold.<jupyter_code>def compute_adjacency_matrix( route_distances: np.ndarray, sigma2: float, epsilon: float ): """Computes the adjacency matrix from distances matrix. It uses the formula in https://github.com/VeritasYin/STGCN_IJCAI-18#data-preprocessing to compute an adjacency matrix from the distance matrix. The implementation follows that paper. Args: route_distances: np.ndarray of shape `(num_routes, num_routes)`. Entry `i,j` of this array is the distance between roads `i,j`. sigma2: Determines the width of the Gaussian kernel applied to the square distances matrix. epsilon: A threshold specifying if there is an edge between two nodes. Specifically, `A[i,j]=1` if `np.exp(-w2[i,j] / sigma2) >= epsilon` and `A[i,j]=0` otherwise, where `A` is the adjacency matrix and `w2=route_distances * route_distances` Returns: A boolean graph adjacency matrix. """ num_routes = route_distances.shape[0] route_distances = route_distances / 10000.0 w2, w_mask = ( route_distances * route_distances, np.ones([num_routes, num_routes]) - np.identity(num_routes), ) return (np.exp(-w2 / sigma2) >= epsilon) * w_mask<jupyter_output><empty_output><jupyter_text>The function `compute_adjacency_matrix()` returns a boolean adjacency matrixwhere 1 means there is an edge between two nodes. 
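For intuition, here is a quick numeric check of this rule with made-up distances (not taken from PeMSD7): with `sigma2=0.1` and `epsilon=0.5`, two roads at distance 1500 in the dataset's units end up connected, since `exp(-0.15**2 / 0.1)` is roughly 0.8, which is above the threshold.<jupyter_code># Hypothetical distances, just to illustrate the thresholding rule above.
d = np.array([[0.0, 1500.0], [1500.0, 0.0]])
w2 = (d / 10000.0) ** 2  # scale, then square, as in compute_adjacency_matrix()
# Off-diagonal entries are True, so the two roads would share an edge
# (the full function additionally masks out the diagonal self-loops).
print(np.exp(-w2 / 0.1) >= 0.5)<jupyter_output><empty_output><jupyter_text>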
We use the following classto store the information about the graph.<jupyter_code>class GraphInfo: def __init__(self, edges: typing.Tuple[list, list], num_nodes: int): self.edges = edges self.num_nodes = num_nodes sigma2 = 0.1 epsilon = 0.5 adjacency_matrix = compute_adjacency_matrix(route_distances, sigma2, epsilon) node_indices, neighbor_indices = np.where(adjacency_matrix == 1) graph = GraphInfo( edges=(node_indices.tolist(), neighbor_indices.tolist()), num_nodes=adjacency_matrix.shape[0], ) print(f"number of nodes: {graph.num_nodes}, number of edges: {len(graph.edges[0])}")<jupyter_output><empty_output><jupyter_text>Network architectureOur model for forecasting over the graph consists of a graph convolutionlayer and a LSTM layer. Graph convolution layerOur implementation of the graph convolution layer resembles the implementationin [this Keras example](https://keras.io/examples/graph/gnn_citations/). Note thatin that example input to the layer is a 2D tensor of shape `(num_nodes,in_feat)`but in our example the input to the layer is a 4D tensor of shape`(num_nodes, batch_size, input_seq_length, in_feat)`. The graph convolution layerperforms the following steps:- The nodes' representations are computed in `self.compute_nodes_representation()`by multiplying the input features by `self.weight`- The aggregated neighbors' messages are computed in `self.compute_aggregated_messages()`by first aggregating the neighbors' representations and then multiplying the results by`self.weight`- The final output of the layer is computed in `self.update()` by combining the nodesrepresentations and the neighbors' aggregated messages<jupyter_code>class GraphConv(layers.Layer): def __init__( self, in_feat, out_feat, graph_info: GraphInfo, aggregation_type="mean", combination_type="concat", activation: typing.Optional[str] = None, **kwargs, ): super().__init__(**kwargs) self.in_feat = in_feat self.out_feat = out_feat self.graph_info = graph_info self.aggregation_type = aggregation_type self.combination_type = combination_type self.weight = self.add_weight( initializer=keras.initializers.GlorotUniform(), shape=(in_feat, out_feat), dtype="float32", trainable=True, ) self.activation = layers.Activation(activation) def aggregate(self, neighbour_representations): aggregation_func = { "sum": tf.math.unsorted_segment_sum, "mean": tf.math.unsorted_segment_mean, "max": tf.math.unsorted_segment_max, }.get(self.aggregation_type) if aggregation_func: return aggregation_func( neighbour_representations, self.graph_info.edges[0], num_segments=self.graph_info.num_nodes, ) raise ValueError(f"Invalid aggregation type: {self.aggregation_type}") def compute_nodes_representation(self, features): """Computes each node's representation. The nodes' representations are obtained by multiplying the features tensor with `self.weight`. Note that `self.weight` has shape `(in_feat, out_feat)`. 
Args: features: Tensor of shape `(num_nodes, batch_size, input_seq_len, in_feat)` Returns: A tensor of shape `(num_nodes, batch_size, input_seq_len, out_feat)` """ return ops.matmul(features, self.weight) def compute_aggregated_messages(self, features): neighbour_representations = tf.gather(features, self.graph_info.edges[1]) aggregated_messages = self.aggregate(neighbour_representations) return ops.matmul(aggregated_messages, self.weight) def update(self, nodes_representation, aggregated_messages): if self.combination_type == "concat": h = ops.concatenate([nodes_representation, aggregated_messages], axis=-1) elif self.combination_type == "add": h = nodes_representation + aggregated_messages else: raise ValueError(f"Invalid combination type: {self.combination_type}.") return self.activation(h) def call(self, features): """Forward pass. Args: features: tensor of shape `(num_nodes, batch_size, input_seq_len, in_feat)` Returns: A tensor of shape `(num_nodes, batch_size, input_seq_len, out_feat)` """ nodes_representation = self.compute_nodes_representation(features) aggregated_messages = self.compute_aggregated_messages(features) return self.update(nodes_representation, aggregated_messages)<jupyter_output><empty_output><jupyter_text>LSTM plus graph convolutionBy applying the graph convolution layer to the input tensor, we get another tensorcontaining the nodes' representations over time (another 4D tensor). For each timestep, a node's representation is informed by the information from its neighbors.To make good forecasts, however, we need not only information from the neighborsbut also we need to process the information over time. To this end, we can pass eachnode's tensor through a recurrent layer. The `LSTMGC` layer below, first appliesa graph convolution layer to the inputs and then passes the results through a`LSTM` layer.<jupyter_code>class LSTMGC(layers.Layer): """Layer comprising a convolution layer followed by LSTM and dense layers.""" def __init__( self, in_feat, out_feat, lstm_units: int, input_seq_len: int, output_seq_len: int, graph_info: GraphInfo, graph_conv_params: typing.Optional[dict] = None, **kwargs, ): super().__init__(**kwargs) # graph conv layer if graph_conv_params is None: graph_conv_params = { "aggregation_type": "mean", "combination_type": "concat", "activation": None, } self.graph_conv = GraphConv(in_feat, out_feat, graph_info, **graph_conv_params) self.lstm = layers.LSTM(lstm_units, activation="relu") self.dense = layers.Dense(output_seq_len) self.input_seq_len, self.output_seq_len = input_seq_len, output_seq_len def call(self, inputs): """Forward pass. Args: inputs: tensor of shape `(batch_size, input_seq_len, num_nodes, in_feat)` Returns: A tensor of shape `(batch_size, output_seq_len, num_nodes)`. 
""" # convert shape to (num_nodes, batch_size, input_seq_len, in_feat) inputs = ops.transpose(inputs, [2, 0, 1, 3]) gcn_out = self.graph_conv( inputs ) # gcn_out has shape: (num_nodes, batch_size, input_seq_len, out_feat) shape = ops.shape(gcn_out) num_nodes, batch_size, input_seq_len, out_feat = ( shape[0], shape[1], shape[2], shape[3], ) # LSTM takes only 3D tensors as input gcn_out = ops.reshape( gcn_out, (batch_size * num_nodes, input_seq_len, out_feat) ) lstm_out = self.lstm( gcn_out ) # lstm_out has shape: (batch_size * num_nodes, lstm_units) dense_output = self.dense( lstm_out ) # dense_output has shape: (batch_size * num_nodes, output_seq_len) output = ops.reshape(dense_output, (num_nodes, batch_size, self.output_seq_len)) return ops.transpose( output, [1, 2, 0] ) # returns Tensor of shape (batch_size, output_seq_len, num_nodes)<jupyter_output><empty_output><jupyter_text>Model training<jupyter_code>in_feat = 1 batch_size = 64 epochs = 20 input_sequence_length = 12 forecast_horizon = 3 multi_horizon = False out_feat = 10 lstm_units = 64 graph_conv_params = { "aggregation_type": "mean", "combination_type": "concat", "activation": None, } st_gcn = LSTMGC( in_feat, out_feat, lstm_units, input_sequence_length, forecast_horizon, graph, graph_conv_params, ) inputs = layers.Input((input_sequence_length, graph.num_nodes, in_feat)) outputs = st_gcn(inputs) model = keras.models.Model(inputs, outputs) model.compile( optimizer=keras.optimizers.RMSprop(learning_rate=0.0002), loss=keras.losses.MeanSquaredError(), ) model.fit( train_dataset, validation_data=val_dataset, epochs=epochs, callbacks=[keras.callbacks.EarlyStopping(patience=10)], )<jupyter_output><empty_output><jupyter_text>Making forecasts on test setNow we can use the trained model to make forecasts for the test set. Below, wecompute the MAE of the model and compare it to the MAE of naive forecasts.The naive forecasts are the last value of the speed for each node.<jupyter_code>x_test, y = next(test_dataset.as_numpy_iterator()) y_pred = model.predict(x_test) plt.figure(figsize=(18, 6)) plt.plot(y[:, 0, 0]) plt.plot(y_pred[:, 0, 0]) plt.legend(["actual", "forecast"]) naive_mse, model_mse = ( np.square(x_test[:, -1, :, 0] - y[:, 0, :]).mean(), np.square(y_pred[:, 0, :] - y[:, 0, :]).mean(), ) print(f"naive MAE: {naive_mse}, model MAE: {model_mse}")<jupyter_output><empty_output>
keras-io/examples/timeseries/ipynb/timeseries_traffic_forecasting.ipynb/0
{ "file_path": "keras-io/examples/timeseries/ipynb/timeseries_traffic_forecasting.ipynb", "repo_id": "keras-io", "token_count": 8197 }
107
""" Title: Classification using Attention-based Deep Multiple Instance Learning (MIL). Author: [Mohamad Jaber](https://www.linkedin.com/in/mohamadjaber1/) Date created: 2021/08/16 Last modified: 2021/11/25 Description: MIL approach to classify bags of instances and get their individual instance score. Accelerator: GPU """ """ ## Introduction ### What is Multiple Instance Learning (MIL)? Usually, with supervised learning algorithms, the learner receives labels for a set of instances. In the case of MIL, the learner receives labels for a set of bags, each of which contains a set of instances. The bag is labeled positive if it contains at least one positive instance, and negative if it does not contain any. ### Motivation It is often assumed in image classification tasks that each image clearly represents a class label. In medical imaging (e.g. computational pathology, etc.) an *entire image* is represented by a single class label (cancerous/non-cancerous) or a region of interest could be given. However, one will be interested in knowing which patterns in the image is actually causing it to belong to that class. In this context, the image(s) will be divided and the subimages will form the bag of instances. Therefore, the goals are to: 1. Learn a model to predict a class label for a bag of instances. 2. Find out which instances within the bag caused a position class label prediction. ### Implementation The following steps describe how the model works: 1. The feature extractor layers extract feature embeddings. 2. The embeddings are fed into the MIL attention layer to get the attention scores. The layer is designed as permutation-invariant. 3. Input features and their corresponding attention scores are multiplied together. 4. The resulting output is passed to a softmax function for classification. ### References - [Attention-based Deep Multiple Instance Learning](https://arxiv.org/abs/1802.04712). - Some of the attention operator code implementation was inspired from https://github.com/utayao/Atten_Deep_MIL. - Imbalanced data [tutorial](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data) by TensorFlow. """ """ ## Setup """ import numpy as np import keras from keras import layers from keras import ops from tqdm import tqdm from matplotlib import pyplot as plt plt.style.use("ggplot") """ ## Create dataset We will create a set of bags and assign their labels according to their contents. If at least one positive instance is available in a bag, the bag is considered as a positive bag. If it does not contain any positive instance, the bag will be considered as negative. ### Configuration parameters - `POSITIVE_CLASS`: The desired class to be kept in the positive bag. - `BAG_COUNT`: The number of training bags. - `VAL_BAG_COUNT`: The number of validation bags. - `BAG_SIZE`: The number of instances in a bag. - `PLOT_SIZE`: The number of bags to plot. - `ENSEMBLE_AVG_COUNT`: The number of models to create and average together. (Optional: often results in better performance - set to 1 for single model) """ POSITIVE_CLASS = 1 BAG_COUNT = 1000 VAL_BAG_COUNT = 300 BAG_SIZE = 3 PLOT_SIZE = 3 ENSEMBLE_AVG_COUNT = 1 """ ### Prepare bags Since the attention operator is a permutation-invariant operator, an instance with a positive class label is randomly placed among the instances in the positive bag. """ def create_bags(input_data, input_labels, positive_class, bag_count, instance_count): # Set up bags. bags = [] bag_labels = [] # Normalize input data. 
input_data = np.divide(input_data, 255.0) # Count positive samples. count = 0 for _ in range(bag_count): # Pick a fixed size random subset of samples. index = np.random.choice(input_data.shape[0], instance_count, replace=False) instances_data = input_data[index] instances_labels = input_labels[index] # By default, all bags are labeled as 0. bag_label = 0 # Check if there is at least a positive class in the bag. if positive_class in instances_labels: # Positive bag will be labeled as 1. bag_label = 1 count += 1 bags.append(instances_data) bag_labels.append(np.array([bag_label])) print(f"Positive bags: {count}") print(f"Negative bags: {bag_count - count}") return (list(np.swapaxes(bags, 0, 1)), np.array(bag_labels)) # Load the MNIST dataset. (x_train, y_train), (x_val, y_val) = keras.datasets.mnist.load_data() # Create training data. train_data, train_labels = create_bags( x_train, y_train, POSITIVE_CLASS, BAG_COUNT, BAG_SIZE ) # Create validation data. val_data, val_labels = create_bags( x_val, y_val, POSITIVE_CLASS, VAL_BAG_COUNT, BAG_SIZE ) """ ## Create the model We will now build the attention layer, prepare some utilities, then build and train the entire model. ### Attention operator implementation The output size of this layer is decided by the size of a single bag. The attention mechanism uses a weighted average of instances in a bag, in which the sum of the weights must equal to 1 (invariant of the bag size). The weight matrices (parameters) are **w** and **v**. To include positive and negative values, hyperbolic tangent element-wise non-linearity is utilized. A **Gated attention mechanism** can be used to deal with complex relations. Another weight matrix, **u**, is added to the computation. A sigmoid non-linearity is used to overcome approximately linear behavior for *x* ∈ [−1, 1] by hyperbolic tangent non-linearity. """ class MILAttentionLayer(layers.Layer): """Implementation of the attention-based Deep MIL layer. Args: weight_params_dim: Positive Integer. Dimension of the weight matrix. kernel_initializer: Initializer for the `kernel` matrix. kernel_regularizer: Regularizer function applied to the `kernel` matrix. use_gated: Boolean, whether or not to use the gated mechanism. Returns: List of 2D tensors with BAG_SIZE length. The tensors are the attention scores after softmax with shape `(batch_size, 1)`. """ def __init__( self, weight_params_dim, kernel_initializer="glorot_uniform", kernel_regularizer=None, use_gated=False, **kwargs, ): super().__init__(**kwargs) self.weight_params_dim = weight_params_dim self.use_gated = use_gated self.kernel_initializer = keras.initializers.get(kernel_initializer) self.kernel_regularizer = keras.regularizers.get(kernel_regularizer) self.v_init = self.kernel_initializer self.w_init = self.kernel_initializer self.u_init = self.kernel_initializer self.v_regularizer = self.kernel_regularizer self.w_regularizer = self.kernel_regularizer self.u_regularizer = self.kernel_regularizer def build(self, input_shape): # Input shape. # List of 2D tensors with shape: (batch_size, input_dim). 
input_dim = input_shape[0][1] self.v_weight_params = self.add_weight( shape=(input_dim, self.weight_params_dim), initializer=self.v_init, name="v", regularizer=self.v_regularizer, trainable=True, ) self.w_weight_params = self.add_weight( shape=(self.weight_params_dim, 1), initializer=self.w_init, name="w", regularizer=self.w_regularizer, trainable=True, ) if self.use_gated: self.u_weight_params = self.add_weight( shape=(input_dim, self.weight_params_dim), initializer=self.u_init, name="u", regularizer=self.u_regularizer, trainable=True, ) else: self.u_weight_params = None self.input_built = True def call(self, inputs): # Assigning variables from the number of inputs. instances = [self.compute_attention_scores(instance) for instance in inputs] # Stack instances into a single tensor. instances = ops.stack(instances) # Apply softmax over instances such that the output summation is equal to 1. alpha = ops.softmax(instances, axis=0) # Split to recreate the same array of tensors we had as inputs. return [alpha[i] for i in range(alpha.shape[0])] def compute_attention_scores(self, instance): # Reserve in-case "gated mechanism" used. original_instance = instance # tanh(v*h_k^T) instance = ops.tanh(ops.tensordot(instance, self.v_weight_params, axes=1)) # for learning non-linear relations efficiently. if self.use_gated: instance = instance * ops.sigmoid( ops.tensordot(original_instance, self.u_weight_params, axes=1) ) # w^T*(tanh(v*h_k^T)) / w^T*(tanh(v*h_k^T)*sigmoid(u*h_k^T)) return ops.tensordot(instance, self.w_weight_params, axes=1) """ ## Visualizer tool Plot the number of bags (given by `PLOT_SIZE`) with respect to the class. Moreover, if activated, the class label prediction with its associated instance score for each bag (after the model has been trained) can be seen. """ def plot(data, labels, bag_class, predictions=None, attention_weights=None): """ "Utility for plotting bags and attention weights. Args: data: Input data that contains the bags of instances. labels: The associated bag labels of the input data. bag_class: String name of the desired bag class. The options are: "positive" or "negative". predictions: Class labels model predictions. If you don't specify anything, ground truth labels will be used. attention_weights: Attention weights for each instance within the input data. If you don't specify anything, the values won't be displayed. """ return ## TODO labels = np.array(labels).reshape(-1) if bag_class == "positive": if predictions is not None: labels = np.where(predictions.argmax(1) == 1)[0] bags = np.array(data)[:, labels[0:PLOT_SIZE]] else: labels = np.where(labels == 1)[0] bags = np.array(data)[:, labels[0:PLOT_SIZE]] elif bag_class == "negative": if predictions is not None: labels = np.where(predictions.argmax(1) == 0)[0] bags = np.array(data)[:, labels[0:PLOT_SIZE]] else: labels = np.where(labels == 0)[0] bags = np.array(data)[:, labels[0:PLOT_SIZE]] else: print(f"There is no class {bag_class}") return print(f"The bag class label is {bag_class}") for i in range(PLOT_SIZE): figure = plt.figure(figsize=(8, 8)) print(f"Bag number: {labels[i]}") for j in range(BAG_SIZE): image = bags[j][i] figure.add_subplot(1, BAG_SIZE, j + 1) plt.grid(False) if attention_weights is not None: plt.title(np.around(attention_weights[labels[i]][j], 2)) plt.imshow(image) plt.show() # Plot some of validation data bags per class. 
plot(val_data, val_labels, "positive") plot(val_data, val_labels, "negative") """ ## Create model First we will create some embeddings per instance, invoke the attention operator and then use the softmax function to output the class probabilities. """ def create_model(instance_shape): # Extract features from inputs. inputs, embeddings = [], [] shared_dense_layer_1 = layers.Dense(128, activation="relu") shared_dense_layer_2 = layers.Dense(64, activation="relu") for _ in range(BAG_SIZE): inp = layers.Input(instance_shape) flatten = layers.Flatten()(inp) dense_1 = shared_dense_layer_1(flatten) dense_2 = shared_dense_layer_2(dense_1) inputs.append(inp) embeddings.append(dense_2) # Invoke the attention layer. alpha = MILAttentionLayer( weight_params_dim=256, kernel_regularizer=keras.regularizers.L2(0.01), use_gated=True, name="alpha", )(embeddings) # Multiply attention weights with the input layers. multiply_layers = [ layers.multiply([alpha[i], embeddings[i]]) for i in range(len(alpha)) ] # Concatenate layers. concat = layers.concatenate(multiply_layers, axis=1) # Classification output node. output = layers.Dense(2, activation="softmax")(concat) return keras.Model(inputs, output) """ ## Class weights Since this kind of problem could simply turn into imbalanced data classification problem, class weighting should be considered. Let's say there are 1000 bags. There often could be cases were ~90 % of the bags do not contain any positive label and ~10 % do. Such data can be referred to as **Imbalanced data**. Using class weights, the model will tend to give a higher weight to the rare class. """ def compute_class_weights(labels): # Count number of postive and negative bags. negative_count = len(np.where(labels == 0)[0]) positive_count = len(np.where(labels == 1)[0]) total_count = negative_count + positive_count # Build class weight dictionary. return { 0: (1 / negative_count) * (total_count / 2), 1: (1 / positive_count) * (total_count / 2), } """ ## Build and train model The model is built and trained in this section. """ def train(train_data, train_labels, val_data, val_labels, model): # Train model. # Prepare callbacks. # Path where to save best weights. # Take the file name from the wrapper. file_path = "/tmp/best_model.weights.h5" # Initialize model checkpoint callback. model_checkpoint = keras.callbacks.ModelCheckpoint( file_path, monitor="val_loss", verbose=0, mode="min", save_best_only=True, save_weights_only=True, ) # Initialize early stopping callback. # The model performance is monitored across the validation data and stops training # when the generalization error cease to decrease. early_stopping = keras.callbacks.EarlyStopping( monitor="val_loss", patience=10, mode="min" ) # Compile model. model.compile( optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) # Fit model. model.fit( train_data, train_labels, validation_data=(val_data, val_labels), epochs=20, class_weight=compute_class_weights(train_labels), batch_size=1, callbacks=[early_stopping, model_checkpoint], verbose=0, ) # Load best weights. model.load_weights(file_path) return model # Building model(s). instance_shape = train_data[0][0].shape models = [create_model(instance_shape) for _ in range(ENSEMBLE_AVG_COUNT)] # Show single model architecture. print(models[0].summary()) # Training model(s). trained_models = [ train(train_data, train_labels, val_data, val_labels, model) for model in tqdm(models) ] """ ## Model evaluation The models are now ready for evaluation. 
With each model we also create an associated intermediate model to get the weights from the attention layer. We will compute a prediction for each of our `ENSEMBLE_AVG_COUNT` models, and average them together for our final prediction. """ def predict(data, labels, trained_models): # Collect info per model. models_predictions = [] models_attention_weights = [] models_losses = [] models_accuracies = [] for model in trained_models: # Predict output classes on data. predictions = model.predict(data) models_predictions.append(predictions) # Create intermediate model to get MIL attention layer weights. intermediate_model = keras.Model(model.input, model.get_layer("alpha").output) # Predict MIL attention layer weights. intermediate_predictions = intermediate_model.predict(data) attention_weights = np.squeeze(np.swapaxes(intermediate_predictions, 1, 0)) models_attention_weights.append(attention_weights) loss, accuracy = model.evaluate(data, labels, verbose=0) models_losses.append(loss) models_accuracies.append(accuracy) print( f"The average loss and accuracy are {np.sum(models_losses, axis=0) / ENSEMBLE_AVG_COUNT:.2f}" f" and {100 * np.sum(models_accuracies, axis=0) / ENSEMBLE_AVG_COUNT:.2f} % resp." ) return ( np.sum(models_predictions, axis=0) / ENSEMBLE_AVG_COUNT, np.sum(models_attention_weights, axis=0) / ENSEMBLE_AVG_COUNT, ) # Evaluate and predict classes and attention scores on validation data. class_predictions, attention_params = predict(val_data, val_labels, trained_models) # Plot some results from our validation data. plot( val_data, val_labels, "positive", predictions=class_predictions, attention_weights=attention_params, ) plot( val_data, val_labels, "negative", predictions=class_predictions, attention_weights=attention_params, ) """ ## Conclusion From the plots above, you can see that the attention weights always sum to 1. In a positively predicted bag, the instance which resulted in the positive labeling will have a substantially higher attention score than the rest of the bag. However, in a negatively predicted bag, there are two cases: * All instances will have approximately similar scores. * An instance will have a relatively higher score (though not as high as that of a positive instance). This is because the feature space of this instance is close to that of the positive instance. ## Remarks - If the model is overfit, the attention weights will be equally distributed for all bags. Hence, regularization techniques are necessary. - In the paper, the bag sizes can differ from one bag to another. For simplicity, the bag sizes are fixed here. - In order not to rely on the random initial weights of a single model, ensemble averaging methods should be considered. """
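"""
As a reference for the attention formula implemented in `MILAttentionLayer`, the gated
attention score of a single bag can be written out in plain NumPy. This is an
illustrative sketch only (the matrices `v`, `w`, `u` and their shapes are placeholders,
not weights taken from the trained model):
"""


# h: (BAG_SIZE, input_dim) instance embeddings; v, u: (input_dim, L); w: (L, 1).
def gated_attention_sketch(h, v, w, u):
    # tanh(h @ v) * sigmoid(h @ u), then project with w -> one score per instance.
    gate = 1.0 / (1.0 + np.exp(-(h @ u)))
    scores = (np.tanh(h @ v) * gate) @ w  # shape: (BAG_SIZE, 1)
    # Softmax over the bag, so the attention weights sum to 1.
    scores = np.exp(scores - scores.max())
    return scores / scores.sum()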
keras-io/examples/vision/attention_mil_classification.py/0
{ "file_path": "keras-io/examples/vision/attention_mil_classification.py", "repo_id": "keras-io", "token_count": 6604 }
108
""" Title: Enhanced Deep Residual Networks for single-image super-resolution Author: Gitesh Chawda Date created: 2022/04/07 Last modified: 2022/04/07 Description: Training an EDSR model on the DIV2K Dataset. Accelerator: GPU """ """ ## Introduction In this example, we implement [Enhanced Deep Residual Networks for Single Image Super-Resolution (EDSR)](https://arxiv.org/abs/1707.02921) by Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. The EDSR architecture is based on the SRResNet architecture and consists of multiple residual blocks. It uses constant scaling layers instead of batch normalization layers to produce consistent results (input and output have similar distributions, thus normalizing intermediate features may not be desirable). Instead of using a L2 loss (mean squared error), the authors employed an L1 loss (mean absolute error), which performs better empirically. Our implementation only includes 16 residual blocks with 64 channels. Alternatively, as shown in the Keras example [Image Super-Resolution using an Efficient Sub-Pixel CNN](https://keras.io/examples/vision/super_resolution_sub_pixel/#image-superresolution-using-an-efficient-subpixel-cnn), you can do super-resolution using an ESPCN Model. According to the survey paper, EDSR is one of the top-five best-performing super-resolution methods based on PSNR scores. However, it has more parameters and requires more computational power than other approaches. It has a PSNR value (≈34db) that is slightly higher than ESPCN (≈32db). As per the survey paper, EDSR performs better than ESPCN. Paper: [A comprehensive review of deep learning based single image super-resolution](https://arxiv.org/abs/2102.09351) Comparison Graph: <img src="https://dfzljdn9uc3pi.cloudfront.net/2021/cs-621/1/fig-11-2x.jpg" width="500" /> """ """ ## Imports """ import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import matplotlib.pyplot as plt from tensorflow import keras from tensorflow.keras import layers AUTOTUNE = tf.data.AUTOTUNE """ ## Download the training dataset We use the DIV2K Dataset, a prominent single-image super-resolution dataset with 1,000 images of scenes with various sorts of degradations, divided into 800 images for training, 100 images for validation, and 100 images for testing. We use 4x bicubic downsampled images as our "low quality" reference. 
""" # Download DIV2K from TF Datasets # Using bicubic 4x degradation type div2k_data = tfds.image.Div2k(config="bicubic_x4") div2k_data.download_and_prepare() # Taking train data from div2k_data object train = div2k_data.as_dataset(split="train", as_supervised=True) train_cache = train.cache() # Validation data val = div2k_data.as_dataset(split="validation", as_supervised=True) val_cache = val.cache() """ ## Flip, crop and resize images """ def flip_left_right(lowres_img, highres_img): """Flips Images to left and right.""" # Outputs random values from a uniform distribution in between 0 to 1 rn = tf.random.uniform(shape=(), maxval=1) # If rn is less than 0.5 it returns original lowres_img and highres_img # If rn is greater than 0.5 it returns flipped image return tf.cond( rn < 0.5, lambda: (lowres_img, highres_img), lambda: ( tf.image.flip_left_right(lowres_img), tf.image.flip_left_right(highres_img), ), ) def random_rotate(lowres_img, highres_img): """Rotates Images by 90 degrees.""" # Outputs random values from uniform distribution in between 0 to 4 rn = tf.random.uniform(shape=(), maxval=4, dtype=tf.int32) # Here rn signifies number of times the image(s) are rotated by 90 degrees return tf.image.rot90(lowres_img, rn), tf.image.rot90(highres_img, rn) def random_crop(lowres_img, highres_img, hr_crop_size=96, scale=4): """Crop images. low resolution images: 24x24 high resolution images: 96x96 """ lowres_crop_size = hr_crop_size // scale # 96//4=24 lowres_img_shape = tf.shape(lowres_img)[:2] # (height,width) lowres_width = tf.random.uniform( shape=(), maxval=lowres_img_shape[1] - lowres_crop_size + 1, dtype=tf.int32 ) lowres_height = tf.random.uniform( shape=(), maxval=lowres_img_shape[0] - lowres_crop_size + 1, dtype=tf.int32 ) highres_width = lowres_width * scale highres_height = lowres_height * scale lowres_img_cropped = lowres_img[ lowres_height : lowres_height + lowres_crop_size, lowres_width : lowres_width + lowres_crop_size, ] # 24x24 highres_img_cropped = highres_img[ highres_height : highres_height + hr_crop_size, highres_width : highres_width + hr_crop_size, ] # 96x96 return lowres_img_cropped, highres_img_cropped """ ## Prepare a `tf.data.Dataset` object We augment the training data with random horizontal flips and 90 rotations. As low resolution images, we use 24x24 RGB input patches. 
""" def dataset_object(dataset_cache, training=True): ds = dataset_cache ds = ds.map( lambda lowres, highres: random_crop(lowres, highres, scale=4), num_parallel_calls=AUTOTUNE, ) if training: ds = ds.map(random_rotate, num_parallel_calls=AUTOTUNE) ds = ds.map(flip_left_right, num_parallel_calls=AUTOTUNE) # Batching Data ds = ds.batch(16) if training: # Repeating Data, so that cardinality if dataset becomes infinte ds = ds.repeat() # prefetching allows later images to be prepared while the current image is being processed ds = ds.prefetch(buffer_size=AUTOTUNE) return ds train_ds = dataset_object(train_cache, training=True) val_ds = dataset_object(val_cache, training=False) """ ## Visualize the data Let's visualize a few sample images: """ lowres, highres = next(iter(train_ds)) # High Resolution Images plt.figure(figsize=(10, 10)) for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(highres[i].numpy().astype("uint8")) plt.title(highres[i].shape) plt.axis("off") # Low Resolution Images plt.figure(figsize=(10, 10)) for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.imshow(lowres[i].numpy().astype("uint8")) plt.title(lowres[i].shape) plt.axis("off") def PSNR(super_resolution, high_resolution): """Compute the peak signal-to-noise ratio, measures quality of image.""" # Max value of pixel is 255 psnr_value = tf.image.psnr(high_resolution, super_resolution, max_val=255)[0] return psnr_value """ ## Build the model In the paper, the authors train three models: EDSR, MDSR, and a baseline model. In this code example, we only train the baseline model. ### Comparison with model with three residual blocks The residual block design of EDSR differs from that of ResNet. Batch normalization layers have been removed (together with the final ReLU activation): since batch normalization layers normalize the features, they hurt output value range flexibility. It is thus better to remove them. Further, it also helps reduce the amount of GPU RAM required by the model, since the batch normalization layers consume the same amount of memory as the preceding convolutional layers. <img src="https://miro.medium.com/max/1050/1*EPviXGqlGWotVtV2gqVvNg.png" width="500" /> """ class EDSRModel(tf.keras.Model): def train_step(self, data): # Unpack the data. Its structure depends on your model and # on what you pass to `fit()`. 
x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update metrics (includes the metric that tracks the loss) self.compiled_metrics.update_state(y, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics} def predict_step(self, x): # Adding dummy dimension using tf.expand_dims and converting to float32 using tf.cast x = tf.cast(tf.expand_dims(x, axis=0), tf.float32) # Passing low resolution image to model super_resolution_img = self(x, training=False) # Clips the tensor from min(0) to max(255) super_resolution_img = tf.clip_by_value(super_resolution_img, 0, 255) # Rounds the values of a tensor to the nearest integer super_resolution_img = tf.round(super_resolution_img) # Removes dimensions of size 1 from the shape of a tensor and converting to uint8 super_resolution_img = tf.squeeze( tf.cast(super_resolution_img, tf.uint8), axis=0 ) return super_resolution_img # Residual Block def ResBlock(inputs): x = layers.Conv2D(64, 3, padding="same", activation="relu")(inputs) x = layers.Conv2D(64, 3, padding="same")(x) x = layers.Add()([inputs, x]) return x # Upsampling Block def Upsampling(inputs, factor=2, **kwargs): x = layers.Conv2D(64 * (factor**2), 3, padding="same", **kwargs)(inputs) x = tf.nn.depth_to_space(x, block_size=factor) x = layers.Conv2D(64 * (factor**2), 3, padding="same", **kwargs)(x) x = tf.nn.depth_to_space(x, block_size=factor) return x def make_model(num_filters, num_of_residual_blocks): # Flexible Inputs to input_layer input_layer = layers.Input(shape=(None, None, 3)) # Scaling Pixel Values x = layers.Rescaling(scale=1.0 / 255)(input_layer) x = x_new = layers.Conv2D(num_filters, 3, padding="same")(x) # 16 residual blocks for _ in range(num_of_residual_blocks): x_new = ResBlock(x_new) x_new = layers.Conv2D(num_filters, 3, padding="same")(x_new) x = layers.Add()([x, x_new]) x = Upsampling(x) x = layers.Conv2D(3, 3, padding="same")(x) output_layer = layers.Rescaling(scale=255)(x) return EDSRModel(input_layer, output_layer) model = make_model(num_filters=64, num_of_residual_blocks=16) """ ## Train the model """ # Using adam optimizer with initial learning rate as 1e-4, changing learning rate after 5000 steps to 5e-5 optim_edsr = keras.optimizers.Adam( learning_rate=keras.optimizers.schedules.PiecewiseConstantDecay( boundaries=[5000], values=[1e-4, 5e-5] ) ) # Compiling model with loss as mean absolute error(L1 Loss) and metric as psnr model.compile(optimizer=optim_edsr, loss="mae", metrics=[PSNR]) # Training for more epochs will improve results model.fit(train_ds, epochs=100, steps_per_epoch=200, validation_data=val_ds) """ ## Run inference on new images and plot the results """ def plot_results(lowres, preds): """ Displays low resolution image and super resolution image """ plt.figure(figsize=(24, 14)) plt.subplot(132), plt.imshow(lowres), plt.title("Low resolution") plt.subplot(133), plt.imshow(preds), plt.title("Prediction") plt.show() for lowres, highres in val.take(10): lowres = tf.image.random_crop(lowres, (150, 150, 3)) preds = model.predict_step(lowres) plot_results(lowres, preds) """ ## Final remarks In this example, we implemented the EDSR model 
(Enhanced Deep Residual Networks for Single Image Super-Resolution). You could improve the model accuracy by training it for more epochs, as well as by training it on a wider variety of inputs with mixed degradation factors, so that it can handle a greater range of real-world images. You could also improve on the given baseline EDSR model by implementing EDSR+, or MDSR (Multi-Scale Super-Resolution) and MDSR+, which were proposed in the same paper. | Trained Model | Demo | | :--: | :--: | | [![Generic badge](https://img.shields.io/badge/🤗%20Model-EDSR-red.svg)](https://huggingface.co/keras-io/EDSR) | [![Generic badge](https://img.shields.io/badge/🤗%20Spaces-EDSR-red.svg)](https://huggingface.co/spaces/keras-io/EDSR) | """
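"""
EDSR+ refers to the geometric self-ensemble strategy from the paper: the eight
flip/rotation variants of the input are super-resolved, the transforms are undone on
the outputs, and the results are averaged. The function below is a rough, illustrative
sketch of that idea on top of the `predict_step` defined above, not a tuned or
official implementation:
"""


def self_ensemble_predict(model, lowres):
    preds = []
    for k in range(4):  # rotations by 0, 90, 180 and 270 degrees
        for flip in (False, True):
            aug = tf.image.rot90(lowres, k)
            if flip:
                aug = tf.image.flip_left_right(aug)
            out = tf.cast(model.predict_step(aug), tf.float32)
            # Undo the transforms on the output before averaging.
            if flip:
                out = tf.image.flip_left_right(out)
            out = tf.image.rot90(out, (4 - k) % 4)
            preds.append(out)
    return tf.cast(tf.round(tf.reduce_mean(tf.stack(preds), axis=0)), tf.uint8)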
keras-io/examples/vision/edsr.py/0
{ "file_path": "keras-io/examples/vision/edsr.py", "repo_id": "keras-io", "token_count": 4490 }
109
<jupyter_start><jupyter_text>CutMix data augmentation for image classification**Author:** [Sayan Nath](https://twitter.com/sayannath2350)**Date created:** 2021/06/08**Last modified:** 2023/11/14**Description:** Data augmentation with CutMix for image classification on CIFAR-10. Introduction _CutMix_ is a data augmentation technique that addresses the issue of information lossand inefficiency present in regional dropout strategies.Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,you replace the removed regions with a patch from another image,while the ground truth labels are mixed proportionally to the number of pixels of combined images.CutMix was proposed in[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/abs/1905.04899)(Yun et al., 2019)It's implemented via the following formulas:where `M` is the binary mask which indicates the cutout and the fill-inregions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution)The coordinates of bounding boxes are:which indicates the cutout and fill-in regions in case of the images.The bounding box sampling is represented by:where `rx, ry` are randomly drawn from a uniform distribution with upper bound. Setup<jupyter_code>import numpy as np import keras import matplotlib.pyplot as plt from keras import layers # TF imports related to tf.data preprocessing from tensorflow import clip_by_value from tensorflow import data as tf_data from tensorflow import image as tf_image from tensorflow import random as tf_random keras.utils.set_random_seed(42)<jupyter_output><empty_output><jupyter_text>Load the CIFAR-10 datasetIn this example, we will use the[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html).<jupyter_code>(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() y_train = keras.utils.to_categorical(y_train, num_classes=10) y_test = keras.utils.to_categorical(y_test, num_classes=10) print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) class_names = [ "Airplane", "Automobile", "Bird", "Cat", "Deer", "Dog", "Frog", "Horse", "Ship", "Truck", ]<jupyter_output><empty_output><jupyter_text>Define hyperparameters<jupyter_code>AUTO = tf_data.AUTOTUNE BATCH_SIZE = 32 IMG_SIZE = 32<jupyter_output><empty_output><jupyter_text>Define the image preprocessing function<jupyter_code>def preprocess_image(image, label): image = tf_image.resize(image, (IMG_SIZE, IMG_SIZE)) image = tf_image.convert_image_dtype(image, "float32") / 255.0 label = keras.ops.cast(label, dtype="float32") return image, label<jupyter_output><empty_output><jupyter_text>Convert the data into TensorFlow `Dataset` objects<jupyter_code>train_ds_one = ( tf_data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(1024) .map(preprocess_image, num_parallel_calls=AUTO) ) train_ds_two = ( tf_data.Dataset.from_tensor_slices((x_train, y_train)) .shuffle(1024) .map(preprocess_image, num_parallel_calls=AUTO) ) train_ds_simple = tf_data.Dataset.from_tensor_slices((x_train, y_train)) test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test)) train_ds_simple = ( train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # Combine two shuffled datasets from the same training data. 
train_ds = tf_data.Dataset.zip((train_ds_one, train_ds_two)) test_ds = ( test_ds.map(preprocess_image, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) )<jupyter_output><empty_output><jupyter_text>Define the CutMix data augmentation functionThe CutMix function takes two `image` and `label` pairs to perform the augmentation.It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution)and returns a bounding box from `get_box` function. We then crop the second image (`image2`)and pad this image in the final padded image at the same location.<jupyter_code>def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2): gamma_1_sample = tf_random.gamma(shape=[size], alpha=concentration_1) gamma_2_sample = tf_random.gamma(shape=[size], alpha=concentration_0) return gamma_1_sample / (gamma_1_sample + gamma_2_sample) def get_box(lambda_value): cut_rat = keras.ops.sqrt(1.0 - lambda_value) cut_w = IMG_SIZE * cut_rat # rw cut_w = keras.ops.cast(cut_w, "int32") cut_h = IMG_SIZE * cut_rat # rh cut_h = keras.ops.cast(cut_h, "int32") cut_x = keras.random.uniform((1,), minval=0, maxval=IMG_SIZE) # rx cut_x = keras.ops.cast(cut_x, "int32") cut_y = keras.random.uniform((1,), minval=0, maxval=IMG_SIZE) # ry cut_y = keras.ops.cast(cut_y, "int32") boundaryx1 = clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE) boundaryy1 = clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE) bbx2 = clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE) bby2 = clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE) target_h = bby2 - boundaryy1 if target_h == 0: target_h += 1 target_w = bbx2 - boundaryx1 if target_w == 0: target_w += 1 return boundaryx1, boundaryy1, target_h, target_w def cutmix(train_ds_one, train_ds_two): (image1, label1), (image2, label2) = train_ds_one, train_ds_two alpha = [0.25] beta = [0.25] # Get a sample from the Beta distribution lambda_value = sample_beta_distribution(1, alpha, beta) # Define Lambda lambda_value = lambda_value[0][0] # Get the bounding box offsets, heights and widths boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value) # Get a patch from the second image (`image2`) crop2 = tf_image.crop_to_bounding_box( image2, boundaryy1, boundaryx1, target_h, target_w ) # Pad the `image2` patch (`crop2`) with the same offset image2 = tf_image.pad_to_bounding_box( crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE ) # Get a patch from the first image (`image1`) crop1 = tf_image.crop_to_bounding_box( image1, boundaryy1, boundaryx1, target_h, target_w ) # Pad the `image1` patch (`crop1`) with the same offset img1 = tf_image.pad_to_bounding_box( crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE ) # Modify the first image by subtracting the patch from `image1` # (before applying the `image2` patch) image1 = image1 - img1 # Add the modified `image1` and `image2` together to get the CutMix image image = image1 + image2 # Adjust Lambda in accordance to the pixel ration lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE) lambda_value = keras.ops.cast(lambda_value, "float32") # Combine the labels of both images label = lambda_value * label1 + (1 - lambda_value) * label2 return image, label<jupyter_output><empty_output><jupyter_text>**Note**: we are combining two images to create a single one. 
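Written out in the notation of the CutMix paper, the mixing performed by the `cutmix` function above is: $$\tilde{x} = M \odot x_1 + (1 - M) \odot x_2, \qquad \tilde{y} = \lambda\, y_1 + (1 - \lambda)\, y_2, \qquad \lambda = 1 - \frac{r_w\, r_h}{W \cdot H}$$ where `M` is the binary mask that keeps `image1` outside the pasted patch, `r_w`, `r_h` are the patch width and height, and `W`, `H` are the image width and height (both equal to `IMG_SIZE` here).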
Visualize the new dataset after applying the CutMix augmentation<jupyter_code># Create the new dataset using our `cutmix` utility train_ds_cmu = ( train_ds.shuffle(1024) .map(cutmix, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # Let's preview 9 samples from the dataset image_batch, label_batch = next(iter(train_ds_cmu)) plt.figure(figsize=(10, 10)) for i in range(9): ax = plt.subplot(3, 3, i + 1) plt.title(class_names[np.argmax(label_batch[i])]) plt.imshow(image_batch[i]) plt.axis("off")<jupyter_output><empty_output><jupyter_text>Define a ResNet-20 model<jupyter_code>def resnet_layer( inputs, num_filters=16, kernel_size=3, strides=1, activation="relu", batch_normalization=True, conv_first=True, ): conv = layers.Conv2D( num_filters, kernel_size=kernel_size, strides=strides, padding="same", kernel_initializer="he_normal", kernel_regularizer=keras.regularizers.L2(1e-4), ) x = inputs if conv_first: x = conv(x) if batch_normalization: x = layers.BatchNormalization()(x) if activation is not None: x = layers.Activation(activation)(x) else: if batch_normalization: x = layers.BatchNormalization()(x) if activation is not None: x = layers.Activation(activation)(x) x = conv(x) return x def resnet_v20(input_shape, depth, num_classes=10): if (depth - 2) % 6 != 0: raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])") # Start model definition. num_filters = 16 num_res_blocks = int((depth - 2) / 6) inputs = layers.Input(shape=input_shape) x = resnet_layer(inputs=inputs) # Instantiate the stack of residual units for stack in range(3): for res_block in range(num_res_blocks): strides = 1 if stack > 0 and res_block == 0: # first layer but not first stack strides = 2 # downsample y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides) y = resnet_layer(inputs=y, num_filters=num_filters, activation=None) if stack > 0 and res_block == 0: # first layer but not first stack # linear projection residual shortcut connection to match # changed dims x = resnet_layer( inputs=x, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False, ) x = layers.add([x, y]) x = layers.Activation("relu")(x) num_filters *= 2 # Add classifier on top. # v1 does not use BN after last shortcut connection-ReLU x = layers.AveragePooling2D(pool_size=8)(x) y = layers.Flatten()(x) outputs = layers.Dense( num_classes, activation="softmax", kernel_initializer="he_normal" )(y) # Instantiate model. 
model = keras.Model(inputs=inputs, outputs=outputs) return model def training_model(): return resnet_v20((32, 32, 3), 20) initial_model = training_model() initial_model.save_weights("initial_weights.weights.h5")<jupyter_output><empty_output><jupyter_text>Train the model with the dataset augmented by CutMix<jupyter_code>model = training_model() model.load_weights("initial_weights.weights.h5") model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(train_ds_cmu, validation_data=test_ds, epochs=15) test_loss, test_accuracy = model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_accuracy * 100))<jupyter_output><empty_output><jupyter_text>Train the model using the original non-augmented dataset<jupyter_code>model = training_model() model.load_weights("initial_weights.weights.h5") model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(train_ds_simple, validation_data=test_ds, epochs=15) test_loss, test_accuracy = model.evaluate(test_ds) print("Test accuracy: {:.2f}%".format(test_accuracy * 100))<jupyter_output><empty_output>
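<jupyter_text>As a quick sanity check (an illustrative extra cell, not part of the original training pipeline), you can run `cutmix` on a single pair of samples and confirm that the mixed label is a convex combination of the two one-hot labels, i.e. that it still sums to 1:<jupyter_code>sample_one = next(iter(train_ds_one.take(1)))
sample_two = next(iter(train_ds_two.take(1)))
mixed_image, mixed_label = cutmix(sample_one, sample_two)
print(mixed_label)
print(keras.ops.sum(mixed_label))  # expected: 1.0<jupyter_output><empty_output>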
keras-io/examples/vision/ipynb/cutmix.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/cutmix.ipynb", "repo_id": "keras-io", "token_count": 4553 }
110
<jupyter_start><jupyter_text>Image Classification using Global Context Vision Transformer**Author:** Md Awsafur Rahman**Date created:** 2023/10/30**Last modified:** 2023/10/30**Description:** Implementation and fine-tuning of Global Context Vision Transformer for image classification. Setup<jupyter_code>!pip install --upgrade keras_cv tensorflow !pip install --upgrade keras import keras from keras_cv.layers import DropPath from keras import ops from keras import layers import tensorflow as tf # only for dataloader import tensorflow_datasets as tfds # for flower dataset from skimage.data import chelsea import matplotlib.pyplot as plt import numpy as np<jupyter_output><empty_output><jupyter_text>IntroductionIn this notebook, we will utilize multi-backend Keras 3.0 to implement the[**GCViT: Global Context Vision Transformer**](https://arxiv.org/abs/2206.09959) paper,presented at ICML 2023 by A Hatamizadeh et al. The, we will fine-tune the model on theFlower dataset for image classification task, leveraging the official ImageNet pre-trainedweights. A highlight of this notebook is its compatibility with multiple backends:TensorFlow, PyTorch, and JAX, showcasing the true potential of multi-backend Keras. Motivation> **Note:** In this section we'll learn about the backstory of GCViT and try tounderstand why it is proposed.* During recent years, **Transformers** have achieved dominance in **Natural LanguageProcessing (NLP)** tasks and with the **self-attention** mechanism which allows forcapturing both long and short-range information.* Following this trend, **Vision Transformer (ViT)** proposed to utilize image patches astokens in a gigantic architecture similar to encoder of the original Transformer.* Despite the historic dominance of **Convolutional Neural Network (CNN)** in computervision, **ViT-based** models have shown **SOTA or competitive performance** in variouscomputer vision tasks.* However, **quadratic [`O(n^2)`] computational complexity** of self-attention and **lackof multi-scale information** makes it difficult for **ViT** to be considered asgeneral-purpose architecture for Compute Vision tasks like **segmentation and objectdetection** where it requires **dense prediction at the pixel level**.* Swin Transformer has attempted to address the issues of **ViT** by proposing**multi-resolution/hierarchical** architectures in which the self-attention is computedin **local windows** and cross-window connections such as **window shifting** are usedfor modeling the interactions across different regions. But the **limited receptive fieldof local windows** can not capture long-range information, and cross-window-connectionschemes such as **window-shifting only cover a small neighborhood** in the vicinity ofeach window. Also, it lacks **inductive-bias** that encourages certain translationinvariance is still preferable for general-purpose visual modeling, particularly for thedense prediction tasks of object detection and semantic segmentation. * To address above limitations, **Global Context (GC) ViT** network is proposed. ArchitectureLet's have a quick **overview** of our key components,1. `Stem/PatchEmbed:` A stem/patchify layer processes images at the network’s beginning.For this network, it creates **patches/tokens** and converts them into **embeddings**.2. `Level:` It is the repetitive building block that extracts features using differentblocks.3. 
`Global Token Gen./FeatureExtraction:` It generates **global tokens/patches** with**Depthwise-CNN**, **SqueezeAndExcitation (Squeeze-Excitation)**, **CNN** and**MaxPooling**. So basicallyit's a Feature Extractor.4. `Block:` It is the repetitive module that applies attention to the features andprojects them to a certain dimension. 1. `Local-MSA:` Local Multi head Self Attention. 2. `Global-MSA:` Global Multi head Self Attention. 3. `MLP:` Linear layer that projects a vector to another dimension.5. `Downsample/ReduceSize:` It is very similar to **Global Token Gen.** module except ituses **CNN** instead of **MaxPooling** to downsample with additional **LayerNormalization** modules.6. `Head:` It is the module responsible for the classification task. 1. `Pooling:` It converts `N x 2D` features to `N x 1D` features. 2. `Classifier:` It processes `N x 1D` features to make a decision about class.I've annotated the architecture figure to make it easier to digest, Unit Blocks> **Note:** This blocks are used to build other modules throughout the paper. Most of theblocks are either borrowed from other work or modified version old work.1. `SqueezeAndExcitation`: **Squeeze-Excitation (SE)** aka **Bottleneck** module acts sdkind of **channelattention**. It consits of **AvgPooling**, **Dense/FullyConnected (FC)/Linear** ,**GELU** and **Sigmoid** module.2. `Fused-MBConv:` This is similar to the one used in **EfficientNetV2**. It uses**Depthwise-Conv**, **GELU**, **SqueezeAndExcitation**, **Conv**, to extract feature witha resiudalconnection. Note that, no new module is declared for this one, we simply appliedcorresponding modules directly.3. `ReduceSize`: It is a **CNN** based **downsample** module which abvobe mentioned`Fused-MBConv` module to extract feature, **Strided Conv** to simultaneously reducespatial dimension and increse channelwise dimention of the features and finally**LayerNormalization** module to normalize features. In the paper/figure this module isreferred as **downsample** module. I think it is mention worthy that **SwniTransformer**used `PatchMerging` module instead of `ReduceSize` to reduce the spatial dimention andincrease channelwise dimension which uses **fully-connected/dense/linear** module.According to the **GCViT** paper, one of the purposes of using `ReduceSize` is to addinductive bias through **CNN** module.4. `MLP:` This is our very own **Multi Layer Perceptron** module. This afeed-forward/fully-connected/linear module which simply projects input to an arbitarydimension.<jupyter_code>class SqueezeAndExcitation(layers.Layer): """Squeeze and excitation block. Args: output_dim: output features dimension, if `None` use same dim as input. expansion: expansion ratio. """ def __init__(self, output_dim=None, expansion=0.25, **kwargs): super().__init__(**kwargs) self.expansion = expansion self.output_dim = output_dim def build(self, input_shape): inp = input_shape[-1] self.output_dim = self.output_dim or inp self.avg_pool = layers.GlobalAvgPool2D(keepdims=True, name="avg_pool") self.fc = [ layers.Dense(int(inp * self.expansion), use_bias=False, name="fc_0"), layers.Activation("gelu", name="fc_1"), layers.Dense(self.output_dim, use_bias=False, name="fc_2"), layers.Activation("sigmoid", name="fc_3"), ] super().build(input_shape) def call(self, inputs, **kwargs): x = self.avg_pool(inputs) for layer in self.fc: x = layer(x) return x * inputs class ReduceSize(layers.Layer): """Down-sampling block. 
Args: keepdims: if False spatial dim is reduced and channel dim is increased """ def __init__(self, keepdims=False, **kwargs): super().__init__(**kwargs) self.keepdims = keepdims def build(self, input_shape): embed_dim = input_shape[-1] dim_out = embed_dim if self.keepdims else 2 * embed_dim self.pad1 = layers.ZeroPadding2D(1, name="pad1") self.pad2 = layers.ZeroPadding2D(1, name="pad2") self.conv = [ layers.DepthwiseConv2D( kernel_size=3, strides=1, padding="valid", use_bias=False, name="conv_0" ), layers.Activation("gelu", name="conv_1"), SqueezeAndExcitation(name="conv_2"), layers.Conv2D( embed_dim, kernel_size=1, strides=1, padding="valid", use_bias=False, name="conv_3", ), ] self.reduction = layers.Conv2D( dim_out, kernel_size=3, strides=2, padding="valid", use_bias=False, name="reduction", ) self.norm1 = layers.LayerNormalization( -1, 1e-05, name="norm1" ) # eps like PyTorch self.norm2 = layers.LayerNormalization(-1, 1e-05, name="norm2") def call(self, inputs, **kwargs): x = self.norm1(inputs) xr = self.pad1(x) for layer in self.conv: xr = layer(xr) x = x + xr x = self.pad2(x) x = self.reduction(x) x = self.norm2(x) return x class MLP(layers.Layer): """Multi-Layer Perceptron (MLP) block. Args: hidden_features: hidden features dimension. out_features: output features dimension. activation: activation function. dropout: dropout rate. """ def __init__( self, hidden_features=None, out_features=None, activation="gelu", dropout=0.0, **kwargs, ): super().__init__(**kwargs) self.hidden_features = hidden_features self.out_features = out_features self.activation = activation self.dropout = dropout def build(self, input_shape): self.in_features = input_shape[-1] self.hidden_features = self.hidden_features or self.in_features self.out_features = self.out_features or self.in_features self.fc1 = layers.Dense(self.hidden_features, name="fc1") self.act = layers.Activation(self.activation, name="act") self.fc2 = layers.Dense(self.out_features, name="fc2") self.drop1 = layers.Dropout(self.dropout, name="drop1") self.drop2 = layers.Dropout(self.dropout, name="drop2") def call(self, inputs, **kwargs): x = self.fc1(inputs) x = self.act(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x<jupyter_output><empty_output><jupyter_text>Stem> **Notes**: In the code, this module is referred to as **PatchEmbed** but on paper, itis referred to as **Stem**.In the model, we have first used `patch_embed` module. Let's try to understand thismodule. As we can see from the `call` method,1. This module first **pads** input2. Then uses **convolutions** to extract patches with embeddings.3. Finally, uses `ReduceSize` module to first extract features with **convolution** butneither reduces spatial dimension nor increases spatial dimension.4. One important point to notice, unlike **ViT** or **SwinTransformer**, **GCViT**creates **overlapping patches**. We can notice that from the code,`Conv2D(self.embed_dim, kernel_size=3, strides=2, name='proj')`. If we wanted**non-overlapping** patches then we would've used the same `kernel_size` and `stride`.5. This module reduces the spatial dimension of input by `4x`.> Summary: image → padding → convolution →(feature_extract + downsample)<jupyter_code>class PatchEmbed(layers.Layer): """Patch embedding block. Args: embed_dim: feature size dimension. 
""" def __init__(self, embed_dim, **kwargs): super().__init__(**kwargs) self.embed_dim = embed_dim def build(self, input_shape): self.pad = layers.ZeroPadding2D(1, name="pad") self.proj = layers.Conv2D(self.embed_dim, 3, 2, name="proj") self.conv_down = ReduceSize(keepdims=True, name="conv_down") def call(self, inputs, **kwargs): x = self.pad(inputs) x = self.proj(x) x = self.conv_down(x) return x<jupyter_output><empty_output><jupyter_text>Global Token Gen.> **Notes:** It is one of the two **CNN** modules that is used to imppose inductive bias.As we can see from above cell, in the `level` we have first used `to_q_global/GlobalToken Gen./FeatureExtraction`. Let's try to understand how it works,* This module is series of `FeatureExtract` module, according to paper we need torepeat this module `K` times, where `K = log2(H/h)`, `H = feature_map_height`,`W = feature_map_width`.* `FeatureExtraction:` This layer is very similar to `ReduceSize` module except it uses**MaxPooling** module to reduce the dimension, it doesn't increse feature dimension(channelsie) and it doesn't uses **LayerNormalizaton**. This module is used to in`Generate Token Gen.` module repeatedly to generte **global tokens** for**global-context-attention**.* One important point to notice from the figure is that, **global tokens** is sharedacross the whole image which means we use only **one global window** for **all localtokens** in a image. This makes the computation very efficient.* For input feature map with shape `(B, H, W, C)`, we'll get output shape `(B, h, w, C)`.If we copy these global tokens for total `M` local windows in an image where,`M = (H x W)/(h x w) = num_window`, then output shape: `(B * M, h, w, C)`."> Summary: This module is used to `resize` the image to fit window.<jupyter_code>class FeatureExtraction(layers.Layer): """Feature extraction block. Args: keepdims: bool argument for maintaining the resolution. """ def __init__(self, keepdims=False, **kwargs): super().__init__(**kwargs) self.keepdims = keepdims def build(self, input_shape): embed_dim = input_shape[-1] self.pad1 = layers.ZeroPadding2D(1, name="pad1") self.pad2 = layers.ZeroPadding2D(1, name="pad2") self.conv = [ layers.DepthwiseConv2D(3, 1, use_bias=False, name="conv_0"), layers.Activation("gelu", name="conv_1"), SqueezeAndExcitation(name="conv_2"), layers.Conv2D(embed_dim, 1, 1, use_bias=False, name="conv_3"), ] if not self.keepdims: self.pool = layers.MaxPool2D(3, 2, name="pool") super().build(input_shape) def call(self, inputs, **kwargs): x = inputs xr = self.pad1(x) for layer in self.conv: xr = layer(xr) x = x + xr if not self.keepdims: x = self.pool(self.pad2(x)) return x class GlobalQueryGenerator(layers.Layer): """Global query generator. Args: keepdims: to keep the dimension of FeatureExtraction layer. For instance, repeating log(56/7) = 3 blocks, with input window dimension 56 and output window dimension 7 at down-sampling ratio 2. Please check Fig.5 of GC ViT paper for details. """ def __init__(self, keepdims=False, **kwargs): super().__init__(**kwargs) self.keepdims = keepdims def build(self, input_shape): self.to_q_global = [ FeatureExtraction(keepdims, name=f"to_q_global_{i}") for i, keepdims in enumerate(self.keepdims) ] super().build(input_shape) def call(self, inputs, **kwargs): x = inputs for layer in self.to_q_global: x = layer(x) return x<jupyter_output><empty_output><jupyter_text>Attention> **Notes:** This is the core contribution of the paper.As we can see from the `call` method,1. 
`WindowAttention` module applies both **local** and **global** window attentiondepending on `global_query` parameter.2. First it converts input features into `query, key, value` for local attention and`key, value` for global attention. For global attention, it takes global query from`Global Token Gen.`. One thing to notice from the code is that we divide the **featuresor embed_dim** among all the **heads of Transformer** to reduce the computation.`qkv = tf.reshape(qkv, [B_, N, self.qkv_size, self.num_heads, C // self.num_heads])`3. Before sending query, key and value for attention, **global token** goes through animportant process. Same global tokens or one global window gets copied for all the localwindows to increase efficiency.`q_global = tf.repeat(q_global, repeats=B_//B, axis=0)`, here `B_//B` means `num_windows`in a image.4. Then simply applies `local-window-self-attention` or `global-window-attention`depending on `global_query` parameter. One thing to notice from the code is that we areadding **relative-positional-embedding** with the **attention mask** instead of the**patch embedding**.`attn = attn + relative_position_bias[tf.newaxis,]`5. Now, let's think for a bit and try to understand what is happening here. Let's focuson the figure below. We can see from the left, that in the **local-attention** the**query is local** and it's **limited to the local window** (red square border) hence wedon't have access to long-range information. But on the right that due to **globalquery** we're now **not limited to local-windows** (blue square border) and we haveaccess to long-range information.6. In **ViT** we compare (attention) image-tokens with image-tokens, in**SwinTransformer** we compare window-tokens with window-tokens but in **GCViT** wecompare image-tokens with window-tokens. But now you may ask, how can compare(attention)image-tokens with window-tokens even after image-tokens have larger dimensions thanwindow-tokens? (from above figure image-tokens have shape `(1, 8, 8, 3)` andwindow-tokens have shape `(1, 4, 4, 3)`). Yes, you are right we can't directly comparethem hence we resize image-tokens to fit window-tokens with `Global TokenGen./FeatureExtraction` **CNN** module. The following table should give you a clearcomparison,| Model | Query Tokens | Key-Value Tokens | Attention Type | Attention Coverage ||------------------|-----------------|-------------------|---------------------------|--------------------|| ViT | image | image | self-attention | global || SwinTransformer | window | window | self-attention | local || **GCViT** | **resized-image** | **window** | **image-window attention** | **global** |<jupyter_code>class WindowAttention(layers.Layer): """Local window attention. This implementation was proposed by [Liu et al., 2021](https://arxiv.org/abs/2103.14030) in SwinTransformer. Args: window_size: window size. num_heads: number of attention head. global_query: if the input contains global_query qkv_bias: bool argument for query, key, value learnable bias. qk_scale: bool argument to scaling query, key. attention_dropout: attention dropout rate. projection_dropout: output dropout rate. 
""" def __init__( self, window_size, num_heads, global_query, qkv_bias=True, qk_scale=None, attention_dropout=0.0, projection_dropout=0.0, **kwargs, ): super().__init__(**kwargs) window_size = (window_size, window_size) self.window_size = window_size self.num_heads = num_heads self.global_query = global_query self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.attention_dropout = attention_dropout self.projection_dropout = projection_dropout def build(self, input_shape): embed_dim = input_shape[0][-1] head_dim = embed_dim // self.num_heads self.scale = self.qk_scale or head_dim**-0.5 self.qkv_size = 3 - int(self.global_query) self.qkv = layers.Dense( embed_dim * self.qkv_size, use_bias=self.qkv_bias, name="qkv" ) self.relative_position_bias_table = self.add_weight( name="relative_position_bias_table", shape=[ (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), self.num_heads, ], initializer=keras.initializers.TruncatedNormal(stddev=0.02), trainable=True, dtype=self.dtype, ) self.attn_drop = layers.Dropout(self.attention_dropout, name="attn_drop") self.proj = layers.Dense(embed_dim, name="proj") self.proj_drop = layers.Dropout(self.projection_dropout, name="proj_drop") self.softmax = layers.Activation("softmax", name="softmax") super().build(input_shape) def get_relative_position_index(self): coords_h = ops.arange(self.window_size[0]) coords_w = ops.arange(self.window_size[1]) coords = ops.stack(ops.meshgrid(coords_h, coords_w, indexing="ij"), axis=0) coords_flatten = ops.reshape(coords, [2, -1]) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = ops.transpose(relative_coords, axes=[1, 2, 0]) relative_coords_xx = relative_coords[:, :, 0] + self.window_size[0] - 1 relative_coords_yy = relative_coords[:, :, 1] + self.window_size[1] - 1 relative_coords_xx = relative_coords_xx * (2 * self.window_size[1] - 1) relative_position_index = relative_coords_xx + relative_coords_yy return relative_position_index def call(self, inputs, **kwargs): if self.global_query: inputs, q_global = inputs B = ops.shape(q_global)[0] # B, N, C else: inputs = inputs[0] B_, N, C = ops.shape(inputs) # B*num_window, num_tokens, channels qkv = self.qkv(inputs) qkv = ops.reshape( qkv, [B_, N, self.qkv_size, self.num_heads, C // self.num_heads] ) qkv = ops.transpose(qkv, [2, 0, 3, 1, 4]) if self.global_query: k, v = ops.split( qkv, indices_or_sections=2, axis=0 ) # for unknown shame num=None will throw error q_global = ops.repeat( q_global, repeats=B_ // B, axis=0 ) # num_windows = B_//B => q_global same for all windows in a img q = ops.reshape(q_global, [B_, N, self.num_heads, C // self.num_heads]) q = ops.transpose(q, axes=[0, 2, 1, 3]) else: q, k, v = ops.split(qkv, indices_or_sections=3, axis=0) q = ops.squeeze(q, axis=0) k = ops.squeeze(k, axis=0) v = ops.squeeze(v, axis=0) q = q * self.scale attn = q @ ops.transpose(k, axes=[0, 1, 3, 2]) relative_position_bias = ops.take( self.relative_position_bias_table, ops.reshape(self.get_relative_position_index(), [-1]), ) relative_position_bias = ops.reshape( relative_position_bias, [ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1, ], ) relative_position_bias = ops.transpose(relative_position_bias, axes=[2, 0, 1]) attn = attn + relative_position_bias[None,] attn = self.softmax(attn) attn = self.attn_drop(attn) x = ops.transpose((attn @ v), axes=[0, 2, 1, 3]) x = ops.reshape(x, [B_, N, C]) x = self.proj_drop(self.proj(x)) return x<jupyter_output><empty_output><jupyter_text>Block> 
**Notes:** This module doesn't have any Convolutional module.In the `level` second module that we have used is `block`. Let's try to understand how itworks. As we can see from the `call` method,1. `Block` module takes either only feature_maps for local attention or additional globalquery for global attention.2. Before sending feature maps for attention, this module converts **batch feature maps**to **batch windows** as we'll be applying **Window Attention**.3. Then we send batch **batch windows** for attention.4. After attention has been applied we revert **batch windows** to **batch feature maps**.5. Before sending the attention to applied features for output, this module applies**Stochastic Depth** regularization in the residual connection. Also, before applying**Stochastic Depth** it rescales the input with trainable parameters. Note that, this**Stochastic Depth** block hasn't been shown in the figure of the paper. WindowIn the `block` module, we have created **windows** before and after applying attention.Let's try to understand how we're creating windows,* Following module converts feature maps `(B, H, W, C)` to stacked windows`(B x H/h x W/w, h, w, C)` → `(num_windows_batch, window_size, window_size, channel)`* This module uses `reshape` & `transpose` to create these windows out of image insteadof iterating over them.<jupyter_code>class Block(layers.Layer): """GCViT block. Args: window_size: window size. num_heads: number of attention head. global_query: apply global window attention mlp_ratio: MLP ratio. qkv_bias: bool argument for query, key, value learnable bias. qk_scale: bool argument to scaling query, key. drop: dropout rate. attention_dropout: attention dropout rate. path_drop: drop path rate. activation: activation function. layer_scale: layer scaling coefficient. 
""" def __init__( self, window_size, num_heads, global_query, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, dropout=0.0, attention_dropout=0.0, path_drop=0.0, activation="gelu", layer_scale=None, **kwargs, ): super().__init__(**kwargs) self.window_size = window_size self.num_heads = num_heads self.global_query = global_query self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.dropout = dropout self.attention_dropout = attention_dropout self.path_drop = path_drop self.activation = activation self.layer_scale = layer_scale def build(self, input_shape): B, H, W, C = input_shape[0] self.norm1 = layers.LayerNormalization(-1, 1e-05, name="norm1") self.attn = WindowAttention( window_size=self.window_size, num_heads=self.num_heads, global_query=self.global_query, qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, attention_dropout=self.attention_dropout, projection_dropout=self.dropout, name="attn", ) self.drop_path1 = DropPath(self.path_drop) self.drop_path2 = DropPath(self.path_drop) self.norm2 = layers.LayerNormalization(-1, 1e-05, name="norm2") self.mlp = MLP( hidden_features=int(C * self.mlp_ratio), dropout=self.dropout, activation=self.activation, name="mlp", ) if self.layer_scale is not None: self.gamma1 = self.add_weight( name="gamma1", shape=[C], initializer=keras.initializers.Constant(self.layer_scale), trainable=True, dtype=self.dtype, ) self.gamma2 = self.add_weight( name="gamma2", shape=[C], initializer=keras.initializers.Constant(self.layer_scale), trainable=True, dtype=self.dtype, ) else: self.gamma1 = 1.0 self.gamma2 = 1.0 self.num_windows = int(H // self.window_size) * int(W // self.window_size) super().build(input_shape) def call(self, inputs, **kwargs): if self.global_query: inputs, q_global = inputs else: inputs = inputs[0] B, H, W, C = ops.shape(inputs) x = self.norm1(inputs) # create windows and concat them in batch axis x = self.window_partition(x, self.window_size) # (B_, win_h, win_w, C) # flatten patch x = ops.reshape(x, [-1, self.window_size * self.window_size, C]) # attention if self.global_query: x = self.attn([x, q_global]) else: x = self.attn([x]) # reverse window partition x = self.window_reverse(x, self.window_size, H, W, C) # FFN x = inputs + self.drop_path1(x * self.gamma1) x = x + self.drop_path2(self.gamma2 * self.mlp(self.norm2(x))) return x def window_partition(self, x, window_size): """ Args: x: (B, H, W, C) window_size: window size Returns: local window features (num_windows*B, window_size, window_size, C) """ B, H, W, C = ops.shape(x) x = ops.reshape( x, [ -1, H // window_size, window_size, W // window_size, window_size, C, ], ) x = ops.transpose(x, axes=[0, 1, 3, 2, 4, 5]) windows = ops.reshape(x, [-1, window_size, window_size, C]) return windows def window_reverse(self, windows, window_size, H, W, C): """ Args: windows: local window features (num_windows*B, window_size, window_size, C) window_size: Window size H: Height of image W: Width of image C: Channel of image Returns: x: (B, H, W, C) """ x = ops.reshape( windows, [ -1, H // window_size, W // window_size, window_size, window_size, C, ], ) x = ops.transpose(x, axes=[0, 1, 3, 2, 4, 5]) x = ops.reshape(x, [-1, H, W, C]) return x<jupyter_output><empty_output><jupyter_text>Level> **Note:** This module has both Transformer and CNN modules.In the model, the second module that we have used is `level`. Let's try to understandthis module. As we can see from the `call` method,1. First it creates **global_token** with a series of `FeatureExtraction` modules. 
Aswe'll seelater that `FeatureExtraction` is nothing but a simple **CNN** based module.2. Then it uses series of`Block` modules to apply **local or global window attention**depending on depth level.3. Finally, it uses `ReduceSize` to reduce the dimension of **contextualized features**.> Summary: feature_map → global_token → local/global windowattention → dowsample<jupyter_code>class Level(layers.Layer): """GCViT level. Args: depth: number of layers in each stage. num_heads: number of heads in each stage. window_size: window size in each stage. keepdims: dims to keep in FeatureExtraction. downsample: bool argument for down-sampling. mlp_ratio: MLP ratio. qkv_bias: bool argument for query, key, value learnable bias. qk_scale: bool argument to scaling query, key. drop: dropout rate. attention_dropout: attention dropout rate. path_drop: drop path rate. layer_scale: layer scaling coefficient. """ def __init__( self, depth, num_heads, window_size, keepdims, downsample=True, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, dropout=0.0, attention_dropout=0.0, path_drop=0.0, layer_scale=None, **kwargs, ): super().__init__(**kwargs) self.depth = depth self.num_heads = num_heads self.window_size = window_size self.keepdims = keepdims self.downsample = downsample self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.dropout = dropout self.attention_dropout = attention_dropout self.path_drop = path_drop self.layer_scale = layer_scale def build(self, input_shape): path_drop = ( [self.path_drop] * self.depth if not isinstance(self.path_drop, list) else self.path_drop ) self.blocks = [ Block( window_size=self.window_size, num_heads=self.num_heads, global_query=bool(i % 2), mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, dropout=self.dropout, attention_dropout=self.attention_dropout, path_drop=path_drop[i], layer_scale=self.layer_scale, name=f"blocks_{i}", ) for i in range(self.depth) ] self.down = ReduceSize(keepdims=False, name="downsample") self.q_global_gen = GlobalQueryGenerator(self.keepdims, name="q_global_gen") super().build(input_shape) def call(self, inputs, **kwargs): x = inputs q_global = self.q_global_gen(x) # shape: (B, win_size, win_size, C) for i, blk in enumerate(self.blocks): if i % 2: x = blk([x, q_global]) # shape: (B, H, W, C) else: x = blk([x]) # shape: (B, H, W, C) if self.downsample: x = self.down(x) # shape: (B, H//2, W//2, 2*C) return x<jupyter_output><empty_output><jupyter_text>ModelLet's directly jump to the model. As we can see from the `call` method,1. It creates patch embeddings from an image. This layer doesn't flattens theseembeddings which means output of this module will be`(batch, height/window_size, width/window_size, embed_dim)` instead of`(batch, height x width/window_size^2, embed_dim)`.2. Then it applies `Dropout` module which randomly sets input units to 0.3. It passes these embeddings to series of `Level` modules which we are calling `level`where, 1. Global token is generated 1. Both local & global attention is applied 1. Finally downsample is applied.4. So, output after `n` number of **levels**, shape: `(batch, width/window_size x 2^{n-1},width/window_size x 2^{n-1}, embed_dim x 2^{n-1})`. In the last layer,paper doesn't use **downsample** and increase **channels**.5. Output of above layer is normalized using `LayerNormalization` module.6. In the head, 2D features are converted to 1D features with `Pooling` module. Outputshape after this module is `(batch, embed_dim x 2^{n-1})`7. 
Finally, pooled features are sent to `Dense/Linear` module for classification.> Sumamry: image → (patchs + embedding) → dropout→ (attention + feature extraction) → normalizaion →pooling → classify<jupyter_code>class GCViT(keras.Model): """GCViT model. Args: window_size: window size in each stage. embed_dim: feature size dimension. depths: number of layers in each stage. num_heads: number of heads in each stage. drop_rate: dropout rate. mlp_ratio: MLP ratio. qkv_bias: bool argument for query, key, value learnable bias. qk_scale: bool argument to scaling query, key. attention_dropout: attention dropout rate. path_drop: drop path rate. layer_scale: layer scaling coefficient. num_classes: number of classes. head_activation: activation function for head. """ def __init__( self, window_size, embed_dim, depths, num_heads, drop_rate=0.0, mlp_ratio=3.0, qkv_bias=True, qk_scale=None, attention_dropout=0.0, path_drop=0.1, layer_scale=None, num_classes=1000, head_activation="softmax", **kwargs, ): super().__init__(**kwargs) self.window_size = window_size self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.drop_rate = drop_rate self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.attention_dropout = attention_dropout self.path_drop = path_drop self.layer_scale = layer_scale self.num_classes = num_classes self.head_activation = head_activation self.patch_embed = PatchEmbed(embed_dim=embed_dim, name="patch_embed") self.pos_drop = layers.Dropout(drop_rate, name="pos_drop") path_drops = np.linspace(0.0, path_drop, sum(depths)) keepdims = [(0, 0, 0), (0, 0), (1,), (1,)] self.levels = [] for i in range(len(depths)): path_drop = path_drops[sum(depths[:i]) : sum(depths[: i + 1])].tolist() level = Level( depth=depths[i], num_heads=num_heads[i], window_size=window_size[i], keepdims=keepdims[i], downsample=(i < len(depths) - 1), mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, dropout=drop_rate, attention_dropout=attention_dropout, path_drop=path_drop, layer_scale=layer_scale, name=f"levels_{i}", ) self.levels.append(level) self.norm = layers.LayerNormalization(axis=-1, epsilon=1e-05, name="norm") self.pool = layers.GlobalAvgPool2D(name="pool") self.head = layers.Dense(num_classes, name="head", activation=head_activation) def build(self, input_shape): super().build(input_shape) self.built = True def call(self, inputs, **kwargs): x = self.patch_embed(inputs) # shape: (B, H, W, C) x = self.pos_drop(x) for level in self.levels: x = level(x) # shape: (B, H_, W_, C_) x = self.norm(x) x = self.pool(x) # shape: (B, C__) x = self.head(x) return x def build_graph(self, input_shape=(224, 224, 3)): """ ref: https://www.kaggle.com/code/ipythonx/tf-hybrid-efficientnet-swin-transformer-gradcam """ x = keras.Input(shape=input_shape) return keras.Model(inputs=[x], outputs=self.call(x), name=self.name) def summary(self, input_shape=(224, 224, 3)): return self.build_graph(input_shape).summary()<jupyter_output><empty_output><jupyter_text>Build Model* Let's build a complete model with all the modules that we've explained above. 
We'llbuild **GCViT-XXTiny** model with the configuration mentioned in the paper.* Also we'll load the ported official **pre-trained** weights and try for somepredictions.<jupyter_code># Model Configs config = { "window_size": (7, 7, 14, 7), "embed_dim": 64, "depths": (2, 2, 6, 2), "num_heads": (2, 4, 8, 16), "mlp_ratio": 3.0, "path_drop": 0.2, } ckpt_link = ( "https://github.com/awsaf49/gcvit-tf/releases/download/v1.1.6/gcvitxxtiny.keras" ) # Build Model model = GCViT(**config) inp = ops.array(np.random.uniform(size=(1, 224, 224, 3))) out = model(inp) # Load Weights ckpt_path = keras.utils.get_file(ckpt_link.split("/")[-1], ckpt_link) model.load_weights(ckpt_path) # Summary model.summary((224, 224, 3))<jupyter_output><empty_output><jupyter_text>Sanity check for Pre-Trained Weights<jupyter_code>img = keras.applications.imagenet_utils.preprocess_input( chelsea(), mode="torch" ) # Chelsea the cat img = ops.image.resize(img, (224, 224))[None,] # resize & create batch pred = model(img) pred_dec = keras.applications.imagenet_utils.decode_predictions(pred)[0] print("\n# Image:") plt.figure(figsize=(6, 6)) plt.imshow(chelsea()) plt.show() print() print("# Prediction (Top 5):") for i in range(5): print("{:<12} : {:0.2f}".format(pred_dec[i][1], pred_dec[i][2]))<jupyter_output><empty_output><jupyter_text>Fine-tune **GCViT** ModelIn the following cells, we will fine-tune **GCViT** model on Flower Dataset whichconsists `104` classes. Configs<jupyter_code># Model IMAGE_SIZE = (224, 224) # Hyper Params BATCH_SIZE = 32 EPOCHS = 5 # Dataset CLASSES = [ "dandelion", "daisy", "tulips", "sunflowers", "roses", ] # don't change the order # Other constants MEAN = 255 * np.array([0.485, 0.456, 0.406], dtype="float32") # imagenet mean STD = 255 * np.array([0.229, 0.224, 0.225], dtype="float32") # imagenet std AUTO = tf.data.AUTOTUNE<jupyter_output><empty_output><jupyter_text>Data Loader<jupyter_code>def make_dataset(dataset: tf.data.Dataset, train: bool, image_size: int = IMAGE_SIZE): def preprocess(image, label): # for training, do augmentation if train: if tf.random.uniform(shape=[]) > 0.5: image = tf.image.flip_left_right(image) image = tf.image.resize(image, size=image_size, method="bicubic") image = (image - MEAN) / STD # normalization return image, label if train: dataset = dataset.shuffle(BATCH_SIZE * 10) return dataset.map(preprocess, AUTO).batch(BATCH_SIZE).prefetch(AUTO)<jupyter_output><empty_output><jupyter_text>Flower Dataset<jupyter_code>train_dataset, val_dataset = tfds.load( "tf_flowers", split=["train[:90%]", "train[90%:]"], as_supervised=True, try_gcs=False, # gcs_path is necessary for tpu, ) train_dataset = make_dataset(train_dataset, True) val_dataset = make_dataset(val_dataset, False)<jupyter_output><empty_output><jupyter_text>Re-Build Model for Flower Dataset<jupyter_code># Re-Build Model model = GCViT(**config, num_classes=104) inp = ops.array(np.random.uniform(size=(1, 224, 224, 3))) out = model(inp) # Load Weights ckpt_path = keras.utils.get_file(ckpt_link.split("/")[-1], ckpt_link) model.load_weights(ckpt_path, skip_mismatch=True) model.compile( loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"] )<jupyter_output><empty_output><jupyter_text>Training<jupyter_code>history = model.fit( train_dataset, validation_data=val_dataset, epochs=EPOCHS, verbose=1 )<jupyter_output><empty_output>
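As an illustrative follow-up (not part of the original notebook), the sketch below evaluates the fine-tuned model on the validation split and inspects the predicted class indices for a single batch. It only uses objects defined above (`model`, `val_dataset`); no new layers or weights are assumed.

```python
# Illustrative sketch (not part of the original notebook): evaluate the
# fine-tuned GCViT model and look at predictions for one validation batch.
val_loss, val_acc = model.evaluate(val_dataset, verbose=0)
print(f"Validation loss: {val_loss:.4f} | Validation accuracy: {val_acc:.4f}")

images, labels = next(iter(val_dataset))
probs = model.predict(images, verbose=0)  # shape: (batch, num_classes), softmax output
preds = probs.argmax(axis=-1)
print("True labels     :", labels.numpy()[:8])
print("Predicted labels:", preds[:8])
```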
keras-io/examples/vision/ipynb/image_classification_using_global_context_vision_transformer.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/image_classification_using_global_context_vision_transformer.ipynb", "repo_id": "keras-io", "token_count": 17514 }
111
<jupyter_start><jupyter_text>3D volumetric rendering with NeRF**Authors:** [Aritra Roy Gosthipaty](https://twitter.com/arig23498), [Ritwik Raha](https://twitter.com/ritwik_raha)**Date created:** 2021/08/09**Last modified:** 2023/11/13**Description:** Minimal implementation of volumetric rendering as shown in NeRF. IntroductionIn this example, we present a minimal implementation of the research paper[**NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis**](https://arxiv.org/abs/2003.08934)by Ben Mildenhall et. al. The authors have proposed an ingenious wayto *synthesize novel views of a scene* by modelling the *volumetricscene function* through a neural network.To help you understand this intuitively, let's start with the following question:*would it be possible to give to a neuralnetwork the position of a pixel in an image, and ask the networkto predict the color at that position?*| || :---: || **Figure 1**: A neural network being given coordinates of an imageas input and asked to predict the color at the coordinates. |The neural network would hypothetically *memorize* (overfit on) theimage. This means that our neural network would have encoded the entire imagein its weights. We could query the neural network with each position,and it would eventually reconstruct the entire image.| || :---: || **Figure 2**: The trained neural network recreates the image from scratch. |A question now arises, how do we extend this idea to learn a 3Dvolumetric scene? Implementing a similar process as above wouldrequire the knowledge of every voxel (volume pixel). Turns out, thisis quite a challenging task to do.The authors of the paper propose a minimal and elegant way to learn a3D scene using a few images of the scene. They discard the use ofvoxels for training. The network learns to model the volumetric scene,thus generating novel views (images) of the 3D scene that the modelwas not shown at training time.There are a few prerequisites one needs to understand to fullyappreciate the process. We structure the example in such a way thatyou will have all the required knowledge before starting theimplementation. Setup<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" # Setting random seed to obtain reproducible results. import tensorflow as tf tf.random.set_seed(42) import keras from keras import layers import os import glob import imageio.v2 as imageio import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt # Initialize global variables. AUTO = tf.data.AUTOTUNE BATCH_SIZE = 5 NUM_SAMPLES = 32 POS_ENCODE_DIMS = 16 EPOCHS = 20<jupyter_output><empty_output><jupyter_text>Download and load the dataThe `npz` data file contains images, camera poses, and a focal length.The images are taken from multiple camera angles as shown in**Figure 3**.| || :---: || **Figure 3**: Multiple camera angles [Source: NeRF](https://arxiv.org/abs/2003.08934) |To understand camera poses in this context we have to first allowourselves to think that a *camera is a mapping between the real-worldand the 2-D image*.| || :---: || **Figure 4**: 3-D world to 2-D image mapping through a camera [Source: Mathworks](https://www.mathworks.com/help/vision/ug/camera-calibration.html) |Consider the following equation:Where **x** is the 2-D image point, **X** is the 3-D world point and**P** is the camera-matrix. 
**P** is a 3 x 4 matrix that plays thecrucial role of mapping the real world object onto an image plane.The camera-matrix is an *affine transform matrix* that isconcatenated with a 3 x 1 column `[image height, image width, focal length]`to produce the *pose matrix*. This matrix is ofdimensions 3 x 5 where the first 3 x 3 block is in the camera’s pointof view. The axes are `[down, right, backwards]` or `[-y, x, z]`where the camera is facing forwards `-z`.| || :---: || **Figure 5**: The affine transformation. |The COLMAP frame is `[right, down, forwards]` or `[x, -y, -z]`. Readmore about COLMAP [here](https://colmap.github.io/).<jupyter_code># Download the data if it does not already exist. url = ( "http://cseweb.ucsd.edu/~viscomp/projects/LF/papers/ECCV20/nerf/tiny_nerf_data.npz" ) data = keras.utils.get_file(origin=url) data = np.load(data) images = data["images"] im_shape = images.shape (num_images, H, W, _) = images.shape (poses, focal) = (data["poses"], data["focal"]) # Plot a random image from the dataset for visualization. plt.imshow(images[np.random.randint(low=0, high=num_images)]) plt.show()<jupyter_output><empty_output><jupyter_text>Data pipelineNow that you've understood the notion of camera matrixand the mapping from a 3D scene to 2D images,let's talk about the inverse mapping, i.e. from 2D image to the 3D scene.We'll need to talk about volumetric rendering with ray casting and tracing,which are common computer graphics techniques.This section will help you get to speed with these techniques.Consider an image with `N` pixels. We shoot a ray through each pixeland sample some points on the ray. A ray is commonly parameterized bythe equation `r(t) = o + td` where `t` is the parameter, `o` is theorigin and `d` is the unit directional vector as shown in **Figure 6**.| || :---: || **Figure 6**: `r(t) = o + td` where t is 3 |In **Figure 7**, we consider a ray, and we sample some random points onthe ray. These sample points each have a unique location `(x, y, z)`and the ray has a viewing angle `(theta, phi)`. The viewing angle isparticularly interesting as we can shoot a ray through a single pixelin a lot of different ways, each with a unique viewing angle. Anotherinteresting thing to notice here is the noise that is added to thesampling process. We add a uniform noise to each sample so that thesamples correspond to a continuous distribution. In **Figure 7** theblue points are the evenly distributed samples and the white points`(t1, t2, t3)` are randomly placed between the samples.| || :---: || **Figure 7**: Sampling the points from a ray. |**Figure 8** showcases the entire sampling process in 3D, where youcan see the rays coming out of the white image. This means that eachpixel will have its corresponding rays and each ray will be sampled atdistinct points.| || :---: || **Figure 8**: Shooting rays from all the pixels of an image in 3-D |These sampled points act as the input to the NeRF model. The model isthen asked to predict the RGB color and the volume density at thatpoint.| || :---: || **Figure 9**: Data pipeline [Source: NeRF](https://arxiv.org/abs/2003.08934) |<jupyter_code>def encode_position(x): """Encodes the position into its corresponding Fourier feature. Args: x: The input coordinate. Returns: Fourier features tensors of the position. 
""" positions = [x] for i in range(POS_ENCODE_DIMS): for fn in [tf.sin, tf.cos]: positions.append(fn(2.0**i * x)) return tf.concat(positions, axis=-1) def get_rays(height, width, focal, pose): """Computes origin point and direction vector of rays. Args: height: Height of the image. width: Width of the image. focal: The focal length between the images and the camera. pose: The pose matrix of the camera. Returns: Tuple of origin point and direction vector for rays. """ # Build a meshgrid for the rays. i, j = tf.meshgrid( tf.range(width, dtype=tf.float32), tf.range(height, dtype=tf.float32), indexing="xy", ) # Normalize the x axis coordinates. transformed_i = (i - width * 0.5) / focal # Normalize the y axis coordinates. transformed_j = (j - height * 0.5) / focal # Create the direction unit vectors. directions = tf.stack([transformed_i, -transformed_j, -tf.ones_like(i)], axis=-1) # Get the camera matrix. camera_matrix = pose[:3, :3] height_width_focal = pose[:3, -1] # Get origins and directions for the rays. transformed_dirs = directions[..., None, :] camera_dirs = transformed_dirs * camera_matrix ray_directions = tf.reduce_sum(camera_dirs, axis=-1) ray_origins = tf.broadcast_to(height_width_focal, tf.shape(ray_directions)) # Return the origins and directions. return (ray_origins, ray_directions) def render_flat_rays(ray_origins, ray_directions, near, far, num_samples, rand=False): """Renders the rays and flattens it. Args: ray_origins: The origin points for rays. ray_directions: The direction unit vectors for the rays. near: The near bound of the volumetric scene. far: The far bound of the volumetric scene. num_samples: Number of sample points in a ray. rand: Choice for randomising the sampling strategy. Returns: Tuple of flattened rays and sample points on each rays. """ # Compute 3D query points. # Equation: r(t) = o+td -> Building the "t" here. t_vals = tf.linspace(near, far, num_samples) if rand: # Inject uniform noise into sample space to make the sampling # continuous. shape = list(ray_origins.shape[:-1]) + [num_samples] noise = tf.random.uniform(shape=shape) * (far - near) / num_samples t_vals = t_vals + noise # Equation: r(t) = o + td -> Building the "r" here. rays = ray_origins[..., None, :] + ( ray_directions[..., None, :] * t_vals[..., None] ) rays_flat = tf.reshape(rays, [-1, 3]) rays_flat = encode_position(rays_flat) return (rays_flat, t_vals) def map_fn(pose): """Maps individual pose to flattened rays and sample points. Args: pose: The pose matrix of the camera. Returns: Tuple of flattened rays and sample points corresponding to the camera pose. """ (ray_origins, ray_directions) = get_rays(height=H, width=W, focal=focal, pose=pose) (rays_flat, t_vals) = render_flat_rays( ray_origins=ray_origins, ray_directions=ray_directions, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=True, ) return (rays_flat, t_vals) # Create the training split. split_index = int(num_images * 0.8) # Split the images into training and validation. train_images = images[:split_index] val_images = images[split_index:] # Split the poses into training and validation. train_poses = poses[:split_index] val_poses = poses[split_index:] # Make the training pipeline. 
train_img_ds = tf.data.Dataset.from_tensor_slices(train_images) train_pose_ds = tf.data.Dataset.from_tensor_slices(train_poses) train_ray_ds = train_pose_ds.map(map_fn, num_parallel_calls=AUTO) training_ds = tf.data.Dataset.zip((train_img_ds, train_ray_ds)) train_ds = ( training_ds.shuffle(BATCH_SIZE) .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO) .prefetch(AUTO) ) # Make the validation pipeline. val_img_ds = tf.data.Dataset.from_tensor_slices(val_images) val_pose_ds = tf.data.Dataset.from_tensor_slices(val_poses) val_ray_ds = val_pose_ds.map(map_fn, num_parallel_calls=AUTO) validation_ds = tf.data.Dataset.zip((val_img_ds, val_ray_ds)) val_ds = ( validation_ds.shuffle(BATCH_SIZE) .batch(BATCH_SIZE, drop_remainder=True, num_parallel_calls=AUTO) .prefetch(AUTO) )<jupyter_output><empty_output><jupyter_text>NeRF modelThe model is a multi-layer perceptron (MLP), with ReLU as its non-linearity.An excerpt from the paper:*"We encourage the representation to be multiview-consistent byrestricting the network to predict the volume density sigma as afunction of only the location `x`, while allowing the RGB color `c` to bepredicted as a function of both location and viewing direction. Toaccomplish this, the MLP first processes the input 3D coordinate `x`with 8 fully-connected layers (using ReLU activations and 256 channelsper layer), and outputs sigma and a 256-dimensional feature vector.This feature vector is then concatenated with the camera ray's viewingdirection and passed to one additional fully-connected layer (using aReLU activation and 128 channels) that output the view-dependent RGBcolor."*Here we have gone for a minimal implementation and have used 64Dense units instead of 256 as mentioned in the paper.<jupyter_code>def get_nerf_model(num_layers, num_pos): """Generates the NeRF neural network. Args: num_layers: The number of MLP layers. num_pos: The number of dimensions of positional encoding. Returns: The `keras` model. """ inputs = keras.Input(shape=(num_pos, 2 * 3 * POS_ENCODE_DIMS + 3)) x = inputs for i in range(num_layers): x = layers.Dense(units=64, activation="relu")(x) if i % 4 == 0 and i > 0: # Inject residual connection. x = layers.concatenate([x, inputs], axis=-1) outputs = layers.Dense(units=4)(x) return keras.Model(inputs=inputs, outputs=outputs) def render_rgb_depth(model, rays_flat, t_vals, rand=True, train=True): """Generates the RGB image and depth map from model prediction. Args: model: The MLP model that is trained to predict the rgb and volume density of the volumetric scene. rays_flat: The flattened rays that serve as the input to the NeRF model. t_vals: The sample points for the rays. rand: Choice to randomise the sampling strategy. train: Whether the model is in the training or testing phase. Returns: Tuple of rgb image and depth map. """ # Get the predictions from the nerf model and reshape it. if train: predictions = model(rays_flat) else: predictions = model.predict(rays_flat) predictions = tf.reshape(predictions, shape=(BATCH_SIZE, H, W, NUM_SAMPLES, 4)) # Slice the predictions into rgb and sigma. rgb = tf.sigmoid(predictions[..., :-1]) sigma_a = tf.nn.relu(predictions[..., -1]) # Get the distance of adjacent intervals. 
delta = t_vals[..., 1:] - t_vals[..., :-1] # delta shape = (num_samples) if rand: delta = tf.concat( [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, H, W, 1))], axis=-1 ) alpha = 1.0 - tf.exp(-sigma_a * delta) else: delta = tf.concat( [delta, tf.broadcast_to([1e10], shape=(BATCH_SIZE, 1))], axis=-1 ) alpha = 1.0 - tf.exp(-sigma_a * delta[:, None, None, :]) # Get transmittance. exp_term = 1.0 - alpha epsilon = 1e-10 transmittance = tf.math.cumprod(exp_term + epsilon, axis=-1, exclusive=True) weights = alpha * transmittance rgb = tf.reduce_sum(weights[..., None] * rgb, axis=-2) if rand: depth_map = tf.reduce_sum(weights * t_vals, axis=-1) else: depth_map = tf.reduce_sum(weights * t_vals[:, None, None], axis=-1) return (rgb, depth_map)<jupyter_output><empty_output><jupyter_text>TrainingThe training step is implemented as part of a custom `keras.Model` subclassso that we can make use of the `model.fit` functionality.<jupyter_code>class NeRF(keras.Model): def __init__(self, nerf_model): super().__init__() self.nerf_model = nerf_model def compile(self, optimizer, loss_fn): super().compile() self.optimizer = optimizer self.loss_fn = loss_fn self.loss_tracker = keras.metrics.Mean(name="loss") self.psnr_metric = keras.metrics.Mean(name="psnr") def train_step(self, inputs): # Get the images and the rays. (images, rays) = inputs (rays_flat, t_vals) = rays with tf.GradientTape() as tape: # Get the predictions from the model. rgb, _ = render_rgb_depth( model=self.nerf_model, rays_flat=rays_flat, t_vals=t_vals, rand=True ) loss = self.loss_fn(images, rgb) # Get the trainable variables. trainable_variables = self.nerf_model.trainable_variables # Get the gradeints of the trainiable variables with respect to the loss. gradients = tape.gradient(loss, trainable_variables) # Apply the grads and optimize the model. self.optimizer.apply_gradients(zip(gradients, trainable_variables)) # Get the PSNR of the reconstructed images and the source images. psnr = tf.image.psnr(images, rgb, max_val=1.0) # Compute our own metrics self.loss_tracker.update_state(loss) self.psnr_metric.update_state(psnr) return {"loss": self.loss_tracker.result(), "psnr": self.psnr_metric.result()} def test_step(self, inputs): # Get the images and the rays. (images, rays) = inputs (rays_flat, t_vals) = rays # Get the predictions from the model. rgb, _ = render_rgb_depth( model=self.nerf_model, rays_flat=rays_flat, t_vals=t_vals, rand=True ) loss = self.loss_fn(images, rgb) # Get the PSNR of the reconstructed images and the source images. psnr = tf.image.psnr(images, rgb, max_val=1.0) # Compute our own metrics self.loss_tracker.update_state(loss) self.psnr_metric.update_state(psnr) return {"loss": self.loss_tracker.result(), "psnr": self.psnr_metric.result()} @property def metrics(self): return [self.loss_tracker, self.psnr_metric] test_imgs, test_rays = next(iter(train_ds)) test_rays_flat, test_t_vals = test_rays loss_list = [] class TrainMonitor(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs=None): loss = logs["loss"] loss_list.append(loss) test_recons_images, depth_maps = render_rgb_depth( model=self.model.nerf_model, rays_flat=test_rays_flat, t_vals=test_t_vals, rand=True, train=False, ) # Plot the rgb, depth and the loss plot. 
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(20, 5)) ax[0].imshow(keras.utils.array_to_img(test_recons_images[0])) ax[0].set_title(f"Predicted Image: {epoch:03d}") ax[1].imshow(keras.utils.array_to_img(depth_maps[0, ..., None])) ax[1].set_title(f"Depth Map: {epoch:03d}") ax[2].plot(loss_list) ax[2].set_xticks(np.arange(0, EPOCHS + 1, 5.0)) ax[2].set_title(f"Loss Plot: {epoch:03d}") fig.savefig(f"images/{epoch:03d}.png") plt.show() plt.close() num_pos = H * W * NUM_SAMPLES nerf_model = get_nerf_model(num_layers=8, num_pos=num_pos) model = NeRF(nerf_model) model.compile( optimizer=keras.optimizers.Adam(), loss_fn=keras.losses.MeanSquaredError() ) # Create a directory to save the images during training. if not os.path.exists("images"): os.makedirs("images") model.fit( train_ds, validation_data=val_ds, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=[TrainMonitor()], ) def create_gif(path_to_images, name_gif): filenames = glob.glob(path_to_images) filenames = sorted(filenames) images = [] for filename in tqdm(filenames): images.append(imageio.imread(filename)) kargs = {"duration": 0.25} imageio.mimsave(name_gif, images, "GIF", **kargs) create_gif("images/*.png", "training.gif")<jupyter_output><empty_output><jupyter_text>Visualize the training stepHere we see the training step. With the decreasing loss, the renderedimage and the depth maps are getting better. In your local system, youwill see the `training.gif` file generated. InferenceIn this section, we ask the model to build novel views of the scene.The model was given `106` views of the scene in the training step. Thecollections of training images cannot contain each and every angle ofthe scene. A trained model can represent the entire 3-D scene with asparse set of training images.Here we provide different poses to the model and ask for it to give usthe 2-D image corresponding to that camera view. If we infer the modelfor all the 360-degree views, it should provide an overview of theentire scenery from all around.<jupyter_code># Get the trained NeRF model and infer. nerf_model = model.nerf_model test_recons_images, depth_maps = render_rgb_depth( model=nerf_model, rays_flat=test_rays_flat, t_vals=test_t_vals, rand=True, train=False, ) # Create subplots. 
fig, axes = plt.subplots(nrows=5, ncols=3, figsize=(10, 20)) for ax, ori_img, recons_img, depth_map in zip( axes, test_imgs, test_recons_images, depth_maps ): ax[0].imshow(keras.utils.array_to_img(ori_img)) ax[0].set_title("Original") ax[1].imshow(keras.utils.array_to_img(recons_img)) ax[1].set_title("Reconstructed") ax[2].imshow(keras.utils.array_to_img(depth_map[..., None]), cmap="inferno") ax[2].set_title("Depth Map")<jupyter_output><empty_output><jupyter_text>Render 3D SceneHere we will synthesize novel 3D views and stitch all of them togetherto render a video encompassing the 360-degree view.<jupyter_code>def get_translation_t(t): """Get the translation matrix for movement in t.""" matrix = [ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def get_rotation_phi(phi): """Get the rotation matrix for movement in phi.""" matrix = [ [1, 0, 0, 0], [0, tf.cos(phi), -tf.sin(phi), 0], [0, tf.sin(phi), tf.cos(phi), 0], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def get_rotation_theta(theta): """Get the rotation matrix for movement in theta.""" matrix = [ [tf.cos(theta), 0, -tf.sin(theta), 0], [0, 1, 0, 0], [tf.sin(theta), 0, tf.cos(theta), 0], [0, 0, 0, 1], ] return tf.convert_to_tensor(matrix, dtype=tf.float32) def pose_spherical(theta, phi, t): """ Get the camera to world matrix for the corresponding theta, phi and t. """ c2w = get_translation_t(t) c2w = get_rotation_phi(phi / 180.0 * np.pi) @ c2w c2w = get_rotation_theta(theta / 180.0 * np.pi) @ c2w c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w return c2w rgb_frames = [] batch_flat = [] batch_t = [] # Iterate over different theta value and generate scenes. for index, theta in tqdm(enumerate(np.linspace(0.0, 360.0, 120, endpoint=False))): # Get the camera to world matrix. c2w = pose_spherical(theta, -30.0, 4.0) # ray_oris, ray_dirs = get_rays(H, W, focal, c2w) rays_flat, t_vals = render_flat_rays( ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False ) if index % BATCH_SIZE == 0 and index > 0: batched_flat = tf.stack(batch_flat, axis=0) batch_flat = [rays_flat] batched_t = tf.stack(batch_t, axis=0) batch_t = [t_vals] rgb, _ = render_rgb_depth( nerf_model, batched_flat, batched_t, rand=False, train=False ) temp_rgb = [np.clip(255 * img, 0.0, 255.0).astype(np.uint8) for img in rgb] rgb_frames = rgb_frames + temp_rgb else: batch_flat.append(rays_flat) batch_t.append(t_vals) rgb_video = "rgb_video.mp4" imageio.mimwrite(rgb_video, rgb_frames, fps=30, quality=7, macro_block_size=None)<jupyter_output><empty_output>
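To make the compositing performed inside `render_rgb_depth` easier to follow, here is a small self-contained toy illustration (not from the original example) of how densities sampled along a single ray become a pixel color and a depth value. All densities and colors below are made up purely for demonstration.

```python
# Toy illustration of volume rendering along one ray (made-up numbers).
import numpy as np

t_vals = np.linspace(2.0, 6.0, 8)  # sample depths along the ray
sigma = np.array([0.0, 0.0, 0.5, 2.0, 3.0, 1.0, 0.2, 0.0])  # made-up densities
colors = np.linspace(0.2, 0.9, 8)[:, None] * np.ones((8, 3))  # made-up RGB per sample

delta = np.append(t_vals[1:] - t_vals[:-1], 1e10)  # distances between samples
alpha = 1.0 - np.exp(-sigma * delta)  # opacity contributed by each interval
# Exclusive cumulative product: fraction of light that survives up to each sample.
transmittance = np.cumprod(np.append(1.0, 1.0 - alpha[:-1] + 1e-10))
weights = alpha * transmittance  # contribution of each sample to the pixel

rgb = (weights[:, None] * colors).sum(axis=0)  # composited pixel color
depth = (weights * t_vals).sum()  # expected termination depth of the ray
print("pixel rgb:", rgb, " depth:", depth)
```

The same computation, batched over every ray of every image, is what `render_rgb_depth` performs with `tf.math.cumprod(..., exclusive=True)`.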
keras-io/examples/vision/ipynb/nerf.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/nerf.ipynb", "repo_id": "keras-io", "token_count": 8782 }
112
<jupyter_start><jupyter_text>Semi-supervised image classification using contrastive pretraining with SimCLR**Author:** [András Béres](https://www.linkedin.com/in/andras-beres-789190210)**Date created:** 2021/04/24**Last modified:** 2021/04/24**Description:** Contrastive pretraining with SimCLR for semi-supervised image classification on the STL-10 dataset. Introduction Semi-supervised learningSemi-supervised learning is a machine learning paradigm that deals with**partially labeled datasets**. When applying deep learning in the real world,one usually has to gather a large dataset to make it work well. However, whilethe cost of labeling scales linearly with the dataset size (labeling eachexample takes a constant time), model performance only scales[sublinearly](https://arxiv.org/abs/2001.08361) with it. This means thatlabeling more and more samples becomes less and less cost-efficient, whilegathering unlabeled data is generally cheap, as it is usually readily availablein large quantities.Semi-supervised learning offers to solve this problem by only requiring apartially labeled dataset, and by being label-efficient by utilizing theunlabeled examples for learning as well.In this example, we will pretrain an encoder with contrastive learning on the[STL-10](https://ai.stanford.edu/~acoates/stl10/) semi-supervised dataset usingno labels at all, and then fine-tune it using only its labeled subset. Contrastive learningOn the highest level, the main idea behind contrastive learning is to **learnrepresentations that are invariant to image augmentations** in a self-supervisedmanner. One problem with this objective is that it has a trivial degeneratesolution: the case where the representations are constant, and do not depend at all on theinput images.Contrastive learning avoids this trap by modifying the objective in thefollowing way: it pulls representations of augmented versions/views of the sameimage closer to each other (contracting positives), while simultaneously pushingdifferent images away from each other (contrasting negatives) in representationspace.One such contrastive approach is [SimCLR](https://arxiv.org/abs/2002.05709),which essentially identifies the core components needed to optimize thisobjective, and can achieve high performance by scaling this simple approach.Another approach is [SimSiam](https://arxiv.org/abs/2011.10566)([Keras example](https://keras.io/examples/vision/simsiam/)),whose main difference fromSimCLR is that the former does not use any negatives in its loss. Therefore, it does notexplicitly prevent the trivial solution, and, instead, avoids it implicitly byarchitecture design (asymmetric encoding paths using a predictor network andbatch normalization (BatchNorm) are applied in the final layers).For further reading about SimCLR, check out[the official Google AI blog post](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html),and for an overview of self-supervised learning across both vision and languagecheck out[this blog post](https://ai.facebook.com/blog/self-supervised-learning-the-dark-matter-of-intelligence/). 
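Before moving on to the implementation, the toy sketch below (not part of the original example) shows the contrastive objective in plain NumPy: after l2-normalization, each image's two augmented views should be more similar to each other than to any other image in the batch, so the "correct class" for row `i` of the similarity matrix is column `i`.

```python
# Toy illustration of the contrastive (NT-Xent-style) objective with made-up embeddings.
import numpy as np

rng = np.random.default_rng(0)
z1 = rng.normal(size=(4, 8))  # view-1 embeddings of 4 images
z2 = z1 + 0.1 * rng.normal(size=(4, 8))  # view-2 embeddings: slightly perturbed copies

# l2-normalize, then compute temperature-scaled cosine similarities
z1 /= np.linalg.norm(z1, axis=1, keepdims=True)
z2 /= np.linalg.norm(z2, axis=1, keepdims=True)
logits = z1 @ z2.T / 0.1  # temperature = 0.1

# Cross-entropy where each image's positive is its own other view (the diagonal)
labels = np.arange(4)
log_probs = logits - np.log(np.exp(logits).sum(axis=1, keepdims=True))
loss = -log_probs[labels, labels].mean()
print("toy contrastive loss:", loss)
```

The actual implementation later in this example symmetrizes the loss over both views and tracks a contrastive accuracy metric, but the core computation is the same.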
Setup<jupyter_code>import os os.environ["KERAS_BACKEND"] = "tensorflow" # Make sure we are able to handle large datasets import resource low, high = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (high, high)) import math import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_datasets as tfds import keras from keras import layers<jupyter_output><empty_output><jupyter_text>Hyperparameter setup<jupyter_code># Dataset hyperparameters unlabeled_dataset_size = 100000 labeled_dataset_size = 5000 image_channels = 3 # Algorithm hyperparameters num_epochs = 20 batch_size = 525 # Corresponds to 200 steps per epoch width = 128 temperature = 0.1 # Stronger augmentations for contrastive, weaker ones for supervised training contrastive_augmentation = {"min_area": 0.25, "brightness": 0.6, "jitter": 0.2} classification_augmentation = { "min_area": 0.75, "brightness": 0.3, "jitter": 0.1, }<jupyter_output><empty_output><jupyter_text>DatasetDuring training we will simultaneously load a large batch of unlabeled images along with asmaller batch of labeled images.<jupyter_code>def prepare_dataset(): # Labeled and unlabeled samples are loaded synchronously # with batch sizes selected accordingly steps_per_epoch = (unlabeled_dataset_size + labeled_dataset_size) // batch_size unlabeled_batch_size = unlabeled_dataset_size // steps_per_epoch labeled_batch_size = labeled_dataset_size // steps_per_epoch print( f"batch size is {unlabeled_batch_size} (unlabeled) + {labeled_batch_size} (labeled)" ) # Turning off shuffle to lower resource usage unlabeled_train_dataset = ( tfds.load("stl10", split="unlabelled", as_supervised=True, shuffle_files=False) .shuffle(buffer_size=10 * unlabeled_batch_size) .batch(unlabeled_batch_size) ) labeled_train_dataset = ( tfds.load("stl10", split="train", as_supervised=True, shuffle_files=False) .shuffle(buffer_size=10 * labeled_batch_size) .batch(labeled_batch_size) ) test_dataset = ( tfds.load("stl10", split="test", as_supervised=True) .batch(batch_size) .prefetch(buffer_size=tf.data.AUTOTUNE) ) # Labeled and unlabeled datasets are zipped together train_dataset = tf.data.Dataset.zip( (unlabeled_train_dataset, labeled_train_dataset) ).prefetch(buffer_size=tf.data.AUTOTUNE) return train_dataset, labeled_train_dataset, test_dataset # Load STL10 dataset train_dataset, labeled_train_dataset, test_dataset = prepare_dataset()<jupyter_output><empty_output><jupyter_text>Image augmentationsThe two most important image augmentations for contrastive learning are thefollowing:- Cropping: forces the model to encode different parts of the same imagesimilarly, we implement it with the[RandomTranslation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_translation/)and[RandomZoom](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_zoom/)layers- Color jitter: prevents a trivial color histogram-based solution to the task bydistorting color histograms. A principled way to implement that is by affinetransformations in color space.In this example we use random horizontal flips as well. Stronger augmentationsare applied for contrastive learning, along with weaker ones for supervisedclassification to avoid overfitting on the few labeled examples.We implement random color jitter as a custom preprocessing layer. 
Usingpreprocessing layers for data augmentation has the following two advantages:- The data augmentation will run on GPU in batches, so the training will not bebottlenecked by the data pipeline in environments with constrained CPUresources (such as a Colab Notebook, or a personal machine)- Deployment is easier as the data preprocessing pipeline is encapsulated in themodel, and does not have to be reimplemented when deploying it<jupyter_code># Distorts the color distibutions of images class RandomColorAffine(layers.Layer): def __init__(self, brightness=0, jitter=0, **kwargs): super().__init__(**kwargs) self.brightness = brightness self.jitter = jitter def get_config(self): config = super().get_config() config.update({"brightness": self.brightness, "jitter": self.jitter}) return config def call(self, images, training=True): if training: batch_size = tf.shape(images)[0] # Same for all colors brightness_scales = 1 + tf.random.uniform( (batch_size, 1, 1, 1), minval=-self.brightness, maxval=self.brightness, ) # Different for all colors jitter_matrices = tf.random.uniform( (batch_size, 1, 3, 3), minval=-self.jitter, maxval=self.jitter ) color_transforms = ( tf.eye(3, batch_shape=[batch_size, 1]) * brightness_scales + jitter_matrices ) images = tf.clip_by_value(tf.matmul(images, color_transforms), 0, 1) return images # Image augmentation module def get_augmenter(min_area, brightness, jitter): zoom_factor = 1.0 - math.sqrt(min_area) return keras.Sequential( [ layers.Rescaling(1 / 255), layers.RandomFlip("horizontal"), layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2), layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)), RandomColorAffine(brightness, jitter), ] ) def visualize_augmentations(num_images): # Sample a batch from a dataset images = next(iter(train_dataset))[0][0][:num_images] # Apply augmentations augmented_images = zip( images, get_augmenter(**classification_augmentation)(images), get_augmenter(**contrastive_augmentation)(images), get_augmenter(**contrastive_augmentation)(images), ) row_titles = [ "Original:", "Weakly augmented:", "Strongly augmented:", "Strongly augmented:", ] plt.figure(figsize=(num_images * 2.2, 4 * 2.2), dpi=100) for column, image_row in enumerate(augmented_images): for row, image in enumerate(image_row): plt.subplot(4, num_images, row * num_images + column + 1) plt.imshow(image) if column == 0: plt.title(row_titles[row], loc="left") plt.axis("off") plt.tight_layout() visualize_augmentations(num_images=8)<jupyter_output><empty_output><jupyter_text>Encoder architecture<jupyter_code># Define the encoder architecture def get_encoder(): return keras.Sequential( [ layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"), layers.Flatten(), layers.Dense(width, activation="relu"), ], name="encoder", )<jupyter_output><empty_output><jupyter_text>Supervised baseline modelA baseline supervised model is trained using random initialization.<jupyter_code># Baseline supervised training with random initialization baseline_model = keras.Sequential( [ get_augmenter(**classification_augmentation), get_encoder(), layers.Dense(10), ], name="baseline_model", ) baseline_model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")], ) baseline_history = 
baseline_model.fit( labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(baseline_history.history["val_acc"]) * 100 ) )<jupyter_output><empty_output><jupyter_text>Self-supervised model for contrastive pretrainingWe pretrain an encoder on unlabeled images with a contrastive loss.A nonlinear projection head is attached to the top of the encoder, as itimproves the quality of representations of the encoder.We use the InfoNCE/NT-Xent/N-pairs loss, which can be interpreted in thefollowing way:1. We treat each image in the batch as if it had its own class.2. Then, we have two examples (a pair of augmented views) for each "class".3. Each view's representation is compared to every possible pair's one (for both augmented versions).4. We use the temperature-scaled cosine similarity of compared representations as logits.5. Finally, we use categorical cross-entropy as the "classification" lossThe following two metrics are used for monitoring the pretraining performance:- [Contrastive accuracy (SimCLR Table 5)](https://arxiv.org/abs/2002.05709):Self-supervised metric, the ratio of cases in which the representation of animage is more similar to its differently augmented version's one, than to therepresentation of any other image in the current batch. Self-supervisedmetrics can be used for hyperparameter tuning even in the case when there areno labeled examples.- [Linear probing accuracy](https://arxiv.org/abs/1603.08511): Linear probing isa popular metric to evaluate self-supervised classifiers. It is computed asthe accuracy of a logistic regression classifier trained on top of theencoder's features. In our case, this is done by training a single dense layeron top of the frozen encoder. Note that contrary to traditional approach wherethe classifier is trained after the pretraining phase, in this example wetrain it during pretraining. 
This might slightly decrease its accuracy, butthat way we can monitor its value during training, which helps withexperimentation and debugging.Another widely used supervised metric is the[KNN accuracy](https://arxiv.org/abs/1805.01978), which is the accuracy of a KNNclassifier trained on top of the encoder's features, which is not implemented inthis example.<jupyter_code># Define the contrastive model with model-subclassing class ContrastiveModel(keras.Model): def __init__(self): super().__init__() self.temperature = temperature self.contrastive_augmenter = get_augmenter(**contrastive_augmentation) self.classification_augmenter = get_augmenter(**classification_augmentation) self.encoder = get_encoder() # Non-linear MLP as projection head self.projection_head = keras.Sequential( [ keras.Input(shape=(width,)), layers.Dense(width, activation="relu"), layers.Dense(width), ], name="projection_head", ) # Single dense layer for linear probing self.linear_probe = keras.Sequential( [layers.Input(shape=(width,)), layers.Dense(10)], name="linear_probe", ) self.encoder.summary() self.projection_head.summary() self.linear_probe.summary() def compile(self, contrastive_optimizer, probe_optimizer, **kwargs): super().compile(**kwargs) self.contrastive_optimizer = contrastive_optimizer self.probe_optimizer = probe_optimizer # self.contrastive_loss will be defined as a method self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) self.contrastive_loss_tracker = keras.metrics.Mean(name="c_loss") self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy( name="c_acc" ) self.probe_loss_tracker = keras.metrics.Mean(name="p_loss") self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy(name="p_acc") @property def metrics(self): return [ self.contrastive_loss_tracker, self.contrastive_accuracy, self.probe_loss_tracker, self.probe_accuracy, ] def contrastive_loss(self, projections_1, projections_2): # InfoNCE loss (information noise-contrastive estimation) # NT-Xent loss (normalized temperature-scaled cross entropy) # Cosine similarity: the dot product of the l2-normalized feature vectors projections_1 = tf.math.l2_normalize(projections_1, axis=1) projections_2 = tf.math.l2_normalize(projections_2, axis=1) similarities = ( tf.matmul(projections_1, projections_2, transpose_b=True) / self.temperature ) # The similarity between the representations of two augmented views of the # same image should be higher than their similarity with other views batch_size = tf.shape(projections_1)[0] contrastive_labels = tf.range(batch_size) self.contrastive_accuracy.update_state(contrastive_labels, similarities) self.contrastive_accuracy.update_state( contrastive_labels, tf.transpose(similarities) ) # The temperature-scaled similarities are used as logits for cross-entropy # a symmetrized version of the loss is used here loss_1_2 = keras.losses.sparse_categorical_crossentropy( contrastive_labels, similarities, from_logits=True ) loss_2_1 = keras.losses.sparse_categorical_crossentropy( contrastive_labels, tf.transpose(similarities), from_logits=True ) return (loss_1_2 + loss_2_1) / 2 def train_step(self, data): (unlabeled_images, _), (labeled_images, labels) = data # Both labeled and unlabeled images are used, without labels images = tf.concat((unlabeled_images, labeled_images), axis=0) # Each image is augmented twice, differently augmented_images_1 = self.contrastive_augmenter(images, training=True) augmented_images_2 = self.contrastive_augmenter(images, training=True) with tf.GradientTape() as 
tape: features_1 = self.encoder(augmented_images_1, training=True) features_2 = self.encoder(augmented_images_2, training=True) # The representations are passed through a projection mlp projections_1 = self.projection_head(features_1, training=True) projections_2 = self.projection_head(features_2, training=True) contrastive_loss = self.contrastive_loss(projections_1, projections_2) gradients = tape.gradient( contrastive_loss, self.encoder.trainable_weights + self.projection_head.trainable_weights, ) self.contrastive_optimizer.apply_gradients( zip( gradients, self.encoder.trainable_weights + self.projection_head.trainable_weights, ) ) self.contrastive_loss_tracker.update_state(contrastive_loss) # Labels are only used in evalutation for an on-the-fly logistic regression preprocessed_images = self.classification_augmenter( labeled_images, training=True ) with tf.GradientTape() as tape: # the encoder is used in inference mode here to avoid regularization # and updating the batch normalization paramers if they are used features = self.encoder(preprocessed_images, training=False) class_logits = self.linear_probe(features, training=True) probe_loss = self.probe_loss(labels, class_logits) gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights) self.probe_optimizer.apply_gradients( zip(gradients, self.linear_probe.trainable_weights) ) self.probe_loss_tracker.update_state(probe_loss) self.probe_accuracy.update_state(labels, class_logits) return {m.name: m.result() for m in self.metrics} def test_step(self, data): labeled_images, labels = data # For testing the components are used with a training=False flag preprocessed_images = self.classification_augmenter( labeled_images, training=False ) features = self.encoder(preprocessed_images, training=False) class_logits = self.linear_probe(features, training=False) probe_loss = self.probe_loss(labels, class_logits) self.probe_loss_tracker.update_state(probe_loss) self.probe_accuracy.update_state(labels, class_logits) # Only the probe metrics are logged at test time return {m.name: m.result() for m in self.metrics[2:]} # Contrastive pretraining pretraining_model = ContrastiveModel() pretraining_model.compile( contrastive_optimizer=keras.optimizers.Adam(), probe_optimizer=keras.optimizers.Adam(), ) pretraining_history = pretraining_model.fit( train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(pretraining_history.history["val_p_acc"]) * 100 ) )<jupyter_output><empty_output><jupyter_text>Supervised finetuning of the pretrained encoderWe then finetune the encoder on the labeled examples, by attachinga single randomly initalized fully connected classification layer on its top.<jupyter_code># Supervised finetuning of the pretrained encoder finetuning_model = keras.Sequential( [ get_augmenter(**classification_augmentation), pretraining_model.encoder, layers.Dense(10), ], name="finetuning_model", ) finetuning_model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")], ) finetuning_history = finetuning_model.fit( labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset ) print( "Maximal validation accuracy: {:.2f}%".format( max(finetuning_history.history["val_acc"]) * 100 ) )<jupyter_output><empty_output><jupyter_text>Comparison against the baseline<jupyter_code># The classification accuracies of the baseline and the pretraining + finetuning 
process: def plot_training_curves(pretraining_history, finetuning_history, baseline_history): for metric_key, metric_name in zip(["acc", "loss"], ["accuracy", "loss"]): plt.figure(figsize=(8, 5), dpi=100) plt.plot( baseline_history.history[f"val_{metric_key}"], label="supervised baseline", ) plt.plot( pretraining_history.history[f"val_p_{metric_key}"], label="self-supervised pretraining", ) plt.plot( finetuning_history.history[f"val_{metric_key}"], label="supervised finetuning", ) plt.legend() plt.title(f"Classification {metric_name} during training") plt.xlabel("epochs") plt.ylabel(f"validation {metric_name}") plot_training_curves(pretraining_history, finetuning_history, baseline_history)<jupyter_output><empty_output>
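The text above notes that a KNN accuracy metric is not implemented in this example. As a rough, hedged sketch (not part of the original example), one way to compute it on top of the frozen pretrained encoder is shown below. It assumes scikit-learn is available and approximates the deterministic preprocessing by simply rescaling images to `[0, 1]` instead of reusing the classification augmenter.

```python
# Illustrative KNN probe on the frozen pretrained encoder (assumes scikit-learn).
import numpy as np
from sklearn.neighbors import KNeighborsClassifier


def extract_features(dataset, encoder):
    features, labels = [], []
    for images, y in dataset:
        # Deterministic preprocessing: rescale to [0, 1], no random augmentation.
        scaled = tf.cast(images, tf.float32) / 255.0
        features.append(encoder(scaled, training=False).numpy())
        labels.append(y.numpy())
    return np.concatenate(features), np.concatenate(labels)


train_features, train_labels = extract_features(
    labeled_train_dataset, pretraining_model.encoder
)
test_features, test_labels = extract_features(test_dataset, pretraining_model.encoder)

knn = KNeighborsClassifier(n_neighbors=20)
knn.fit(train_features, train_labels)
print("KNN probe accuracy: {:.2f}%".format(knn.score(test_features, test_labels) * 100))
```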
keras-io/examples/vision/ipynb/semisupervised_simclr.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/semisupervised_simclr.ipynb", "repo_id": "keras-io", "token_count": 8117 }
113
<jupyter_start><jupyter_text>Efficient Object Detection with YOLOV8 and KerasCV**Author:** [Gitesh Chawda](https://twitter.com/gitesh12_)**Date created:** 2023/06/26**Last modified:** 2023/06/26**Description:** Train custom YOLOV8 object detection model with KerasCV. Introduction KerasCV is an extension of Keras for computer vision tasks. In this example, we'll seehow to train a YOLOV8 object detection model using KerasCV.KerasCV includes pre-trained models for popular computer vision datasets, such asImageNet, COCO, and Pascal VOC, which can be used for transfer learning. KerasCV alsoprovides a range of visualization tools for inspecting the intermediate representationslearned by the model and for visualizing the results of object detection and segmentationtasks. If you're interested in learning about object detection using KerasCV, I highly suggesttaking a look at the guide created by lukewood. This resource, available at[Object Detection With KerasCV](https://keras.io/guides/keras_cv/object_detection_keras_cv/object-detection-introduction),provides a comprehensive overview of the fundamental concepts and techniquesrequired for building object detection models with KerasCV.<jupyter_code>!pip install --upgrade git+https://github.com/keras-team/keras-cv -q<jupyter_output><empty_output><jupyter_text>Setup<jupyter_code>import os from tqdm.auto import tqdm import xml.etree.ElementTree as ET import tensorflow as tf from tensorflow import keras import keras_cv from keras_cv import bounding_box from keras_cv import visualization<jupyter_output><empty_output><jupyter_text>Load Data For this guide, we will be utilizing the Self-Driving Car Dataset obtained from[roboflow](https://public.roboflow.com/object-detection/self-driving-car). In order tomake the dataset more manageable, I have extracted a subset of the larger dataset, whichoriginally consisted of 15,000 data samples. From this subset, I have chosen 7,316samples for model training.To simplify the task at hand and focus our efforts, we will be working with a reducednumber of object classes. Specifically, we will be considering five primary classes fordetection and classification: car, pedestrian, traffic light, biker, and truck. Theseclasses represent some of the most common and significant objects encountered in thecontext of self-driving cars.By narrowing down the dataset to these specific classes, we can concentrate on building arobust object detection model that can accurately identify and classify these importantobjects. The TensorFlow Datasets library provides a convenient way to download and use variousdatasets, including the object detection dataset. This can be a great option for thosewho want to quickly start working with the data without having to manually download andpreprocess it.You can view various object detection datasets here[TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overviewobject_detection)However, in this code example, we will demonstrate how to load the dataset from scratchusing TensorFlow's `tf.data` pipeline. This approach provides more flexibility and allowsyou to customize the preprocessing steps as needed.Loading custom datasets that are not available in the TensorFlow Datasets library is oneof the main advantages of using the `tf.data` pipeline. This approach allows you tocreate a custom data preprocessing pipeline tailored to the specific needs andrequirements of your dataset. 
Hyperparameters<jupyter_code>SPLIT_RATIO = 0.2 BATCH_SIZE = 4 LEARNING_RATE = 0.001 EPOCH = 5 GLOBAL_CLIPNORM = 10.0<jupyter_output><empty_output><jupyter_text>A dictionary is created to map each class name to a unique numerical identifier. Thismapping is used to encode and decode the class labels during training and inference inobject detection tasks.<jupyter_code>class_ids = [ "car", "pedestrian", "trafficLight", "biker", "truck", ] class_mapping = dict(zip(range(len(class_ids)), class_ids)) # Path to images and annotations path_images = "/kaggle/input/dataset/data/images/" path_annot = "/kaggle/input/dataset/data/annotations/" # Get all XML file paths in path_annot and sort them xml_files = sorted( [ os.path.join(path_annot, file_name) for file_name in os.listdir(path_annot) if file_name.endswith(".xml") ] ) # Get all JPEG image file paths in path_images and sort them jpg_files = sorted( [ os.path.join(path_images, file_name) for file_name in os.listdir(path_images) if file_name.endswith(".jpg") ] )<jupyter_output><empty_output><jupyter_text>The function below reads the XML file and finds the image name and path, and theniterates over each object in the XML file to extract the bounding box coordinates andclass labels for each object.The function returns three values: the image path, a list of bounding boxes (eachrepresented as a list of four floats: xmin, ymin, xmax, ymax), and a list of class IDs(represented as integers) corresponding to each bounding box. The class IDs are obtainedby mapping the class labels to integer values using a dictionary called `class_mapping`.<jupyter_code>def parse_annotation(xml_file): tree = ET.parse(xml_file) root = tree.getroot() image_name = root.find("filename").text image_path = os.path.join(path_images, image_name) boxes = [] classes = [] for obj in root.iter("object"): cls = obj.find("name").text classes.append(cls) bbox = obj.find("bndbox") xmin = float(bbox.find("xmin").text) ymin = float(bbox.find("ymin").text) xmax = float(bbox.find("xmax").text) ymax = float(bbox.find("ymax").text) boxes.append([xmin, ymin, xmax, ymax]) class_ids = [ list(class_mapping.keys())[list(class_mapping.values()).index(cls)] for cls in classes ] return image_path, boxes, class_ids image_paths = [] bbox = [] classes = [] for xml_file in tqdm(xml_files): image_path, boxes, class_ids = parse_annotation(xml_file) image_paths.append(image_path) bbox.append(boxes) classes.append(class_ids)<jupyter_output><empty_output><jupyter_text>Here we are using `tf.ragged.constant` to create ragged tensors from the `bbox` and`classes` lists. A ragged tensor is a type of tensor that can handle varying lengths ofdata along one or more dimensions. This is useful when dealing with data that hasvariable-length sequences, such as text or time series data.```pythonclasses = [ [8, 8, 8, 8, 8], 5 classes [12, 14, 14, 14], 4 classes [1], 1 class [7, 7], 2 classes ...]``````pythonbbox = [ [[199.0, 19.0, 390.0, 401.0], [217.0, 15.0, 270.0, 157.0], [393.0, 18.0, 432.0, 162.0], [1.0, 15.0, 226.0, 276.0], [19.0, 95.0, 458.0, 443.0]], image 1 has 4 objects [[52.0, 117.0, 109.0, 177.0]], image 2 has 1 object [[88.0, 87.0, 235.0, 322.0], [113.0, 117.0, 218.0, 471.0]], image 3 has 2 objects ...]```In this case, the `bbox` and `classes` lists have different lengths for each image,depending on the number of objects in the image and the corresponding bounding boxes andclasses. 
To handle this variability, ragged tensors are used instead of regular tensors.Later, these ragged tensors are used to create a `tf.data.Dataset` using the`from_tensor_slices` method. This method creates a dataset from the input tensors byslicing them along the first dimension. By using ragged tensors, the dataset can handlevarying lengths of data for each image and provide a flexible input pipeline for furtherprocessing.<jupyter_code>bbox = tf.ragged.constant(bbox) classes = tf.ragged.constant(classes) image_paths = tf.ragged.constant(image_paths) data = tf.data.Dataset.from_tensor_slices((image_paths, classes, bbox))<jupyter_output><empty_output><jupyter_text>Splitting data in training and validation data<jupyter_code># Determine the number of validation samples num_val = int(len(xml_files) * SPLIT_RATIO) # Split the dataset into train and validation sets val_data = data.take(num_val) train_data = data.skip(num_val)<jupyter_output><empty_output><jupyter_text>Let's see about data loading and bounding box formatting to get things going. Boundingboxes in KerasCV have a predetermined format. To do this, you must bundle your boundingboxes into a dictionary that complies with the requirements listed below:```pythonbounding_boxes = { num_boxes may be a Ragged dimension 'boxes': Tensor(shape=[batch, num_boxes, 4]), 'classes': Tensor(shape=[batch, num_boxes])}```The dictionary has two keys, `'boxes'` and `'classes'`, each of which maps to aTensorFlow RaggedTensor or Tensor object. The `'boxes'` Tensor has a shape of `[batch,num_boxes, 4]`, where batch is the number of images in the batch and num_boxes is themaximum number of bounding boxes in any image. The 4 represents the four values needed todefine a bounding box: xmin, ymin, xmax, ymax.The `'classes'` Tensor has a shape of `[batch, num_boxes]`, where each element representsthe class label for the corresponding bounding box in the `'boxes'` Tensor. The num_boxesdimension may be ragged, which means that the number of boxes may vary across images inthe batch.Final dict should be:```python{"images": images, "bounding_boxes": bounding_boxes}```<jupyter_code>def load_image(image_path): image = tf.io.read_file(image_path) image = tf.image.decode_jpeg(image, channels=3) return image def load_dataset(image_path, classes, bbox): # Read Image image = load_image(image_path) bounding_boxes = { "classes": tf.cast(classes, dtype=tf.float32), "boxes": bbox, } return {"images": tf.cast(image, tf.float32), "bounding_boxes": bounding_boxes}<jupyter_output><empty_output><jupyter_text>Here we create a layer that resizes images to 640x640 pixels, while maintaining theoriginal aspect ratio. The bounding boxes associated with the image are specified in the`xyxy` format. If necessary, the resized image will be padded with zeros to maintain theoriginal aspect ratio.Bounding Box Formats supported by KerasCV:1. CENTER_XYWH2. XYWH3. XYXY4. REL_XYXY5. REL_XYWH6. YXYX7. REL_YXYXYou can read more about KerasCV bounding box formats in[docs](https://keras.io/api/keras_cv/bounding_box/formats/).Furthermore, it is possible to perform format conversion between any two pairs:```pythonboxes = keras_cv.bounding_box.convert_format( bounding_box, images=image, source="xyxy", Original Format target="xywh", Target Format (to which we want to convert) )``` Data AugmentationOne of the most challenging tasks when constructing object detection pipelines is dataaugmentation. 
It involves applying various transformations to the input images toincrease the diversity of the training data and improve the model's ability togeneralize. However, when working with object detection tasks, it becomes even morecomplex as these transformations need to be aware of the underlying bounding boxes andupdate them accordingly.KerasCV provides native support for bounding box augmentation. KerasCV offers anextensive collection of data augmentation layers specifically designed to handle boundingboxes. These layers intelligently adjust the bounding box coordinates as the image istransformed, ensuring that the bounding boxes remain accurate and aligned with theaugmented images.By leveraging KerasCV's capabilities, developers can conveniently integrate boundingbox-friendly data augmentation into their object detection pipelines. By performingon-the-fly augmentation within a tf.data pipeline, the process becomes seamless andefficient, enabling better training and more accurate object detection results.<jupyter_code>augmenter = keras.Sequential( layers=[ keras_cv.layers.RandomFlip(mode="horizontal", bounding_box_format="xyxy"), keras_cv.layers.RandomShear( x_factor=0.2, y_factor=0.2, bounding_box_format="xyxy" ), keras_cv.layers.JitteredResize( target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xyxy" ), ] )<jupyter_output><empty_output><jupyter_text>Creating Training Dataset<jupyter_code>train_ds = train_data.map(load_dataset, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.shuffle(BATCH_SIZE * 4) train_ds = train_ds.ragged_batch(BATCH_SIZE, drop_remainder=True) train_ds = train_ds.map(augmenter, num_parallel_calls=tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Creating Validation Dataset<jupyter_code>resizing = keras_cv.layers.JitteredResize( target_size=(640, 640), scale_factor=(0.75, 1.3), bounding_box_format="xyxy", ) val_ds = val_data.map(load_dataset, num_parallel_calls=tf.data.AUTOTUNE) val_ds = val_ds.shuffle(BATCH_SIZE * 4) val_ds = val_ds.ragged_batch(BATCH_SIZE, drop_remainder=True) val_ds = val_ds.map(resizing, num_parallel_calls=tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Visualization<jupyter_code>def visualize_dataset(inputs, value_range, rows, cols, bounding_box_format): inputs = next(iter(inputs.take(1))) images, bounding_boxes = inputs["images"], inputs["bounding_boxes"] visualization.plot_bounding_box_gallery( images, value_range=value_range, rows=rows, cols=cols, y_true=bounding_boxes, scale=5, font_scale=0.7, bounding_box_format=bounding_box_format, class_mapping=class_mapping, ) visualize_dataset( train_ds, bounding_box_format="xyxy", value_range=(0, 255), rows=2, cols=2 ) visualize_dataset( val_ds, bounding_box_format="xyxy", value_range=(0, 255), rows=2, cols=2 )<jupyter_output><empty_output><jupyter_text>We need to extract the inputs from the preprocessing dictionary and get them ready to befed into the model.<jupyter_code>def dict_to_tuple(inputs): return inputs["images"], inputs["bounding_boxes"] train_ds = train_ds.map(dict_to_tuple, num_parallel_calls=tf.data.AUTOTUNE) train_ds = train_ds.prefetch(tf.data.AUTOTUNE) val_ds = val_ds.map(dict_to_tuple, num_parallel_calls=tf.data.AUTOTUNE) val_ds = val_ds.prefetch(tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Creating Model YOLOv8 is a cutting-edge YOLO model that is used for a variety of computer vision tasks,such as object detection, image classification, and instance segmentation. 
Ultralytics, the creators of YOLOv5, also developed YOLOv8, which incorporates many improvements and changes in architecture and developer experience compared to its predecessor. YOLOv8 is the latest state-of-the-art model that is highly regarded in the industry. The table below compares the performance metrics of five different YOLOv8 models with different sizes (measured in pixels): YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, and YOLOv8x. The metrics include mean average precision (mAP) values at different intersection-over-union (IoU) thresholds for validation data, inference speed on CPU with ONNX format and A100 TensorRT, number of parameters, and number of floating-point operations (FLOPs) (in millions and billions, respectively). As the size of the model increases, the mAP, parameters, and FLOPs generally increase while the speed decreases. YOLOv8x has the highest mAP, parameters, and FLOPs but also the slowest inference speed, while YOLOv8n has the smallest size, fastest inference speed, and lowest mAP, parameters, and FLOPs.

| Model   | size (pixels) | mAP val 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
| ------- | ------------- | ------------- | ------------------- | ------------------------ | ---------- | --------- |
| YOLOv8n | 640           | 37.3          | 80.4                | 0.99                     | 3.2        | 8.7       |
| YOLOv8s | 640           | 44.9          | 128.4               | 1.20                     | 11.2       | 28.6      |
| YOLOv8m | 640           | 50.2          | 234.7               | 1.83                     | 25.9       | 78.9      |
| YOLOv8l | 640           | 52.9          | 375.2               | 2.39                     | 43.7       | 165.2     |
| YOLOv8x | 640           | 53.9          | 479.1               | 3.53                     | 68.2       | 257.8     |

You can read more about YOLOV8 and its architecture in this [RoboFlow blog](https://blog.roboflow.com/whats-new-in-yolov8/). First, we will create an instance of a backbone which will be used by our YOLOV8 detector class. YOLOV8 backbones available in KerasCV:

1. Without weights:
    1. yolo_v8_xs_backbone
    2. yolo_v8_s_backbone
    3. yolo_v8_m_backbone
    4. yolo_v8_l_backbone
    5. yolo_v8_xl_backbone
2. With pre-trained COCO weights:
    1. yolo_v8_xs_backbone_coco
    2. yolo_v8_s_backbone_coco
    3. yolo_v8_m_backbone_coco
    4. yolo_v8_l_backbone_coco
    5. yolo_v8_xl_backbone_coco<jupyter_code>backbone = keras_cv.models.YOLOV8Backbone.from_preset( "yolo_v8_s_backbone_coco" # We will use yolov8 small backbone with coco weights )<jupyter_output><empty_output><jupyter_text>Next, let's build a YOLOV8 model using the `YOLOV8Detector`, which accepts a feature extractor as the `backbone` argument, a `num_classes` argument that specifies the number of object classes to detect based on the size of the `class_mapping` list, a `bounding_box_format` argument that informs the model of the format of the bboxes in the dataset, and, finally, the feature pyramid network (FPN) depth, which is specified by the `fpn_depth` argument. It is simple to build a YOLOV8 using any of the aforementioned backbones thanks to KerasCV.<jupyter_code>yolo = keras_cv.models.YOLOV8Detector( num_classes=len(class_mapping), bounding_box_format="xyxy", backbone=backbone, fpn_depth=1, )<jupyter_output><empty_output><jupyter_text>Compile the Model Losses used for YOLOV8: 1. Classification Loss: This loss function calculates the discrepancy between anticipated class probabilities and actual class probabilities. In this instance, `binary_crossentropy`, a prominent solution for binary classification problems, is utilized.
We utilized binary crossentropy since each detected object is either classified as belonging to or not belonging to a certain object class (such as a person, a car, etc.). 2. Box Loss: `box_loss` is the loss function used to measure the difference between the predicted bounding boxes and the ground truth. In this case, the Complete IoU (CIoU) metric is used, which not only measures the overlap between predicted and ground truth bounding boxes but also considers the difference in aspect ratio, center distance, and box size. Together, these loss functions help optimize the model for object detection by minimizing the difference between the predicted and ground truth class probabilities and bounding boxes.<jupyter_code>optimizer = tf.keras.optimizers.Adam( learning_rate=LEARNING_RATE, global_clipnorm=GLOBAL_CLIPNORM, ) yolo.compile( optimizer=optimizer, classification_loss="binary_crossentropy", box_loss="ciou" )<jupyter_output><empty_output><jupyter_text>COCO Metric Callback We will be using `BoxCOCOMetrics` from KerasCV to evaluate the model and calculate the mAP (Mean Average Precision) score, recall, and precision. We also save our model when the mAP score improves.<jupyter_code>class EvaluateCOCOMetricsCallback(keras.callbacks.Callback): def __init__(self, data, save_path): super().__init__() self.data = data self.metrics = keras_cv.metrics.BoxCOCOMetrics( bounding_box_format="xyxy", evaluate_freq=1e9, ) self.save_path = save_path self.best_map = -1.0 def on_epoch_end(self, epoch, logs): self.metrics.reset_state() for batch in self.data: images, y_true = batch[0], batch[1] y_pred = self.model.predict(images, verbose=0) self.metrics.update_state(y_true, y_pred) metrics = self.metrics.result(force=True) logs.update(metrics) current_map = metrics["MaP"] if current_map > self.best_map: self.best_map = current_map self.model.save(self.save_path) # Save the model when mAP improves return logs<jupyter_output><empty_output><jupyter_text>Train the Model<jupyter_code>yolo.fit( train_ds, validation_data=val_ds, epochs=3, callbacks=[EvaluateCOCOMetricsCallback(val_ds, "model.h5")], )<jupyter_output><empty_output><jupyter_text>Visualize Predictions<jupyter_code>def visualize_detections(model, dataset, bounding_box_format): images, y_true = next(iter(dataset.take(1))) y_pred = model.predict(images) y_pred = bounding_box.to_ragged(y_pred) visualization.plot_bounding_box_gallery( images, value_range=(0, 255), bounding_box_format=bounding_box_format, y_true=y_true, y_pred=y_pred, scale=4, rows=2, cols=2, show=True, font_scale=0.7, class_mapping=class_mapping, ) visualize_detections(yolo, dataset=val_ds, bounding_box_format="xyxy")<jupyter_output><empty_output>
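<jupyter_text>As an optional last step, we can also compute COCO metrics on the validation set outside of the training callback. The sketch below reuses the same `BoxCOCOMetrics` API as the callback defined earlier and assumes the fine-tuned `yolo` model and `val_ds` are still in memory; it is a quick sanity check over a few batches rather than a full evaluation.<jupyter_code># Quick standalone sanity check with BoxCOCOMetrics (sketch; mirrors the callback above).
eval_metrics = keras_cv.metrics.BoxCOCOMetrics(
    bounding_box_format="xyxy",
    evaluate_freq=1e9,
)
eval_metrics.reset_state()
for images, y_true in val_ds.take(8):  # a handful of batches for a quick check
    y_pred = yolo.predict(images, verbose=0)
    eval_metrics.update_state(y_true, y_pred)
print(eval_metrics.result(force=True))<jupyter_output><empty_output>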
keras-io/examples/vision/ipynb/yolov8.ipynb/0
{ "file_path": "keras-io/examples/vision/ipynb/yolov8.ipynb", "repo_id": "keras-io", "token_count": 7611 }
114
# Consistency training with supervision **Author:** [Sayak Paul](https://twitter.com/RisingSayak)<br> **Date created:** 2021/04/13<br> **Last modified:** 2021/04/19<br> **Description:** Training with consistency regularization for robustness against data distribution shifts. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/consistency_training.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/consistency_training.py) Deep learning models excel in many image recognition tasks when the data is independent and identically distributed (i.i.d.). However, they can suffer from performance degradation caused by subtle distribution shifts in the input data (such as random noise, contrast change, and blurring). So, naturally, the question arises: why does this happen? As discussed in [A Fourier Perspective on Model Robustness in Computer Vision](https://arxiv.org/pdf/1906.08988.pdf), there's no reason for deep learning models to be robust against such shifts. Standard model training procedures (such as standard image classification training workflows) *don't* enable a model to learn beyond what's fed to it in the form of training data. In this example, we will train an image classification model while enforcing a sense of *consistency* in it by doing the following: * Train a standard image classification model. * Train an _equal or larger_ model on a noisy version of the dataset (augmented using [RandAugment](https://arxiv.org/abs/1909.13719)). * To do this, we will first obtain predictions of the previous model on the clean images of the dataset. * We will then use these predictions and train the second model to match these predictions on the noisy variant of the same images. This is identical to the workflow of [*Knowledge Distillation*](https://keras.io/examples/vision/knowledge_distillation/), but since the student model is equal or larger in size, this process is also referred to as ***Self-Training***. This overall training workflow finds its roots in works like [FixMatch](https://arxiv.org/abs/2001.07685), [Unsupervised Data Augmentation for Consistency Training](https://arxiv.org/abs/1904.12848), and [Noisy Student Training](https://arxiv.org/abs/1911.04252). Since this training process encourages a model to yield consistent predictions for clean as well as noisy images, it's often referred to as *consistency training* or *training with consistency regularization*. Although this example focuses on using consistency training to enhance the robustness of models to common corruptions, it can also serve as a template for performing _weakly supervised learning_.
This example requires TensorFlow 2.4 or higher, as well as TensorFlow Hub and TensorFlow Models, which can be installed using the following command: ```python !pip install -q tf-models-official tensorflow-addons ``` --- ## Imports and setup ```python from official.vision.image_classification.augment import RandAugment from tensorflow.keras import layers import tensorflow as tf import tensorflow_addons as tfa import matplotlib.pyplot as plt tf.random.set_seed(42) ``` --- ## Define hyperparameters ```python AUTO = tf.data.AUTOTUNE BATCH_SIZE = 128 EPOCHS = 5 CROP_TO = 72 RESIZE_TO = 96 ``` --- ## Load the CIFAR-10 dataset ```python (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() val_samples = 49500 new_train_x, new_y_train = x_train[: val_samples + 1], y_train[: val_samples + 1] val_x, val_y = x_train[val_samples:], y_train[val_samples:] ``` --- ## Create TensorFlow `Dataset` objects ```python # Initialize `RandAugment` object with 2 layers of # augmentation transforms and strength of 9. augmenter = RandAugment(num_layers=2, magnitude=9) ``` For training the teacher model, we will only be using two geometric augmentation transforms: random horizontal flip and random crop. ```python def preprocess_train(image, label, noisy=True): image = tf.image.random_flip_left_right(image) # We first resize the original image to a larger dimension # and then we take random crops from it. image = tf.image.resize(image, [RESIZE_TO, RESIZE_TO]) image = tf.image.random_crop(image, [CROP_TO, CROP_TO, 3]) if noisy: image = augmenter.distort(image) return image, label def preprocess_test(image, label): image = tf.image.resize(image, [CROP_TO, CROP_TO]) return image, label train_ds = tf.data.Dataset.from_tensor_slices((new_train_x, new_y_train)) validation_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y)) test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)) ``` We make sure `train_clean_ds` and `train_noisy_ds` are shuffled using the *same* seed to ensure their orders are exactly the same. This will be helpful during training the student model. ```python # This dataset will be used to train the first model. train_clean_ds = ( train_ds.shuffle(BATCH_SIZE * 10, seed=42) .map(lambda x, y: (preprocess_train(x, y, noisy=False)), num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # This prepares the `Dataset` object to use RandAugment. train_noisy_ds = ( train_ds.shuffle(BATCH_SIZE * 10, seed=42) .map(preprocess_train, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) validation_ds = ( validation_ds.map(preprocess_test, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) test_ds = ( test_ds.map(preprocess_test, num_parallel_calls=AUTO) .batch(BATCH_SIZE) .prefetch(AUTO) ) # This dataset will be used to train the second model. 
consistency_training_ds = tf.data.Dataset.zip((train_clean_ds, train_noisy_ds)) ``` --- ## Visualize the datasets ```python sample_images, sample_labels = next(iter(train_clean_ds)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off") sample_images, sample_labels = next(iter(train_noisy_ds)) plt.figure(figsize=(10, 10)) for i, image in enumerate(sample_images[:9]): ax = plt.subplot(3, 3, i + 1) plt.imshow(image.numpy().astype("int")) plt.axis("off") ``` ![png](/img/examples/vision/consistency_training/consistency_training_16_0.png) ![png](/img/examples/vision/consistency_training/consistency_training_16_1.png) --- ## Define a model building utility function We now define our model building utility. Our model is based on the [ResNet50V2 architecture](https://arxiv.org/abs/1603.05027). ```python def get_training_model(num_classes=10): resnet50_v2 = tf.keras.applications.ResNet50V2( weights=None, include_top=False, input_shape=(CROP_TO, CROP_TO, 3), ) model = tf.keras.Sequential( [ layers.Input((CROP_TO, CROP_TO, 3)), layers.Rescaling(scale=1.0 / 127.5, offset=-1), resnet50_v2, layers.GlobalAveragePooling2D(), layers.Dense(num_classes), ] ) return model ``` In the interest of reproducibility, we serialize the initial random weights of the teacher network. ```python initial_teacher_model = get_training_model() initial_teacher_model.save_weights("initial_teacher_model.h5") ``` --- ## Train the teacher model As noted in Noisy Student Training, if the teacher model is trained with *geometric ensembling* and when the student model is forced to mimic that, it leads to better performance. The original work uses [Stochastic Depth](https://arxiv.org/abs/1603.09382) and [Dropout](https://jmlr.org/papers/v15/srivastava14a.html) to bring in the ensembling part but for this example, we will use [Stochastic Weight Averaging](https://arxiv.org/abs/1803.05407) (SWA) which also resembles geometric ensembling. ```python # Define the callbacks. reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=3) early_stopping = tf.keras.callbacks.EarlyStopping( patience=10, restore_best_weights=True ) # Initialize SWA from tf-hub. SWA = tfa.optimizers.SWA # Compile and train the teacher model. teacher_model = get_training_model() teacher_model.load_weights("initial_teacher_model.h5") teacher_model.compile( # Notice that we are wrapping our optimizer within SWA optimizer=SWA(tf.keras.optimizers.Adam()), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"], ) history = teacher_model.fit( train_clean_ds, epochs=EPOCHS, validation_data=validation_ds, callbacks=[reduce_lr, early_stopping], ) # Evaluate the teacher model on the test set. 
_, acc = teacher_model.evaluate(test_ds, verbose=0) print(f"Test accuracy: {acc*100}%") ``` <div class="k-default-codeblock"> ``` Epoch 1/5 387/387 [==============================] - 73s 78ms/step - loss: 1.7785 - accuracy: 0.3582 - val_loss: 2.0589 - val_accuracy: 0.3920 Epoch 2/5 387/387 [==============================] - 28s 71ms/step - loss: 1.2493 - accuracy: 0.5542 - val_loss: 1.4228 - val_accuracy: 0.5380 Epoch 3/5 387/387 [==============================] - 28s 73ms/step - loss: 1.0294 - accuracy: 0.6350 - val_loss: 1.4422 - val_accuracy: 0.5900 Epoch 4/5 387/387 [==============================] - 28s 73ms/step - loss: 0.8954 - accuracy: 0.6864 - val_loss: 1.2189 - val_accuracy: 0.6520 Epoch 5/5 387/387 [==============================] - 28s 73ms/step - loss: 0.7879 - accuracy: 0.7231 - val_loss: 0.9790 - val_accuracy: 0.6500 Test accuracy: 65.83999991416931% ``` </div> --- ## Define a self-training utility For this part, we will borrow the `Distiller` class from [this Keras Example](https://keras.io/examples/vision/knowledge_distillation/). ```python # Majority of the code is taken from: # https://keras.io/examples/vision/knowledge_distillation/ class SelfTrainer(tf.keras.Model): def __init__(self, student, teacher): super().__init__() self.student = student self.teacher = teacher def compile( self, optimizer, metrics, student_loss_fn, distillation_loss_fn, temperature=3, ): super().compile(optimizer=optimizer, metrics=metrics) self.student_loss_fn = student_loss_fn self.distillation_loss_fn = distillation_loss_fn self.temperature = temperature def train_step(self, data): # Since our dataset is a zip of two independent datasets, # after initially parsing them, we segregate the # respective images and labels next. clean_ds, noisy_ds = data clean_images, _ = clean_ds noisy_images, y = noisy_ds # Forward pass of teacher teacher_predictions = self.teacher(clean_images, training=False) with tf.GradientTape() as tape: # Forward pass of student student_predictions = self.student(noisy_images, training=True) # Compute losses student_loss = self.student_loss_fn(y, student_predictions) distillation_loss = self.distillation_loss_fn( tf.nn.softmax(teacher_predictions / self.temperature, axis=1), tf.nn.softmax(student_predictions / self.temperature, axis=1), ) total_loss = (student_loss + distillation_loss) / 2 # Compute gradients trainable_vars = self.student.trainable_variables gradients = tape.gradient(total_loss, trainable_vars) # Update weights self.optimizer.apply_gradients(zip(gradients, trainable_vars)) # Update the metrics configured in `compile()` self.compiled_metrics.update_state( y, tf.nn.softmax(student_predictions, axis=1) ) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} results.update({"total_loss": total_loss}) return results def test_step(self, data): # During inference, we only pass a dataset consisting images and labels. x, y = data # Compute predictions y_prediction = self.student(x, training=False) # Update the metrics self.compiled_metrics.update_state(y, tf.nn.softmax(y_prediction, axis=1)) # Return a dict of performance results = {m.name: m.result() for m in self.metrics} return results ``` The only difference in this implementation is the way loss is being calculated. **Instead of weighted the distillation loss and student loss differently we are taking their average following Noisy Student Training**. --- ## Train the student model ```python # Define the callbacks. # We are using a larger decay factor to stabilize the training. 
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau( patience=3, factor=0.5, monitor="val_accuracy" ) early_stopping = tf.keras.callbacks.EarlyStopping( patience=10, restore_best_weights=True, monitor="val_accuracy" ) # Compile and train the student model. self_trainer = SelfTrainer(student=get_training_model(), teacher=teacher_model) self_trainer.compile( # Notice we are *not* using SWA here. optimizer="adam", metrics=["accuracy"], student_loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), distillation_loss_fn=tf.keras.losses.KLDivergence(), temperature=10, ) history = self_trainer.fit( consistency_training_ds, epochs=EPOCHS, validation_data=validation_ds, callbacks=[reduce_lr, early_stopping], ) # Evaluate the student model. acc = self_trainer.evaluate(test_ds, verbose=0) print(f"Test accuracy from student model: {acc*100}%") ``` <div class="k-default-codeblock"> ``` Epoch 1/5 387/387 [==============================] - 39s 84ms/step - accuracy: 0.2112 - total_loss: 1.0629 - val_accuracy: 0.4180 Epoch 2/5 387/387 [==============================] - 32s 82ms/step - accuracy: 0.3341 - total_loss: 0.9554 - val_accuracy: 0.3900 Epoch 3/5 387/387 [==============================] - 31s 81ms/step - accuracy: 0.3873 - total_loss: 0.8852 - val_accuracy: 0.4580 Epoch 4/5 387/387 [==============================] - 31s 81ms/step - accuracy: 0.4294 - total_loss: 0.8423 - val_accuracy: 0.5660 Epoch 5/5 387/387 [==============================] - 31s 81ms/step - accuracy: 0.4547 - total_loss: 0.8093 - val_accuracy: 0.5880 Test accuracy from student model: 58.490002155303955% ``` </div> --- ## Assess the robustness of the models A standard benchmark of assessing the robustness of vision models is to record their performance on corrupted datasets like ImageNet-C and CIFAR-10-C both of which were proposed in [Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261). For this example, we will be using the CIFAR-10-C dataset which has 19 different corruptions on 5 different severity levels. To assess the robustness of the models on this dataset, we will do the following: * Run the pre-trained models on the highest level of severities and obtain the top-1 accuracies. * Compute the mean top-1 accuracy. For the purpose of this example, we won't be going through these steps. This is why we trained the models for only 5 epochs. You can check out [this repository](https://github.com/sayakpaul/Consistency-Training-with-Supervision) that demonstrates the full-scale training experiments and also the aforementioned assessment. The figure below presents an executive summary of that assessment: ![](https://i.ibb.co/HBJkM9R/image.png) **Mean Top-1** results stand for the CIFAR-10-C dataset and **Test Top-1** results stand for the CIFAR-10 test set. It's clear that consistency training has an advantage on not only enhancing the model robustness but also on improving the standard test performance.
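For completeness, a rough sketch of that assessment loop is shown below. It is illustrative only and is not executed in this example; it assumes the `tensorflow-datasets` package is available and that it exposes CIFAR-10-C under `cifar10_corrupted/{corruption}_5` configs (severity level 5), and it reuses the preprocessing constants defined earlier.

```python
# Sketch of the CIFAR-10-C assessment described above (not run here).
# Assumes TFDS provides the corrupted test sets as "cifar10_corrupted/{corruption}_5".
import tensorflow_datasets as tfds

corruptions = ["gaussian_noise", "motion_blur", "fog"]  # a small, arbitrary subset

def preprocess_corrupted(example):
    image = tf.image.resize(example["image"], [CROP_TO, CROP_TO])
    return image, example["label"]

accuracies = []
for corruption in corruptions:
    c_ds = (
        tfds.load(f"cifar10_corrupted/{corruption}_5", split="test")
        .map(preprocess_corrupted, num_parallel_calls=AUTO)
        .batch(BATCH_SIZE)
    )
    _, acc = teacher_model.evaluate(c_ds, verbose=0)
    accuracies.append(acc)

print("Mean top-1 accuracy over the sampled corruptions:", sum(accuracies) / len(accuracies))
```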
keras-io/examples/vision/md/consistency_training.md/0
{ "file_path": "keras-io/examples/vision/md/consistency_training.md", "repo_id": "keras-io", "token_count": 5641 }
115
# Simple MNIST convnet **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2015/06/19<br> **Last modified:** 2020/04/21<br> **Description:** A simple convnet that achieves ~99% test accuracy on MNIST. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/mnist_convnet.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/mnist_convnet.py) --- ## Setup ```python import numpy as np import keras from keras import layers ``` --- ## Prepare the data ```python # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # Load the data and split it between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) ``` <div class="k-default-codeblock"> ``` x_train shape: (60000, 28, 28, 1) 60000 train samples 10000 test samples ``` </div> --- ## Build the model ```python model = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dropout(0.5), layers.Dense(num_classes, activation="softmax"), ] ) model.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">320</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; text-decoration-color: #00af00">13</span>, <span style="color: #00af00; 
text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">11</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">18,496</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1600</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,010</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">34,826</span> (136.04 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> --- ## Train the model ```python batch_size = 128 epochs = 15 model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) ``` <div 
class="k-default-codeblock"> ``` Epoch 1/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 7s 9ms/step - accuracy: 0.7668 - loss: 0.7644 - val_accuracy: 0.9803 - val_loss: 0.0815 Epoch 2/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9627 - loss: 0.1237 - val_accuracy: 0.9833 - val_loss: 0.0623 Epoch 3/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9732 - loss: 0.0898 - val_accuracy: 0.9850 - val_loss: 0.0539 Epoch 4/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9761 - loss: 0.0763 - val_accuracy: 0.9880 - val_loss: 0.0421 Epoch 5/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9795 - loss: 0.0647 - val_accuracy: 0.9887 - val_loss: 0.0389 Epoch 6/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9824 - loss: 0.0580 - val_accuracy: 0.9903 - val_loss: 0.0345 Epoch 7/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9828 - loss: 0.0537 - val_accuracy: 0.9895 - val_loss: 0.0371 Epoch 8/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9838 - loss: 0.0503 - val_accuracy: 0.9907 - val_loss: 0.0340 Epoch 9/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9861 - loss: 0.0451 - val_accuracy: 0.9907 - val_loss: 0.0330 Epoch 10/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9866 - loss: 0.0427 - val_accuracy: 0.9917 - val_loss: 0.0298 Epoch 11/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9871 - loss: 0.0389 - val_accuracy: 0.9920 - val_loss: 0.0297 Epoch 12/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9885 - loss: 0.0371 - val_accuracy: 0.9912 - val_loss: 0.0285 Epoch 13/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9901 - loss: 0.0332 - val_accuracy: 0.9922 - val_loss: 0.0290 Epoch 14/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9885 - loss: 0.0340 - val_accuracy: 0.9923 - val_loss: 0.0283 Epoch 15/15 422/422 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - accuracy: 0.9891 - loss: 0.0326 - val_accuracy: 0.9925 - val_loss: 0.0273 <keras.src.callbacks.history.History at 0x7f8497818af0> ``` </div> --- ## Evaluate the trained model ```python score = model.evaluate(x_test, y_test, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1]) ``` <div class="k-default-codeblock"> ``` Test loss: 0.02499214932322502 Test accuracy: 0.9919000267982483 ``` </div>
keras-io/examples/vision/md/mnist_convnet.md/0
{ "file_path": "keras-io/examples/vision/md/mnist_convnet.md", "repo_id": "keras-io", "token_count": 4269 }
116
# Segment Anything Model with 🤗Transformers **Authors:** [Merve Noyan](https://twitter.com/mervenoyann) & [Sayak Paul](https://twitter.com/RisingSayak)<br> **Date created:** 2023/07/11<br> **Last modified:** 2023/07/11<br> **Description:** Fine-tuning Segment Anything Model using Keras and 🤗 Transformers. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/sam.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/sam.py) --- ## Introduction Large language models (LLMs) make it easy for end users to apply them to various applications through "prompting". For example, if we wanted an LLM to predict the sentiment of the following sentence -- "That movie was amazing, I thoroughly enjoyed it" -- we'd prompt the LLM with something like: > What's the sentiment of the following sentence: "That movie was amazing, I thoroughly enjoyed it"? In return, the LLM would return a sentiment token. But when it comes to visual recognition tasks, how can we engineer "visual" cues to prompt foundation vision models? For example, we could have an input image and prompt the model with a bounding box on that image and ask it to perform segmentation. The bounding box would serve as our visual prompt here. In the [Segment Anything Model](https://segment-anything.com/) (dubbed SAM), researchers from Meta extended the space of language prompting to visual prompting. SAM is capable of performing zero-shot segmentation with a prompt input, inspired by large language models. The prompt here can be a set of foreground/background points, free text, a box, or a mask. There are many downstream segmentation tasks, including semantic segmentation and edge detection. The goal of SAM is to enable all of these downstream segmentation tasks through prompting. In this example, we'll learn how to use the SAM model from 🤗 Transformers for performing inference and fine-tuning. --- ## Installation ```python !!pip install -q git+https://github.com/huggingface/transformers ``` <div class="k-default-codeblock"> ``` [] ``` </div> Let's import everything we need for this example. ```python from tensorflow import keras from transformers import TFSamModel, SamProcessor import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.python.ops.numpy_ops import np_config from PIL import Image import requests import glob import os ``` <div class="k-default-codeblock"> ``` /Users/mervenoyan/miniforge3/envs/py310/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html from .autonotebook import tqdm as notebook_tqdm ``` </div> --- ## SAM in a few words SAM has the following components: | ![](https://imgur.com/oLfdwuB) |:--:| | Image taken from the official [SAM blog post](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/) | | The image encoder is responsible for computing image embeddings. When interacting with SAM, we compute the image embedding one time (as the image encoder is heavy) and then reuse it with the different prompts mentioned above (points, bounding boxes, masks).
Points and boxes (so-called sparse prompts) go through a lightweight prompt encoder, while masks (dense prompts) go through a convolutional layer. We couple the image embedding extracted from the image encoder and the prompt embedding and both go to a lightweight mask decoder. The decoder is responsible for predicting the mask. | ![](https://i.imgur.com/QQ9Ts5T.png) | |:--:| | Figure taken from the [SAM paper](https://arxiv.org/abs/2304.02643) | SAM was pre-trained to predict a _valid_ mask for any acceptable prompt. This requirement allows SAM to output a valid mask even when the prompt is ambiguous to understand -- this makes SAM ambiguity-aware. Moreover, SAM predicts multiple masks for a single prompt. We highly encourage you to check out the [SAM paper](https://arxiv.org/abs/2304.02643) and the [blog post](https://ai.facebook.com/blog/segment-anything-foundation-model-image-segmentation/) to learn more about the additional details of SAM and the dataset used to pre-trained it. --- ## Running inference with SAM There are three checkpoints for SAM: * [sam-vit-base](https://huggingface.co/facebook/sam-vit-base) * [sam-vit-large](https://huggingface.co/facebook/sam-vit-large) * [sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge). We load `sam-vit-base` in [`TFSamModel`](https://huggingface.co/docs/transformers/main/model_doc/sam#transformers.TFSamModel). We also need `SamProcessor`for the associated checkpoint. ```python model = TFSamModel.from_pretrained("facebook/sam-vit-base") processor = SamProcessor.from_pretrained("facebook/sam-vit-base") ``` <div class="k-default-codeblock"> ``` All model checkpoint layers were used when initializing TFSamModel. ``` </div> <div class="k-default-codeblock"> ``` All the layers of TFSamModel were initialized from the model checkpoint at facebook/sam-vit-base. If your task is similar to the task the model of the checkpoint was trained on, you can already use TFSamModel for predictions without further training. ``` </div> Next, we write some utility functions for visualization. Most of these functions are taken from [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb). 
```python np_config.enable_numpy_behavior() def show_mask(mask, ax, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) else: color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6]) h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) def show_box(box, ax): x0, y0 = box[0], box[1] w, h = box[2] - box[0], box[3] - box[1] ax.add_patch( plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2) ) def show_boxes_on_image(raw_image, boxes): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points_on_image(raw_image, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) plt.axis("on") plt.show() def show_points_and_boxes_on_image(raw_image, boxes, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points_and_boxes_on_image(raw_image, boxes, input_points, input_labels=None): plt.figure(figsize=(10, 10)) plt.imshow(raw_image) input_points = np.array(input_points) if input_labels is None: labels = np.ones_like(input_points[:, 0]) else: labels = np.array(input_labels) show_points(input_points, labels, plt.gca()) for box in boxes: show_box(box, plt.gca()) plt.axis("on") plt.show() def show_points(coords, labels, ax, marker_size=375): pos_points = coords[labels == 1] neg_points = coords[labels == 0] ax.scatter( pos_points[:, 0], pos_points[:, 1], color="green", marker="*", s=marker_size, edgecolor="white", linewidth=1.25, ) ax.scatter( neg_points[:, 0], neg_points[:, 1], color="red", marker="*", s=marker_size, edgecolor="white", linewidth=1.25, ) def show_masks_on_image(raw_image, masks, scores): if len(masks[0].shape) == 4: final_masks = tf.squeeze(masks[0]) if scores.shape[0] == 1: final_scores = tf.squeeze(scores) nb_predictions = scores.shape[-1] fig, axes = plt.subplots(1, nb_predictions, figsize=(15, 15)) for i, (mask, score) in enumerate(zip(final_masks, final_scores)): mask = tf.stop_gradient(mask) axes[i].imshow(np.array(raw_image)) show_mask(mask, axes[i]) axes[i].title.set_text(f"Mask {i+1}, Score: {score.numpy().item():.3f}") axes[i].axis("off") plt.show() ``` We will segment a car image using a point prompt. Make sure to set `return_tensors` to `tf` when calling the processor. Let's load an image of a car and segment it. ```python img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") plt.imshow(raw_image) plt.show() ``` ![png](/img/examples/vision/sam/sam_14_0.png) Let's now define a set of points we will use as the prompt. ```python input_points = [[[450, 600]]] # Visualize a single point. show_points_on_image(raw_image, input_points[0]) ``` ![png](/img/examples/vision/sam/sam_16_0.png) And segment: ```python # Preprocess the input image. inputs = processor(raw_image, input_points=input_points, return_tensors="tf") # Predict for segmentation with the prompt. 
outputs = model(**inputs) ``` `outputs` has two attributes of interest: * `outputs.pred_masks`: the predicted masks. * `outputs.iou_scores`: the IoU scores associated with the masks. Let's post-process the masks and visualize them with their IoU scores: ```python masks = processor.image_processor.post_process_masks( outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"], return_tensors="tf", ) show_masks_on_image(raw_image, masks, outputs.iou_scores) ``` ![png](/img/examples/vision/sam/sam_21_0.png) And there we go! As you can see, all the masks are _valid_ masks for the point prompt we provided. SAM is flexible enough to support different visual prompts and we encourage you to check out [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb) to learn more about them! --- ## Fine-tuning We'll use [this dataset](https://huggingface.co/datasets/nielsr/breast-cancer) consisting of breast cancer scans. In the medical imaging domain, being able to segment the cells containing malignancy is an important task. ### Data preparation Let's first get the dataset. ```python remote_path = "https://hf.co/datasets/sayakpaul/sample-datasets/resolve/main/breast-cancer-dataset.tar.gz" dataset_path = keras.utils.get_file( "breast-cancer-dataset.tar.gz", remote_path, untar=True ) ``` Let's now visualize a sample from the dataset. *(The `show_mask()` utility is taken from [this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb))* ```python def show_mask(mask, ax, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) else: color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6]) h, w = mask.shape[-2:] mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) ax.imshow(mask_image) # Load all the image and label paths. image_paths = sorted(glob.glob(os.path.join(dataset_path, "images/*.png"))) label_paths = sorted(glob.glob(os.path.join(dataset_path, "labels/*.png"))) # Load the image and label. idx = 15 image = Image.open(image_paths[idx]) label = Image.open(label_paths[idx]) image = np.array(image) ground_truth_seg = np.array(label) # Display. fig, axes = plt.subplots() axes.imshow(image) show_mask(ground_truth_seg, axes) axes.title.set_text(f"Ground truth mask") axes.axis("off") plt.show() tf.shape(ground_truth_seg) ``` ![png](/img/examples/vision/sam/sam_26_0.png) <div class="k-default-codeblock"> ``` <tf.Tensor: shape=(2,), dtype=int32, numpy=array([256, 256], dtype=int32)> ``` </div> ### Preparing `tf.data.Dataset` We now write a generator class to prepare the images and the segmentation masks using the `processor` utilized above. We will leverage this generator class to create a `tf.data.Dataset` object for our training set by using `tf.data.Dataset.from_generator()`. Utilities of this class have been adapted from [this notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb). The generator is responsible for yielding the preprocessed images and the segmentation masks, and some other metadata needed by the SAM model.
```python class Generator: """Generator class for processing the images and the masks for SAM fine-tuning.""" def __init__(self, dataset_path, processor): self.dataset_path = dataset_path self.image_paths = sorted( glob.glob(os.path.join(self.dataset_path, "images/*.png")) ) self.label_paths = sorted( glob.glob(os.path.join(self.dataset_path, "labels/*.png")) ) self.processor = processor def __call__(self): for image_path, label_path in zip(self.image_paths, self.label_paths): image = np.array(Image.open(image_path)) ground_truth_mask = np.array(Image.open(label_path)) # get bounding box prompt prompt = self.get_bounding_box(ground_truth_mask) # prepare image and prompt for the model inputs = self.processor(image, input_boxes=[[prompt]], return_tensors="np") # remove batch dimension which the processor adds by default inputs = {k: v.squeeze(0) for k, v in inputs.items()} # add ground truth segmentation inputs["ground_truth_mask"] = ground_truth_mask yield inputs def get_bounding_box(self, ground_truth_map): # get bounding box from mask y_indices, x_indices = np.where(ground_truth_map > 0) x_min, x_max = np.min(x_indices), np.max(x_indices) y_min, y_max = np.min(y_indices), np.max(y_indices) # add perturbation to bounding box coordinates H, W = ground_truth_map.shape x_min = max(0, x_min - np.random.randint(0, 20)) x_max = min(W, x_max + np.random.randint(0, 20)) y_min = max(0, y_min - np.random.randint(0, 20)) y_max = min(H, y_max + np.random.randint(0, 20)) bbox = [x_min, y_min, x_max, y_max] return bbox ``` `get_bounding_box()` is responsible for turning the ground-truth segmentation maps into bounding boxes. These bounding boxes are fed to SAM as prompts (along with the original images) during fine-tuning, and SAM is then trained to predict valid masks. The advantage of first creating a generator and then using it to create a `tf.data.Dataset` is flexibility. Sometimes, we may need to use utilities from other libraries ([`albumentations`](https://albumentations.ai/), for example) which may not come in native TensorFlow implementations. By using this workflow, we can easily accommodate such use cases. The non-TF counterparts might introduce performance bottlenecks, though. However, for our example, it should work just fine. Now, we prepare the `tf.data.Dataset` from our training set. ```python # Define the output signature of the generator class. output_signature = { "pixel_values": tf.TensorSpec(shape=(3, None, None), dtype=tf.float32), "original_sizes": tf.TensorSpec(shape=(None,), dtype=tf.int64), "reshaped_input_sizes": tf.TensorSpec(shape=(None,), dtype=tf.int64), "input_boxes": tf.TensorSpec(shape=(None, None), dtype=tf.float64), "ground_truth_mask": tf.TensorSpec(shape=(None, None), dtype=tf.int32), } # Prepare the dataset object. train_dataset_gen = Generator(dataset_path, processor) train_ds = tf.data.Dataset.from_generator( train_dataset_gen, output_signature=output_signature ) ``` Next, we configure the dataset for performance. ```python auto = tf.data.AUTOTUNE batch_size = 2 shuffle_buffer = 4 train_ds = ( train_ds.cache() .shuffle(shuffle_buffer) .batch(batch_size) .prefetch(buffer_size=auto) ) ``` Take a single batch of data and inspect the shapes of the elements present inside of it.
```python
sample = next(iter(train_ds))
for k in sample:
    print(k, sample[k].shape, sample[k].dtype, isinstance(sample[k], tf.Tensor))
```

<div class="k-default-codeblock">

```
pixel_values (2, 3, 1024, 1024) <dtype: 'float32'> True
original_sizes (2, 2) <dtype: 'int64'> True
reshaped_input_sizes (2, 2) <dtype: 'int64'> True
input_boxes (2, 1, 4) <dtype: 'float64'> True
ground_truth_mask (2, 256, 256) <dtype: 'int32'> True
```

</div>

### Training

We will now write the DICE loss. This implementation is based on
[MONAI DICE loss](https://docs.monai.io/en/stable/_modules/monai/losses/dice.html#DiceLoss).

```python
def dice_loss(y_true, y_pred, smooth=1e-5):
    y_pred = tf.sigmoid(y_pred)
    reduce_axis = list(range(2, len(y_pred.shape)))
    if batch_size > 1:
        # reducing spatial dimensions and batch
        reduce_axis = [0] + reduce_axis
    intersection = tf.reduce_sum(y_true * y_pred, axis=reduce_axis)
    y_true_sq = tf.math.pow(y_true, 2)
    y_pred_sq = tf.math.pow(y_pred, 2)

    ground_o = tf.reduce_sum(y_true_sq, axis=reduce_axis)
    pred_o = tf.reduce_sum(y_pred_sq, axis=reduce_axis)
    denominator = ground_o + pred_o
    # calculate DICE coefficient
    loss = 1.0 - (2.0 * intersection + smooth) / (denominator + smooth)
    loss = tf.reduce_mean(loss)

    return loss
```

## Fine-tuning SAM

We will now fine-tune SAM's decoder part. We will freeze the vision encoder and prompt
encoder layers.

```python
# initialize SAM model and optimizer
sam = TFSamModel.from_pretrained("facebook/sam-vit-base")
optimizer = keras.optimizers.Adam(1e-5)

for layer in sam.layers:
    if layer.name in ["vision_encoder", "prompt_encoder"]:
        layer.trainable = False


@tf.function
def train_step(inputs):
    with tf.GradientTape() as tape:
        # pass inputs to SAM model
        outputs = sam(
            pixel_values=inputs["pixel_values"],
            input_boxes=inputs["input_boxes"],
            multimask_output=False,
            training=True,
        )

        predicted_masks = tf.squeeze(outputs.pred_masks, 1)
        ground_truth_masks = tf.cast(inputs["ground_truth_mask"], tf.float32)

        # calculate loss over predicted and ground truth masks
        loss = dice_loss(tf.expand_dims(ground_truth_masks, 1), predicted_masks)

    # update trainable variables
    trainable_vars = sam.trainable_variables
    grads = tape.gradient(loss, trainable_vars)
    optimizer.apply_gradients(zip(grads, trainable_vars))

    return loss
```

<div class="k-default-codeblock">

```
All model checkpoint layers were used when initializing TFSamModel.
```

</div>

<div class="k-default-codeblock">

```
All the layers of TFSamModel were initialized from the model checkpoint at facebook/sam-vit-base.
If your task is similar to the task the model of the checkpoint was trained on, you can already use TFSamModel for predictions without further training.
WARNING:absl:At this time, the v2.11+ optimizer `tf.keras.optimizers.Adam` runs slowly on M1/M2 Macs, please use the legacy Keras optimizer instead, located at `tf.keras.optimizers.legacy.Adam`.
```

</div>

We can now run the training for three epochs. You might see a warning about gradients not
existing for the IoU prediction head of the mask decoder; it can be safely ignored.
```python # run training for epoch in range(3): for inputs in train_ds: loss = train_step(inputs) print(f"Epoch {epoch + 1}: Loss = {loss}") ``` <div class="k-default-codeblock"> ``` WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument? WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument? WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument? WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument? WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument? WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. 
If you're using `model.compile()`, did you forget to provide a `loss` argument?

WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?

WARNING:tensorflow:Gradients do not exist for variables ['tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_in/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/proj_out/bias:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/kernel:0', 'tf_sam_model_1/mask_decoder/iou_prediction_head/layers_._0/bias:0'] when minimizing the loss. If you're using `model.compile()`, did you forget to provide a `loss` argument?

Epoch 1: Loss = 0.08322787284851074
Epoch 2: Loss = 0.05677264928817749
Epoch 3: Loss = 0.07764029502868652

```

</div>

### Serialize the model

We serialized the model and pushed it to the Hub for you (see below). The `push_to_hub`
method serializes the model, generates a model card, and pushes it to the Hugging Face
Hub, so that other people can load the model with the `from_pretrained` method to run
inference or fine-tune it further. We also need to push the same preprocessor to the
repository. Find the model and the preprocessor
[here](https://huggingface.co/merve/sam-finetuned).

```python
# sam.push_to_hub("merve/sam-finetuned")
# processor.push_to_hub("merve/sam-finetuned")
```

We can now infer with the model.

```python
# Load another image for inference.
idx = 20
raw_image_inference = Image.open(image_paths[idx])

# process the image and infer
preprocessed_img = processor(raw_image_inference)
outputs = sam(preprocessed_img)
```

Lastly, we can visualize the results.

```python
infer_masks = outputs["pred_masks"]
iou_scores = outputs["iou_scores"]
show_masks_on_image(raw_image_inference, masks=infer_masks, scores=iou_scores)
```

<div class="k-default-codeblock">

```
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
WARNING:matplotlib.image:Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
```

</div>

![png](/img/examples/vision/sam/sam_48_1.png)
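If you want to reuse the fine-tuned checkpoint elsewhere, it can be reloaded directly
from the Hub. The snippet below is a minimal sketch, assuming the same `transformers`
classes used throughout this example and the repository linked above:

```python
from transformers import SamProcessor, TFSamModel

# Reload the fine-tuned weights and the matching preprocessor from the Hub.
finetuned_processor = SamProcessor.from_pretrained("merve/sam-finetuned")
finetuned_sam = TFSamModel.from_pretrained("merve/sam-finetuned")
```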
# Train a Vision Transformer on small datasets **Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)<br> **Date created:** 2022/01/07<br> **Last modified:** 2022/01/10<br> **Description:** Training a ViT from scratch on smaller datasets with shifted patch tokenization and locality self-attention. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/vit_small_ds.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/vit_small_ds.py) --- ## Introduction In the academic paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929), the authors mention that Vision Transformers (ViT) are data-hungry. Therefore, pretraining a ViT on a large-sized dataset like JFT300M and fine-tuning it on medium-sized datasets (like ImageNet) is the only way to beat state-of-the-art Convolutional Neural Network models. The self-attention layer of ViT lacks **locality inductive bias** (the notion that image pixels are locally correlated and that their correlation maps are translation-invariant). This is the reason why ViTs need more data. On the other hand, CNNs look at images through spatial sliding windows, which helps them get better results with smaller datasets. In the academic paper [Vision Transformer for Small-Size Datasets](https://arxiv.org/abs/2112.13492v1), the authors set out to tackle the problem of locality inductive bias in ViTs. The main ideas are: - **Shifted Patch Tokenization** - **Locality Self Attention** This example implements the ideas of the paper. A large part of this example is inspired from [Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/). _Note_: This example requires TensorFlow 2.6 or higher, as well as [TensorFlow Addons](https://www.tensorflow.org/addons), which can be installed using the following command: ```python pip install -qq -U tensorflow-addons ``` --- ## Setup ```python import math import numpy as np import tensorflow as tf from tensorflow import keras import tensorflow_addons as tfa import matplotlib.pyplot as plt from tensorflow.keras import layers # Setting seed for reproducibiltiy SEED = 42 keras.utils.set_random_seed(SEED) ``` --- ## Prepare the data ```python NUM_CLASSES = 100 INPUT_SHAPE = (32, 32, 3) (x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data() print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}") print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}") ``` <div class="k-default-codeblock"> ``` Downloading data from https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz 169009152/169001437 [==============================] - 16s 0us/step 169017344/169001437 [==============================] - 16s 0us/step x_train shape: (50000, 32, 32, 3) - y_train shape: (50000, 1) x_test shape: (10000, 32, 32, 3) - y_test shape: (10000, 1) ``` </div> --- ## Configure the hyperparameters The hyperparameters are different from the paper. Feel free to tune the hyperparameters yourself. 
```python # DATA BUFFER_SIZE = 512 BATCH_SIZE = 256 # AUGMENTATION IMAGE_SIZE = 72 PATCH_SIZE = 6 NUM_PATCHES = (IMAGE_SIZE // PATCH_SIZE) ** 2 # OPTIMIZER LEARNING_RATE = 0.001 WEIGHT_DECAY = 0.0001 # TRAINING EPOCHS = 50 # ARCHITECTURE LAYER_NORM_EPS = 1e-6 TRANSFORMER_LAYERS = 8 PROJECTION_DIM = 64 NUM_HEADS = 4 TRANSFORMER_UNITS = [ PROJECTION_DIM * 2, PROJECTION_DIM, ] MLP_HEAD_UNITS = [2048, 1024] ``` --- ## Use data augmentation A snippet from the paper: *"According to DeiT, various techniques are required to effectively train ViTs. Thus, we applied data augmentations such as CutMix, Mixup, Auto Augment, Repeated Augment to all models."* In this example, we will focus solely on the novelty of the approach and not on reproducing the paper results. For this reason, we don't use the mentioned data augmentation schemes. Please feel free to add to or remove from the augmentation pipeline. ```python data_augmentation = keras.Sequential( [ layers.Normalization(), layers.Resizing(IMAGE_SIZE, IMAGE_SIZE), layers.RandomFlip("horizontal"), layers.RandomRotation(factor=0.02), layers.RandomZoom(height_factor=0.2, width_factor=0.2), ], name="data_augmentation", ) # Compute the mean and the variance of the training data for normalization. data_augmentation.layers[0].adapt(x_train) ``` --- ## Implement Shifted Patch Tokenization In a ViT pipeline, the input images are divided into patches that are then linearly projected into tokens. Shifted patch tokenization (STP) is introduced to combat the low receptive field of ViTs. The steps for Shifted Patch Tokenization are as follows: - Start with an image. - Shift the image in diagonal directions. - Concat the diagonally shifted images with the original image. - Extract patches of the concatenated images. - Flatten the spatial dimension of all patches. - Layer normalize the flattened patches and then project it. 
| ![Shifted Patch Toekenization](https://i.imgur.com/bUnHxd0.png) | | :--: | | Shifted Patch Tokenization [Source](https://arxiv.org/abs/2112.13492v1) | ```python class ShiftedPatchTokenization(layers.Layer): def __init__( self, image_size=IMAGE_SIZE, patch_size=PATCH_SIZE, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM, vanilla=False, **kwargs, ): super().__init__(**kwargs) self.vanilla = vanilla # Flag to swtich to vanilla patch extractor self.image_size = image_size self.patch_size = patch_size self.half_patch = patch_size // 2 self.flatten_patches = layers.Reshape((num_patches, -1)) self.projection = layers.Dense(units=projection_dim) self.layer_norm = layers.LayerNormalization(epsilon=LAYER_NORM_EPS) def crop_shift_pad(self, images, mode): # Build the diagonally shifted images if mode == "left-up": crop_height = self.half_patch crop_width = self.half_patch shift_height = 0 shift_width = 0 elif mode == "left-down": crop_height = 0 crop_width = self.half_patch shift_height = self.half_patch shift_width = 0 elif mode == "right-up": crop_height = self.half_patch crop_width = 0 shift_height = 0 shift_width = self.half_patch else: crop_height = 0 crop_width = 0 shift_height = self.half_patch shift_width = self.half_patch # Crop the shifted images and pad them crop = tf.image.crop_to_bounding_box( images, offset_height=crop_height, offset_width=crop_width, target_height=self.image_size - self.half_patch, target_width=self.image_size - self.half_patch, ) shift_pad = tf.image.pad_to_bounding_box( crop, offset_height=shift_height, offset_width=shift_width, target_height=self.image_size, target_width=self.image_size, ) return shift_pad def call(self, images): if not self.vanilla: # Concat the shifted images with the original image images = tf.concat( [ images, self.crop_shift_pad(images, mode="left-up"), self.crop_shift_pad(images, mode="left-down"), self.crop_shift_pad(images, mode="right-up"), self.crop_shift_pad(images, mode="right-down"), ], axis=-1, ) # Patchify the images and flatten it patches = tf.image.extract_patches( images=images, sizes=[1, self.patch_size, self.patch_size, 1], strides=[1, self.patch_size, self.patch_size, 1], rates=[1, 1, 1, 1], padding="VALID", ) flat_patches = self.flatten_patches(patches) if not self.vanilla: # Layer normalize the flat patches and linearly project it tokens = self.layer_norm(flat_patches) tokens = self.projection(tokens) else: # Linearly project the flat patches tokens = self.projection(flat_patches) return (tokens, patches) ``` ### Visualize the patches ```python # Get a random image from the training dataset # and resize the image image = x_train[np.random.choice(range(x_train.shape[0]))] resized_image = tf.image.resize( tf.convert_to_tensor([image]), size=(IMAGE_SIZE, IMAGE_SIZE) ) # Vanilla patch maker: This takes an image and divides into # patches as in the original ViT paper (token, patch) = ShiftedPatchTokenization(vanilla=True)(resized_image / 255.0) (token, patch) = (token[0], patch[0]) n = patch.shape[0] count = 1 plt.figure(figsize=(4, 4)) for row in range(n): for col in range(n): plt.subplot(n, n, count) count = count + 1 image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 3)) plt.imshow(image) plt.axis("off") plt.show() # Shifted Patch Tokenization: This layer takes the image, shifts it # diagonally and then extracts patches from the concatinated images (token, patch) = ShiftedPatchTokenization(vanilla=False)(resized_image / 255.0) (token, patch) = (token[0], patch[0]) n = patch.shape[0] shifted_images = 
["ORIGINAL", "LEFT-UP", "LEFT-DOWN", "RIGHT-UP", "RIGHT-DOWN"] for index, name in enumerate(shifted_images): print(name) count = 1 plt.figure(figsize=(4, 4)) for row in range(n): for col in range(n): plt.subplot(n, n, count) count = count + 1 image = tf.reshape(patch[row][col], (PATCH_SIZE, PATCH_SIZE, 5 * 3)) plt.imshow(image[..., 3 * index : 3 * index + 3]) plt.axis("off") plt.show() ``` <div class="k-default-codeblock"> ``` 2022-01-12 04:50:54.960908: I tensorflow/stream_executor/cuda/cuda_blas.cc:1774] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once. ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_1.png) <div class="k-default-codeblock"> ``` ORIGINAL ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_3.png) <div class="k-default-codeblock"> ``` LEFT-UP ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_5.png) <div class="k-default-codeblock"> ``` LEFT-DOWN ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_7.png) <div class="k-default-codeblock"> ``` RIGHT-UP ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_9.png) <div class="k-default-codeblock"> ``` RIGHT-DOWN ``` </div> ![png](/img/examples/vision/vit_small_ds/vit_small_ds_13_11.png) --- ## Implement the patch encoding layer This layer accepts projected patches and then adds positional information to them. ```python class PatchEncoder(layers.Layer): def __init__( self, num_patches=NUM_PATCHES, projection_dim=PROJECTION_DIM, **kwargs ): super().__init__(**kwargs) self.num_patches = num_patches self.position_embedding = layers.Embedding( input_dim=num_patches, output_dim=projection_dim ) self.positions = tf.range(start=0, limit=self.num_patches, delta=1) def call(self, encoded_patches): encoded_positions = self.position_embedding(self.positions) encoded_patches = encoded_patches + encoded_positions return encoded_patches ``` --- ## Implement Locality Self Attention The regular attention equation is stated below. | ![Equation of attention](https://miro.medium.com/max/396/1*P9sV1xXM10t943bXy_G9yg.png) | | :--: | | [Source](https://towardsdatascience.com/attention-is-all-you-need-discovering-the-transformer-paper-73e5ff5e0634) | The attention module takes a query, key, and value. First, we compute the similarity between the query and key via a dot product. Then, the result is scaled by the square root of the key dimension. The scaling prevents the softmax function from having an overly small gradient. Softmax is then applied to the scaled dot product to produce the attention weights. The value is then modulated via the attention weights. In self-attention, query, key and value come from the same input. The dot product would result in large self-token relations rather than inter-token relations. This also means that the softmax gives higher probabilities to self-token relations than the inter-token relations. To combat this, the authors propose masking the diagonal of the dot product. This way, we force the attention module to pay more attention to the inter-token relations. The scaling factor is a constant in the regular attention module. This acts like a temperature term that can modulate the softmax function. The authors suggest a learnable temperature term instead of a constant. | ![Implementation of LSA](https://i.imgur.com/GTV99pk.png) | | :--: | | Locality Self Attention [Source](https://arxiv.org/abs/2112.13492v1) | The above two pointers make the Locality Self Attention. 
We have subclassed the [`layers.MultiHeadAttention`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MultiHeadAttention) and implemented the trainable temperature. The attention mask is built at a later stage. ```python class MultiHeadAttentionLSA(tf.keras.layers.MultiHeadAttention): def __init__(self, **kwargs): super().__init__(**kwargs) # The trainable temperature term. The initial value is # the square root of the key dimension. self.tau = tf.Variable(math.sqrt(float(self._key_dim)), trainable=True) def _compute_attention(self, query, key, value, attention_mask=None, training=None): query = tf.multiply(query, 1.0 / self.tau) attention_scores = tf.einsum(self._dot_product_equation, key, query) attention_scores = self._masked_softmax(attention_scores, attention_mask) attention_scores_dropout = self._dropout_layer( attention_scores, training=training ) attention_output = tf.einsum( self._combine_equation, attention_scores_dropout, value ) return attention_output, attention_scores ``` --- ## Implement the MLP ```python def mlp(x, hidden_units, dropout_rate): for units in hidden_units: x = layers.Dense(units, activation=tf.nn.gelu)(x) x = layers.Dropout(dropout_rate)(x) return x # Build the diagonal attention mask diag_attn_mask = 1 - tf.eye(NUM_PATCHES) diag_attn_mask = tf.cast([diag_attn_mask], dtype=tf.int8) ``` --- ## Build the ViT ```python def create_vit_classifier(vanilla=False): inputs = layers.Input(shape=INPUT_SHAPE) # Augment data. augmented = data_augmentation(inputs) # Create patches. (tokens, _) = ShiftedPatchTokenization(vanilla=vanilla)(augmented) # Encode patches. encoded_patches = PatchEncoder()(tokens) # Create multiple layers of the Transformer block. for _ in range(TRANSFORMER_LAYERS): # Layer normalization 1. x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) # Create a multi-head attention layer. if not vanilla: attention_output = MultiHeadAttentionLSA( num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1 )(x1, x1, attention_mask=diag_attn_mask) else: attention_output = layers.MultiHeadAttention( num_heads=NUM_HEADS, key_dim=PROJECTION_DIM, dropout=0.1 )(x1, x1) # Skip connection 1. x2 = layers.Add()([attention_output, encoded_patches]) # Layer normalization 2. x3 = layers.LayerNormalization(epsilon=1e-6)(x2) # MLP. x3 = mlp(x3, hidden_units=TRANSFORMER_UNITS, dropout_rate=0.1) # Skip connection 2. encoded_patches = layers.Add()([x3, x2]) # Create a [batch_size, projection_dim] tensor. representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches) representation = layers.Flatten()(representation) representation = layers.Dropout(0.5)(representation) # Add MLP. features = mlp(representation, hidden_units=MLP_HEAD_UNITS, dropout_rate=0.5) # Classify outputs. logits = layers.Dense(NUM_CLASSES)(features) # Create the Keras model. model = keras.Model(inputs=inputs, outputs=logits) return model ``` --- ## Compile, train, and evaluate the mode ```python # Some code is taken from: # https://www.kaggle.com/ashusma/training-rfcx-tensorflow-tpu-effnet-b2. 
class WarmUpCosine(keras.optimizers.schedules.LearningRateSchedule): def __init__( self, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps ): super().__init__() self.learning_rate_base = learning_rate_base self.total_steps = total_steps self.warmup_learning_rate = warmup_learning_rate self.warmup_steps = warmup_steps self.pi = tf.constant(np.pi) def __call__(self, step): if self.total_steps < self.warmup_steps: raise ValueError("Total_steps must be larger or equal to warmup_steps.") cos_annealed_lr = tf.cos( self.pi * (tf.cast(step, tf.float32) - self.warmup_steps) / float(self.total_steps - self.warmup_steps) ) learning_rate = 0.5 * self.learning_rate_base * (1 + cos_annealed_lr) if self.warmup_steps > 0: if self.learning_rate_base < self.warmup_learning_rate: raise ValueError( "Learning_rate_base must be larger or equal to " "warmup_learning_rate." ) slope = ( self.learning_rate_base - self.warmup_learning_rate ) / self.warmup_steps warmup_rate = slope * tf.cast(step, tf.float32) + self.warmup_learning_rate learning_rate = tf.where( step < self.warmup_steps, warmup_rate, learning_rate ) return tf.where( step > self.total_steps, 0.0, learning_rate, name="learning_rate" ) def run_experiment(model): total_steps = int((len(x_train) / BATCH_SIZE) * EPOCHS) warmup_epoch_percentage = 0.10 warmup_steps = int(total_steps * warmup_epoch_percentage) scheduled_lrs = WarmUpCosine( learning_rate_base=LEARNING_RATE, total_steps=total_steps, warmup_learning_rate=0.0, warmup_steps=warmup_steps, ) optimizer = tfa.optimizers.AdamW( learning_rate=LEARNING_RATE, weight_decay=WEIGHT_DECAY ) model.compile( optimizer=optimizer, loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[ keras.metrics.SparseCategoricalAccuracy(name="accuracy"), keras.metrics.SparseTopKCategoricalAccuracy(5, name="top-5-accuracy"), ], ) history = model.fit( x=x_train, y=y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_split=0.1, ) _, accuracy, top_5_accuracy = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE) print(f"Test accuracy: {round(accuracy * 100, 2)}%") print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%") return history # Run experiments with the vanilla ViT vit = create_vit_classifier(vanilla=True) history = run_experiment(vit) # Run experiments with the Shifted Patch Tokenization and # Locality Self Attention modified ViT vit_sl = create_vit_classifier(vanilla=False) history = run_experiment(vit_sl) ``` <div class="k-default-codeblock"> ``` Epoch 1/50 176/176 [==============================] - 22s 83ms/step - loss: 4.4912 - accuracy: 0.0427 - top-5-accuracy: 0.1549 - val_loss: 3.9409 - val_accuracy: 0.1030 - val_top-5-accuracy: 0.3036 Epoch 2/50 176/176 [==============================] - 14s 77ms/step - loss: 3.9749 - accuracy: 0.0897 - top-5-accuracy: 0.2802 - val_loss: 3.5721 - val_accuracy: 0.1550 - val_top-5-accuracy: 0.4058 Epoch 3/50 176/176 [==============================] - 14s 77ms/step - loss: 3.7129 - accuracy: 0.1282 - top-5-accuracy: 0.3601 - val_loss: 3.3235 - val_accuracy: 0.2022 - val_top-5-accuracy: 0.4788 Epoch 4/50 176/176 [==============================] - 14s 77ms/step - loss: 3.5518 - accuracy: 0.1544 - top-5-accuracy: 0.4078 - val_loss: 3.2432 - val_accuracy: 0.2132 - val_top-5-accuracy: 0.5056 Epoch 5/50 176/176 [==============================] - 14s 77ms/step - loss: 3.4098 - accuracy: 0.1828 - top-5-accuracy: 0.4471 - val_loss: 3.0910 - val_accuracy: 0.2462 - val_top-5-accuracy: 0.5376 Epoch 6/50 176/176 
[==============================] - 14s 77ms/step - loss: 3.2835 - accuracy: 0.2037 - top-5-accuracy: 0.4838 - val_loss: 2.9803 - val_accuracy: 0.2704 - val_top-5-accuracy: 0.5606 Epoch 7/50 176/176 [==============================] - 14s 77ms/step - loss: 3.1756 - accuracy: 0.2205 - top-5-accuracy: 0.5113 - val_loss: 2.8608 - val_accuracy: 0.2802 - val_top-5-accuracy: 0.5908 Epoch 8/50 176/176 [==============================] - 14s 77ms/step - loss: 3.0585 - accuracy: 0.2439 - top-5-accuracy: 0.5432 - val_loss: 2.8055 - val_accuracy: 0.2960 - val_top-5-accuracy: 0.6144 Epoch 9/50 176/176 [==============================] - 14s 77ms/step - loss: 2.9457 - accuracy: 0.2654 - top-5-accuracy: 0.5697 - val_loss: 2.7034 - val_accuracy: 0.3210 - val_top-5-accuracy: 0.6242 Epoch 10/50 176/176 [==============================] - 14s 77ms/step - loss: 2.8458 - accuracy: 0.2863 - top-5-accuracy: 0.5918 - val_loss: 2.5899 - val_accuracy: 0.3416 - val_top-5-accuracy: 0.6500 Epoch 11/50 176/176 [==============================] - 14s 77ms/step - loss: 2.7530 - accuracy: 0.3052 - top-5-accuracy: 0.6191 - val_loss: 2.5275 - val_accuracy: 0.3526 - val_top-5-accuracy: 0.6660 Epoch 12/50 176/176 [==============================] - 14s 77ms/step - loss: 2.6561 - accuracy: 0.3250 - top-5-accuracy: 0.6355 - val_loss: 2.5111 - val_accuracy: 0.3544 - val_top-5-accuracy: 0.6554 Epoch 13/50 176/176 [==============================] - 14s 77ms/step - loss: 2.5833 - accuracy: 0.3398 - top-5-accuracy: 0.6538 - val_loss: 2.3931 - val_accuracy: 0.3792 - val_top-5-accuracy: 0.6888 Epoch 14/50 176/176 [==============================] - 14s 77ms/step - loss: 2.4988 - accuracy: 0.3594 - top-5-accuracy: 0.6724 - val_loss: 2.3695 - val_accuracy: 0.3868 - val_top-5-accuracy: 0.6958 Epoch 15/50 176/176 [==============================] - 14s 77ms/step - loss: 2.4342 - accuracy: 0.3706 - top-5-accuracy: 0.6877 - val_loss: 2.3076 - val_accuracy: 0.4072 - val_top-5-accuracy: 0.7074 Epoch 16/50 176/176 [==============================] - 14s 77ms/step - loss: 2.3654 - accuracy: 0.3841 - top-5-accuracy: 0.7024 - val_loss: 2.2346 - val_accuracy: 0.4202 - val_top-5-accuracy: 0.7174 Epoch 17/50 176/176 [==============================] - 14s 77ms/step - loss: 2.3062 - accuracy: 0.3967 - top-5-accuracy: 0.7130 - val_loss: 2.2277 - val_accuracy: 0.4206 - val_top-5-accuracy: 0.7190 Epoch 18/50 176/176 [==============================] - 14s 77ms/step - loss: 2.2415 - accuracy: 0.4100 - top-5-accuracy: 0.7271 - val_loss: 2.1605 - val_accuracy: 0.4398 - val_top-5-accuracy: 0.7366 Epoch 19/50 176/176 [==============================] - 14s 77ms/step - loss: 2.1802 - accuracy: 0.4240 - top-5-accuracy: 0.7386 - val_loss: 2.1533 - val_accuracy: 0.4428 - val_top-5-accuracy: 0.7382 Epoch 20/50 176/176 [==============================] - 14s 77ms/step - loss: 2.1264 - accuracy: 0.4357 - top-5-accuracy: 0.7486 - val_loss: 2.1395 - val_accuracy: 0.4428 - val_top-5-accuracy: 0.7404 Epoch 21/50 176/176 [==============================] - 14s 77ms/step - loss: 2.0856 - accuracy: 0.4442 - top-5-accuracy: 0.7564 - val_loss: 2.1025 - val_accuracy: 0.4512 - val_top-5-accuracy: 0.7448 Epoch 22/50 176/176 [==============================] - 14s 77ms/step - loss: 2.0320 - accuracy: 0.4566 - top-5-accuracy: 0.7668 - val_loss: 2.0677 - val_accuracy: 0.4600 - val_top-5-accuracy: 0.7534 Epoch 23/50 176/176 [==============================] - 14s 77ms/step - loss: 1.9903 - accuracy: 0.4666 - top-5-accuracy: 0.7761 - val_loss: 2.0273 - val_accuracy: 0.4650 - val_top-5-accuracy: 
0.7610 Epoch 24/50 176/176 [==============================] - 14s 77ms/step - loss: 1.9398 - accuracy: 0.4772 - top-5-accuracy: 0.7877 - val_loss: 2.0253 - val_accuracy: 0.4694 - val_top-5-accuracy: 0.7636 Epoch 25/50 176/176 [==============================] - 14s 78ms/step - loss: 1.9027 - accuracy: 0.4865 - top-5-accuracy: 0.7933 - val_loss: 2.0584 - val_accuracy: 0.4606 - val_top-5-accuracy: 0.7520 Epoch 26/50 176/176 [==============================] - 14s 77ms/step - loss: 1.8529 - accuracy: 0.4964 - top-5-accuracy: 0.8010 - val_loss: 2.0128 - val_accuracy: 0.4752 - val_top-5-accuracy: 0.7654 Epoch 27/50 176/176 [==============================] - 14s 77ms/step - loss: 1.8161 - accuracy: 0.5047 - top-5-accuracy: 0.8111 - val_loss: 1.9630 - val_accuracy: 0.4898 - val_top-5-accuracy: 0.7746 Epoch 28/50 176/176 [==============================] - 13s 77ms/step - loss: 1.7792 - accuracy: 0.5136 - top-5-accuracy: 0.8140 - val_loss: 1.9931 - val_accuracy: 0.4780 - val_top-5-accuracy: 0.7640 Epoch 29/50 176/176 [==============================] - 14s 77ms/step - loss: 1.7268 - accuracy: 0.5211 - top-5-accuracy: 0.8250 - val_loss: 1.9748 - val_accuracy: 0.4854 - val_top-5-accuracy: 0.7708 Epoch 30/50 176/176 [==============================] - 14s 77ms/step - loss: 1.7115 - accuracy: 0.5298 - top-5-accuracy: 0.8265 - val_loss: 1.9669 - val_accuracy: 0.4884 - val_top-5-accuracy: 0.7796 Epoch 31/50 176/176 [==============================] - 14s 77ms/step - loss: 1.6795 - accuracy: 0.5361 - top-5-accuracy: 0.8329 - val_loss: 1.9428 - val_accuracy: 0.4972 - val_top-5-accuracy: 0.7852 Epoch 32/50 176/176 [==============================] - 14s 77ms/step - loss: 1.6411 - accuracy: 0.5448 - top-5-accuracy: 0.8412 - val_loss: 1.9318 - val_accuracy: 0.4952 - val_top-5-accuracy: 0.7864 Epoch 33/50 176/176 [==============================] - 14s 77ms/step - loss: 1.6015 - accuracy: 0.5547 - top-5-accuracy: 0.8466 - val_loss: 1.9233 - val_accuracy: 0.4996 - val_top-5-accuracy: 0.7882 Epoch 34/50 176/176 [==============================] - 14s 77ms/step - loss: 1.5651 - accuracy: 0.5655 - top-5-accuracy: 0.8525 - val_loss: 1.9285 - val_accuracy: 0.5082 - val_top-5-accuracy: 0.7888 Epoch 35/50 176/176 [==============================] - 14s 77ms/step - loss: 1.5437 - accuracy: 0.5672 - top-5-accuracy: 0.8570 - val_loss: 1.9268 - val_accuracy: 0.5028 - val_top-5-accuracy: 0.7842 Epoch 36/50 176/176 [==============================] - 14s 77ms/step - loss: 1.5103 - accuracy: 0.5748 - top-5-accuracy: 0.8620 - val_loss: 1.9262 - val_accuracy: 0.5014 - val_top-5-accuracy: 0.7890 Epoch 37/50 176/176 [==============================] - 14s 77ms/step - loss: 1.4784 - accuracy: 0.5822 - top-5-accuracy: 0.8690 - val_loss: 1.8698 - val_accuracy: 0.5130 - val_top-5-accuracy: 0.7948 Epoch 38/50 176/176 [==============================] - 14s 77ms/step - loss: 1.4449 - accuracy: 0.5922 - top-5-accuracy: 0.8728 - val_loss: 1.8734 - val_accuracy: 0.5136 - val_top-5-accuracy: 0.7980 Epoch 39/50 176/176 [==============================] - 14s 77ms/step - loss: 1.4312 - accuracy: 0.5928 - top-5-accuracy: 0.8755 - val_loss: 1.8736 - val_accuracy: 0.5150 - val_top-5-accuracy: 0.7956 Epoch 40/50 176/176 [==============================] - 14s 77ms/step - loss: 1.3996 - accuracy: 0.5999 - top-5-accuracy: 0.8808 - val_loss: 1.8718 - val_accuracy: 0.5178 - val_top-5-accuracy: 0.7970 Epoch 41/50 176/176 [==============================] - 14s 77ms/step - loss: 1.3859 - accuracy: 0.6075 - top-5-accuracy: 0.8817 - val_loss: 1.9097 - val_accuracy: 
0.5084 - val_top-5-accuracy: 0.7884 Epoch 42/50 176/176 [==============================] - 14s 77ms/step - loss: 1.3586 - accuracy: 0.6119 - top-5-accuracy: 0.8860 - val_loss: 1.8620 - val_accuracy: 0.5148 - val_top-5-accuracy: 0.8010 Epoch 43/50 176/176 [==============================] - 14s 77ms/step - loss: 1.3384 - accuracy: 0.6154 - top-5-accuracy: 0.8911 - val_loss: 1.8509 - val_accuracy: 0.5202 - val_top-5-accuracy: 0.8014 Epoch 44/50 176/176 [==============================] - 14s 78ms/step - loss: 1.3090 - accuracy: 0.6236 - top-5-accuracy: 0.8954 - val_loss: 1.8607 - val_accuracy: 0.5242 - val_top-5-accuracy: 0.8020 Epoch 45/50 176/176 [==============================] - 14s 78ms/step - loss: 1.2873 - accuracy: 0.6292 - top-5-accuracy: 0.8964 - val_loss: 1.8729 - val_accuracy: 0.5208 - val_top-5-accuracy: 0.8056 Epoch 46/50 176/176 [==============================] - 14s 77ms/step - loss: 1.2658 - accuracy: 0.6367 - top-5-accuracy: 0.9007 - val_loss: 1.8573 - val_accuracy: 0.5278 - val_top-5-accuracy: 0.8066 Epoch 47/50 176/176 [==============================] - 14s 77ms/step - loss: 1.2628 - accuracy: 0.6346 - top-5-accuracy: 0.9023 - val_loss: 1.8240 - val_accuracy: 0.5292 - val_top-5-accuracy: 0.8112 Epoch 48/50 176/176 [==============================] - 14s 78ms/step - loss: 1.2396 - accuracy: 0.6431 - top-5-accuracy: 0.9057 - val_loss: 1.8342 - val_accuracy: 0.5362 - val_top-5-accuracy: 0.8096 Epoch 49/50 176/176 [==============================] - 14s 77ms/step - loss: 1.2163 - accuracy: 0.6464 - top-5-accuracy: 0.9081 - val_loss: 1.8836 - val_accuracy: 0.5246 - val_top-5-accuracy: 0.8044 Epoch 50/50 176/176 [==============================] - 14s 77ms/step - loss: 1.1919 - accuracy: 0.6541 - top-5-accuracy: 0.9122 - val_loss: 1.8513 - val_accuracy: 0.5336 - val_top-5-accuracy: 0.8048 40/40 [==============================] - 1s 26ms/step - loss: 1.8172 - accuracy: 0.5310 - top-5-accuracy: 0.8053 Test accuracy: 53.1% Test top 5 accuracy: 80.53% Epoch 1/50 176/176 [==============================] - 23s 90ms/step - loss: 4.4889 - accuracy: 0.0450 - top-5-accuracy: 0.1559 - val_loss: 3.9364 - val_accuracy: 0.1128 - val_top-5-accuracy: 0.3184 Epoch 2/50 176/176 [==============================] - 15s 85ms/step - loss: 3.9806 - accuracy: 0.0924 - top-5-accuracy: 0.2798 - val_loss: 3.6392 - val_accuracy: 0.1576 - val_top-5-accuracy: 0.4034 Epoch 3/50 176/176 [==============================] - 15s 84ms/step - loss: 3.7713 - accuracy: 0.1253 - top-5-accuracy: 0.3448 - val_loss: 3.3892 - val_accuracy: 0.1918 - val_top-5-accuracy: 0.4622 Epoch 4/50 176/176 [==============================] - 15s 85ms/step - loss: 3.6297 - accuracy: 0.1460 - top-5-accuracy: 0.3859 - val_loss: 3.2856 - val_accuracy: 0.2194 - val_top-5-accuracy: 0.4970 Epoch 5/50 176/176 [==============================] - 15s 85ms/step - loss: 3.4955 - accuracy: 0.1706 - top-5-accuracy: 0.4239 - val_loss: 3.1359 - val_accuracy: 0.2412 - val_top-5-accuracy: 0.5308 Epoch 6/50 176/176 [==============================] - 15s 85ms/step - loss: 3.3781 - accuracy: 0.1908 - top-5-accuracy: 0.4565 - val_loss: 3.0535 - val_accuracy: 0.2620 - val_top-5-accuracy: 0.5652 Epoch 7/50 176/176 [==============================] - 15s 85ms/step - loss: 3.2540 - accuracy: 0.2123 - top-5-accuracy: 0.4895 - val_loss: 2.9165 - val_accuracy: 0.2782 - val_top-5-accuracy: 0.5800 Epoch 8/50 176/176 [==============================] - 15s 85ms/step - loss: 3.1442 - accuracy: 0.2318 - top-5-accuracy: 0.5197 - val_loss: 2.8592 - val_accuracy: 0.2984 - 
val_top-5-accuracy: 0.6090 Epoch 9/50 176/176 [==============================] - 15s 85ms/step - loss: 3.0348 - accuracy: 0.2504 - top-5-accuracy: 0.5440 - val_loss: 2.7378 - val_accuracy: 0.3146 - val_top-5-accuracy: 0.6294 Epoch 10/50 176/176 [==============================] - 15s 84ms/step - loss: 2.9311 - accuracy: 0.2681 - top-5-accuracy: 0.5704 - val_loss: 2.6274 - val_accuracy: 0.3362 - val_top-5-accuracy: 0.6446 Epoch 11/50 176/176 [==============================] - 15s 85ms/step - loss: 2.8214 - accuracy: 0.2925 - top-5-accuracy: 0.5986 - val_loss: 2.5557 - val_accuracy: 0.3458 - val_top-5-accuracy: 0.6616 Epoch 12/50 176/176 [==============================] - 15s 85ms/step - loss: 2.7244 - accuracy: 0.3100 - top-5-accuracy: 0.6168 - val_loss: 2.4763 - val_accuracy: 0.3564 - val_top-5-accuracy: 0.6804 Epoch 13/50 176/176 [==============================] - 15s 85ms/step - loss: 2.6476 - accuracy: 0.3255 - top-5-accuracy: 0.6358 - val_loss: 2.3946 - val_accuracy: 0.3678 - val_top-5-accuracy: 0.6940 Epoch 14/50 176/176 [==============================] - 15s 85ms/step - loss: 2.5518 - accuracy: 0.3436 - top-5-accuracy: 0.6584 - val_loss: 2.3362 - val_accuracy: 0.3856 - val_top-5-accuracy: 0.7038 Epoch 15/50 176/176 [==============================] - 15s 85ms/step - loss: 2.4620 - accuracy: 0.3632 - top-5-accuracy: 0.6776 - val_loss: 2.2690 - val_accuracy: 0.4006 - val_top-5-accuracy: 0.7222 Epoch 16/50 176/176 [==============================] - 15s 85ms/step - loss: 2.4010 - accuracy: 0.3749 - top-5-accuracy: 0.6908 - val_loss: 2.1937 - val_accuracy: 0.4216 - val_top-5-accuracy: 0.7338 Epoch 17/50 176/176 [==============================] - 15s 85ms/step - loss: 2.3330 - accuracy: 0.3911 - top-5-accuracy: 0.7041 - val_loss: 2.1519 - val_accuracy: 0.4286 - val_top-5-accuracy: 0.7370 Epoch 18/50 176/176 [==============================] - 15s 85ms/step - loss: 2.2600 - accuracy: 0.4069 - top-5-accuracy: 0.7171 - val_loss: 2.1212 - val_accuracy: 0.4356 - val_top-5-accuracy: 0.7460 Epoch 19/50 176/176 [==============================] - 15s 85ms/step - loss: 2.1967 - accuracy: 0.4169 - top-5-accuracy: 0.7320 - val_loss: 2.0748 - val_accuracy: 0.4470 - val_top-5-accuracy: 0.7580 Epoch 20/50 176/176 [==============================] - 15s 85ms/step - loss: 2.1397 - accuracy: 0.4302 - top-5-accuracy: 0.7450 - val_loss: 2.1152 - val_accuracy: 0.4362 - val_top-5-accuracy: 0.7416 Epoch 21/50 176/176 [==============================] - 15s 85ms/step - loss: 2.0929 - accuracy: 0.4396 - top-5-accuracy: 0.7524 - val_loss: 2.0044 - val_accuracy: 0.4652 - val_top-5-accuracy: 0.7680 Epoch 22/50 176/176 [==============================] - 15s 85ms/step - loss: 2.0423 - accuracy: 0.4521 - top-5-accuracy: 0.7639 - val_loss: 2.0628 - val_accuracy: 0.4488 - val_top-5-accuracy: 0.7544 Epoch 23/50 176/176 [==============================] - 15s 85ms/step - loss: 1.9771 - accuracy: 0.4661 - top-5-accuracy: 0.7750 - val_loss: 1.9380 - val_accuracy: 0.4740 - val_top-5-accuracy: 0.7836 Epoch 24/50 176/176 [==============================] - 15s 84ms/step - loss: 1.9323 - accuracy: 0.4752 - top-5-accuracy: 0.7848 - val_loss: 1.9461 - val_accuracy: 0.4732 - val_top-5-accuracy: 0.7768 Epoch 25/50 176/176 [==============================] - 15s 85ms/step - loss: 1.8913 - accuracy: 0.4844 - top-5-accuracy: 0.7914 - val_loss: 1.9230 - val_accuracy: 0.4768 - val_top-5-accuracy: 0.7886 Epoch 26/50 176/176 [==============================] - 15s 84ms/step - loss: 1.8520 - accuracy: 0.4950 - top-5-accuracy: 0.7999 - val_loss: 
1.9159 - val_accuracy: 0.4808 - val_top-5-accuracy: 0.7900 Epoch 27/50 176/176 [==============================] - 15s 85ms/step - loss: 1.8175 - accuracy: 0.5046 - top-5-accuracy: 0.8076 - val_loss: 1.8977 - val_accuracy: 0.4896 - val_top-5-accuracy: 0.7876 Epoch 28/50 176/176 [==============================] - 15s 85ms/step - loss: 1.7692 - accuracy: 0.5133 - top-5-accuracy: 0.8146 - val_loss: 1.8632 - val_accuracy: 0.4940 - val_top-5-accuracy: 0.7920 Epoch 29/50 176/176 [==============================] - 15s 85ms/step - loss: 1.7375 - accuracy: 0.5193 - top-5-accuracy: 0.8206 - val_loss: 1.8686 - val_accuracy: 0.4926 - val_top-5-accuracy: 0.7952 Epoch 30/50 176/176 [==============================] - 15s 85ms/step - loss: 1.6952 - accuracy: 0.5308 - top-5-accuracy: 0.8280 - val_loss: 1.8265 - val_accuracy: 0.5024 - val_top-5-accuracy: 0.7996 Epoch 31/50 176/176 [==============================] - 15s 85ms/step - loss: 1.6631 - accuracy: 0.5379 - top-5-accuracy: 0.8348 - val_loss: 1.8665 - val_accuracy: 0.4942 - val_top-5-accuracy: 0.7854 Epoch 32/50 176/176 [==============================] - 15s 85ms/step - loss: 1.6329 - accuracy: 0.5466 - top-5-accuracy: 0.8401 - val_loss: 1.8364 - val_accuracy: 0.5090 - val_top-5-accuracy: 0.7996 Epoch 33/50 176/176 [==============================] - 15s 85ms/step - loss: 1.5960 - accuracy: 0.5537 - top-5-accuracy: 0.8465 - val_loss: 1.8171 - val_accuracy: 0.5136 - val_top-5-accuracy: 0.8034 Epoch 34/50 176/176 [==============================] - 15s 85ms/step - loss: 1.5815 - accuracy: 0.5578 - top-5-accuracy: 0.8476 - val_loss: 1.8020 - val_accuracy: 0.5128 - val_top-5-accuracy: 0.8042 Epoch 35/50 176/176 [==============================] - 15s 85ms/step - loss: 1.5432 - accuracy: 0.5667 - top-5-accuracy: 0.8566 - val_loss: 1.8173 - val_accuracy: 0.5142 - val_top-5-accuracy: 0.8080 Epoch 36/50 176/176 [==============================] - 15s 85ms/step - loss: 1.5110 - accuracy: 0.5768 - top-5-accuracy: 0.8594 - val_loss: 1.8168 - val_accuracy: 0.5124 - val_top-5-accuracy: 0.8066 Epoch 37/50 176/176 [==============================] - 15s 85ms/step - loss: 1.4890 - accuracy: 0.5816 - top-5-accuracy: 0.8641 - val_loss: 1.7861 - val_accuracy: 0.5274 - val_top-5-accuracy: 0.8120 Epoch 38/50 176/176 [==============================] - 15s 85ms/step - loss: 1.4672 - accuracy: 0.5849 - top-5-accuracy: 0.8660 - val_loss: 1.7695 - val_accuracy: 0.5222 - val_top-5-accuracy: 0.8106 Epoch 39/50 176/176 [==============================] - 15s 85ms/step - loss: 1.4323 - accuracy: 0.5939 - top-5-accuracy: 0.8721 - val_loss: 1.7653 - val_accuracy: 0.5250 - val_top-5-accuracy: 0.8164 Epoch 40/50 176/176 [==============================] - 15s 85ms/step - loss: 1.4192 - accuracy: 0.5975 - top-5-accuracy: 0.8754 - val_loss: 1.7727 - val_accuracy: 0.5298 - val_top-5-accuracy: 0.8154 Epoch 41/50 176/176 [==============================] - 15s 85ms/step - loss: 1.3897 - accuracy: 0.6055 - top-5-accuracy: 0.8805 - val_loss: 1.7535 - val_accuracy: 0.5328 - val_top-5-accuracy: 0.8122 Epoch 42/50 176/176 [==============================] - 15s 85ms/step - loss: 1.3702 - accuracy: 0.6087 - top-5-accuracy: 0.8828 - val_loss: 1.7746 - val_accuracy: 0.5316 - val_top-5-accuracy: 0.8116 Epoch 43/50 176/176 [==============================] - 15s 85ms/step - loss: 1.3338 - accuracy: 0.6185 - top-5-accuracy: 0.8894 - val_loss: 1.7606 - val_accuracy: 0.5342 - val_top-5-accuracy: 0.8176 Epoch 44/50 176/176 [==============================] - 15s 85ms/step - loss: 1.3171 - accuracy: 0.6200 - 
top-5-accuracy: 0.8920 - val_loss: 1.7490 - val_accuracy: 0.5364 - val_top-5-accuracy: 0.8164 Epoch 45/50 176/176 [==============================] - 15s 85ms/step - loss: 1.3056 - accuracy: 0.6276 - top-5-accuracy: 0.8932 - val_loss: 1.7535 - val_accuracy: 0.5388 - val_top-5-accuracy: 0.8156 Epoch 46/50 176/176 [==============================] - 15s 85ms/step - loss: 1.2876 - accuracy: 0.6289 - top-5-accuracy: 0.8952 - val_loss: 1.7546 - val_accuracy: 0.5320 - val_top-5-accuracy: 0.8154 Epoch 47/50 176/176 [==============================] - 15s 85ms/step - loss: 1.2764 - accuracy: 0.6350 - top-5-accuracy: 0.8970 - val_loss: 1.7177 - val_accuracy: 0.5382 - val_top-5-accuracy: 0.8200 Epoch 48/50 176/176 [==============================] - 15s 85ms/step - loss: 1.2543 - accuracy: 0.6407 - top-5-accuracy: 0.9001 - val_loss: 1.7330 - val_accuracy: 0.5438 - val_top-5-accuracy: 0.8198 Epoch 49/50 176/176 [==============================] - 15s 84ms/step - loss: 1.2191 - accuracy: 0.6470 - top-5-accuracy: 0.9042 - val_loss: 1.7316 - val_accuracy: 0.5436 - val_top-5-accuracy: 0.8196 Epoch 50/50 176/176 [==============================] - 15s 85ms/step - loss: 1.2186 - accuracy: 0.6457 - top-5-accuracy: 0.9066 - val_loss: 1.7201 - val_accuracy: 0.5486 - val_top-5-accuracy: 0.8218 40/40 [==============================] - 1s 30ms/step - loss: 1.6760 - accuracy: 0.5611 - top-5-accuracy: 0.8227 Test accuracy: 56.11% Test top 5 accuracy: 82.27% ``` </div> # Final Notes With the help of Shifted Patch Tokenization and Locality Self Attention, we were able to get ~**3-4%** top-1 accuracy gains on CIFAR100. The ideas on Shifted Patch Tokenization and Locality Self Attention are very intuitive and easy to implement. The authors also ablates of different shifting strategies for Shifted Patch Tokenization in the supplementary of the paper. I would like to thank [Jarvislabs.ai](https://jarvislabs.ai/) for generously helping with GPU credits. You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/vit_small_ds_v2) and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/vit-small-ds).
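If you would like to try the hosted checkpoint locally, the snippet below is a minimal
sketch. It assumes the `huggingface_hub` package (with its Keras integration) is
installed; the exact loading call may differ depending on how the checkpoint was
exported.

```python
# Minimal sketch: pull the hosted checkpoint from the Hugging Face Hub.
# Assumes `huggingface_hub` is installed and provides the Keras mixin.
from huggingface_hub import from_pretrained_keras

loaded_model = from_pretrained_keras("keras-io/vit_small_ds_v2")
loaded_model.summary()
```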
""" Title: Image similarity estimation using a Siamese Network with a triplet loss Authors: [Hazem Essam](https://twitter.com/hazemessamm) and [Santiago L. Valdarrama](https://twitter.com/svpino) Date created: 2021/03/25 Last modified: 2021/03/25 Description: Training a Siamese Network to compare the similarity of images using a triplet loss function. Accelerator: GPU """ """ ## Introduction A [Siamese Network](https://en.wikipedia.org/wiki/Siamese_neural_network) is a type of network architecture that contains two or more identical subnetworks used to generate feature vectors for each input and compare them. Siamese Networks can be applied to different use cases, like detecting duplicates, finding anomalies, and face recognition. This example uses a Siamese Network with three identical subnetworks. We will provide three images to the model, where two of them will be similar (_anchor_ and _positive_ samples), and the third will be unrelated (a _negative_ example.) Our goal is for the model to learn to estimate the similarity between images. For the network to learn, we use a triplet loss function. You can find an introduction to triplet loss in the [FaceNet paper](https://arxiv.org/abs/1503.03832) by Schroff et al,. 2015. In this example, we define the triplet loss function as follows: `L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)` This example uses the [Totally Looks Like dataset](https://sites.google.com/view/totally-looks-like-dataset) by [Rosenfeld et al., 2018](https://arxiv.org/abs/1803.01485v3). """ """ ## Setup """ import matplotlib.pyplot as plt import numpy as np import os import random import tensorflow as tf from pathlib import Path from keras import applications from keras import layers from keras import losses from keras import ops from keras import optimizers from keras import metrics from keras import Model from keras.applications import resnet target_shape = (200, 200) """ ## Load the dataset We are going to load the *Totally Looks Like* dataset and unzip it inside the `~/.keras` directory in the local environment. The dataset consists of two separate files: * `left.zip` contains the images that we will use as the anchor. * `right.zip` contains the images that we will use as the positive sample (an image that looks like the anchor). """ cache_dir = Path(Path.home()) / ".keras" anchor_images_path = cache_dir / "left" positive_images_path = cache_dir / "right" """shell gdown --id 1jvkbTr_giSP3Ru8OwGNCg6B4PvVbcO34 gdown --id 1EzBZUb_mh_Dp_FKD0P4XiYYSd0QBH5zW unzip -oq left.zip -d $cache_dir unzip -oq right.zip -d $cache_dir """ """ ## Preparing the data We are going to use a `tf.data` pipeline to load the data and generate the triplets that we need to train the Siamese network. We'll set up the pipeline using a zipped list with anchor, positive, and negative filenames as the source. The pipeline will load and preprocess the corresponding images. """ def preprocess_image(filename): """ Load the specified file as a JPEG image, preprocess it and resize it to the target shape. """ image_string = tf.io.read_file(filename) image = tf.image.decode_jpeg(image_string, channels=3) image = tf.image.convert_image_dtype(image, tf.float32) image = tf.image.resize(image, target_shape) return image def preprocess_triplets(anchor, positive, negative): """ Given the filenames corresponding to the three images, load and preprocess them. 
""" return ( preprocess_image(anchor), preprocess_image(positive), preprocess_image(negative), ) """ Let's setup our data pipeline using a zipped list with an anchor, positive, and negative image filename as the source. The output of the pipeline contains the same triplet with every image loaded and preprocessed. """ # We need to make sure both the anchor and positive images are loaded in # sorted order so we can match them together. anchor_images = sorted( [str(anchor_images_path / f) for f in os.listdir(anchor_images_path)] ) positive_images = sorted( [str(positive_images_path / f) for f in os.listdir(positive_images_path)] ) image_count = len(anchor_images) anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_images) positive_dataset = tf.data.Dataset.from_tensor_slices(positive_images) # To generate the list of negative images, let's randomize the list of # available images and concatenate them together. rng = np.random.RandomState(seed=42) rng.shuffle(anchor_images) rng.shuffle(positive_images) negative_images = anchor_images + positive_images np.random.RandomState(seed=32).shuffle(negative_images) negative_dataset = tf.data.Dataset.from_tensor_slices(negative_images) negative_dataset = negative_dataset.shuffle(buffer_size=4096) dataset = tf.data.Dataset.zip((anchor_dataset, positive_dataset, negative_dataset)) dataset = dataset.shuffle(buffer_size=1024) dataset = dataset.map(preprocess_triplets) # Let's now split our dataset in train and validation. train_dataset = dataset.take(round(image_count * 0.8)) val_dataset = dataset.skip(round(image_count * 0.8)) train_dataset = train_dataset.batch(32, drop_remainder=False) train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE) val_dataset = val_dataset.batch(32, drop_remainder=False) val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE) """ Let's take a look at a few examples of triplets. Notice how the first two images look alike while the third one is always different. """ def visualize(anchor, positive, negative): """Visualize a few triplets from the supplied batches.""" def show(ax, image): ax.imshow(image) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig = plt.figure(figsize=(9, 9)) axs = fig.subplots(3, 3) for i in range(3): show(axs[i, 0], anchor[i]) show(axs[i, 1], positive[i]) show(axs[i, 2], negative[i]) visualize(*list(train_dataset.take(1).as_numpy_iterator())[0]) """ ## Setting up the embedding generator model Our Siamese Network will generate embeddings for each of the images of the triplet. To do this, we will use a ResNet50 model pretrained on ImageNet and connect a few `Dense` layers to it so we can learn to separate these embeddings. We will freeze the weights of all the layers of the model up until the layer `conv5_block1_out`. This is important to avoid affecting the weights that the model has already learned. We are going to leave the bottom few layers trainable, so that we can fine-tune their weights during training. 
""" base_cnn = resnet.ResNet50( weights="imagenet", input_shape=target_shape + (3,), include_top=False ) flatten = layers.Flatten()(base_cnn.output) dense1 = layers.Dense(512, activation="relu")(flatten) dense1 = layers.BatchNormalization()(dense1) dense2 = layers.Dense(256, activation="relu")(dense1) dense2 = layers.BatchNormalization()(dense2) output = layers.Dense(256)(dense2) embedding = Model(base_cnn.input, output, name="Embedding") trainable = False for layer in base_cnn.layers: if layer.name == "conv5_block1_out": trainable = True layer.trainable = trainable """ ## Setting up the Siamese Network model The Siamese network will receive each of the triplet images as an input, generate the embeddings, and output the distance between the anchor and the positive embedding, as well as the distance between the anchor and the negative embedding. To compute the distance, we can use a custom layer `DistanceLayer` that returns both values as a tuple. """ class DistanceLayer(layers.Layer): """ This layer is responsible for computing the distance between the anchor embedding and the positive embedding, and the anchor embedding and the negative embedding. """ def __init__(self, **kwargs): super().__init__(**kwargs) def call(self, anchor, positive, negative): ap_distance = ops.sum(tf.square(anchor - positive), -1) an_distance = ops.sum(tf.square(anchor - negative), -1) return (ap_distance, an_distance) anchor_input = layers.Input(name="anchor", shape=target_shape + (3,)) positive_input = layers.Input(name="positive", shape=target_shape + (3,)) negative_input = layers.Input(name="negative", shape=target_shape + (3,)) distances = DistanceLayer()( embedding(resnet.preprocess_input(anchor_input)), embedding(resnet.preprocess_input(positive_input)), embedding(resnet.preprocess_input(negative_input)), ) siamese_network = Model( inputs=[anchor_input, positive_input, negative_input], outputs=distances ) """ ## Putting everything together We now need to implement a model with custom training loop so we can compute the triplet loss using the three embeddings produced by the Siamese network. Let's create a `Mean` metric instance to track the loss of the training process. """ class SiameseModel(Model): """The Siamese Network model with a custom training and testing loops. Computes the triplet loss using the three embeddings produced by the Siamese Network. The triplet loss is defined as: L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0) """ def __init__(self, siamese_network, margin=0.5): super().__init__() self.siamese_network = siamese_network self.margin = margin self.loss_tracker = metrics.Mean(name="loss") def call(self, inputs): return self.siamese_network(inputs) def train_step(self, data): # GradientTape is a context manager that records every operation that # you do inside. We are using it here to compute the loss so we can get # the gradients and apply them using the optimizer specified in # `compile()`. with tf.GradientTape() as tape: loss = self._compute_loss(data) # Storing the gradients of the loss function with respect to the # weights/parameters. gradients = tape.gradient(loss, self.siamese_network.trainable_weights) # Applying the gradients on the model using the specified optimizer self.optimizer.apply_gradients( zip(gradients, self.siamese_network.trainable_weights) ) # Let's update and return the training loss metric. 
self.loss_tracker.update_state(loss) return {"loss": self.loss_tracker.result()} def test_step(self, data): loss = self._compute_loss(data) # Let's update and return the loss metric. self.loss_tracker.update_state(loss) return {"loss": self.loss_tracker.result()} def _compute_loss(self, data): # The output of the network is a tuple containing the distances # between the anchor and the positive example, and the anchor and # the negative example. ap_distance, an_distance = self.siamese_network(data) # Computing the Triplet Loss by subtracting both distances and # making sure we don't get a negative value. loss = ap_distance - an_distance loss = tf.maximum(loss + self.margin, 0.0) return loss @property def metrics(self): # We need to list our metrics here so the `reset_states()` can be # called automatically. return [self.loss_tracker] """ ## Training We are now ready to train our model. """ siamese_model = SiameseModel(siamese_network) siamese_model.compile(optimizer=optimizers.Adam(0.0001)) siamese_model.fit(train_dataset, epochs=10, validation_data=val_dataset) """ ## Inspecting what the network has learned At this point, we can check how the network learned to separate the embeddings depending on whether they belong to similar images. We can use [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to measure the similarity between embeddings. Let's pick a sample from the dataset to check the similarity between the embeddings generated for each image. """ sample = next(iter(train_dataset)) visualize(*sample) anchor, positive, negative = sample anchor_embedding, positive_embedding, negative_embedding = ( embedding(resnet.preprocess_input(anchor)), embedding(resnet.preprocess_input(positive)), embedding(resnet.preprocess_input(negative)), ) """ Finally, we can compute the cosine similarity between the anchor and positive images and compare it with the similarity between the anchor and the negative images. We should expect the similarity between the anchor and positive images to be larger than the similarity between the anchor and the negative images. """ cosine_similarity = metrics.CosineSimilarity() positive_similarity = cosine_similarity(anchor_embedding, positive_embedding) print("Positive similarity:", positive_similarity.numpy()) negative_similarity = cosine_similarity(anchor_embedding, negative_embedding) print("Negative similarity", negative_similarity.numpy()) """ ## Summary 1. The `tf.data` API enables you to build efficient input pipelines for your model. It is particularly useful if you have a large dataset. You can learn more about `tf.data` pipelines in [tf.data: Build TensorFlow input pipelines](https://www.tensorflow.org/guide/data). 2. In this example, we use a pre-trained ResNet50 as part of the subnetwork that generates the feature embeddings. By using [transfer learning](https://www.tensorflow.org/guide/keras/transfer_learning?hl=en), we can significantly reduce the training time and size of the dataset. 3. Notice how we are [fine-tuning](https://www.tensorflow.org/guide/keras/transfer_learning?hl=en#fine-tuning) the weights of the final layers of the ResNet50 network but keeping the rest of the layers untouched. Using the name assigned to each layer, we can freeze the weights to a certain point and keep the last few layers open. 4. We can create custom layers by creating a class that inherits from `tf.keras.layers.Layer`, as we did in the `DistanceLayer` class. 5. 
We used a cosine similarity metric to measure how similar the two output embeddings are to each other. 6. You can implement a custom training loop by overriding the `train_step()` method. `train_step()` uses [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape), which records every operation that you perform inside it. In this example, we use it to access the gradients passed to the optimizer to update the model weights at every step. For more details, check out the [Intro to Keras for researchers](https://keras.io/getting_started/intro_to_keras_for_researchers/) and [Writing a training loop from scratch](https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch?hl=en). """
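"""
As a small follow-up, here is a minimal sketch of how the trained `embedding` model could be used for pairwise verification at inference time. The helper name `verify_same_pair` and the similarity `threshold` are illustrative assumptions, not part of the original example; in practice the threshold would be tuned on held-out pairs.
"""


def verify_same_pair(image_a, image_b, threshold=0.9):
    # Both inputs are float32 tensors of shape `target_shape + (3,)`,
    # preprocessed the same way as the training images above.
    pair = tf.stack([image_a, image_b], axis=0)
    pair_embeddings = embedding(resnet.preprocess_input(pair))
    # Cosine similarity between the two embeddings; we keep a batch dimension
    # because the metric expects batched inputs.
    similarity = metrics.CosineSimilarity()(pair_embeddings[:1], pair_embeddings[1:])
    return bool(similarity.numpy() >= threshold)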
keras-io/examples/vision/siamese_network.py/0
{ "file_path": "keras-io/examples/vision/siamese_network.py", "repo_id": "keras-io", "token_count": 4835 }
119
""" Title: Multi-GPU and distributed training Author: [fchollet](https://twitter.com/fchollet) Date created: 2020/04/28 Last modified: 2020/04/29 Description: Guide to multi-GPU & distributed training for Keras models. Accelerator: GPU """ """ ## Introduction There are generally two ways to distribute computation across multiple devices: **Data parallelism**, where a single model gets replicated on multiple devices or multiple machines. Each of them processes different batches of data, then they merge their results. There exist many variants of this setup, that differ in how the different model replicas merge results, in whether they stay in sync at every batch or whether they are more loosely coupled, etc. **Model parallelism**, where different parts of a single model run on different devices, processing a single batch of data together. This works best with models that have a naturally-parallel architecture, such as models that feature multiple branches. This guide focuses on data parallelism, in particular **synchronous data parallelism**, where the different replicas of the model stay in sync after each batch they process. Synchronicity keeps the model convergence behavior identical to what you would see for single-device training. Specifically, this guide teaches you how to use the `tf.distribute` API to train Keras models on multiple GPUs, with minimal changes to your code, in the following two setups: - On multiple GPUs (typically 2 to 8) installed on a single machine (single host, multi-device training). This is the most common setup for researchers and small-scale industry workflows. - On a cluster of many machines, each hosting one or multiple GPUs (multi-worker distributed training). This is a good setup for large-scale industry workflows, e.g. training high-resolution image classification models on tens of millions of images using 20-100 GPUs. """ """ ## Setup """ import tensorflow as tf import keras """ ## Single-host, multi-device synchronous training In this setup, you have one machine with several GPUs on it (typically 2 to 8). Each device will run a copy of your model (called a **replica**). For simplicity, in what follows, we'll assume we're dealing with 8 GPUs, at no loss of generality. **How it works** At each step of training: - The current batch of data (called **global batch**) is split into 8 different sub-batches (called **local batches**). For instance, if the global batch has 512 samples, each of the 8 local batches will have 64 samples. - Each of the 8 replicas independently processes a local batch: they run a forward pass, then a backward pass, outputting the gradient of the weights with respect to the loss of the model on the local batch. - The weight updates originating from local gradients are efficiently merged across the 8 replicas. Because this is done at the end of every step, the replicas always stay in sync. In practice, the process of synchronously updating the weights of the model replicas is handled at the level of each individual weight variable. This is done through a **mirrored variable** object. **How to use it** To do single-host, multi-device synchronous training with a Keras model, you would use the [`tf.distribute.MirroredStrategy` API]( https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy). Here's how it works: - Instantiate a `MirroredStrategy`, optionally configuring which specific devices you want to use (by default the strategy will use all GPUs available). 
- Use the strategy object to open a scope, and within this scope, create all the Keras objects you need that contain variables. Typically, that means **creating & compiling the model** inside the distribution scope. - Train the model via `fit()` as usual. Importantly, we recommend that you use `tf.data.Dataset` objects to load data in a multi-device or distributed workflow. Schematically, it looks like this: ```python # Create a MirroredStrategy. strategy = tf.distribute.MirroredStrategy() print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) # Open a strategy scope. with strategy.scope(): # Everything that creates variables should be under the strategy scope. # In general this is only model construction & `compile()`. model = Model(...) model.compile(...) # Train the model on all available devices. model.fit(train_dataset, validation_data=val_dataset, ...) # Test the model on all available devices. model.evaluate(test_dataset) ``` Here's a simple end-to-end runnable example: """ def get_compiled_model(): # Make a simple 2-layer densely-connected neural network. inputs = keras.Input(shape=(784,)) x = keras.layers.Dense(256, activation="relu")(inputs) x = keras.layers.Dense(256, activation="relu")(x) outputs = keras.layers.Dense(10)(x) model = keras.Model(inputs, outputs) model.compile( optimizer=keras.optimizers.Adam(), loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=[keras.metrics.SparseCategoricalAccuracy()], ) return model def get_dataset(): batch_size = 32 num_val_samples = 10000 # Return the MNIST dataset in the form of a `tf.data.Dataset`. (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Preprocess the data (these are Numpy arrays) x_train = x_train.reshape(-1, 784).astype("float32") / 255 x_test = x_test.reshape(-1, 784).astype("float32") / 255 y_train = y_train.astype("float32") y_test = y_test.astype("float32") # Reserve num_val_samples samples for validation x_val = x_train[-num_val_samples:] y_val = y_train[-num_val_samples:] x_train = x_train[:-num_val_samples] y_train = y_train[:-num_val_samples] return ( tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size), tf.data.Dataset.from_tensor_slices((x_val, y_val)).batch(batch_size), tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size), ) # Create a MirroredStrategy. strategy = tf.distribute.MirroredStrategy() print("Number of devices: {}".format(strategy.num_replicas_in_sync)) # Open a strategy scope. with strategy.scope(): # Everything that creates variables should be under the strategy scope. # In general this is only model construction & `compile()`. model = get_compiled_model() # Train the model on all available devices. train_dataset, val_dataset, test_dataset = get_dataset() model.fit(train_dataset, epochs=2, validation_data=val_dataset) # Test the model on all available devices. model.evaluate(test_dataset) """ ## Using callbacks to ensure fault tolerance When using distributed training, you should always make sure you have a strategy to recover from failure (fault tolerance). The simplest way to handle this is to pass `ModelCheckpoint` callback to `fit()`, to save your model at regular intervals (e.g. every 100 batches or every epoch). You can then restart training from your saved model. Here's a simple example: """ import os from tensorflow import keras # Prepare a directory to store all the checkpoints. 
checkpoint_dir = "./ckpt" if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) def make_or_restore_model(): # Either restore the latest model, or create a fresh one # if there is no checkpoint available. checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)] if checkpoints: latest_checkpoint = max(checkpoints, key=os.path.getctime) print("Restoring from", latest_checkpoint) return keras.models.load_model(latest_checkpoint) print("Creating a new model") return get_compiled_model() def run_training(epochs=1): # Create a MirroredStrategy. strategy = tf.distribute.MirroredStrategy() # Open a strategy scope and create/restore the model with strategy.scope(): model = make_or_restore_model() callbacks = [ # This callback saves a SavedModel every epoch # We include the current epoch in the folder name. keras.callbacks.ModelCheckpoint( filepath=checkpoint_dir + "/ckpt-{epoch}", save_freq="epoch" ) ] model.fit( train_dataset, epochs=epochs, callbacks=callbacks, validation_data=val_dataset, verbose=2, ) # Running the first time creates the model run_training(epochs=1) # Calling the same function again will resume from where we left off run_training(epochs=1) """ ## `tf.data` performance tips When doing distributed training, the efficiency with which you load data can often become critical. Here are a few tips to make sure your `tf.data` pipelines run as fast as possible. **Note about dataset batching** When creating your dataset, make sure it is batched with the global batch size. For instance, if each of your 8 GPUs is capable of running a batch of 64 samples, you can use a global batch size of 512. **Calling `dataset.cache()`** If you call `.cache()` on a dataset, its data will be cached after running through the first iteration over the data. Every subsequent iteration will use the cached data. The cache can be kept in memory (the default) or written to a local file that you specify. This can improve performance when: - Your data is not expected to change from iteration to iteration - You are reading data from a remote distributed filesystem - You are reading data from local disk, but your data would fit in memory and your workflow is significantly IO-bound (e.g. reading & decoding image files). **Calling `dataset.prefetch(buffer_size)`** You should almost always call `.prefetch(buffer_size)` after creating a dataset. This means that your data pipeline will run asynchronously from your model, with new samples being preprocessed and stored in a buffer while the current batch samples are used to train the model. The next batch will be prefetched into GPU memory by the time the current batch is over. """ """ ## Multi-worker distributed synchronous training **How it works** In this setup, you have multiple machines (called **workers**), each with one or several GPUs on them. Much like what happens for single-host training, each available GPU will run one model replica, and the value of the variables of each replica is kept in sync after each batch. Importantly, the current implementation assumes that all workers have the same number of GPUs (homogeneous cluster). **How to use it** 1. Set up a cluster (we provide pointers below). 2. Set up an appropriate `TF_CONFIG` environment variable on each worker. This tells the worker what its role is and how to communicate with its peers. 3. 
On each worker, run your model construction & compilation code within the scope of a [`MultiWorkerMirroredStrategy` object]( https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy), similarly to what we did for single-host training. 4. Run evaluation code on a designated evaluator machine. **Setting up a cluster** First, set up a cluster (collective of machines). Each machine should be set up so that it is able to run your model (typically, each machine will run the same Docker image) and to access your data source (e.g. GCS). Cluster management is beyond the scope of this guide. [Here is a document]( https://cloud.google.com/ai-platform/training/docs/distributed-training-containers) to help you get started. You can also take a look at [Kubeflow](https://www.kubeflow.org/). **Setting up the `TF_CONFIG` environment variable** While the code running on each worker is almost the same as the code used in the single-host workflow (except with a different `tf.distribute` strategy object), one significant difference between the single-host workflow and the multi-worker workflow is that you need to set a `TF_CONFIG` environment variable on each machine running in your cluster. The `TF_CONFIG` environment variable is a JSON string that specifies: - The cluster configuration, which is the list of addresses & ports of the machines that make up the cluster - The worker's "task", which is the role that this specific machine has to play within the cluster. One example of TF_CONFIG is: ``` os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0} }) ``` In the multi-worker synchronous training setup, valid roles (task types) for the machines are "worker" and "evaluator". For example, if you have 8 machines with 4 GPUs each, you could have 7 workers and one evaluator. - The workers train the model, each one processing sub-batches of a global batch. - One of the workers (worker 0) will serve as "chief", a particular kind of worker that is responsible for saving logs and checkpoints for later reuse (typically to a Cloud storage location). - The evaluator runs a continuous loop that loads the latest checkpoint saved by the chief worker, runs evaluation on it (asynchronously from the other workers) and writes evaluation logs (e.g. TensorBoard logs). **Running code on each worker** You would run training code on each worker (including the chief) and evaluation code on the evaluator. The training code is basically the same as what you would use in the single-host setup, except using `MultiWorkerMirroredStrategy` instead of `MirroredStrategy`. Each worker would run the same code (minus the difference explained in the note below), including the same callbacks. **Note:** Callbacks that save model checkpoints or logs should save to a different directory for each worker. It is standard practice that all workers should save to local disk (which is typically temporary), **except worker 0**, which would save TensorBoard logs and checkpoints to a Cloud storage location for later access & reuse. The evaluator would simply use `MirroredStrategy` (since it runs on a single machine and does not need to communicate with other machines) and call `model.evaluate()`. It would load the latest checkpoint that the chief worker saved to a Cloud storage location, and would save evaluation logs to the same location as the chief's logs. 
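To make the note above about per-worker save directories concrete, here is one possible (purely illustrative) way to pick the directories from the `TF_CONFIG` task information; the Cloud bucket and local paths are assumptions that you would replace with your own locations:

```python
import json
import os

tf_config = json.loads(os.environ["TF_CONFIG"])
task_type = tf_config["task"]["type"]
task_index = tf_config["task"]["index"]

if task_type == "worker" and task_index == 0:
    # The chief saves checkpoints & TensorBoard logs to durable Cloud storage.
    checkpoint_dir = "gs://my-bucket/ckpt"
    tensorboard_dir = "gs://my-bucket/tb"
else:
    # Other workers save to local disk, which is typically temporary.
    checkpoint_dir = "/tmp/ckpt-{}-{}".format(task_type, task_index)
    tensorboard_dir = "/tmp/tb-{}-{}".format(task_type, task_index)
```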
""" """ ### Example: code running in a multi-worker setup On the chief (worker 0): ```python # Set TF_CONFIG os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': 0} }) # Open a strategy scope and create/restore the model. strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): model = make_or_restore_model() callbacks = [ # This callback saves a SavedModel every 100 batches keras.callbacks.ModelCheckpoint(filepath='path/to/cloud/location/ckpt', save_freq=100), keras.callbacks.TensorBoard('path/to/cloud/location/tb/') ] model.fit(train_dataset, callbacks=callbacks, ...) ``` On other workers: ```python # Set TF_CONFIG worker_index = 1 # For instance os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': ["localhost:12345", "localhost:23456"] }, 'task': {'type': 'worker', 'index': worker_index} }) # Open a strategy scope and create/restore the model. # You can restore from the checkpoint saved by the chief. strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): model = make_or_restore_model() callbacks = [ keras.callbacks.ModelCheckpoint(filepath='local/path/ckpt', save_freq=100), keras.callbacks.TensorBoard('local/path/tb/') ] model.fit(train_dataset, callbacks=callbacks, ...) ``` On the evaluator: ```python strategy = tf.distribute.MirroredStrategy() with strategy.scope(): model = make_or_restore_model() # Restore from the checkpoint saved by the chief. results = model.evaluate(val_dataset) # Then, log the results on a shared location, write TensorBoard logs, etc ``` """ """ ### Further reading 1. [TensorFlow distributed training guide]( https://www.tensorflow.org/guide/distributed_training) 2. [Tutorial on multi-worker training with Keras]( https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras) 3. [MirroredStrategy docs]( https://www.tensorflow.org/api_docs/python/tf/distribute/MirroredStrategy) 4. [MultiWorkerMirroredStrategy docs]( https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/MultiWorkerMirroredStrategy) 5. [Distributed training in tf.keras with Weights & Biases]( https://towardsdatascience.com/distributed-training-in-tf-keras-with-w-b-ccf021f9322e) """
keras-io/guides/_distributed_training.py/0
{ "file_path": "keras-io/guides/_distributed_training.py", "repo_id": "keras-io", "token_count": 5039 }
120
<jupyter_start><jupyter_text>CutMix, MixUp, and RandAugment image augmentation with KerasCV**Author:** [lukewood](https://twitter.com/luke_wood_ml)**Date created:** 2022/04/08**Last modified:** 2022/04/08**Description:** Use KerasCV to augment images with CutMix, MixUp, RandAugment, and more. OverviewKerasCV makes it easy to assemble state-of-the-art, industry-grade data augmentationpipelines for image classification and object detection tasks. KerasCV offers a widesuite of preprocessing layers implementing common data augmentation techniques.Perhaps three of the most useful layers are `keras_cv.layers.CutMix`,`keras_cv.layers.MixUp`, and `keras_cv.layers.RandAugment`. Theselayers are used in nearly all state-of-the-art image classification pipelines.This guide will show you how to compose these layers into your own dataaugmentation pipeline for image classification tasks. This guide will also walk youthrough the process of customizing a KerasCV data augmentation pipeline. Imports & setupKerasCV uses Keras 3 to work with any of TensorFlow, PyTorch or Jax. In theguide below, we will use the `jax` backend. This guide runs inTensorFlow or PyTorch backends with zero changes, simply update the`KERAS_BACKEND` below.<jupyter_code>!pip install -q --upgrade keras-cv !pip install -q --upgrade keras # Upgrade to Keras 3.<jupyter_output><empty_output><jupyter_text>We begin by importing all required packages:<jupyter_code>import os os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"] import matplotlib.pyplot as plt # Import tensorflow for `tf.data` and its preprocessing map functions import tensorflow as tf import tensorflow_datasets as tfds import keras import keras_cv<jupyter_output><empty_output><jupyter_text>Data loadingThis guide uses the[102 Category Flower Dataset](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/)for demonstration purposes.To get started, we first load the dataset:<jupyter_code>BATCH_SIZE = 32 AUTOTUNE = tf.data.AUTOTUNE tfds.disable_progress_bar() data, dataset_info = tfds.load("oxford_flowers102", with_info=True, as_supervised=True) train_steps_per_epoch = dataset_info.splits["train"].num_examples // BATCH_SIZE val_steps_per_epoch = dataset_info.splits["test"].num_examples // BATCH_SIZE<jupyter_output><empty_output><jupyter_text>Next, we resize the images to a constant size, `(224, 224)`, and one-hot encode thelabels. Please note that `keras_cv.layers.CutMix` and `keras_cv.layers.MixUp` expecttargets to be one-hot encoded. 
This is because they modify the values of the targetsin a way that is not possible with a sparse label representation.<jupyter_code>IMAGE_SIZE = (224, 224) num_classes = dataset_info.features["label"].num_classes def to_dict(image, label): image = tf.image.resize(image, IMAGE_SIZE) image = tf.cast(image, tf.float32) label = tf.one_hot(label, num_classes) return {"images": image, "labels": label} def prepare_dataset(dataset, split): if split == "train": return ( dataset.shuffle(10 * BATCH_SIZE) .map(to_dict, num_parallel_calls=AUTOTUNE) .batch(BATCH_SIZE) ) if split == "test": return dataset.map(to_dict, num_parallel_calls=AUTOTUNE).batch(BATCH_SIZE) def load_dataset(split="train"): dataset = data[split] return prepare_dataset(dataset, split) train_dataset = load_dataset()<jupyter_output><empty_output><jupyter_text>Let's inspect some samples from our dataset:<jupyter_code>def visualize_dataset(dataset, title): plt.figure(figsize=(6, 6)).suptitle(title, fontsize=18) for i, samples in enumerate(iter(dataset.take(9))): images = samples["images"] plt.subplot(3, 3, i + 1) plt.imshow(images[0].numpy().astype("uint8")) plt.axis("off") plt.show() visualize_dataset(train_dataset, title="Before Augmentation")<jupyter_output><empty_output><jupyter_text>Great! Now we can move onto the augmentation step. RandAugment [RandAugment](https://arxiv.org/abs/1909.13719)has been shown to provide improved imageclassification results across numerous datasets.It performs a standard set of augmentations on an image.To use RandAugment in KerasCV, you need to provide a few values:- `value_range` describes the range of values covered in your images- `magnitude` is a value between 0 and 1, describing the strength of the perturbationsapplied- `augmentations_per_image` is an integer telling the layer how many augmentations to apply to eachindividual image- (Optional) `magnitude_stddev` allows `magnitude` to be randomly sampledfrom a distribution with a standard deviation of `magnitude_stddev`- (Optional) `rate` indicates the probability to apply the augmentationapplied at each layer.You can read more about theseparameters in the[`RandAugment` API documentation](/api/keras_cv/layers/preprocessing/rand_augment/).Let's use KerasCV's RandAugment implementation.<jupyter_code>rand_augment = keras_cv.layers.RandAugment( value_range=(0, 255), augmentations_per_image=3, magnitude=0.3, magnitude_stddev=0.2, rate=1.0, ) def apply_rand_augment(inputs): inputs["images"] = rand_augment(inputs["images"]) return inputs train_dataset = load_dataset().map(apply_rand_augment, num_parallel_calls=AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Finally, let's inspect some of the results:<jupyter_code>visualize_dataset(train_dataset, title="After RandAugment")<jupyter_output><empty_output><jupyter_text>Try tweaking the magnitude settings to see a wider variety of results. CutMix and MixUp: generate high-quality inter-class examples`CutMix` and `MixUp` allow us to produce inter-class examples. `CutMix` randomly cuts outportions of one image and places them over another, and `MixUp` interpolates the pixelvalues between two images. Both of these prevent the model from overfitting thetraining distribution and improve the likelihood that the model can generalize to out ofdistribution examples. Additionally, `CutMix` prevents your model from over-relying onany particular feature to perform its classifications. 
You can read more about thesetechniques in their respective papers:- [CutMix: Train Strong Classifiers](https://arxiv.org/abs/1905.04899)- [MixUp: Beyond Empirical Risk Minimization](https://arxiv.org/abs/1710.09412)In this example, we will use `CutMix` and `MixUp` independently in a manually createdpreprocessing pipeline. In most state of the art pipelines images are randomlyaugmented by either `CutMix`, `MixUp`, or neither. The function below implements both.<jupyter_code>cut_mix = keras_cv.layers.CutMix() mix_up = keras_cv.layers.MixUp() def cut_mix_and_mix_up(samples): samples = cut_mix(samples, training=True) samples = mix_up(samples, training=True) return samples train_dataset = load_dataset().map(cut_mix_and_mix_up, num_parallel_calls=AUTOTUNE) visualize_dataset(train_dataset, title="After CutMix and MixUp")<jupyter_output><empty_output><jupyter_text>Great! Looks like we have successfully added `CutMix` and `MixUp` to our preprocessingpipeline. Customizing your augmentation pipelinePerhaps you want to exclude an augmentation from `RandAugment`, or perhaps you want toinclude the `keras_cv.layers.GridMask` as an option alongside the default `RandAugment`augmentations.KerasCV allows you to construct production grade custom data augmentation pipelines usingthe `keras_cv.layers.RandomAugmentationPipeline` layer. This class operates similarly to`RandAugment`; selecting a random layer to apply to each image `augmentations_per_image`times. `RandAugment` can be thought of as a specific case of`RandomAugmentationPipeline`. In fact, our `RandAugment` implementation inherits from`RandomAugmentationPipeline` internally.In this example, we will create a custom `RandomAugmentationPipeline` by removing`RandomRotation` layers from the standard `RandAugment` policy, and substitute a`GridMask` layer in its place. As a first step, let's use the helper method `RandAugment.get_standard_policy()` tocreate a base pipeline.<jupyter_code>layers = keras_cv.layers.RandAugment.get_standard_policy( value_range=(0, 255), magnitude=0.75, magnitude_stddev=0.3 )<jupyter_output><empty_output><jupyter_text>First, let's filter out `RandomRotation` layers<jupyter_code>layers = [ layer for layer in layers if not isinstance(layer, keras_cv.layers.RandomRotation) ]<jupyter_output><empty_output><jupyter_text>Next, let's add `keras_cv.layers.GridMask` to our layers:<jupyter_code>layers = layers + [keras_cv.layers.GridMask()]<jupyter_output><empty_output><jupyter_text>Finally, we can put together our pipeline<jupyter_code>pipeline = keras_cv.layers.RandomAugmentationPipeline( layers=layers, augmentations_per_image=3 ) def apply_pipeline(inputs): inputs["images"] = pipeline(inputs["images"]) return inputs<jupyter_output><empty_output><jupyter_text>Let's check out the results!<jupyter_code>train_dataset = load_dataset().map(apply_pipeline, num_parallel_calls=AUTOTUNE) visualize_dataset(train_dataset, title="After custom pipeline")<jupyter_output><empty_output><jupyter_text>Awesome! As you can see, no images were randomly rotated. 
You can customize the pipeline however you like:<jupyter_code>pipeline = keras_cv.layers.RandomAugmentationPipeline( layers=[keras_cv.layers.GridMask(), keras_cv.layers.Grayscale(output_channels=3)], augmentations_per_image=1, )<jupyter_output><empty_output><jupyter_text>This pipeline will apply either `Grayscale` or `GridMask`:<jupyter_code>train_dataset = load_dataset().map(apply_pipeline, num_parallel_calls=AUTOTUNE) visualize_dataset(train_dataset, title="After custom pipeline")<jupyter_output><empty_output><jupyter_text>Looks great! You can use `RandomAugmentationPipeline` however you want. Training a CNN As a final exercise, let's take some of these layers for a spin. In this section, we will use `CutMix`, `MixUp`, and `RandAugment` to train a state-of-the-art `EfficientNetV2` image classifier on the Oxford flowers dataset.<jupyter_code>def preprocess_for_model(inputs): images, labels = inputs["images"], inputs["labels"] images = tf.cast(images, tf.float32) return images, labels train_dataset = ( load_dataset() .map(apply_rand_augment, num_parallel_calls=AUTOTUNE) .map(cut_mix_and_mix_up, num_parallel_calls=AUTOTUNE) ) visualize_dataset(train_dataset, "CutMix, MixUp and RandAugment") train_dataset = train_dataset.map(preprocess_for_model, num_parallel_calls=AUTOTUNE) test_dataset = load_dataset(split="test") test_dataset = test_dataset.map(preprocess_for_model, num_parallel_calls=AUTOTUNE) train_dataset = train_dataset.prefetch(AUTOTUNE) test_dataset = test_dataset.prefetch(AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Next we should create the model itself. Notice that we use `label_smoothing=0.1` in the loss function. When using `MixUp`, label smoothing is _highly_ recommended.<jupyter_code>input_shape = IMAGE_SIZE + (3,) def get_model(): model = keras_cv.models.ImageClassifier.from_preset( "efficientnetv2_s", num_classes=num_classes ) model.compile( loss=keras.losses.CategoricalCrossentropy(label_smoothing=0.1), optimizer=keras.optimizers.SGD(momentum=0.9), metrics=["accuracy"], ) return model<jupyter_output><empty_output><jupyter_text>Finally we train the model:<jupyter_code>model = get_model() model.fit( train_dataset, epochs=1, validation_data=test_dataset, )<jupyter_output><empty_output>
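<jupyter_text>As noted earlier, most state-of-the-art pipelines apply either `CutMix`, `MixUp`, or neither to each batch, rather than always applying both. The sketch below is one possible (illustrative) way to do that with plain TensorFlow control flow; the uniform 1/3 probabilities are an assumption, not a recommendation.<jupyter_code>def random_cut_mix_or_mix_up(samples):
    # Draw 0, 1, or 2 uniformly and dispatch to CutMix, MixUp, or no mixing.
    choice = tf.random.uniform((), minval=0, maxval=3, dtype=tf.int32)
    return tf.switch_case(
        choice,
        branch_fns={
            0: lambda: cut_mix(samples, training=True),
            1: lambda: mix_up(samples, training=True),
            2: lambda: samples,
        },
    )


train_dataset = load_dataset().map(
    random_cut_mix_or_mix_up, num_parallel_calls=AUTOTUNE
)
visualize_dataset(train_dataset, title="After random CutMix / MixUp / no-op")<jupyter_output><empty_output>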
keras-io/guides/ipynb/keras_cv/cut_mix_mix_up_and_rand_augment.ipynb/0
{ "file_path": "keras-io/guides/ipynb/keras_cv/cut_mix_mix_up_and_rand_augment.ipynb", "repo_id": "keras-io", "token_count": 3886 }
121
<jupyter_start><jupyter_text>Working with preprocessing layers**Authors:** Francois Chollet, Mark Omernick**Date created:** 2020/07/25**Last modified:** 2021/04/23**Description:** Overview of how to leverage preprocessing layers to create end-to-end models. Keras preprocessingThe Keras preprocessing layers API allows developers to build Keras-native inputprocessing pipelines. These input processing pipelines can be used as independentpreprocessing code in non-Keras workflows, combined directly with Keras models, andexported as part of a Keras SavedModel.With Keras preprocessing layers, you can build and export models that are trulyend-to-end: models that accept raw images or raw structured data as input; models thathandle feature normalization or feature value indexing on their own. Available preprocessing Text preprocessing- `tf.keras.layers.TextVectorization`: turns raw strings into an encoded representation that can be read by an `Embedding` layer or `Dense` layer. Numerical features preprocessing- `tf.keras.layers.Normalization`: performs feature-wise normalization of input features.- `tf.keras.layers.Discretization`: turns continuous numerical features into integer categorical features. Categorical features preprocessing- `tf.keras.layers.CategoryEncoding`: turns integer categorical features into one-hot, multi-hot, or count dense representations.- `tf.keras.layers.Hashing`: performs categorical feature hashing, also known as the "hashing trick".- `tf.keras.layers.StringLookup`: turns string categorical values into an encoded representation that can be read by an `Embedding` layer or `Dense` layer.- `tf.keras.layers.IntegerLookup`: turns integer categorical values into an encoded representation that can be read by an `Embedding` layer or `Dense` layer. Image preprocessingThese layers are for standardizing the inputs of an image model.- `tf.keras.layers.Resizing`: resizes a batch of images to a target size.- `tf.keras.layers.Rescaling`: rescales and offsets the values of a batch of images (e.g. go from inputs in the `[0, 255]` range to inputs in the `[0, 1]` range.- `tf.keras.layers.CenterCrop`: returns a center crop of a batch of images. Image data augmentationThese layers apply random augmentation transforms to a batch of images. Theyare only active during training.- `tf.keras.layers.RandomCrop`- `tf.keras.layers.RandomFlip`- `tf.keras.layers.RandomTranslation`- `tf.keras.layers.RandomRotation`- `tf.keras.layers.RandomZoom`- `tf.keras.layers.RandomContrast` The `adapt()` methodSome preprocessing layers have an internal state that can be computed based ona sample of the training data. The list of stateful preprocessing layers is:- `TextVectorization`: holds a mapping between string tokens and integer indices- `StringLookup` and `IntegerLookup`: hold a mapping between input values and integerindices.- `Normalization`: holds the mean and standard deviation of the features.- `Discretization`: holds information about value bucket boundaries.Crucially, these layers are **non-trainable**. 
Their state is not set during training; itmust be set **before training**, either by initializing them from a precomputed constant,or by "adapting" them on data.You set the state of a preprocessing layer by exposing it to training data, via the`adapt()` method:<jupyter_code>import numpy as np import tensorflow as tf import keras from keras import layers data = np.array( [ [0.1, 0.2, 0.3], [0.8, 0.9, 1.0], [1.5, 1.6, 1.7], ] ) layer = layers.Normalization() layer.adapt(data) normalized_data = layer(data) print("Features mean: %.2f" % (normalized_data.numpy().mean())) print("Features std: %.2f" % (normalized_data.numpy().std()))<jupyter_output><empty_output><jupyter_text>The `adapt()` method takes either a Numpy array or a `tf.data.Dataset` object. In thecase of `StringLookup` and `TextVectorization`, you can also pass a list of strings:<jupyter_code>data = [ "ξεῖν᾽, ἦ τοι μὲν ὄνειροι ἀμήχανοι ἀκριτόμυθοι", "γίγνοντ᾽, οὐδέ τι πάντα τελείεται ἀνθρώποισι.", "δοιαὶ γάρ τε πύλαι ἀμενηνῶν εἰσὶν ὀνείρων:", "αἱ μὲν γὰρ κεράεσσι τετεύχαται, αἱ δ᾽ ἐλέφαντι:", "τῶν οἳ μέν κ᾽ ἔλθωσι διὰ πριστοῦ ἐλέφαντος,", "οἵ ῥ᾽ ἐλεφαίρονται, ἔπε᾽ ἀκράαντα φέροντες:", "οἱ δὲ διὰ ξεστῶν κεράων ἔλθωσι θύραζε,", "οἵ ῥ᾽ ἔτυμα κραίνουσι, βροτῶν ὅτε κέν τις ἴδηται.", ] layer = layers.TextVectorization() layer.adapt(data) vectorized_text = layer(data) print(vectorized_text)<jupyter_output><empty_output><jupyter_text>In addition, adaptable layers always expose an option to directly set state viaconstructor arguments or weight assignment. If the intended state values are known atlayer construction time, or are calculated outside of the `adapt()` call, they can be setwithout relying on the layer's internal computation. For instance, if external vocabularyfiles for the `TextVectorization`, `StringLookup`, or `IntegerLookup` layers alreadyexist, those can be loaded directly into the lookup tables by passing a path to thevocabulary file in the layer's constructor arguments.Here's an example where you instantiate a `StringLookup` layer with precomputed vocabulary:<jupyter_code>vocab = ["a", "b", "c", "d"] data = tf.constant([["a", "c", "d"], ["d", "z", "b"]]) layer = layers.StringLookup(vocabulary=vocab) vectorized_data = layer(data) print(vectorized_data)<jupyter_output><empty_output><jupyter_text>Preprocessing data before the model or inside the modelThere are two ways you could be using preprocessing layers:**Option 1:** Make them part of the model, like this:```pythoninputs = keras.Input(shape=input_shape)x = preprocessing_layer(inputs)outputs = rest_of_the_model(x)model = keras.Model(inputs, outputs)```With this option, preprocessing will happen on device, synchronously with the rest of themodel execution, meaning that it will benefit from GPU acceleration.If you're training on a GPU, this is the best option for the `Normalization` layer, and forall image preprocessing and data augmentation layers.**Option 2:** apply it to your `tf.data.Dataset`, so as to obtain a dataset that yieldsbatches of preprocessed data, like this:```pythondataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))```With this option, your preprocessing will happen on a CPU, asynchronously, and will bebuffered before going into the model.In addition, if you call `dataset.prefetch(tf.data.AUTOTUNE)` on your dataset,the preprocessing will happen efficiently in parallel with training:```pythondataset = dataset.map(lambda x, y: (preprocessing_layer(x), y))dataset = dataset.prefetch(tf.data.AUTOTUNE)model.fit(dataset, ...)```This is the best option for 
`TextVectorization`, and all structured data preprocessinglayers. It can also be a good option if you're training on a CPU and you use image preprocessinglayers.Note that the `TextVectorization` layer can only be executed on a CPU, as it is mostly adictionary lookup operation. Therefore, if you are training your model on a GPU or a TPU,you should put the `TextVectorization` layer in the `tf.data` pipeline to get the best performance.**When running on a TPU, you should always place preprocessing layers in the `tf.data` pipeline**(with the exception of `Normalization` and `Rescaling`, which run fine on a TPU and are commonlyused as the first layer in an image model). Benefits of doing preprocessing inside the model at inference timeEven if you go with option 2, you may later want to export an inference-only end-to-endmodel that will include the preprocessing layers. The key benefit to doing this is that**it makes your model portable** and it **helps reduce the[training/serving skew](https://developers.google.com/machine-learning/guides/rules-of-mltraining-serving_skew)**.When all data preprocessing is part of the model, other people can load and use yourmodel without having to be aware of how each feature is expected to be encoded &normalized. Your inference model will be able to process raw images or raw structureddata, and will not require users of the model to be aware of the details of e.g. thetokenization scheme used for text, the indexing scheme used for categorical features,whether image pixel values are normalized to `[-1, +1]` or to `[0, 1]`, etc. This isespecially powerful if you're exportingyour model to another runtime, such as TensorFlow.js: you won't have toreimplement your preprocessing pipeline in JavaScript.If you initially put your preprocessing layers in your `tf.data` pipeline,you can export an inference model that packages the preprocessing.Simply instantiate a new model that chainsyour preprocessing layers and your training model:```pythoninputs = keras.Input(shape=input_shape)x = preprocessing_layer(inputs)outputs = training_model(x)inference_model = keras.Model(inputs, outputs)``` Preprocessing during multi-worker trainingPreprocessing layers are compatible with the[tf.distribute](https://www.tensorflow.org/api_docs/python/tf/distribute) APIfor running training across multiple machines.In general, preprocessing layers should be placed inside a `tf.distribute.Strategy.scope()`and called either inside or before the model as discussed above.```pythonwith strategy.scope(): inputs = keras.Input(shape=input_shape) preprocessing_layer = tf.keras.layers.Hashing(10) dense_layer = tf.keras.layers.Dense(16)```For more details, refer to the _Data preprocessing_ sectionof the [Distributed input](https://www.tensorflow.org/tutorials/distribute/input)tutorial. 
Quick recipes Image data augmentationNote that image data augmentation layers are only active during training (similarly tothe `Dropout` layer).<jupyter_code>from tensorflow import keras from tensorflow.keras import layers # Create a data augmentation stage with horizontal flipping, rotations, zooms data_augmentation = keras.Sequential( [ layers.RandomFlip("horizontal"), layers.RandomRotation(0.1), layers.RandomZoom(0.1), ] ) # Load some data (x_train, y_train), _ = keras.datasets.cifar10.load_data() input_shape = x_train.shape[1:] classes = 10 # Create a tf.data pipeline of augmented images (and their labels) train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.batch(16).map(lambda x, y: (data_augmentation(x), y)) # Create a model and train it on the augmented image data inputs = keras.Input(shape=input_shape) x = layers.Rescaling(1.0 / 255)(inputs) # Rescale inputs outputs = keras.applications.ResNet50( # Add the rest of the model weights=None, input_shape=input_shape, classes=classes )(x) model = keras.Model(inputs, outputs) model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy") model.fit(train_dataset, steps_per_epoch=5)<jupyter_output><empty_output><jupyter_text>You can see a similar setup in action in the example[image classification from scratch](https://keras.io/examples/vision/image_classification_from_scratch/). Normalizing numerical features<jupyter_code># Load some data (x_train, y_train), _ = keras.datasets.cifar10.load_data() x_train = x_train.reshape((len(x_train), -1)) input_shape = x_train.shape[1:] classes = 10 # Create a Normalization layer and set its internal state using the training data normalizer = layers.Normalization() normalizer.adapt(x_train) # Create a model that include the normalization layer inputs = keras.Input(shape=input_shape) x = normalizer(inputs) outputs = layers.Dense(classes, activation="softmax")(x) model = keras.Model(inputs, outputs) # Train the model model.compile(optimizer="adam", loss="sparse_categorical_crossentropy") model.fit(x_train, y_train)<jupyter_output><empty_output><jupyter_text>Encoding string categorical features via one-hot encoding<jupyter_code># Define some toy data data = tf.constant([["a"], ["b"], ["c"], ["b"], ["c"], ["a"]]) # Use StringLookup to build an index of the feature values and encode output. lookup = layers.StringLookup(output_mode="one_hot") lookup.adapt(data) # Convert new test data (which includes unknown feature values) test_data = tf.constant([["a"], ["b"], ["c"], ["d"], ["e"], [""]]) encoded_data = lookup(test_data) print(encoded_data)<jupyter_output><empty_output><jupyter_text>Note that, here, index 0 is reserved for out-of-vocabulary values(values that were not seen during `adapt()`).You can see the `StringLookup` in action in the[Structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/)example. Encoding integer categorical features via one-hot encoding<jupyter_code># Define some toy data data = tf.constant([[10], [20], [20], [10], [30], [0]]) # Use IntegerLookup to build an index of the feature values and encode output. 
lookup = layers.IntegerLookup(output_mode="one_hot") lookup.adapt(data) # Convert new test data (which includes unknown feature values) test_data = tf.constant([[10], [10], [20], [50], [60], [0]]) encoded_data = lookup(test_data) print(encoded_data)<jupyter_output><empty_output><jupyter_text>Note that index 0 is reserved for missing values (which you should specify as the value0), and index 1 is reserved for out-of-vocabulary values (values that were not seenduring `adapt()`). You can configure this by using the `mask_token` and `oov_token`constructor arguments of `IntegerLookup`.You can see the `IntegerLookup` in action in the example[structured data classification from scratch](https://keras.io/examples/structured_data/structured_data_classification_from_scratch/). Applying the hashing trick to an integer categorical featureIf you have a categorical feature that can take many different values (on the order of10e3 or higher), where each value only appears a few times in the data,it becomes impractical and ineffective to index and one-hot encode the feature values.Instead, it can be a good idea to apply the "hashing trick": hash the values to a vectorof fixed size. This keeps the size of the feature space manageable, and removes the needfor explicit indexing.<jupyter_code># Sample data: 10,000 random integers with values between 0 and 100,000 data = np.random.randint(0, 100000, size=(10000, 1)) # Use the Hashing layer to hash the values to the range [0, 64] hasher = layers.Hashing(num_bins=64, salt=1337) # Use the CategoryEncoding layer to multi-hot encode the hashed values encoder = layers.CategoryEncoding(num_tokens=64, output_mode="multi_hot") encoded_data = encoder(hasher(data)) print(encoded_data.shape)<jupyter_output><empty_output><jupyter_text>Encoding text as a sequence of token indicesThis is how you should preprocess text to be passed to an `Embedding` layer.<jupyter_code># Define some text data to adapt the layer adapt_data = tf.constant( [ "The Brain is wider than the Sky", "For put them side by side", "The one the other will contain", "With ease and You beside", ] ) # Create a TextVectorization layer text_vectorizer = layers.TextVectorization(output_mode="int") # Index the vocabulary via `adapt()` text_vectorizer.adapt(adapt_data) # Try out the layer print( "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(), ) # Create a simple model inputs = keras.Input(shape=(None,), dtype="int64") x = layers.Embedding(input_dim=text_vectorizer.vocabulary_size(), output_dim=16)(inputs) x = layers.GRU(8)(x) outputs = layers.Dense(1)(x) model = keras.Model(inputs, outputs) # Create a labeled dataset (which includes unknown tokens) train_dataset = tf.data.Dataset.from_tensor_slices( (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0]) ) # Preprocess the string inputs, turning them into int sequences train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y)) # Train the model on the int sequences print("\nTraining model...") model.compile(optimizer="rmsprop", loss="mse") model.fit(train_dataset) # For inference, you can export a model that accepts strings as input inputs = keras.Input(shape=(1,), dtype="string") x = text_vectorizer(inputs) outputs = model(x) end_to_end_model = keras.Model(inputs, outputs) # Call the end-to-end model on test data (which includes unknown tokens) print("\nCalling end-to-end model on test string...") test_data = tf.constant(["The one the other will absorb"]) test_output = 
end_to_end_model(test_data) print("Model output:", test_output)<jupyter_output><empty_output><jupyter_text>You can see the `TextVectorization` layer in action, combined with an `Embedding` mode,in the example[text classification from scratch](https://keras.io/examples/nlp/text_classification_from_scratch/).Note that when training such a model, for best performance, you should alwaysuse the `TextVectorization` layer as part of the input pipeline. Encoding text as a dense matrix of N-grams with multi-hot encodingThis is how you should preprocess text to be passed to a `Dense` layer.<jupyter_code># Define some text data to adapt the layer adapt_data = tf.constant( [ "The Brain is wider than the Sky", "For put them side by side", "The one the other will contain", "With ease and You beside", ] ) # Instantiate TextVectorization with "multi_hot" output_mode # and ngrams=2 (index all bigrams) text_vectorizer = layers.TextVectorization(output_mode="multi_hot", ngrams=2) # Index the bigrams via `adapt()` text_vectorizer.adapt(adapt_data) # Try out the layer print( "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(), ) # Create a simple model inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),)) outputs = layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) # Create a labeled dataset (which includes unknown tokens) train_dataset = tf.data.Dataset.from_tensor_slices( (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0]) ) # Preprocess the string inputs, turning them into int sequences train_dataset = train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y)) # Train the model on the int sequences print("\nTraining model...") model.compile(optimizer="rmsprop", loss="mse") model.fit(train_dataset) # For inference, you can export a model that accepts strings as input inputs = keras.Input(shape=(1,), dtype="string") x = text_vectorizer(inputs) outputs = model(x) end_to_end_model = keras.Model(inputs, outputs) # Call the end-to-end model on test data (which includes unknown tokens) print("\nCalling end-to-end model on test string...") test_data = tf.constant(["The one the other will absorb"]) test_output = end_to_end_model(test_data) print("Model output:", test_output)<jupyter_output><empty_output><jupyter_text>Encoding text as a dense matrix of N-grams with TF-IDF weightingThis is an alternative way of preprocessing text before passing it to a `Dense` layer.<jupyter_code># Define some text data to adapt the layer adapt_data = tf.constant( [ "The Brain is wider than the Sky", "For put them side by side", "The one the other will contain", "With ease and You beside", ] ) # Instantiate TextVectorization with "tf-idf" output_mode # (multi-hot with TF-IDF weighting) and ngrams=2 (index all bigrams) text_vectorizer = layers.TextVectorization(output_mode="tf-idf", ngrams=2) # Index the bigrams and learn the TF-IDF weights via `adapt()` text_vectorizer.adapt(adapt_data) # Try out the layer print( "Encoded text:\n", text_vectorizer(["The Brain is deeper than the sea"]).numpy(), ) # Create a simple model inputs = keras.Input(shape=(text_vectorizer.vocabulary_size(),)) outputs = layers.Dense(1)(inputs) model = keras.Model(inputs, outputs) # Create a labeled dataset (which includes unknown tokens) train_dataset = tf.data.Dataset.from_tensor_slices( (["The Brain is deeper than the sea", "for if they are held Blue to Blue"], [1, 0]) ) # Preprocess the string inputs, turning them into int sequences train_dataset = 
train_dataset.batch(2).map(lambda x, y: (text_vectorizer(x), y)) # Train the model on the int sequences print("\nTraining model...") model.compile(optimizer="rmsprop", loss="mse") model.fit(train_dataset) # For inference, you can export a model that accepts strings as input inputs = keras.Input(shape=(1,), dtype="string") x = text_vectorizer(inputs) outputs = model(x) end_to_end_model = keras.Model(inputs, outputs) # Call the end-to-end model on test data (which includes unknown tokens) print("\nCalling end-to-end model on test string...") test_data = tf.constant(["The one the other will absorb"]) test_output = end_to_end_model(test_data) print("Model output:", test_output)<jupyter_output><empty_output>
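<jupyter_text>One layer from the preprocessing list at the top of this guide that the recipes above do not demonstrate is `Discretization`. Here is a minimal, illustrative sketch that buckets a continuous feature and then one-hot encodes the resulting bucket indices; the sample values and the number of bins are assumptions chosen for demonstration.<jupyter_code># Sample continuous data
data = np.array([[104.3], [8.5], [37.6], [0.2], [21.9]])

# Learn the bucket boundaries from the data via `adapt()`
discretizer = layers.Discretization(num_bins=4)
discretizer.adapt(data)

# One-hot encode the integer bucket indices produced by `Discretization`
encoder = layers.CategoryEncoding(num_tokens=4, output_mode="one_hot")
encoded_data = encoder(discretizer(data))
print(encoded_data)<jupyter_output><empty_output>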
keras-io/guides/ipynb/preprocessing_layers.ipynb/0
{ "file_path": "keras-io/guides/ipynb/preprocessing_layers.ipynb", "repo_id": "keras-io", "token_count": 6658 }
122
# Customizing what happens in `fit()` with TensorFlow **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/04/15<br> **Last modified:** 2023/06/27<br> **Description:** Overriding the training step of the Model class with TensorFlow. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/custom_train_step_in_tensorflow.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/custom_train_step_in_tensorflow.py) --- ## Introduction When you're doing supervised learning, you can use `fit()` and everything works smoothly. When you need to take control of every little detail, you can write your own training loop entirely from scratch. But what if you need a custom training algorithm, but you still want to benefit from the convenient features of `fit()`, such as callbacks, built-in distribution support, or step fusing? A core principle of Keras is **progressive disclosure of complexity**. You should always be able to get into lower-level workflows in a gradual way. You shouldn't fall off a cliff if the high-level functionality doesn't exactly match your use case. You should be able to gain more control over the small details while retaining a commensurate amount of high-level convenience. When you need to customize what `fit()` does, you should **override the training step function of the `Model` class**. This is the function that is called by `fit()` for every batch of data. You will then be able to call `fit()` as usual -- and it will be running your own learning algorithm. Note that this pattern does not prevent you from building models with the Functional API. You can do this whether you're building `Sequential` models, Functional API models, or subclassed models. Let's see how that works. --- ## Setup ```python import os # This guide can only be run with the TF backend. os.environ["KERAS_BACKEND"] = "tensorflow" import tensorflow as tf import keras from keras import layers import numpy as np ``` --- ## A first simple example Let's start from a simple example: - We create a new class that subclasses `keras.Model`. - We just override the method `train_step(self, data)`. - We return a dictionary mapping metric names (including the loss) to their current value. The input argument `data` is what gets passed to fit as training data: - If you pass NumPy arrays, by calling `fit(x, y, ...)`, then `data` will be the tuple `(x, y)` - If you pass a `tf.data.Dataset`, by calling `fit(dataset, ...)`, then `data` will be what gets yielded by `dataset` at each batch. In the body of the `train_step()` method, we implement a regular training update, similar to what you are already familiar with. Importantly, **we compute the loss via `self.compute_loss()`**, which wraps the loss(es) function(s) that were passed to `compile()`. Similarly, we call `metric.update_state(y, y_pred)` on metrics from `self.metrics`, to update the state of the metrics that were passed in `compile()`, and we query results from `self.metrics` at the end to retrieve their current value. ```python class CustomModel(keras.Model): def train_step(self, data): # Unpack the data. Its structure depends on your model and # on what you pass to `fit()`. 
x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute the loss value # (the loss function is configured in `compile()`) loss = self.compute_loss(y=y, y_pred=y_pred) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply(gradients, trainable_vars) # Update metrics (includes the metric that tracks the loss) for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(y, y_pred) # Return a dict mapping metric names to current value return {m.name: m.result() for m in self.metrics} ``` Let's try this out: ```python # Construct and compile an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) model.compile(optimizer="adam", loss="mse", metrics=["mae"]) # Just use `fit` as usual x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.fit(x, y, epochs=3) ``` <div class="k-default-codeblock"> ``` Epoch 1/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.5089 - loss: 0.3778 Epoch 2/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 318us/step - mae: 0.3986 - loss: 0.2466 Epoch 3/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 372us/step - mae: 0.3848 - loss: 0.2319 WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1699222602.443035 1 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. <keras.src.callbacks.history.History at 0x2a5599f00> ``` </div> --- ## Going lower-level Naturally, you could just skip passing a loss function in `compile()`, and instead do everything *manually* in `train_step`. Likewise for metrics. Here's a lower-level example, that only uses `compile()` to configure the optimizer: - We start by creating `Metric` instances to track our loss and a MAE score (in `__init__()`). - We implement a custom `train_step()` that updates the state of these metrics (by calling `update_state()` on them), then query them (via `result()`) to return their current average value, to be displayed by the progress bar and to be pass to any callback. - Note that we would need to call `reset_states()` on our metrics between each epoch! Otherwise calling `result()` would return an average since the start of training, whereas we usually work with per-epoch averages. Thankfully, the framework can do that for us: just list any metric you want to reset in the `metrics` property of the model. The model will call `reset_states()` on any object listed here at the beginning of each `fit()` epoch or at the beginning of a call to `evaluate()`. 
```python class CustomModel(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.loss_tracker = keras.metrics.Mean(name="loss") self.mae_metric = keras.metrics.MeanAbsoluteError(name="mae") self.loss_fn = keras.losses.MeanSquaredError() def train_step(self, data): x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute our own loss loss = self.loss_fn(y, y_pred) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply(gradients, trainable_vars) # Compute our own metrics self.loss_tracker.update_state(loss) self.mae_metric.update_state(y, y_pred) return { "loss": self.loss_tracker.result(), "mae": self.mae_metric.result(), } @property def metrics(self): # We list our `Metric` objects here so that `reset_states()` can be # called automatically at the start of each epoch # or at the start of `evaluate()`. return [self.loss_tracker, self.mae_metric] # Construct an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) # We don't pass a loss or metrics here. model.compile(optimizer="adam") # Just use `fit` as usual -- you can use callbacks, etc. x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.fit(x, y, epochs=5) ``` <div class="k-default-codeblock"> ``` Epoch 1/5 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 4.0292 - mae: 1.9270 Epoch 2/5 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 385us/step - loss: 2.2155 - mae: 1.3920 Epoch 3/5 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 336us/step - loss: 1.1863 - mae: 0.9700 Epoch 4/5 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 373us/step - loss: 0.6510 - mae: 0.6811 Epoch 5/5 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 330us/step - loss: 0.4059 - mae: 0.5094 <keras.src.callbacks.history.History at 0x2a7a02860> ``` </div> --- ## Supporting `sample_weight` & `class_weight` You may have noticed that our first basic example didn't make any mention of sample weighting. If you want to support the `fit()` arguments `sample_weight` and `class_weight`, you'd simply do the following: - Unpack `sample_weight` from the `data` argument - Pass it to `compute_loss` & `update_state` (of course, you could also just apply it manually if you don't rely on `compile()` for losses & metrics) - That's it. ```python class CustomModel(keras.Model): def train_step(self, data): # Unpack the data. Its structure depends on your model and # on what you pass to `fit()`. if len(data) == 3: x, y, sample_weight = data else: sample_weight = None x, y = data with tf.GradientTape() as tape: y_pred = self(x, training=True) # Forward pass # Compute the loss value. # The loss function is configured in `compile()`. loss = self.compute_loss( y=y, y_pred=y_pred, sample_weight=sample_weight, ) # Compute gradients trainable_vars = self.trainable_variables gradients = tape.gradient(loss, trainable_vars) # Update weights self.optimizer.apply(gradients, trainable_vars) # Update the metrics. # Metrics are configured in `compile()`. for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(y, y_pred, sample_weight=sample_weight) # Return a dict mapping metric names to current value. # Note that it will include the loss (tracked in self.metrics). 
return {m.name: m.result() for m in self.metrics} # Construct and compile an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) model.compile(optimizer="adam", loss="mse", metrics=["mae"]) # You can now use sample_weight argument x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) sw = np.random.random((1000, 1)) model.fit(x, y, sample_weight=sw, epochs=3) ``` <div class="k-default-codeblock"> ``` Epoch 1/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - mae: 0.4228 - loss: 0.1420 Epoch 2/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 449us/step - mae: 0.3751 - loss: 0.1058 Epoch 3/3 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 337us/step - mae: 0.3478 - loss: 0.0951 <keras.src.callbacks.history.History at 0x2a7491780> ``` </div> --- ## Providing your own evaluation step What if you want to do the same for calls to `model.evaluate()`? Then you would override `test_step` in exactly the same way. Here's what it looks like: ```python class CustomModel(keras.Model): def test_step(self, data): # Unpack the data x, y = data # Compute predictions y_pred = self(x, training=False) # Updates the metrics tracking the loss loss = self.compute_loss(y=y, y_pred=y_pred) # Update the metrics. for metric in self.metrics: if metric.name == "loss": metric.update_state(loss) else: metric.update_state(y, y_pred) # Return a dict mapping metric names to current value. # Note that it will include the loss (tracked in self.metrics). return {m.name: m.result() for m in self.metrics} # Construct an instance of CustomModel inputs = keras.Input(shape=(32,)) outputs = keras.layers.Dense(1)(inputs) model = CustomModel(inputs, outputs) model.compile(loss="mse", metrics=["mae"]) # Evaluate with our custom test_step x = np.random.random((1000, 32)) y = np.random.random((1000, 1)) model.evaluate(x, y) ``` <div class="k-default-codeblock"> ``` 32/32 ━━━━━━━━━━━━━━━━━━━━ 0s 927us/step - mae: 0.8518 - loss: 0.9166 [0.912325382232666, 0.8567370176315308] ``` </div> --- ## Wrapping up: an end-to-end GAN example Let's walk through an end-to-end example that leverages everything you just learned. Let's consider: - A generator network meant to generate 28x28x1 images. - A discriminator network meant to classify 28x28x1 images into two classes ("fake" and "real"). - One optimizer for each. - A loss function to train the discriminator. 
```python # Create the discriminator discriminator = keras.Sequential( [ keras.Input(shape=(28, 28, 1)), layers.Conv2D(64, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(128, (3, 3), strides=(2, 2), padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.GlobalMaxPooling2D(), layers.Dense(1), ], name="discriminator", ) # Create the generator latent_dim = 128 generator = keras.Sequential( [ keras.Input(shape=(latent_dim,)), # We want to generate 128 coefficients to reshape into a 7x7x128 map layers.Dense(7 * 7 * 128), layers.LeakyReLU(negative_slope=0.2), layers.Reshape((7, 7, 128)), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"), layers.LeakyReLU(negative_slope=0.2), layers.Conv2D(1, (7, 7), padding="same", activation="sigmoid"), ], name="generator", ) ``` Here's a feature-complete GAN class, overriding `compile()` to use its own signature, and implementing the entire GAN algorithm in 17 lines in `train_step`: ```python class GAN(keras.Model): def __init__(self, discriminator, generator, latent_dim): super().__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim self.d_loss_tracker = keras.metrics.Mean(name="d_loss") self.g_loss_tracker = keras.metrics.Mean(name="g_loss") self.seed_generator = keras.random.SeedGenerator(1337) @property def metrics(self): return [self.d_loss_tracker, self.g_loss_tracker] def compile(self, d_optimizer, g_optimizer, loss_fn): super().compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn def train_step(self, real_images): if isinstance(real_images, tuple): real_images = real_images[0] # Sample random points in the latent space batch_size = tf.shape(real_images)[0] random_latent_vectors = keras.random.normal( shape=(batch_size, self.latent_dim), seed=self.seed_generator ) # Decode them to fake images generated_images = self.generator(random_latent_vectors) # Combine them with real images combined_images = tf.concat([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0 ) # Add random noise to the labels - important trick! labels += 0.05 * keras.random.uniform( tf.shape(labels), seed=self.seed_generator ) # Train the discriminator with tf.GradientTape() as tape: predictions = self.discriminator(combined_images) d_loss = self.loss_fn(labels, predictions) grads = tape.gradient(d_loss, self.discriminator.trainable_weights) self.d_optimizer.apply(grads, self.discriminator.trainable_weights) # Sample random points in the latent space random_latent_vectors = keras.random.normal( shape=(batch_size, self.latent_dim), seed=self.seed_generator ) # Assemble labels that say "all real images" misleading_labels = tf.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! with tf.GradientTape() as tape: predictions = self.discriminator(self.generator(random_latent_vectors)) g_loss = self.loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, self.generator.trainable_weights) self.g_optimizer.apply(grads, self.generator.trainable_weights) # Update metrics and return their value. 
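        # (Both trackers are `keras.metrics.Mean` objects listed in the
        # `metrics` property above, so Keras resets them automatically at the
        # start of each epoch.)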
self.d_loss_tracker.update_state(d_loss) self.g_loss_tracker.update_state(g_loss) return { "d_loss": self.d_loss_tracker.result(), "g_loss": self.g_loss_tracker.result(), } ``` Let's test-drive it: ```python # Prepare the dataset. We use both the training & test MNIST digits. batch_size = 64 (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() all_digits = np.concatenate([x_train, x_test]) all_digits = all_digits.astype("float32") / 255.0 all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) dataset = tf.data.Dataset.from_tensor_slices(all_digits) dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim) gan.compile( d_optimizer=keras.optimizers.Adam(learning_rate=0.0003), g_optimizer=keras.optimizers.Adam(learning_rate=0.0003), loss_fn=keras.losses.BinaryCrossentropy(from_logits=True), ) # To limit the execution time, we only train on 100 batches. You can train on # the entire dataset. You will need about 20 epochs to get nice results. gan.fit(dataset.take(100), epochs=1) ``` <div class="k-default-codeblock"> ``` 100/100 ━━━━━━━━━━━━━━━━━━━━ 51s 500ms/step - d_loss: 0.5645 - g_loss: 0.7434 <keras.src.callbacks.history.History at 0x14a4f1b10> ``` </div> The ideas behind deep learning are simple, so why should their implementation be painful?
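If you want a quick sanity check of the trained generator, a minimal sketch -- assuming the `gan` instance trained above is still in memory -- is to sample a few latent vectors and decode them directly:

```python
# Illustrative only: draw a few latent vectors and decode them with the
# trained generator. The output should have shape (4, 28, 28, 1).
random_latent_vectors = keras.random.normal(
    shape=(4, latent_dim), seed=gan.seed_generator
)
fake_images = gan.generator(random_latent_vectors)
print(fake_images.shape)
```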
# High-performance image generation using Stable Diffusion in KerasCV **Authors:** [fchollet](https://twitter.com/fchollet), [lukewood](https://twitter.com/luke_wood_ml), [divamgupta](https://github.com/divamgupta)<br> **Date created:** 2022/09/25<br> **Last modified:** 2022/09/25<br> **Description:** Generate new images using KerasCV's Stable Diffusion model. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/generate_images_with_stable_diffusion.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/generate_images_with_stable_diffusion.py) --- ## Overview In this guide, we will show how to generate novel images based on a text prompt using the KerasCV implementation of [stability.ai](https://stability.ai/)'s text-to-image model, [Stable Diffusion](https://github.com/CompVis/stable-diffusion). Stable Diffusion is a powerful, open-source text-to-image generation model. While there exist multiple open-source implementations that allow you to easily create images from textual prompts, KerasCV's offers a few distinct advantages. These include [XLA compilation](https://www.tensorflow.org/xla) and [mixed precision](https://www.tensorflow.org/guide/mixed_precision) support, which together achieve state-of-the-art generation speed. In this guide, we will explore KerasCV's Stable Diffusion implementation, show how to use these powerful performance boosts, and explore the performance benefits that they offer. **Note:** To run this guide on the `torch` backend, please set `jit_compile=False` everywhere. XLA compilation for Stable Diffusion does not currently work with torch. To get started, let's install a few dependencies and sort out some imports: ```python !pip install -q --upgrade keras-cv !pip install -q --upgrade keras # Upgrade to Keras 3. ``` ```python import time import keras_cv import keras import matplotlib.pyplot as plt ``` --- ## Introduction Unlike most tutorials, where we first explain a topic then show how to implement it, with text-to-image generation it is easier to show instead of tell. Check out the power of `keras_cv.models.StableDiffusion()`. First, we construct a model: ```python model = keras_cv.models.StableDiffusion( img_width=512, img_height=512, jit_compile=False ) ``` <div class="k-default-codeblock"> ``` By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE ``` </div> Next, we give it a prompt: ```python images = model.text_to_image("photograph of an astronaut riding a horse", batch_size=3) def plot_images(images): plt.figure(figsize=(20, 20)) for i in range(len(images)): ax = plt.subplot(1, len(images), i + 1) plt.imshow(images[i]) plt.axis("off") plot_images(images) ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 63s 211ms/step ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_7_1.png) Pretty incredible! But that's not all this model can do. 
Let's try a more complex prompt:

```python
images = model.text_to_image(
    "cute magical flying dog, fantasy art, "
    "golden color, high quality, highly detailed, elegant, sharp focus, "
    "concept art, character concepts, digital painting, mystery, adventure",
    batch_size=3,
)
plot_images(images)
```

<div class="k-default-codeblock">
```
 50/50 ━━━━━━━━━━━━━━━━━━━━ 10s 209ms/step
```
</div>

![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_9_1.png)

The possibilities are literally endless (or at least extend to the boundaries of Stable Diffusion's latent manifold).

---
## Wait, how does this even work?

Unlike what you might expect at this point, Stable Diffusion doesn't actually run on magic. It's a kind of "latent diffusion model". Let's dig into what that means.

You may be familiar with the idea of _super-resolution_: it's possible to train a deep learning model to _denoise_ an input image -- and thereby turn it into a higher-resolution version. The deep learning model doesn't do this by magically recovering the information that's missing from the noisy, low-resolution input -- rather, the model uses its training data distribution to hallucinate the visual details that would be most likely given the input. To learn more about super-resolution, you can check out the following Keras.io tutorials:

- [Image Super-Resolution using an Efficient Sub-Pixel CNN](https://keras.io/examples/vision/super_resolution_sub_pixel/)
- [Enhanced Deep Residual Networks for single-image super-resolution](https://keras.io/examples/vision/edsr/)

![Super-resolution](https://i.imgur.com/M0XdqOo.png)

When you push this idea to the limit, you may start asking -- what if we just run such a model on pure noise? The model would then "denoise the noise" and start hallucinating a brand new image. By repeating the process multiple times, you can gradually turn a small patch of noise into an increasingly clear and high-resolution artificial picture.

This is the key idea of latent diffusion, proposed in [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) in 2021. To understand diffusion in depth, you can check the Keras.io tutorial [Denoising Diffusion Implicit Models](https://keras.io/examples/generative/ddim/).

![Denoising diffusion](https://i.imgur.com/FSCKtZq.gif)

Now, to go from latent diffusion to a text-to-image system, you still need to add one key feature: the ability to control the generated visual contents via prompt keywords. This is done via "conditioning", a classic deep learning technique which consists of concatenating to the noise patch a vector that represents a bit of text, then training the model on a dataset of {image: caption} pairs.

This gives rise to the Stable Diffusion architecture. Stable Diffusion consists of three parts:

- A text encoder, which turns your prompt into a latent vector.
- A diffusion model, which repeatedly "denoises" a 64x64 latent image patch.
- A decoder, which turns the final 64x64 latent patch into a higher-resolution 512x512 image.

First, your text prompt gets projected into a latent vector space by the text encoder, which is simply a pretrained, frozen language model. Then that prompt vector is concatenated to a randomly generated noise patch, which is repeatedly "denoised" by the diffusion model over a series of "steps" (the more steps you run, the clearer and nicer your image will be -- the default value is 50 steps).
Finally, the 64x64 latent image is sent through the decoder to properly render it in high resolution. ![The Stable Diffusion architecture](https://i.imgur.com/2uC8rYJ.png) All-in-all, it's a pretty simple system -- the Keras implementation fits in four files that represent less than 500 lines of code in total: - [text_encoder.py](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion/text_encoder.py): 87 LOC - [diffusion_model.py](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion/diffusion_model.py): 181 LOC - [decoder.py](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion/decoder.py): 86 LOC - [stable_diffusion.py](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion/stable_diffusion.py): 106 LOC But this relatively simple system starts looking like magic once you train on billions of pictures and their captions. As Feynman said about the universe: _"It's not complicated, it's just a lot of it!"_ --- ## Perks of KerasCV With several implementations of Stable Diffusion publicly available why should you use `keras_cv.models.StableDiffusion`? Aside from the easy-to-use API, KerasCV's Stable Diffusion model comes with some powerful advantages, including: - Graph mode execution - XLA compilation through `jit_compile=True` - Support for mixed precision computation When these are combined, the KerasCV Stable Diffusion model runs orders of magnitude faster than naive implementations. This section shows how to enable all of these features, and the resulting performance gain yielded from using them. For the purposes of comparison, we ran benchmarks comparing the runtime of the [HuggingFace diffusers](https://github.com/huggingface/diffusers) implementation of Stable Diffusion against the KerasCV implementation. Both implementations were tasked to generate 3 images with a step count of 50 for each image. In this benchmark, we used a Tesla T4 GPU. [All of our benchmarks are open source on GitHub, and may be re-run on Colab to reproduce the results.](https://github.com/LukeWood/stable-diffusion-performance-benchmarks) The results from the benchmark are displayed in the table below: | GPU | Model | Runtime | |------------|------------------------|-----------| | Tesla T4 | KerasCV (Warm Start) | **28.97s**| | Tesla T4 | diffusers (Warm Start) | 41.33s | | Tesla V100 | KerasCV (Warm Start) | **12.45** | | Tesla V100 | diffusers (Warm Start) | 12.72 | 30% improvement in execution time on the Tesla T4!. While the improvement is much lower on the V100, we generally expect the results of the benchmark to consistently favor the KerasCV across all NVIDIA GPUs. For the sake of completeness, both cold-start and warm-start generation times are reported. Cold-start execution time includes the one-time cost of model creation and compilation, and is therefore negligible in a production environment (where you would reuse the same model instance many times). Regardless, here are the cold-start numbers: | GPU | Model | Runtime | |------------|------------------------|---------| | Tesla T4 | KerasCV (Cold Start) | 83.47s | | Tesla T4 | diffusers (Cold Start) | 46.27s | | Tesla V100 | KerasCV (Cold Start) | 76.43 | | Tesla V100 | diffusers (Cold Start) | 13.90 | While the runtime results from running this guide may vary, in our testing the KerasCV implementation of Stable Diffusion is significantly faster than its PyTorch counterpart. This may be largely attributed to XLA compilation. 
**Note: The performance benefits of each optimization can vary significantly between hardware setups.** To get started, let's first benchmark our unoptimized model: ```python benchmark_result = [] start = time.time() images = model.text_to_image( "A cute otter in a rainbow whirlpool holding shells, watercolor", batch_size=3, ) end = time.time() benchmark_result.append(["Standard", end - start]) plot_images(images) print(f"Standard model: {(end - start):.2f} seconds") keras.backend.clear_session() # Clear session to preserve memory. ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 10s 209ms/step Standard model: 10.57 seconds ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_13_1.png) ### Mixed precision "Mixed precision" consists of performing computation using `float16` precision, while storing weights in the `float32` format. This is done to take advantage of the fact that `float16` operations are backed by significantly faster kernels than their `float32` counterparts on modern NVIDIA GPUs. Enabling mixed precision computation in Keras (and therefore for `keras_cv.models.StableDiffusion`) is as simple as calling: ```python keras.mixed_precision.set_global_policy("mixed_float16") ``` That's all. Out of the box - it just works. ```python model = keras_cv.models.StableDiffusion(jit_compile=False) print("Compute dtype:", model.diffusion_model.compute_dtype) print( "Variable dtype:", model.diffusion_model.variable_dtype, ) ``` <div class="k-default-codeblock"> ``` By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE Compute dtype: float16 Variable dtype: float32 ``` </div> As you can see, the model constructed above now uses mixed precision computation; leveraging the speed of `float16` operations for computation, while storing variables in `float32` precision. ```python # Warm up model to run graph tracing before benchmarking. model.text_to_image("warming up the model", batch_size=3) start = time.time() images = model.text_to_image( "a cute magical flying dog, fantasy art, " "golden color, high quality, highly detailed, elegant, sharp focus, " "concept art, character concepts, digital painting, mystery, adventure", batch_size=3, ) end = time.time() benchmark_result.append(["Mixed Precision", end - start]) plot_images(images) print(f"Mixed precision model: {(end - start):.2f} seconds") keras.backend.clear_session() ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 42s 132ms/step 50/50 ━━━━━━━━━━━━━━━━━━━━ 6s 129ms/step Mixed precision model: 6.65 seconds ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_19_1.png) ### XLA Compilation TensorFlow and JAX come with the [XLA: Accelerated Linear Algebra](https://www.tensorflow.org/xla) compiler built-in. `keras_cv.models.StableDiffusion` supports a `jit_compile` argument out of the box. Setting this argument to `True` enables XLA compilation, resulting in a significant speed-up. Let's use this below: ```python # Set back to the default for benchmarking purposes. keras.mixed_precision.set_global_policy("float32") model = keras_cv.models.StableDiffusion(jit_compile=True) # Before we benchmark the model, we run inference once to make sure the TensorFlow # graph has already been traced. 
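# (This first warm-up call absorbs the one-time tracing/compilation cost
# described in the cold-start numbers above, so the benchmark below measures
# steady-state speed only.)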
images = model.text_to_image("An avocado armchair", batch_size=3) plot_images(images) ``` <div class="k-default-codeblock"> ``` By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE 50/50 ━━━━━━━━━━━━━━━━━━━━ 48s 209ms/step ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_21_1.png) Let's benchmark our XLA model: ```python start = time.time() images = model.text_to_image( "A cute otter in a rainbow whirlpool holding shells, watercolor", batch_size=3, ) end = time.time() benchmark_result.append(["XLA", end - start]) plot_images(images) print(f"With XLA: {(end - start):.2f} seconds") keras.backend.clear_session() ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 11s 210ms/step With XLA: 10.63 seconds ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_23_1.png) On an A100 GPU, we get about a 2x speedup. Fantastic! --- ## Putting it all together So, how do you assemble the world's most performant stable diffusion inference pipeline (as of September 2022). With these two lines of code: ```python keras.mixed_precision.set_global_policy("mixed_float16") model = keras_cv.models.StableDiffusion(jit_compile=True) ``` <div class="k-default-codeblock"> ``` By using this model checkpoint, you acknowledge that its usage is subject to the terms of the CreativeML Open RAIL-M license at https://raw.githubusercontent.com/CompVis/stable-diffusion/main/LICENSE ``` </div> And to use it... ```python # Let's make sure to warm up the model images = model.text_to_image( "Teddy bears conducting machine learning research", batch_size=3, ) plot_images(images) ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 48s 131ms/step ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_28_1.png) Exactly how fast is it? Let's find out! ```python start = time.time() images = model.text_to_image( "A mysterious dark stranger visits the great pyramids of egypt, " "high quality, highly detailed, elegant, sharp focus, " "concept art, character concepts, digital painting", batch_size=3, ) end = time.time() benchmark_result.append(["XLA + Mixed Precision", end - start]) plot_images(images) print(f"XLA + mixed precision: {(end - start):.2f} seconds") ``` <div class="k-default-codeblock"> ``` 50/50 ━━━━━━━━━━━━━━━━━━━━ 6s 130ms/step XLA + mixed precision: 6.66 seconds ``` </div> ![png](/img/guides/generate_images_with_stable_diffusion/generate_images_with_stable_diffusion_30_1.png) Let's check out the results: ```python print("{:<22} {:<22}".format("Model", "Runtime")) for result in benchmark_result: name, runtime = result print("{:<22} {:<22}".format(name, runtime)) ``` <div class="k-default-codeblock"> ``` Model Runtime Standard 10.572920799255371 Mixed Precision 6.651048421859741 XLA 10.632121562957764 XLA + Mixed Precision 6.659237861633301 ``` </div> It only took our fully-optimized model four seconds to generate three novel images from a text prompt on an A100 GPU. --- ## Conclusions KerasCV offers a state-of-the-art implementation of Stable Diffusion -- and through the use of XLA and mixed precision, it delivers the fastest Stable Diffusion pipeline available as of September 2022. Normally, at the end of a keras.io tutorial we leave you with some future directions to continue in to learn. 
This time, we leave you with one idea: **Go run your own prompts through the model! It is an absolute blast!**

If you have your own NVIDIA GPU, or an M1 MacBook Pro, you can also run the model locally on your machine. (Note that when running on an M1 MacBook Pro, you should not enable mixed precision, as it is not yet well supported by Apple's Metal runtime.)
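For example, a minimal local-inference sketch -- reusing only the APIs shown earlier in this guide, and keeping mixed precision disabled -- might look like this:

```python
# Keep the default float32 policy (i.e. no mixed precision) and skip XLA,
# then generate a single image with the same APIs used throughout this guide.
keras.mixed_precision.set_global_policy("float32")
model = keras_cv.models.StableDiffusion(
    img_width=512, img_height=512, jit_compile=False
)
images = model.text_to_image(
    "photograph of an astronaut riding a horse", batch_size=1
)
plot_images(images)
```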
# The Sequential model **Author:** [fchollet](https://twitter.com/fchollet)<br> **Date created:** 2020/04/12<br> **Last modified:** 2023/06/25<br> **Description:** Complete guide to the Sequential model. <img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/sequential_model.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/sequential_model.py) --- ## Setup ```python import keras from keras import layers from keras import ops ``` --- ## When to use a Sequential model A `Sequential` model is appropriate for **a plain stack of layers** where each layer has **exactly one input tensor and one output tensor**. Schematically, the following `Sequential` model: ```python # Define Sequential model with 3 layers model = keras.Sequential( [ layers.Dense(2, activation="relu", name="layer1"), layers.Dense(3, activation="relu", name="layer2"), layers.Dense(4, name="layer3"), ] ) # Call model on a test input x = ops.ones((3, 3)) y = model(x) ``` is equivalent to this function: ```python # Create 3 layers layer1 = layers.Dense(2, activation="relu", name="layer1") layer2 = layers.Dense(3, activation="relu", name="layer2") layer3 = layers.Dense(4, name="layer3") # Call layers on a test input x = ops.ones((3, 3)) y = layer3(layer2(layer1(x))) ``` A Sequential model is **not appropriate** when: - Your model has multiple inputs or multiple outputs - Any of your layers has multiple inputs or multiple outputs - You need to do layer sharing - You want non-linear topology (e.g. a residual connection, a multi-branch model) --- ## Creating a Sequential model You can create a Sequential model by passing a list of layers to the Sequential constructor: ```python model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] ) ``` Its layers are accessible via the `layers` attribute: ```python model.layers ``` <div class="k-default-codeblock"> ``` [<Dense name=dense, built=False>, <Dense name=dense_1, built=False>, <Dense name=dense_2, built=False>] ``` </div> You can also create a Sequential model incrementally via the `add()` method: ```python model = keras.Sequential() model.add(layers.Dense(2, activation="relu")) model.add(layers.Dense(3, activation="relu")) model.add(layers.Dense(4)) ``` Note that there's also a corresponding `pop()` method to remove layers: a Sequential model behaves very much like a list of layers. ```python model.pop() print(len(model.layers)) # 2 ``` <div class="k-default-codeblock"> ``` 2 ``` </div> Also note that the Sequential constructor accepts a `name` argument, just like any layer or model in Keras. This is useful to annotate TensorBoard graphs with semantically meaningful names. ```python model = keras.Sequential(name="my_sequential") model.add(layers.Dense(2, activation="relu", name="layer1")) model.add(layers.Dense(3, activation="relu", name="layer2")) model.add(layers.Dense(4, name="layer3")) ``` --- ## Specifying the input shape in advance Generally, all layers in Keras need to know the shape of their inputs in order to be able to create their weights. 
So when you create a layer like this, initially, it has no weights: ```python layer = layers.Dense(3) layer.weights # Empty ``` <div class="k-default-codeblock"> ``` [] ``` </div> It creates its weights the first time it is called on an input, since the shape of the weights depends on the shape of the inputs: ```python # Call layer on a test input x = ops.ones((1, 4)) y = layer(x) layer.weights # Now it has weights, of shape (4, 3) and (3,) ``` <div class="k-default-codeblock"> ``` [<KerasVariable shape=(4, 3), dtype=float32, path=dense_6/kernel>, <KerasVariable shape=(3,), dtype=float32, path=dense_6/bias>] ``` </div> Naturally, this also applies to Sequential models. When you instantiate a Sequential model without an input shape, it isn't "built": it has no weights (and calling `model.weights` results in an error stating just this). The weights are created when the model first sees some input data: ```python model = keras.Sequential( [ layers.Dense(2, activation="relu"), layers.Dense(3, activation="relu"), layers.Dense(4), ] ) # No weights at this stage! # At this point, you can't do this: # model.weights # You also can't do this: # model.summary() # Call the model on a test input x = ops.ones((1, 4)) y = model(x) print("Number of weights after calling the model:", len(model.weights)) # 6 ``` <div class="k-default-codeblock"> ``` Number of weights after calling the model: 6 ``` </div> Once a model is "built", you can call its `summary()` method to display its contents: ```python model.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_3"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ dense_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">1</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">10</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_8 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">1</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ dense_9 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00af00; text-decoration-color: #00af00">1</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; 
text-decoration-color: #00af00">35</span> (140.00 B) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">35</span> (140.00 B) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> However, it can be very useful when building a Sequential model incrementally to be able to display the summary of the model so far, including the current output shape. In this case, you should start your model by passing an `Input` object to your model, so that it knows its input shape from the start: ```python model = keras.Sequential() model.add(keras.Input(shape=(4,))) model.add(layers.Dense(2, activation="relu")) model.summary() ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_4"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ dense_10 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">10</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">10</span> (40.00 B) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">10</span> (40.00 B) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Note that the `Input` object is not displayed as part of `model.layers`, since it isn't a layer: ```python model.layers ``` <div class="k-default-codeblock"> ``` [<Dense name=dense_10, built=True>] ``` </div> Models built with a predefined input shape like this always have weights (even before seeing any data) and always have a defined output shape. In general, it's a recommended best practice to always specify the input shape of a Sequential model in advance if you know what it is. 
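If you'd rather not add an `Input` object but still want the weights to exist before any data is seen, a minimal sketch -- assuming the standard `Model.build()` method -- is to build the model explicitly from an input shape:

```python
model = keras.Sequential(
    [
        layers.Dense(2, activation="relu"),
        layers.Dense(3, activation="relu"),
        layers.Dense(4),
    ]
)
# Build all layers (and create their weights) from an input shape,
# leaving the batch dimension as None -- no real data required.
model.build(input_shape=(None, 4))
print(len(model.weights))  # 6
```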
--- ## A common debugging workflow: `add()` + `summary()` When building a new Sequential architecture, it's useful to incrementally stack layers with `add()` and frequently print model summaries. For instance, this enables you to monitor how a stack of `Conv2D` and `MaxPooling2D` layers is downsampling image feature maps: ```python model = keras.Sequential() model.add(keras.Input(shape=(250, 250, 3))) # 250x250 RGB images model.add(layers.Conv2D(32, 5, strides=2, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) # Can you guess what the current output shape is at this point? Probably not. # Let's just print it: model.summary() # The answer was: (40, 40, 32), so we can keep downsampling... model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(3)) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.Conv2D(32, 3, activation="relu")) model.add(layers.MaxPooling2D(2)) # And now? model.summary() # Now that we have 4x4 feature maps, time to apply global max pooling. model.add(layers.GlobalMaxPooling2D()) # Finally, we add a classification layer. model.add(layers.Dense(10)) ``` <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_5"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">123</span>, <span style="color: #00af00; text-decoration-color: #00af00">123</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,432</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">121</span>, <span style="color: #00af00; text-decoration-color: #00af00">121</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre 
style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">11,680</span> (45.62 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">11,680</span> (45.62 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential_5"</span> </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓ ┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩ │ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">123</span>, <span style="color: #00af00; text-decoration-color: #00af00">123</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,432</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">121</span>, <span style="color: #00af00; text-decoration-color: #00af00">121</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">40</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">38</span>, <span style="color: #00af00; text-decoration-color: #00af00">38</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: 
#00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">36</span>, <span style="color: #00af00; text-decoration-color: #00af00">36</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">12</span>, <span style="color: #00af00; text-decoration-color: #00af00">12</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ conv2d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">9,248</span> │ ├─────────────────────────────────┼───────────────────────────┼────────────┤ │ max_pooling2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">4</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ └─────────────────────────────────┴───────────────────────────┴────────────┘ </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">48,672</span> (190.12 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">48,672</span> (190.12 KB) </pre> <pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable 
params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B) </pre> Very practical, right? --- ## What to do once you have a model Once your model architecture is ready, you will want to: - Train your model, evaluate it, and run inference. See our [guide to training & evaluation with the built-in loops]( /guides/training_with_built_in_methods/) - Save your model to disk and restore it. See our [guide to serialization & saving](/guides/serialization_and_saving/). --- ## Feature extraction with a Sequential model Once a Sequential model has been built, it behaves like a [Functional API model](/guides/functional_api/). This means that every layer has an `input` and `output` attribute. These attributes can be used to do neat things, like quickly creating a model that extracts the outputs of all intermediate layers in a Sequential model: ```python initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=[layer.output for layer in initial_model.layers], ) # Call feature extractor on test input. x = ops.ones((1, 250, 250, 3)) features = feature_extractor(x) ``` Here's a similar example that only extract features from one layer: ```python initial_model = keras.Sequential( [ keras.Input(shape=(250, 250, 3)), layers.Conv2D(32, 5, strides=2, activation="relu"), layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"), layers.Conv2D(32, 3, activation="relu"), ] ) feature_extractor = keras.Model( inputs=initial_model.inputs, outputs=initial_model.get_layer(name="my_intermediate_layer").output, ) # Call feature extractor on test input. x = ops.ones((1, 250, 250, 3)) features = feature_extractor(x) ``` --- ## Transfer learning with a Sequential model Transfer learning consists of freezing the bottom layers in a model and only training the top layers. If you aren't familiar with it, make sure to read our [guide to transfer learning](/guides/transfer_learning/). Here are two common transfer learning blueprint involving Sequential models. First, let's say that you have a Sequential model, and you want to freeze all layers except the last one. In this case, you would simply iterate over `model.layers` and set `layer.trainable = False` on each layer, except the last one. Like this: ```python model = keras.Sequential([ keras.Input(shape=(784)), layers.Dense(32, activation='relu'), layers.Dense(32, activation='relu'), layers.Dense(32, activation='relu'), layers.Dense(10), ]) # Presumably you would want to first load pre-trained weights. model.load_weights(...) # Freeze all layers except the last one. for layer in model.layers[:-1]: layer.trainable = False # Recompile and train (this will only update the weights of the last layer). model.compile(...) model.fit(...) ``` Another common blueprint is to use a Sequential model to stack a pre-trained model and some freshly initialized classification layers. Like this: ```python # Load a convolutional base with pre-trained weights base_model = keras.applications.Xception( weights='imagenet', include_top=False, pooling='avg') # Freeze the base model base_model.trainable = False # Use a Sequential model to add a trainable classifier on top model = keras.Sequential([ base_model, layers.Dense(1000), ]) # Compile & train model.compile(...) model.fit(...) 
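# (Because `base_model.trainable` is False, fitting only updates the weights
# of the freshly added Dense classification layer.)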
``` If you do transfer learning, you will probably find yourself frequently using these two patterns. That's about all you need to know about Sequential models! To find out more about building models in Keras, see: - [Guide to the Functional API](/guides/functional_api/) - [Guide to making new Layers & Models via subclassing](/guides/making_new_layers_and_models_via_subclassing/)
""" Title: Writing a training loop from scratch in JAX Author: [fchollet](https://twitter.com/fchollet) Date created: 2023/06/25 Last modified: 2023/06/25 Description: Writing low-level training & evaluation loops in JAX. Accelerator: None """ """ ## Setup """ import os # This guide can only be run with the jax backend. os.environ["KERAS_BACKEND"] = "jax" import jax # We import TF so we can use tf.data. import tensorflow as tf import keras import numpy as np """ ## Introduction Keras provides default training and evaluation loops, `fit()` and `evaluate()`. Their usage is covered in the guide [Training & evaluation with the built-in methods](/guides/training_with_built_in_methods/). If you want to customize the learning algorithm of your model while still leveraging the convenience of `fit()` (for instance, to train a GAN using `fit()`), you can subclass the `Model` class and implement your own `train_step()` method, which is called repeatedly during `fit()`. Now, if you want very low-level control over training & evaluation, you should write your own training & evaluation loops from scratch. This is what this guide is about. """ """ ## A first end-to-end example To write a custom training loop, we need the following ingredients: - A model to train, of course. - An optimizer. You could either use an optimizer from `keras.optimizers`, or one from the `optax` package. - A loss function. - A dataset. The standard in the JAX ecosystem is to load data via `tf.data`, so that's what we'll use. Let's line them up. First, let's get the model and the MNIST dataset: """ def get_model(): inputs = keras.Input(shape=(784,), name="digits") x1 = keras.layers.Dense(64, activation="relu")(inputs) x2 = keras.layers.Dense(64, activation="relu")(x1) outputs = keras.layers.Dense(10, name="predictions")(x2) model = keras.Model(inputs=inputs, outputs=outputs) return model model = get_model() # Prepare the training dataset. batch_size = 32 (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() x_train = np.reshape(x_train, (-1, 784)).astype("float32") x_test = np.reshape(x_test, (-1, 784)).astype("float32") y_train = keras.utils.to_categorical(y_train) y_test = keras.utils.to_categorical(y_test) # Reserve 10,000 samples for validation. x_val = x_train[-10000:] y_val = y_train[-10000:] x_train = x_train[:-10000] y_train = y_train[:-10000] # Prepare the training dataset. train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size) # Prepare the validation dataset. val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val)) val_dataset = val_dataset.batch(batch_size) """ Next, here's the loss function and the optimizer. We'll use a Keras optimizer in this case. """ # Instantiate a loss function. loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True) # Instantiate an optimizer. optimizer = keras.optimizers.Adam(learning_rate=1e-3) """ ### Getting gradients in JAX Let's train our model using mini-batch gradient with a custom training loop. In JAX, gradients are computed via *metaprogramming*: you call the `jax.grad` (or `jax.value_and_grad` on a function in order to create a gradient-computing function for that first function. So the first thing we need is a function that returns the loss value. That's the function we'll use to generate the gradient function. Something like this: ```python def compute_loss(x, y): ... 
return loss ``` Once you have such a function, you can compute gradients via metaprogramming like this: ```python grad_fn = jax.grad(compute_loss) grads = grad_fn(x, y) ``` Typically, you don't just want to get the gradient values, you also want to get the loss value. You can do this by using `jax.value_and_grad` instead of `jax.grad`: ```python grad_fn = jax.value_and_grad(compute_loss) loss, grads = grad_fn(x, y) ``` ### JAX computation is purely stateless In JAX, everything must be a stateless function -- so our loss computation function must be stateless as well. That means that all Keras variables (e.g. weight tensors) must be passed as function inputs, and any variable that has been updated during the forward pass must be returned as function output. The function must have no side effects. During the forward pass, the non-trainable variables of a Keras model might get updated. These variables could be, for instance, RNG seed state variables or BatchNormalization statistics. We're going to need to return those. So we need something like this: ```python def compute_loss_and_updates(trainable_variables, non_trainable_variables, x, y): ... return loss, non_trainable_variables ``` Once you have such a function, you can get the gradient function by specifying `has_aux` in `value_and_grad`: it tells JAX that the loss computation function returns more outputs than just the loss. Note that the loss should always be the first output. ```python grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True) (loss, non_trainable_variables), grads = grad_fn( trainable_variables, non_trainable_variables, x, y ) ``` Now that we have established the basics, let's implement this `compute_loss_and_updates` function. Keras models have a `stateless_call` method which will come in handy here. It works just like `model.__call__`, but it requires you to explicitly pass the value of all the variables in the model, and it returns not just the `__call__` outputs but also the (potentially updated) non-trainable variables. """ def compute_loss_and_updates(trainable_variables, non_trainable_variables, x, y): y_pred, non_trainable_variables = model.stateless_call( trainable_variables, non_trainable_variables, x ) loss = loss_fn(y, y_pred) return loss, non_trainable_variables """ Let's get the gradient function: """ grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True) """ ### The training step function Next, let's implement the end-to-end training step, the function that will run the forward pass, compute the loss, compute the gradients, and use the optimizer to update the trainable variables. This function also needs to be stateless, so it will get as input a `state` tuple that includes every state element we're going to use: - `trainable_variables` and `non_trainable_variables`: the model's variables. - `optimizer_variables`: the optimizer's state variables, such as momentum accumulators. To update the trainable variables, we use the optimizer's stateless method `stateless_apply`. It's equivalent to `optimizer.apply()`, but it requires always passing `trainable_variables` and `optimizer_variables`. It returns both the updated trainable variables and the updated `optimizer_variables`.
""" def train_step(state, data): trainable_variables, non_trainable_variables, optimizer_variables = state x, y = data (loss, non_trainable_variables), grads = grad_fn( trainable_variables, non_trainable_variables, x, y ) trainable_variables, optimizer_variables = optimizer.stateless_apply( optimizer_variables, grads, trainable_variables ) # Return updated state return loss, ( trainable_variables, non_trainable_variables, optimizer_variables, ) """ ### Make it fast with `jax.jit` By default, JAX operations run eagerly, just like in TensorFlow eager mode and PyTorch eager mode. And just like TensorFlow eager mode and PyTorch eager mode, it's pretty slow -- eager mode is better used as a debugging environment, not as a way to do any actual work. So let's make our `train_step` fast by compiling it. When you have a stateless JAX function, you can compile it to XLA via the `@jax.jit` decorator. It will get traced during its first execution, and in subsequent executions you will be executing the traced graph (this is just like `@tf.function(jit_compile=True)`. Let's try it: """ @jax.jit def train_step(state, data): trainable_variables, non_trainable_variables, optimizer_variables = state x, y = data (loss, non_trainable_variables), grads = grad_fn( trainable_variables, non_trainable_variables, x, y ) trainable_variables, optimizer_variables = optimizer.stateless_apply( optimizer_variables, grads, trainable_variables ) # Return updated state return loss, ( trainable_variables, non_trainable_variables, optimizer_variables, ) """ We're now ready to train our model. The training loop itself is trivial: we just repeatedly call `loss, state = train_step(state, data)`. Note: - We convert the TF tensors yielded by the `tf.data.Dataset` to NumPy before passing them to our JAX function. - All variables must be built beforehand: the model must be built and the optimizer must be built. Since we're using a Functional API model, it's already built, but if it were a subclassed model you'd need to call it on a batch of data to build it. """ # Build optimizer variables. optimizer.build(model.trainable_variables) trainable_variables = model.trainable_variables non_trainable_variables = model.non_trainable_variables optimizer_variables = optimizer.variables state = trainable_variables, non_trainable_variables, optimizer_variables # Training loop for step, data in enumerate(train_dataset): data = (data[0].numpy(), data[1].numpy()) loss, state = train_step(state, data) # Log every 100 batches. if step % 100 == 0: print(f"Training loss (for 1 batch) at step {step}: {float(loss):.4f}") print(f"Seen so far: {(step + 1) * batch_size} samples") """ A key thing to notice here is that the loop is entirely stateless -- the variables attached to the model (`model.weights`) are never getting updated during the loop. Their new values are only stored in the `state` tuple. That means that at some point, before saving the model, you should be attaching the new variable values back to the model. Just call `variable.assign(new_value)` on each model variable you want to update: """ trainable_variables, non_trainable_variables, optimizer_variables = state for variable, value in zip(model.trainable_variables, trainable_variables): variable.assign(value) for variable, value in zip(model.non_trainable_variables, non_trainable_variables): variable.assign(value) """ ## Low-level handling of metrics Let's add metrics monitoring to this basic training loop. 
You can readily reuse built-in Keras metrics (or custom ones you wrote) in such training loops written from scratch. Here's the flow: - Instantiate the metric at the start of the loop. - Include `metric_variables` in the `train_step` arguments and `compute_loss_and_updates` arguments. - Call `metric.stateless_update_state()` in the `compute_loss_and_updates` function. It's equivalent to `update_state()` -- only stateless. - When you need to display the current value of the metric, outside the `train_step` (in the eager scope), attach the new metric variable values to the metric object and call `metric.result()`. - Call `metric.reset_state()` when you need to clear the state of the metric (typically at the end of an epoch). Let's use this knowledge to compute `CategoricalAccuracy` on training and validation data at the end of training: """ # Get a fresh model model = get_model() # Instantiate an optimizer to train the model. optimizer = keras.optimizers.Adam(learning_rate=1e-3) # Instantiate a loss function. loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True) # Prepare the metrics. train_acc_metric = keras.metrics.CategoricalAccuracy() val_acc_metric = keras.metrics.CategoricalAccuracy() def compute_loss_and_updates( trainable_variables, non_trainable_variables, metric_variables, x, y ): y_pred, non_trainable_variables = model.stateless_call( trainable_variables, non_trainable_variables, x ) loss = loss_fn(y, y_pred) metric_variables = train_acc_metric.stateless_update_state( metric_variables, y, y_pred ) return loss, (non_trainable_variables, metric_variables) grad_fn = jax.value_and_grad(compute_loss_and_updates, has_aux=True) @jax.jit def train_step(state, data): ( trainable_variables, non_trainable_variables, optimizer_variables, metric_variables, ) = state x, y = data (loss, (non_trainable_variables, metric_variables)), grads = grad_fn( trainable_variables, non_trainable_variables, metric_variables, x, y ) trainable_variables, optimizer_variables = optimizer.stateless_apply( optimizer_variables, grads, trainable_variables ) # Return updated state return loss, ( trainable_variables, non_trainable_variables, optimizer_variables, metric_variables, ) """ We'll also prepare an evaluation step function: """ @jax.jit def eval_step(state, data): trainable_variables, non_trainable_variables, metric_variables = state x, y = data y_pred, non_trainable_variables = model.stateless_call( trainable_variables, non_trainable_variables, x ) loss = loss_fn(y, y_pred) metric_variables = val_acc_metric.stateless_update_state( metric_variables, y, y_pred ) return loss, ( trainable_variables, non_trainable_variables, metric_variables, ) """ Here are our loops: """ # Build optimizer variables. optimizer.build(model.trainable_variables) trainable_variables = model.trainable_variables non_trainable_variables = model.non_trainable_variables optimizer_variables = optimizer.variables metric_variables = train_acc_metric.variables state = ( trainable_variables, non_trainable_variables, optimizer_variables, metric_variables, ) # Training loop for step, data in enumerate(train_dataset): data = (data[0].numpy(), data[1].numpy()) loss, state = train_step(state, data) # Log every 100 batches.
if step % 100 == 0: print(f"Training loss (for 1 batch) at step {step}: {float(loss):.4f}") _, _, _, metric_variables = state for variable, value in zip(train_acc_metric.variables, metric_variables): variable.assign(value) print(f"Training accuracy: {train_acc_metric.result()}") print(f"Seen so far: {(step + 1) * batch_size} samples") metric_variables = val_acc_metric.variables ( trainable_variables, non_trainable_variables, optimizer_variables, metric_variables, ) = state state = trainable_variables, non_trainable_variables, metric_variables # Eval loop for step, data in enumerate(val_dataset): data = (data[0].numpy(), data[1].numpy()) loss, state = eval_step(state, data) # Log every 100 batches. if step % 100 == 0: print(f"Validation loss (for 1 batch) at step {step}: {float(loss):.4f}") _, _, metric_variables = state for variable, value in zip(val_acc_metric.variables, metric_variables): variable.assign(value) print(f"Validation accuracy: {val_acc_metric.result()}") print(f"Seen so far: {(step + 1) * batch_size} samples") """ ## Low-level handling of losses tracked by the model Layers & models recursively track any losses created during the forward pass by layers that call `self.add_loss(value)`. The resulting list of scalar loss values are available via the property `model.losses` at the end of the forward pass. If you want to be using these loss components, you should sum them and add them to the main loss in your training step. Consider this layer, that creates an activity regularization loss: """ class ActivityRegularizationLayer(keras.layers.Layer): def call(self, inputs): self.add_loss(1e-2 * jax.numpy.sum(inputs)) return inputs """ Let's build a really simple model that uses it: """ inputs = keras.Input(shape=(784,), name="digits") x = keras.layers.Dense(64, activation="relu")(inputs) # Insert activity regularization as a layer x = ActivityRegularizationLayer()(x) x = keras.layers.Dense(64, activation="relu")(x) outputs = keras.layers.Dense(10, name="predictions")(x) model = keras.Model(inputs=inputs, outputs=outputs) """ Here's what our `compute_loss_and_updates` function should look like now: - Pass `return_losses=True` to `model.stateless_call()`. - Sum the resulting `losses` and add them to the main loss. """ def compute_loss_and_updates( trainable_variables, non_trainable_variables, metric_variables, x, y ): y_pred, non_trainable_variables, losses = model.stateless_call( trainable_variables, non_trainable_variables, x, return_losses=True ) loss = loss_fn(y, y_pred) if losses: loss += jax.numpy.sum(losses) metric_variables = train_acc_metric.stateless_update_state( metric_variables, y, y_pred ) return loss, non_trainable_variables, metric_variables """ That's it! """
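"""
A closing, hedged aside (not part of the walkthrough above): `jax.value_and_grad(..., has_aux=True)` expects the wrapped function to return a `(loss, aux)` pair. If you want to plug this regularization-aware `compute_loss_and_updates` back into the jitted, metric-aware `train_step` from the metrics section, one way to do so, under that assumption and with `compute_loss_and_updates_wrapped` being an illustrative name of our own, is to group the auxiliary outputs into a single tuple:

```python
def compute_loss_and_updates_wrapped(
    trainable_variables, non_trainable_variables, metric_variables, x, y
):
    # Delegate to the function defined above, then nest the auxiliary
    # outputs so that `has_aux=True` still sees a (loss, aux) pair.
    loss, non_trainable_variables, metric_variables = compute_loss_and_updates(
        trainable_variables, non_trainable_variables, metric_variables, x, y
    )
    return loss, (non_trainable_variables, metric_variables)


grad_fn = jax.value_and_grad(compute_loss_and_updates_wrapped, has_aux=True)
```
"""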
keras-io/guides/writing_a_custom_training_loop_in_jax.py/0
{ "file_path": "keras-io/guides/writing_a_custom_training_loop_in_jax.py", "repo_id": "keras-io", "token_count": 5785 }
126
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/data_loading/image/'" />
keras-io/redirects/api/preprocessing/image/index.html/0
{ "file_path": "keras-io/redirects/api/preprocessing/image/index.html", "repo_id": "keras-io", "token_count": 35 }
127
<meta http-equiv="refresh" content="0; URL='https://keras.io/getting_started/'" />
keras-io/redirects/getting-started/index.html/0
{ "file_path": "keras-io/redirects/getting-started/index.html", "repo_id": "keras-io", "token_count": 31 }
128
<meta http-equiv="refresh" content="0; URL='https://keras.io/guides/making_new_layers_and_models_via_subclassing/'" />
keras-io/redirects/layers/writing-your-own-keras-layers/index.html/0
{ "file_path": "keras-io/redirects/layers/writing-your-own-keras-layers/index.html", "repo_id": "keras-io", "token_count": 47 }
129
import boto3 from pathlib import Path import mimetypes import hashlib import os import json from multiprocessing.pool import ThreadPool AKEY = os.environ["AWS_S3_ACCESS_KEY"] SKEY = os.environ["AWS_S3_SECRET_KEY"] BUCKET = "keras.io" USE_THREADING = True HASH_CACHE = "contents_hashes.json" s3 = boto3.client("s3", aws_access_key_id=AKEY, aws_secret_access_key=SKEY) def hash_file(fpath): h = hashlib.sha256() b = bytearray(128 * 1024) mv = memoryview(b) with open(fpath, "rb", buffering=0) as f: while n := f.readinto(mv): h.update(mv[:n]) return h.hexdigest()[:8] def upload_file(bucket, fpath, key_name, redirect=None): print(f"...Upload to {bucket}:{key_name}") mime = mimetypes.guess_type(fpath)[0] extra_args = {"ContentType": mime, "ACL": "public-read"} if redirect: extra_args["WebsiteRedirectLocation"] = redirect s3.upload_file( fpath, bucket, key_name, ExtraArgs={"ContentType": mime, "ACL": "public-read"} ) def load_hash_cache(): try: s3.download_file(BUCKET, HASH_CACHE, HASH_CACHE) except: print(f"[ERROR] Could not dowload hash cache {HASH_CACHE}") return {} with open(HASH_CACHE) as f: contents = f.read() return json.loads(contents) def save_hash_cache(hash_cache): with open(HASH_CACHE, "w") as f: f.write(json.dumps(hash_cache)) upload_file(BUCKET, HASH_CACHE, HASH_CACHE) def wrapped_upload_file(args): bucket, fpath, key_name = args upload_file(bucket, fpath, key_name) def cleanup(site_directory, redirect_directory): paginator = s3.get_paginator("list_objects_v2") page_iterator = paginator.paginate(Bucket=BUCKET) for page in page_iterator: for obj in page["Contents"]: key = obj["Key"] if key.endswith(".html"): site_fpath = os.path.join(site_directory, key) redirect_fpath = os.path.join(redirect_directory, key) if not os.path.exists(site_fpath) and not os.path.exists( redirect_fpath ): print(f"[DELETE] {key}") s3.delete_object(Bucket=BUCKET, Key=key) def upload_dir(directory, include_img=True, hash_cache=None): print(f"Uploading files from '{directory}'...") all_targets = [] for dp, _, fn in os.walk(directory): if fn: for f in fn: fpath = os.path.join(dp, f) if f.startswith("."): continue if not include_img and "/img/" in fpath: continue key_name = fpath[len(directory) :] key_name = key_name.removeprefix("/") print(f"...{key_name}") all_targets.append((BUCKET, fpath, key_name)) if hash_cache is not None: filtered_targets = [] new_hash_cache = {} for bucket, fpath, key_name in all_targets: new_hash = hash_file(fpath) old_hash = hash_cache.get(key_name) if new_hash != old_hash: filtered_targets.append((bucket, fpath, key_name)) new_hash_cache[key_name] = new_hash all_targets = filtered_targets if USE_THREADING: pool = ThreadPool(processes=8) pool.map(wrapped_upload_file, all_targets) else: for args in all_targets: wrapped_upload_file(args) if hash_cache is not None: return new_hash_cache def upload_redirects(directory): print("Uploading redirects...") for dp, _, fn in os.walk(directory): if fn: for f in fn: fpath = os.path.join(dp, f) if not f == "index.html": continue content = open(fpath).read() url = content[content.find("URL=") + 5 :] url = url[: url.find("'")] print(fpath) print(url) key_name = fpath[len(directory) :] upload_file(BUCKET, fpath, key_name, redirect=url) if __name__ == "__main__": root = Path(__file__).parent.parent.resolve() hash_cache = load_hash_cache() hash_cache = upload_dir( os.path.join(root, "site"), include_img=True, hash_cache=hash_cache ) save_hash_cache(hash_cache)
keras-io/scripts/upload.py/0
{ "file_path": "keras-io/scripts/upload.py", "repo_id": "keras-io", "token_count": 2169 }
130
# BERT Models, tokenizers, and preprocessing layers for BERT, as described in ["BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding"](https://arxiv.org/abs/1810.04805). For a full list of available **presets**, see the [models page](/api/keras_nlp/models). {{toc}}
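For a quick, illustrative example of typical usage (the preset name below is one of the BERT presets listed on the models page; swap in whichever checkpoint you need):

```python
import keras_nlp

# Build an end-to-end classifier from a preset backbone and preprocessor.
classifier = keras_nlp.models.BertClassifier.from_preset(
    "bert_base_en_uncased",
    num_classes=2,
)
# Raw strings are valid input; preprocessing is attached by default.
classifier.predict(["What an amazing movie!", "A total waste of my time."])
```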
keras-io/templates/api/keras_nlp/models/bert/index.md/0
{ "file_path": "keras-io/templates/api/keras_nlp/models/bert/index.md", "repo_id": "keras-io", "token_count": 94 }
131
# Layer weight regularizers Regularizers allow you to apply penalties on layer parameters or layer activity during optimization. These penalties are summed into the loss function that the network optimizes. Regularization penalties are applied on a per-layer basis. The exact API will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D` and `Conv3D`) have a unified API. These layers expose 3 keyword arguments: - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias - `activity_regularizer`: Regularizer to apply a penalty on the layer's output ```python from keras import layers from keras import regularizers layer = layers.Dense( units=64, kernel_regularizer=regularizers.L1L2(l1=1e-5, l2=1e-4), bias_regularizer=regularizers.L2(1e-4), activity_regularizer=regularizers.L2(1e-5) ) ``` The value returned by the `activity_regularizer` object gets divided by the input batch size so that the relative weighting between the weight regularizers and the activity regularizers does not change with the batch size. You can access a layer's regularization penalties by calling `layer.losses` after calling the layer on inputs: ```python from keras import ops layer = layers.Dense(units=5, kernel_initializer='ones', kernel_regularizer=regularizers.L1(0.01), activity_regularizer=regularizers.L2(0.01)) tensor = ops.ones(shape=(5, 5)) * 2.0 out = layer(tensor) # The kernel regularization term is 0.25 # The activity regularization term (after dividing by the batch size) is 5 print(ops.sum(layer.losses)) # 5.25 (= 5 + 0.25) ``` ## Available regularizers The following built-in regularizers are available as part of the `keras.regularizers` module: {{autogenerated}} ## Creating custom regularizers ### Simple callables A weight regularizer can be any callable that takes as input a weight tensor (e.g. the kernel of a `Conv2D` layer), and returns a scalar loss. Like this: ```python def my_regularizer(x): return 1e-3 * ops.sum(ops.square(x)) ``` ### `Regularizer` subclasses If you need to configure your regularizer via various arguments (e.g. `l1` and `l2` arguments in `l1_l2`), you should implement it as a subclass of `keras.regularizers.Regularizer`. Here's a simple example: ```python class MyRegularizer(regularizers.Regularizer): def __init__(self, strength): self.strength = strength def __call__(self, x): return self.strength * ops.sum(ops.square(x)) ``` Optionally, you can also implement the method `get_config` and the class method `from_config` in order to support serialization -- just like with any Keras object. Example: ```python class MyRegularizer(regularizers.Regularizer): def __init__(self, strength): self.strength = strength def __call__(self, x): return self.strength * ops.sum(ops.square(x)) def get_config(self): return {'strength': self.strength} ```
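If you save and load models that use such a custom regularizer, the class also needs to be known to Keras at load time. A minimal sketch (the `package` name here is just an illustrative placeholder) is to register it as a serializable object:

```python
import keras
from keras import ops, regularizers


@keras.saving.register_keras_serializable(package="my_custom_package")
class MyRegularizer(regularizers.Regularizer):
    def __init__(self, strength):
        self.strength = strength

    def __call__(self, x):
        # L2-style penalty scaled by `strength`.
        return self.strength * ops.sum(ops.square(x))

    def get_config(self):
        return {"strength": self.strength}
```

With the class registered, a model saved with `kernel_regularizer=MyRegularizer(1e-3)` can typically be restored with `keras.models.load_model()` without passing `custom_objects` explicitly.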
keras-io/templates/api/layers/regularizers.md/0
{ "file_path": "keras-io/templates/api/layers/regularizers.md", "repo_id": "keras-io", "token_count": 1010 }
132
# The Keras ecosystem The Keras project isn't limited to the core Keras API for building and training neural networks. It spans a wide range of related initiatives that cover every step of the machine learning workflow. --- ## KerasTuner [KerasTuner Documentation](/keras_tuner/) - [KerasTuner GitHub repository](https://github.com/keras-team/keras-tuner) KerasTuner is an easy-to-use, scalable hyperparameter optimization framework that solves the pain points of hyperparameter search. Easily configure your search space with a define-by-run syntax, then leverage one of the available search algorithms to find the best hyperparameter values for your models. KerasTuner comes with Bayesian Optimization, Hyperband, and Random Search algorithms built-in, and is also designed to be easy for researchers to extend in order to experiment with new search algorithms. --- ## KerasNLP [KerasNLP Documentation](/keras_nlp/) - [KerasNLP GitHub repository](https://github.com/keras-team/keras-nlp) KerasNLP is a natural language processing library that supports users through their entire development cycle. Our workflows are built from modular components that have state-of-the-art preset weights and architectures when used out-of-the-box and are easily customizable when more control is needed. --- ## KerasCV [KerasCV Documentation](/keras_cv/) - [KerasCV GitHub repository](https://github.com/keras-team/keras-cv) KerasCV is a repository of modular building blocks (layers, metrics, losses, data-augmentation) that applied computer vision engineers can leverage to quickly assemble production-grade, state-of-the-art training and inference pipelines for common use cases such as image classification, object detection, image segmentation, image data augmentation, etc. KerasCV can be understood as a horizontal extension of the Keras API: the components are new first-party Keras objects (layers, metrics, etc) that are too specialized to be added to core Keras, but that receive the same level of polish and backwards compatibility guarantees as the rest of the Keras API. --- ## AutoKeras [AutoKeras Documentation](https://autokeras.com/) - [AutoKeras GitHub repository](https://github.com/keras-team/autokeras) AutoKeras is an AutoML system based on Keras. It is developed by [DATA Lab](http://faculty.cs.tamu.edu/xiahu/index.html) at Texas A&M University. The goal of AutoKeras is to make machine learning accessible for everyone. It provides high-level end-to-end APIs such as [`ImageClassifier`](https://autokeras.com/tutorial/image_classification/) or [`TextClassifier`](https://autokeras.com/tutorial/text_classification/) to solve machine learning problems in a few lines, as well as [flexible building blocks](https://autokeras.com/tutorial/customized/) to perform architecture search. ```python import autokeras as ak clf = ak.ImageClassifier() clf.fit(x_train, y_train) results = clf.predict(x_test) ```
keras-io/templates/getting_started/ecosystem.md/0
{ "file_path": "keras-io/templates/getting_started/ecosystem.md", "repo_id": "keras-io", "token_count": 799 }
133
body { font-family: 'Open Sans', sans-serif; font-size: 0.95rem } h1, h2, h3, h4, h5, h6 { font-family: 'Open Sans', sans-serif; } h1 { font-weight: 700; font-size: 2.2rem; margin-bottom: 1rem; } h2 { font-weight: 600; font-size: 1.6rem; margin-bottom: 0.8rem; } h3 { font-size: 1.2rem; font-weight: 600; margin-bottom: 0.6rem; } h4 { font-size: 1.1rem; font-weight: 600; margin-bottom: 0.5rem; } a { color: #d00000; } a:hover { color: #ff0000; } b, strong { font-weight: 700; } hr { margin-top: 2rem; margin-bottom: 2rem; border-top: 1px solid rgba(0, 0, 0, .2); } /*table { width: 100%; margin-bottom: 2rem; }*/ blockquote { padding: 1rem; font-size: 0.8rem; } .k-page { width:66rem; float: left; } .k-nav { width: 19rem; float: left; border-bottom: 1px solid #ccc; display: block; } .k-main { width: 47rem; background: #fff; float: left; border-left: 1px solid #ccc; border-right: 1px solid #ccc; min-height: 1000px; } .k-content { width: 47rem; float: left; padding: 1.3rem; padding-top: 1rem; } .k-search-form { width: 100%; padding: 1rem; } #dropdown-nav { display: none; } .nav-link { font-size: 1rem; padding: 0.6rem 1rem; } .nav-sublink { font-size: 0.85rem; padding-top: 0.45rem; padding-bottom: 0.45rem; } .nav-sublink2 { font-size: 0.8rem; padding-top: 0.45rem; padding-bottom: 0.45rem; } @media screen and (max-width: 1342px) { .k-nav { width: 17rem; } } @media screen and (max-width: 1300px) { .k-outline { width: 0; display: none !important; } } @media screen and (max-width: 840px) { body { font-size: 1rem; } pre { font-size: 100%; } .k-page { width: 100%; min-width: 375px; } .k-nav { display: none; position: fixed; top: 62px; left: 0; width: 17rem; background: white; overflow-y: scroll; bottom: 0; } #dropdown-nav { display: block; width: 70px; float: left; cursor: pointer; } .k-search-form { width: 80%; } .k-main { width: 100%; } .k-main-inner { margin-top: 60px; } .k-main-top { position: fixed; background: white; min-width: 357px; } .k-content { width: 100%; padding: 2%; padding-top: 1%; } .nav-link { font-size: 1.1rem; } .nav-sublink { font-size: 1rem; padding-top: 0.6rem; padding-bottom: 0.6rem; } .nav-sublink2 { font-size: 1rem; padding-top: 0.6rem; padding-bottom: 0.6rem; } } .k-outline { position: fixed; width: 18rem; background: #f7f7f7; padding: 1.5rem; padding-top: 1rem; margin-left: 47rem; margin-top: 3.8rem; border-top: 1px solid #ccc; color: #a20000; max-height: 100%; overflow: scroll; padding-bottom: 5em; } .k-content img { max-width: 90%; } .k-main-top { float: left; width: 100%; border-bottom: 1px solid #ccc; background-color: white; } .k-location-slug { width: 100%; padding: 1rem; border-bottom: 1px solid #ccc; float: left; } .k-inline-icon { height: 1rem; padding-right: 0.5rem; } .k-dot { padding-left: 1rem; padding-right: 1rem; } .k-search-btn { margin-left: 0.8rem; border-color: #000000; background-color: #333333; background-image: none; filter: none; font-size: 0px; padding: 6px 27px; width: auto; vertical-align: middle; border: 1px solid #000; border-radius: 2px; } .k-search-btn svg { fill: white; } .k-search-input { display: block; width: 100%; height: 1.8rem; padding: .375rem .75rem; font-size: 0.8rem; font-weight: 300; line-height: 1.5; color: #495057; background-color: #fff; background-clip: padding-box; border: 1px solid #ced4da; border-radius: .1rem; transition: border-color .15s ease-in-out,box-shadow .15s ease-in-out; outline: none; } .k-search-input:focus { border-color: #d00000; box-shadow: 0 0 5px rgb(173 25 25 / 50%); } .gsc-search-button-v2 { margin-top: 0; } 
.logo-small { width: 100%; padding-left: 10%; padding-right: 10%; padding-top:5%; padding-bottom:5%; } .nav-pills .nav-sublink.active { background-color: #d00000; color: white; } .nav-pills .nav-sublink2.active { background-color: #555555; color: white; } .nav-pills .nav-link { border-radius: 0; border-top: 1px solid #ddd; color: #d00000; } .nav-pills .nav-sublink { padding-left: 1.5rem; padding-right: 0.8rem; background-color: white; color: black; text-decoration: none; } .nav-pills .nav-sublink2 { padding-left: 2.5rem; padding-right: 0.8rem; background-color: white; color: black; text-decoration: none; } .nav-link:hover { background-color: #eee; color: #d00000; } .nav-pills .nav-link.active { background-color: #222; color: white; } .nav-pills .nav-sublink.active:hover, .nav-pills .nav-sublink2.active:hover{ background-color: #f42424; color: white; } .nav-sublink:hover, .nav-sublink2:hover { background-color: #eee; color: black; } .nav-pills .nav-link.active:hover { background-color: #333; color: white; } .k-outline-depth-1 { width: 100%; font-weight: 600; font-size: 1rem; padding-bottom: 0.3rem; padding-top: 0.3rem; } .k-outline-depth-2 { width: 100%; font-weight: 400; font-size: 0.85rem; padding-bottom: 0.15rem; padding-top: 0.15rem; } .k-outline-depth-3 { width: 100%; font-size: 0.7rem; padding-left: 1rem; } .btn { margin-top: 1rem; margin-bottom: 1rem; } .btn-primary { color: #fff; background-color: #d00000; border-color: #8a0000; } .btn-primary:hover { color: #fff; background-color: #ff0000; border-color: #8a0000; } .btn-primary:focus, .btn-primary.focus { box-shadow: 0 0 0 0.2rem rgba(38, 143, 255, 0.5); } .btn-primary.disabled, .btn-primary:disabled { color: #fff; background-color: #d00000; border-color: #8a0000; } .btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, .show > .btn-primary.dropdown-toggle { color: #fff; background-color: #ff0000; border-color: #8a0000; } .btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-primary.dropdown-toggle:focus { box-shadow: 0 0 0 0.2rem rgba(38, 143, 255, 0.5); } .btn-secondary { background-color: white; color: #3f3f3f; border-color: #bbb; } .btn-secondary:hover { color: #3f3f3f; background-color: #f0f0f0; border-color: #bbb; } .btn-secondary:focus, .btn-secondary.focus { box-shadow: 0 0 0 0.2rem rgba(130, 138, 145, 0.5); } .btn-secondary.disabled, .btn-secondary:disabled { color: #3f3f3f; background-color: #f0f0f0; border-color: #bbb; } .btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, .show > .btn-secondary.dropdown-toggle { color: #3f3f3f; background-color: #f0f0f0; border-color: #bbb; } .btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-secondary.dropdown-toggle:focus { box-shadow: 0 0 0 0.2rem rgba(130, 138, 145, 0.5); } .logo { width: 28rem; margin-bottom: 1rem; } footer { float: left; width: 100%; padding: 1em; border-top: solid 1px #bbb; } table { margin-top: 1.5rem; margin-bottom: 1.5rem; width: 100%; border: solid 2px gray; font-size: 10pt; } tr { border: solid 1px gray; } th { border: solid 1px gray; padding: 0.3rem; } td { border: solid 1px gray; padding: 0.3rem; } .gh-btn-container { margin-left: 4.7em; padding-bottom: 10px; padding-top: 5px; } .example-card { padding: 0.25rem; padding-left: 0.75rem; padding-right: 0.75rem; margin-top: 0.25rem; margin-bottom: 0.25rem; 
background-color: #efefef40; text-decoration: none; } .example-card:hover { background-color: #f8f5f560; text-decoration: none; } .example-card a:hover { text-decoration: none; } .example-card-title { color: #d00000; } .example-subcategory-title { margin-top: 1rem; } .example-highlight { float: left; margin-right: 0.5rem; background-color: white; width: 1.6rem; height: 1.6rem; text-align: center; border: solid #ddd 1px; border-radius: 1rem; font-size: 0.85rem; padding-top: 0.05rem; } .example_version_banner { padding: 0.5em; margin-bottom: 1em; text-align: center; font-weight: bolder; } .keras_3 { background: #188485; color: white; } .keras_2 { background: #ffedb7; color: black; } .k-location-slug-pointer { margin-right: 0.3em; color: #4f4f4f; font-size: 0.9em; }
keras-io/theme/css/docs.css/0
{ "file_path": "keras-io/theme/css/docs.css", "repo_id": "keras-io", "token_count": 3798 }
134
<style> table { border: none; margin: 0; } tr { border: none; } th { border: none; } td { border: none; } </style> <div class='k-main-inner'> <div class='k-content'> <script async src="https://cse.google.com/cse.js?cx=005305742766141300566:jc9gtvxee8o"></script> <div class="gcse-search"></div> </div> </div> <script> window.addEventListener('load', function(){ const urlParams = new URLSearchParams(window.location.search); const query = urlParams.get('query'); if (query != null) { var inp = document.getElementById('gsc-i-id1'); inp.value = query; document.getElementsByClassName('gsc-search-button-v2')[0].click(); } }); </script>
keras-io/theme/search.html/0
{ "file_path": "keras-io/theme/search.html", "repo_id": "keras-io", "token_count": 511 }
135
{ "python.testing.pytestEnabled": true, "python.formatting.provider": "black", "editor.rulers": [ 80 ] }
keras-nlp/.vscode/settings.json/0
{ "file_path": "keras-nlp/.vscode/settings.json", "repo_id": "keras-nlp", "token_count": 60 }
136
# KerasNLP Modeling Tools This directory contains runnable scripts that are not tied to a specific model architecture, but are still useful for end-to-end workflows. ## split_sentences.py The `split_sentences.py` script will process raw input files and split them into output files where each line contains a sentence, and a blank line marks the start of a new document. This is useful for tasks like next sentence prediction, where the boundaries between sentences are needed for training. The script supports two types of input files: plain text files, where each individual file is assumed to be an entire document, and Wikipedia dump files in the format produced by the wikiextractor tool (each document is enclosed in `<doc>` tags). Example usage: ```shell python examples/tools/split_sentences.py \ --input_files ~/datasets/wikipedia,~/datasets/bookscorpus \ --output_directory ~/datasets/sentence-split-data ``` ## train_word_piece_vocabulary.py The `train_word_piece_vocabulary.py` script allows you to compute your own WordPiece vocabulary. Example usage: ```shell python examples/tools/train_word_piece_vocabulary.py \ --input_files ~/datasets/my-raw-dataset/ \ --output_file vocab.txt ```
keras-nlp/examples/tools/README.md/0
{ "file_path": "keras-nlp/examples/tools/README.md", "repo_id": "keras-nlp", "token_count": 350 }
137
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops @keras_nlp_export("keras_nlp.layers.RotaryEmbedding") class RotaryEmbedding(keras.layers.Layer): """Rotary positional encoding layer. This layer encodes absolute positional information with a rotation matrix. It calculates the rotary encoding with a mix of sine and cosine functions with geometrically increasing wavelengths. Defined and formulated in [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864v4). The input must be a tensor with shape a sequence dimension and a feature dimension. Typically, this will either an input with shape `(batch_size, sequence_length, feature_length)` or `(batch_size, sequence_length, num_heads, feature_length)`. This layer will return a new tensor with the rotary embedding applied to the input tensor. Args: max_wavelength: int. The maximum angular wavelength of the sine/cosine curves. scaling_factor: float. The scaling factor used to scale frequency range. sequence_axis: int. Sequence axis in the input tensor. feature_axis: int. Feature axis in the input tensor. Call arguments: inputs: The tensor inputs to apply the embedding to. This can have any shape, but must contain both a sequence and feature axis. The rotary embedding will be applied to `inputs` and returned. start_index: An integer or integer tensor. The starting position to compute the rotary embedding from. This is useful during cached decoding, where each position is predicted separately in a loop. Examples: ```python batch_size = 16 feature_length = 18 sequence_length = 256 num_heads = 8 # No multi-head dimension. tensor = np.ones((batch_size, sequence_length, feature_length)) rot_emb_layer = RotaryEmbedding() tensor_rot = rot_emb_layer(tensor) # With multi-head dimension. 
tensor = np.ones((batch_size, sequence_length, num_heads, feature_length)) tensor_rot = rot_emb_layer(tensor) ``` References: - [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864v4) """ def __init__( self, max_wavelength=10000, scaling_factor=1.0, sequence_axis=1, feature_axis=-1, **kwargs ): super().__init__(**kwargs) self.max_wavelength = max_wavelength self.sequence_axis = sequence_axis self.feature_axis = feature_axis self.scaling_factor = scaling_factor self.built = True def call(self, inputs, start_index=0): cos_emb, sin_emb = self._compute_cos_sin_embedding(inputs, start_index) return self._apply_rotary_pos_emb(inputs, cos_emb, sin_emb) def _apply_rotary_pos_emb(self, tensor, cos_emb, sin_emb): x1, x2 = ops.split(tensor, 2, axis=self.feature_axis) half_rot_tensor = ops.concatenate((-x2, x1), axis=self.feature_axis) return (tensor * cos_emb) + (half_rot_tensor * sin_emb) def _compute_cos_sin_embedding(self, inputs, start_index=0): def get_axis(axis): return axis if axis > 0 else len(inputs.shape) + axis feature_axis = get_axis(self.feature_axis) sequence_axis = get_axis(self.sequence_axis) rotary_dim = ops.shape(inputs)[feature_axis] inverse_freq = self._get_inverse_freq(rotary_dim) seq_len = ops.shape(inputs)[self.sequence_axis] tensor = ops.cast(ops.arange(seq_len), self.compute_dtype) + start_index tensor = ops.cast(tensor, dtype=inverse_freq.dtype) freq = ops.einsum("i,j->ij", tensor, inverse_freq) embedding = ops.concatenate((freq, freq), axis=-1) # Reshape the embedding to be broadcastable with input shape. if feature_axis < sequence_axis: embedding = ops.transpose(embedding) for axis in range(len(inputs.shape)): if axis != sequence_axis and axis != feature_axis: embedding = ops.expand_dims(embedding, axis) return ops.cos(embedding), ops.sin(embedding) def _get_inverse_freq(self, rotary_dim): freq_range = ops.arange(0, rotary_dim, 2) freq_range = ops.cast(freq_range, self.compute_dtype) freq_range = freq_range / ops.cast( self.scaling_factor, self.compute_dtype ) inverse_freq = 1.0 / ( self.max_wavelength ** (freq_range / ops.cast(rotary_dim, self.compute_dtype)) ) return inverse_freq def get_config(self): config = super().get_config() config.update( { "max_wavelength": self.max_wavelength, "scaling_factor": self.scaling_factor, "sequence_axis": self.sequence_axis, "feature_axis": self.feature_axis, } ) return config def compute_output_shape(self, input_shape): return input_shape
keras-nlp/keras_nlp/layers/modeling/rotary_embedding.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/modeling/rotary_embedding.py", "repo_id": "keras-nlp", "token_count": 2284 }
138
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from keras_nlp.layers.preprocessing.multi_segment_packer import ( MultiSegmentPacker, ) from keras_nlp.tests.test_case import TestCase class MultiSegmentPackerTest(TestCase): def test_trim_single_input_ints(self): input_data = np.arange(3, 10) packer = MultiSegmentPacker( sequence_length=8, start_value=1, end_value=2 ) token_ids, segment_ids = packer(input_data) self.assertAllEqual(token_ids, [1, 3, 4, 5, 6, 7, 8, 2]) self.assertAllEqual(segment_ids, [0, 0, 0, 0, 0, 0, 0, 0]) def test_trim_single_input_strings(self): input_data = np.array(["a", "b", "c", "d"]) packer = MultiSegmentPacker( sequence_length=5, start_value="[CLS]", end_value="[SEP]" ) token_ids, segment_ids = packer(input_data) self.assertAllEqual(token_ids, ["[CLS]", "a", "b", "c", "[SEP]"]) self.assertAllEqual(segment_ids, [0, 0, 0, 0, 0]) def test_trim_multiple_inputs_round_robin(self): seq1 = ["a", "b", "c"] seq2 = ["x", "y", "z"] packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", truncate="round_robin", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, ["[CLS]", "a", "b", "[SEP]", "x", "y", "[SEP]"] ) self.assertAllEqual(segment_ids, [0, 0, 0, 0, 1, 1, 1]) def test_trim_multiple_inputs_waterfall(self): seq1 = ["a", "b", "c"] seq2 = ["x", "y", "z"] packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", truncate="waterfall", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, ["[CLS]", "a", "b", "c", "[SEP]", "x", "[SEP]"] ) self.assertAllEqual(segment_ids, [0, 0, 0, 0, 0, 1, 1]) def test_trim_batched_inputs_round_robin(self): seq1 = [["a", "b", "c"], ["a", "b", "c"]] seq2 = [["x", "y", "z"], ["x", "y", "z"]] packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", truncate="round_robin", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, [ ["[CLS]", "a", "b", "[SEP]", "x", "y", "[SEP]"], ["[CLS]", "a", "b", "[SEP]", "x", "y", "[SEP]"], ], ) self.assertAllEqual( segment_ids, [ [0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1], ], ) def test_trim_batched_inputs_waterfall(self): seq1 = [["a", "b", "c"], ["a", "b"]] seq2 = [["x", "y", "z"], ["x", "y", "z"]] packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", truncate="waterfall", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, [ ["[CLS]", "a", "b", "c", "[SEP]", "x", "[SEP]"], ["[CLS]", "a", "b", "[SEP]", "x", "y", "[SEP]"], ], ) self.assertAllEqual( segment_ids, [ [0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1, 1], ], ) def test_pad_inputs(self): seq1 = ["a"] seq2 = ["x"] packer = MultiSegmentPacker( 6, start_value="[CLS]", end_value="[SEP]", pad_value="[PAD]" ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, ["[CLS]", "a", "[SEP]", "x", "[SEP]", "[PAD]"], ) self.assertAllEqual(segment_ids, [0, 0, 0, 1, 1, 0]) def 
test_pad_batched_inputs(self): seq1 = [["a"], ["a"]] seq2 = [["x"], ["x", "y"]] packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", pad_value="[PAD]", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, [ ["[CLS]", "a", "[SEP]", "x", "[SEP]", "[PAD]", "[PAD]"], ["[CLS]", "a", "[SEP]", "x", "y", "[SEP]", "[PAD]"], ], ) self.assertAllEqual( segment_ids, [ [0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0], ], ) def test_list_special_tokens(self): seq1 = [["a", "b"], ["a", "b"]] seq2 = [["x", "y"], ["x"]] packer = MultiSegmentPacker( 8, start_value="<s>", end_value="</s>", sep_value=["</s>", "</s>"], pad_value="<pad>", truncate="round_robin", ) token_ids, segment_ids = packer([seq1, seq2]) self.assertAllEqual( token_ids, [ ["<s>", "a", "b", "</s>", "</s>", "x", "y", "</s>"], ["<s>", "a", "b", "</s>", "</s>", "x", "</s>", "<pad>"], ], ) self.assertAllEqual( segment_ids, [ [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 0], ], ) def test_config(self): seq1 = [["a", "b", "c"], ["a", "b"]] seq2 = [["x", "y", "z"], ["x", "y", "z"]] original_packer = MultiSegmentPacker( sequence_length=7, start_value="[CLS]", end_value="[SEP]", truncate="waterfall", ) cloned_packer = MultiSegmentPacker.from_config( original_packer.get_config() ) token_ids, segment_ids = original_packer([seq1, seq2]) cloned_token_ids, cloned_segment_ids = cloned_packer([seq1, seq2]) self.assertAllEqual(token_ids, cloned_token_ids) self.assertAllEqual(segment_ids, cloned_segment_ids)
keras-nlp/keras_nlp/layers/preprocessing/multi_segment_packer_test.py/0
{ "file_path": "keras-nlp/keras_nlp/layers/preprocessing/multi_segment_packer_test.py", "repo_id": "keras-nlp", "token_count": 3793 }
139
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.metrics.rouge_base import RougeBase @keras_nlp_export("keras_nlp.metrics.RougeL") class RougeL(RougeBase): """ROUGE-L metric. This class implements the ROUGE-L variant of the ROUGE metric. The ROUGE-L metric is traditionally used for evaluating summarisation systems. Succinctly put, ROUGE-L is a score based on the length of the longest common subsequence present in the reference text and the hypothesis text. Note on input shapes: For `y_true` and `y_pred`, this class supports scalar values and batch inputs of shapes `()`, `(batch_size,)` and `(batch_size, 1)`. Args: use_stemmer: bool. Whether Porter Stemmer should be used to strip word suffixes to improve matching. Defaults to `False`. dtype: string or tf.dtypes.Dtype. Precision of metric computation. If not specified, it defaults to `"float32"`. name: string. Name of the metric instance. **kwargs: Other keyword arguments. References: - [Lin et al., 2004](https://aclanthology.org/W04-1013/) Examples: 1. Python string. >>> rouge_l = keras_nlp.metrics.RougeL() >>> y_true = "the tiny little cat was found under the big funny bed" >>> y_pred = "the cat was under the bed" >>> rouge_l(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.7058824> 2. List inputs. a. Python list. >>> rouge_l = keras_nlp.metrics.RougeL() >>> y_true = [ ... "the tiny little cat was found under the big funny bed", ... "i really love contributing to KerasNLP", ... ] >>> y_pred = [ ... "the cat was under the bed", ... "i love contributing to KerasNLP", ... ] >>> rouge_l(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.80748665> 3. 2D inputs. >>> rouge_l = keras_nlp.metrics.RougeL() >>> y_true = [ ... ["the tiny little cat was found under the big funny bed"], ... ["i really love contributing to KerasNLP"], ... ] >>> y_pred = [ ... ["the cat was under the bed"], ... ["i love contributing to KerasNLP"], ... ] >>> rouge_l(y_true, y_pred)["f1_score"] <tf.Tensor: shape=(), dtype=float32, numpy=0.80748665> """ def __init__( self, use_stemmer=False, name="rouge-l", **kwargs, ): super().__init__( variant="rougeL", use_stemmer=use_stemmer, name=name, **kwargs, ) def get_config(self): config = super().get_config() del config["variant"] return config
keras-nlp/keras_nlp/metrics/rouge_l.py/0
{ "file_path": "keras-nlp/keras_nlp/metrics/rouge_l.py", "repo_id": "keras-nlp", "token_count": 1311 }
140
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ALBERT model preset configurations.""" backbone_presets = { "albert_base_en_uncased": { "metadata": { "description": ( "12-layer ALBERT model where all input is lowercased. " "Trained on English Wikipedia + BooksCorpus." ), "params": 11683584, "official_name": "ALBERT", "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, "kaggle_handle": "kaggle://keras/albert/keras/albert_base_en_uncased/2", }, "albert_large_en_uncased": { "metadata": { "description": ( "24-layer ALBERT model where all input is lowercased. " "Trained on English Wikipedia + BooksCorpus." ), "params": 17683968, "official_name": "ALBERT", "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, "kaggle_handle": "kaggle://keras/albert/keras/albert_large_en_uncased/2", }, "albert_extra_large_en_uncased": { "metadata": { "description": ( "24-layer ALBERT model where all input is lowercased. " "Trained on English Wikipedia + BooksCorpus." ), "params": 58724864, "official_name": "ALBERT", "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, "kaggle_handle": "kaggle://keras/albert/keras/albert_extra_large_en_uncased/2", }, "albert_extra_extra_large_en_uncased": { "metadata": { "description": ( "12-layer ALBERT model where all input is lowercased. " "Trained on English Wikipedia + BooksCorpus." ), "params": 222595584, "official_name": "ALBERT", "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, "kaggle_handle": "kaggle://keras/albert/keras/albert_extra_extra_large_en_uncased/2", }, }
keras-nlp/keras_nlp/models/albert/albert_presets.py/0
{ "file_path": "keras-nlp/keras_nlp/models/albert/albert_presets.py", "repo_id": "keras-nlp", "token_count": 1282 }
141
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone from keras_nlp.models.bloom.bloom_decoder import BloomDecoder from keras_nlp.models.bloom.bloom_presets import backbone_presets from keras_nlp.utils.python_utils import classproperty def _bloom_kernel_initializer(stddev=0.02): return keras.initializers.RandomNormal(stddev=stddev) @keras_nlp_export("keras_nlp.models.BloomBackbone") class BloomBackbone(Backbone): """A BLOOM decoder network. This network implements a Transformer-based decoder network, the BigScience Large Open-science Open-access Multilingual (BLOOM) language model, as described in ["BLOOM: A 176B-Parameter Open-Access Multilingual Language Model"](https://arxiv.org/pdf/2211.05100.pdf). The default constructor gives a fully customizable, randomly initialized Bloom model with any number of layers, heads, and embedding dimensions. To load preset architectures and weights, use the `from_preset()` constructor. Disclaimer: Pre-trained models are provided on an "as is" basis, without warranties or conditions of any kind. The underlying model is provided by a third party and subject to a separate license, available [here](https://huggingface.co/spaces/bigscience/license). Args: vocabulary_size: int. The size of the token vocabulary. num_layers: int. The number of transformer layers. num_heads: int. The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. hidden_dim: int. The dimensionality of the embeddings and hidden states. intermediate_dim: int. The output dimension of the first Dense layer in the MLP network of each transformer. dropout: float. Dropout probability for the Transformer decoder. layer_norm_epsilon: float. Epsilon for the layer normalization layers in the transformer decoder. max_sequence_length: int. The maximum sequence length that this decoder can consume. dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use for model computations and weights. Note that some computations, such as softmax and layer normalization, will always be done at float32 precision regardless of dtype. Examples: ```python input_data = { "token_ids": np.ones(shape=(1, 12), dtype="int32"), "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]), } # Pretrained BLOOM decoder. model = keras_nlp.models.BloomBackbone.from_preset("bloom_560m_multi") model(input_data) # Randomly initialized BLOOM decoder with a custom config.
model = keras_nlp.models.BloomBackbone( vocabulary_size=10, num_layers=2, num_heads=2, hidden_dim=32, intermediate_dim=32*4, dropout=0.0, layer_norm_epsilon=1e-5, max_sequence_length=128, ) model(input_data) ``` """ def __init__( self, vocabulary_size, num_layers, num_heads, hidden_dim, intermediate_dim, dropout=0.0, layer_norm_epsilon=1e-5, max_sequence_length=2048, dtype=None, **kwargs, ): # === Layers === self.token_embedding = ReversibleEmbedding( input_dim=vocabulary_size, output_dim=hidden_dim, embeddings_initializer=_bloom_kernel_initializer(stddev=0.02), tie_weights=False, dtype=dtype, name="token_embedding", ) self.embeddings_layer_norm = keras.layers.LayerNormalization( epsilon=layer_norm_epsilon, dtype=dtype, name="token_embedding_layernorm", ) self.transformer_layers = [] for i in range(num_layers): layer = BloomDecoder( num_heads=num_heads, intermediate_dim=intermediate_dim, dropout=dropout, layer_norm_epsilon=layer_norm_epsilon, dtype=dtype, name=f"transformer_layer_{i}", ) self.transformer_layers.append(layer) self.layer_norm = keras.layers.LayerNormalization( epsilon=layer_norm_epsilon, dtype=dtype, name="final_layernorm", ) # === Functional Model === token_id_input = keras.Input( shape=(None,), dtype="int32", name="token_ids" ) padding_mask_input = keras.Input( shape=(None,), dtype="int32", name="padding_mask" ) x = self.token_embedding(token_id_input) x = self.embeddings_layer_norm(x) for transformer_layer in self.transformer_layers: x = transformer_layer(x, decoder_padding_mask=padding_mask_input) sequence_output = self.layer_norm(x) super().__init__( inputs={ "token_ids": token_id_input, "padding_mask": padding_mask_input, }, outputs=sequence_output, **kwargs, ) # === Config === self.vocabulary_size = vocabulary_size self.num_layers = num_layers self.num_heads = num_heads self.hidden_dim = hidden_dim self.intermediate_dim = intermediate_dim self.dropout = dropout self.layer_norm_epsilon = layer_norm_epsilon self.max_sequence_length = max_sequence_length def get_config(self): config = super().get_config() config.update( { "vocabulary_size": self.vocabulary_size, "num_layers": self.num_layers, "num_heads": self.num_heads, "hidden_dim": self.hidden_dim, "intermediate_dim": self.intermediate_dim, "dropout": self.dropout, "layer_norm_epsilon": self.layer_norm_epsilon, "max_sequence_length": self.max_sequence_length, } ) return config @classproperty def presets(cls): return copy.deepcopy(backbone_presets)
keras-nlp/keras_nlp/models/bloom/bloom_backbone.py/0
{ "file_path": "keras-nlp/keras_nlp/models/bloom/bloom_backbone.py", "repo_id": "keras-nlp", "token_count": 2983 }
142
# Copyright 2022 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl import logging from keras_nlp.api_export import keras_nlp_export from keras_nlp.layers.preprocessing.masked_lm_mask_generator import ( MaskedLMMaskGenerator, ) from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import ( DebertaV3Preprocessor, ) from keras_nlp.utils.keras_utils import pack_x_y_sample_weight @keras_nlp_export("keras_nlp.models.DebertaV3MaskedLMPreprocessor") class DebertaV3MaskedLMPreprocessor(DebertaV3Preprocessor): """DeBERTa preprocessing for the masked language modeling task. This preprocessing layer will prepare inputs for a masked language modeling task. It is primarily intended for use with the `keras_nlp.models.DebertaV3MaskedLM` task model. Preprocessing will occur in multiple steps. - Tokenize any number of input segments using the `tokenizer`. - Pack the inputs together with the appropriate `"<s>"`, `"</s>"` and `"<pad>"` tokens, i.e., adding a single `"<s>"` at the start of the entire sequence, `"</s></s>"` between each segment, and a `"</s>"` at the end of the entire sequence. - Randomly select non-special tokens to mask, controlled by `mask_selection_rate`. - Construct a `(x, y, sample_weight)` tuple suitable for training with a `keras_nlp.models.DebertaV3MaskedLM` task model. Args: tokenizer: A `keras_nlp.models.DebertaV3Tokenizer` instance. sequence_length: The length of the packed inputs. mask_selection_rate: The probability an input token will be dynamically masked. mask_selection_length: The maximum number of masked tokens supported by the layer. mask_token_rate: float. `mask_token_rate` must be between 0 and 1 which indicates how often the mask_token is substituted for tokens selected for masking. Defaults to `0.8`. random_token_rate: float. `random_token_rate` must be between 0 and 1 which indicates how often a random token is substituted for tokens selected for masking. Note: mask_token_rate + random_token_rate <= 1, and for (1 - mask_token_rate - random_token_rate), the token will not be changed. Defaults to `0.1`. truncate: string. The algorithm to truncate a list of batched segments to fit within `sequence_length`. The value can be either `round_robin` or `waterfall`: - `"round_robin"`: Available space is assigned one token at a time in a round-robin fashion to the inputs that still need some, until the limit is reached. - `"waterfall"`: The allocation of the budget is done using a "waterfall" algorithm that allocates quota in a left-to-right manner and fills up the buckets until we run out of budget. It supports an arbitrary number of segments. Examples: Directly calling the layer on data. ```python preprocessor = keras_nlp.models.DebertaV3MaskedLMPreprocessor.from_preset( "deberta_v3_base_en" ) # Tokenize and mask a single sentence. preprocessor("The quick brown fox jumped.") # Tokenize and mask a batch of single sentences. preprocessor(["The quick brown fox jumped.", "Call me Ishmael."]) # Tokenize and mask sentence pairs. 
# In this case, always convert input to tensors before calling the layer. first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) second = tf.constant(["The fox tripped.", "Oh look, a whale."]) preprocessor((first, second)) ``` Mapping with `tf.data.Dataset`. ```python preprocessor = keras_nlp.models.DebertaV3MaskedLMPreprocessor.from_preset( "deberta_v3_base_en" ) first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) second = tf.constant(["The fox tripped.", "Oh look, a whale."]) # Map single sentences. ds = tf.data.Dataset.from_tensor_slices(first) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map sentence pairs. ds = tf.data.Dataset.from_tensor_slices((first, second)) # Watch out for tf.data's default unpacking of tuples here! # Best to invoke the `preprocessor` directly in this case. ds = ds.map( lambda first, second: preprocessor(x=(first, second)), num_parallel_calls=tf.data.AUTOTUNE, ) ``` """ def __init__( self, tokenizer, sequence_length=512, truncate="round_robin", mask_selection_rate=0.15, mask_selection_length=96, mask_token_rate=0.8, random_token_rate=0.1, **kwargs, ): super().__init__( tokenizer, sequence_length=sequence_length, truncate=truncate, **kwargs, ) self.mask_selection_rate = mask_selection_rate self.mask_selection_length = mask_selection_length self.mask_token_rate = mask_token_rate self.random_token_rate = random_token_rate self.masker = None def build(self, input_shape): super().build(input_shape) # Defer masker creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( mask_selection_rate=self.mask_selection_rate, mask_selection_length=self.mask_selection_length, mask_token_rate=self.mask_token_rate, random_token_rate=self.random_token_rate, vocabulary_size=self.tokenizer.vocabulary_size(), mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ self.tokenizer.cls_token_id, self.tokenizer.sep_token_id, self.tokenizer.pad_token_id, ], ) def get_config(self): config = super().get_config() config.update( { "mask_selection_rate": self.mask_selection_rate, "mask_selection_length": self.mask_selection_length, "mask_token_rate": self.mask_token_rate, "random_token_rate": self.random_token_rate, } ) return config def call(self, x, y=None, sample_weight=None): if y is not None or sample_weight is not None: logging.warning( f"{self.__class__.__name__} generates `y` and `sample_weight` " "based on your input data, but your data already contains `y` " "or `sample_weight`. Your `y` and `sample_weight` will be " "ignored." ) x = super().call(x) token_ids, padding_mask = x["token_ids"], x["padding_mask"] masker_outputs = self.masker(token_ids) x = { "token_ids": masker_outputs["token_ids"], "padding_mask": padding_mask, "mask_positions": masker_outputs["mask_positions"], } y = masker_outputs["mask_ids"] sample_weight = masker_outputs["mask_weights"] return pack_x_y_sample_weight(x, y, sample_weight)
keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py/0
{ "file_path": "keras-nlp/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py", "repo_id": "keras-nlp", "token_count": 3238 }
143
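The `DebertaV3MaskedLMPreprocessor` above delegates the actual masking to `MaskedLMMaskGenerator`. The NumPy sketch below is purely illustrative — the token ids, "special" ids, mask id, and selection rate are invented, and it is not the KerasNLP implementation — but it mirrors the kind of `(x, y, sample_weight)` bookkeeping the masking step performs.

```python
import numpy as np

rng = np.random.default_rng(0)
token_ids = np.array([5, 23, 7, 42, 9, 3])  # hypothetical packed sequence
special_ids = {5, 3}                        # pretend <s> and </s>
mask_token_id = 99                          # hypothetical mask id
mask_selection_rate = 0.5

# Only non-special tokens are candidates for masking.
candidates = [i for i, t in enumerate(token_ids) if t not in special_ids]
selected = [i for i in candidates if rng.random() < mask_selection_rate]

x_token_ids = token_ids.copy()
x_token_ids[selected] = mask_token_id        # 80/10/10 substitution omitted
y = token_ids[selected]                      # labels are the original ids
sample_weight = np.ones_like(y, dtype="float32")

print(x_token_ids, selected, y, sample_weight)
```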
# Copyright 2024 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.gemma.gemma_backbone import GemmaBackbone from keras_nlp.models.gemma.gemma_causal_lm_preprocessor import ( GemmaCausalLMPreprocessor, ) from keras_nlp.models.gemma.gemma_presets import backbone_presets from keras_nlp.models.generative_task import GenerativeTask from keras_nlp.utils.python_utils import classproperty @keras_nlp_export("keras_nlp.models.GemmaCausalLM") class GemmaCausalLM(GenerativeTask): """An end-to-end Gemma model for causal language modeling. A causal language model (LM) predicts the next token based on previous tokens. This task setup can be used to train the model unsupervised on plain text input, or to autoregressively generate plain text similar to the data used for training. This task can be used for pre-training or fine-tuning a Gemma model, simply by calling `fit()`. This model has a `generate()` method, which generates text based on a prompt. The generation strategy used is controlled by an additional `sampler` argument on `compile()`. You can recompile the model with different `keras_nlp.samplers` objects to control the generation. By default, `"greedy"` sampling will be used. This model can optionally be configured with a `preprocessor` layer, in which case it will automatically apply preprocessing to string inputs during `fit()`, `predict()`, `evaluate()` and `generate()`. This is done by default when creating the model with `from_preset()`. Args: backbone: A `keras_nlp.models.GemmaBackbone` instance. preprocessor: A `keras_nlp.models.GemmaCausalLMPreprocessor` or `None`. If `None`, this model will not apply preprocessing, and inputs should be preprocessed before calling the model. Examples: Use `generate()` to do text generation. ```python gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en") gemma_lm.generate("I want to say", max_length=30) # Generate with batched prompts. gemma_lm.generate(["This is a", "Where are you"], max_length=30) ``` Compile the `generate()` function with a custom sampler. ```python gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en") gemma_lm.compile(sampler="top_k") gemma_lm.generate("I want to say", max_length=30) gemma_lm.compile(sampler=keras_nlp.samplers.BeamSampler(num_beams=2)) gemma_lm.generate("I want to say", max_length=30) ``` Use `generate()` without preprocessing. ```python prompt = { # Token ids for "<bos> Keras is". "token_ids": np.array([[2, 214064, 603, 0, 0, 0, 0]] * 2), # Use `"padding_mask"` to indicate values that should not be overridden. "padding_mask": np.array([[1, 1, 1, 0, 0, 0, 0]] * 2), } gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset( "gemma_2b_en", preprocessor=None, ) gemma_lm.generate(prompt) ``` Call `fit()` on a single batch. 
```python features = ["The quick brown fox jumped.", "I forgot my homework."] gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en") gemma_lm.fit(x=features, batch_size=2) ``` Call `fit()` without preprocessing. ```python x = { # Token ids for "<bos> Keras is deep learning library<eos>" "token_ids": np.array([[2, 214064, 603, 5271, 6044, 9581, 1, 0]] * 2), "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 0]] * 2), } y = np.array([[214064, 603, 5271, 6044, 9581, 3, 0, 0]] * 2) sw = np.array([[1, 1, 1, 1, 1, 1, 0, 0]] * 2) gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset( "gemma_2b_en", preprocessor=None, ) gemma_lm.fit(x=x, y=y, sample_weight=sw, batch_size=2) ``` Custom backbone and vocabulary. ```python tokenizer = keras_nlp.models.GemmaTokenizer( proto="proto.spm", ) preprocessor = keras_nlp.models.GemmaCausalLMPreprocessor( tokenizer=tokenizer, sequence_length=128, ) backbone = keras_nlp.models.GemmaBackbone( vocabulary_size=30552, num_layers=4, num_heads=4, hidden_dim=256, intermediate_dim=512, max_sequence_length=128, ) gemma_lm = keras_nlp.models.GemmaCausalLM( backbone=backbone, preprocessor=preprocessor, ) gemma_lm.fit(x=features, batch_size=2) ``` """ def __init__( self, backbone, preprocessor=None, **kwargs, ): # === Layers === self.backbone = backbone self.preprocessor = preprocessor # === Functional Model === inputs = backbone.input hidden_states = backbone(inputs) outputs = backbone.token_embedding(hidden_states, reverse=True) super().__init__( inputs=inputs, outputs=outputs, **kwargs, ) # === Default compilation === self.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.Adam(2e-5), metrics=[keras.metrics.SparseCategoricalAccuracy()], sampler="greedy", jit_compile=True, ) @classproperty def presets(cls): return copy.deepcopy(backbone_presets) @classproperty def backbone_cls(cls): return GemmaBackbone @classproperty def preprocessor_cls(cls): return GemmaCausalLMPreprocessor def call_with_cache( self, token_ids, cache, cache_update_index, ): """Forward pass of `GemmaCausalLM` with cache. `call_with_cache` adds an additional forward pass for the model for autoregressive inference. Unlike calling the model directly, this method allows caching previous key/value Tensors in multi-head attention layer, and avoids recomputing the outputs of seen tokens. Args: token_ids: a dense int Tensor with shape `(batch_size, max_length)`. cache: a dense float Tensor, the cache of key and value. cache_update_index: int, or int Tensor. The index of current inputs in the whole sequence. Returns: A (logits, hidden_states, cache) tuple. Where `logits` is the language model logits for the input token_ids, `hidden_states` is the final hidden representation of the input tokens, and `cache` is the decoding cache. """ x = self.backbone.token_embedding(token_ids) x = x * ops.cast(ops.sqrt(self.backbone.hidden_dim), x.dtype) # Each decoder layer has a cache; we update them separately. caches = [] for i, transformer_layer in enumerate(self.backbone.transformer_layers): current_cache = cache[:, i, ...] 
x, next_cache = transformer_layer( x, cache=current_cache, cache_update_index=cache_update_index, ) caches.append(next_cache) cache = ops.stack(caches, axis=1) hidden_states = x = self.backbone.layer_norm(x) logits = self.backbone.token_embedding(x, reverse=True) return logits, hidden_states, cache def _build_cache(self, token_ids): """Build an empty cache for use with `call_with_cache()`.""" batch_size = ops.shape(token_ids)[0] max_length = ops.shape(token_ids)[1] num_layers = self.backbone.num_layers num_heads = self.backbone.num_key_value_heads head_dim = self.backbone.head_dim shape = [batch_size, num_layers, 2, max_length, num_heads, head_dim] cache = ops.zeros(shape, dtype=self.compute_dtype) # Seed the cache. _, hidden_states, cache = self.call_with_cache(token_ids, cache, 0) return hidden_states, cache def generate_step( self, inputs, end_token_id=None, ): """A compilable generation function for a single batch of inputs. This function represents the inner, XLA-compilable, generation function for a single batch of inputs. Inputs should have the same structure as model inputs, a dictionary with keys `"token_ids"` and `"padding_mask"`. Args: inputs: A dictionary with two keys `"token_ids"` and `"padding_mask"` and batched tensor values. end_token_id: The id of the end token to stop on. If all sequences have produced a new `end_token_id`, generation will stop. """ token_ids, padding_mask = inputs["token_ids"], inputs["padding_mask"] # Create and seed cache with a single forward pass. hidden_states, cache = self._build_cache(token_ids) # Compute the lengths of all user inputted tokens ids. row_lengths = ops.sum(ops.cast(padding_mask, "int32"), axis=-1) # Start at the first index that has no user inputted id. index = ops.min(row_lengths) def next(prompt, cache, index): # The cache index is the index of our previous token. cache_update_index = index - 1 batch_size = ops.shape(prompt)[0] prompt = ops.slice(prompt, [0, cache_update_index], [batch_size, 1]) logits, hidden_states, cache = self.call_with_cache( prompt, cache, cache_update_index, ) return ( ops.squeeze(logits, axis=1), ops.squeeze(hidden_states, axis=1), cache, ) token_ids = self._sampler( next=next, prompt=token_ids, cache=cache, index=index, mask=padding_mask, end_token_id=end_token_id, hidden_states=hidden_states, model=self, ) # Compute an output padding mask with the token ids we updated. if end_token_id is not None: # Build a mask of `end_token_id` locations not in the original # prompt (not in locations where `padding_mask` is True). end_locations = ops.logical_and( ops.equal(token_ids, end_token_id), ops.logical_not(padding_mask), ) end_locations = ops.cast(end_locations, "int32") # Use cumsum to get ones in all locations after end_locations. cumsum = ops.cast(ops.cumsum(end_locations, axis=-1), "int32") overflow = cumsum - end_locations # Our padding mask is the inverse of these overflow locations. padding_mask = ops.logical_not(ops.cast(overflow, "bool")) else: # Without early stopping, all locations will have been updated. padding_mask = ops.ones_like(token_ids, dtype="bool") return { "token_ids": token_ids, "padding_mask": padding_mask, } def score( self, token_ids, padding_mask=None, scoring_mode="logits", layer_intercept_fn=None, target_ids=None, ): """Score a generation represented by the provided token ids. Args: token_ids: A <int>[batch_size, num_tokens] tensor containing tokens to score. 
Typically, this tensor captures the output from a call to
                `GemmaCausalLM.generate()`, i.e., tokens for both the input
                text and the model-generated text.
            padding_mask: A <bool>[batch_size, num_tokens] tensor indicating
                the tokens that should be preserved during generation. This is
                an artifact required by the GemmaBackbone and isn't influential
                on the computation of this function. If omitted, this function
                uses `keras.ops.ones()` to create a tensor of the appropriate
                shape.
            scoring_mode: The type of scores to return, either "logits" or
                "loss"; both are computed per input token.
            layer_intercept_fn: An optional function for augmenting activations
                with additional computation, for example, as part of
                interpretability research. This function will be passed the
                activations as its first parameter and a numeric index
                associated with that backbone layer. This index is *not* an
                index into `self.backbone.layers`. The index -1 accompanies the
                embeddings returned by calling
                `self.backbone.token_embedding()` on `token_ids` in the forward
                direction. All subsequent indexes will be 0-based indices for
                the activations returned by each of the transformer layers in
                the backbone. This function must return a
                <float>[batch_size, num_tokens, hidden_dims] tensor that can be
                passed as an input to the next layer in the model.
            target_ids: An <int>[batch_size, num_tokens] tensor containing the
                predicted tokens against which the loss should be computed. If
                a span of tokens is provided (sequential truthy values along
                axis=1 in the tensor), the loss will be computed as the
                aggregate across those tokens.

        Raises:
            ValueError: If an unsupported `scoring_mode` is provided, or if
                `target_ids` are not provided when `scoring_mode="loss"`.

        Returns:
            The per-token scores as a tensor of size
            <float>[batch_size, num_tokens, vocab_size] in "logits" mode, or
            <float>[batch_size, num_tokens] in "loss" mode.

        Examples:

        Compute gradients between embeddings and loss scores with TensorFlow:
        ```python
        gemma_lm = keras_nlp.models.GemmaCausalLM.from_preset(
            "gemma_2b_en"
        )
        generations = gemma_lm.generate(
            ["This is a", "Where are you"],
            max_length=30
        )
        preprocessed = gemma_lm.preprocessor.generate_preprocess(generations)
        generation_ids = preprocessed["token_ids"]
        padding_mask = preprocessed["padding_mask"]
        target_ids = keras.ops.roll(generation_ids, shift=-1, axis=1)

        embeddings = None
        with tf.GradientTape(watch_accessed_variables=True) as tape:
            def layer_intercept_fn(x, i):
                if i == -1:
                    nonlocal embeddings, tape
                    embeddings = x
                    tape.watch(embeddings)
                return x

            losses = gemma_lm.score(
                token_ids=generation_ids,
                padding_mask=padding_mask,
                scoring_mode="loss",
                layer_intercept_fn=layer_intercept_fn,
                target_ids=target_ids,
            )

        grads = tape.gradient(losses, embeddings)
        ```
        """
        if scoring_mode not in ("logits", "loss"):
            raise ValueError(
                "Unsupported scoring_mode. Must be one of 'logits' or 'loss'."
            )

        if scoring_mode == "loss" and target_ids is None:
            raise ValueError(
                "Cannot compute loss without targets. Please provide target "
                "token ids via the target_ids parameter."
            )

        batch_shape = ops.shape(token_ids)[:2]
        assert len(batch_shape) == 2
        if padding_mask is None:
            padding_mask = ops.ones(shape=batch_shape)

        if layer_intercept_fn is None:

            def default_layer_intercept_fn(x, unused_i):
                return x

            layer_intercept_fn = default_layer_intercept_fn

        token_embeddings = self.backbone.token_embedding(token_ids)
        x = layer_intercept_fn(token_embeddings, -1)
        # Scale the (possibly augmented) embeddings returned by
        # `layer_intercept_fn`, so that its output is not silently discarded.
        x = x * ops.cast(
            ops.sqrt(self.backbone.hidden_dim), dtype=self.compute_dtype
        )

        for i, transformer_layer in enumerate(self.backbone.transformer_layers):
            x = transformer_layer(x, padding_mask=padding_mask)
            x = layer_intercept_fn(x, i)

        x = self.backbone.layer_norm(x)
        logits = self.backbone.token_embedding(x, reverse=True)

        if scoring_mode == "logits":
            return logits

        per_token_loss_fn = keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction="none"
        )
        per_token_loss = per_token_loss_fn(target_ids, logits)
        return per_token_loss
keras-nlp/keras_nlp/models/gemma/gemma_causal_lm.py/0
{ "file_path": "keras-nlp/keras_nlp/models/gemma/gemma_causal_lm.py", "repo_id": "keras-nlp", "token_count": 7624 }
144
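For reference, `_build_cache()` above allocates one key/value cache per decoder layer with layout `[batch, num_layers, 2 (key/value), max_length, num_kv_heads, head_dim]`, and `call_with_cache()` slices it per layer and re-stacks it. The NumPy sketch below uses made-up sizes that do not correspond to any real Gemma preset.

```python
import numpy as np

batch, num_layers, max_length, num_kv_heads, head_dim = 2, 18, 256, 1, 256
cache = np.zeros(
    (batch, num_layers, 2, max_length, num_kv_heads, head_dim), "float32"
)

# Per-layer slice handed to one transformer layer, then re-stacked,
# mirroring `cache[:, i, ...]` and `ops.stack(caches, axis=1)` above.
layer_caches = [cache[:, i, ...] for i in range(num_layers)]
restacked = np.stack(layer_caches, axis=1)
assert restacked.shape == cache.shape

# Rough memory footprint in MiB for this (made-up) configuration.
print(cache.size * 4 / 2**20)
```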
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest.mock import patch import pytest from keras_nlp.backend import ops from keras_nlp.models.gpt_neo_x.gpt_neo_x_backbone import GPTNeoXBackbone from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm import GPTNeoXCausalLM from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm_preprocessor import ( GPTNeoXCausalLMPreprocessor, ) from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer from keras_nlp.tests.test_case import TestCase class GPTNeoXCausalLMTest(TestCase): def setUp(self): self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] self.vocab += ["<|endoftext|>"] self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] self.preprocessor = GPTNeoXCausalLMPreprocessor( GPTNeoXTokenizer(vocabulary=self.vocab, merges=self.merges), sequence_length=8, ) self.backbone = GPTNeoXBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, num_heads=2, hidden_dim=4, intermediate_dim=8, max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, "backbone": self.backbone, } self.train_data = ([" airplane at airport", " airplane at airport"],) self.input_data = self.preprocessor(*self.train_data)[0] def test_causal_lm_basics(self): self.run_task_test( cls=GPTNeoXCausalLM, init_kwargs=self.init_kwargs, train_data=self.train_data, expected_output_shape=(2, 8, 7), ) def test_generate(self): causal_lm = GPTNeoXCausalLM(**self.init_kwargs) # String input. prompt = " airplane at airport" output = causal_lm.generate(" airplane at airport") self.assertTrue(prompt in output) # Int tensor input. prompt_ids = self.preprocessor.generate_preprocess([prompt]) causal_lm.preprocessor = None outputs = causal_lm.generate(prompt_ids) # Assert prompt is in output in token id space. self.assertAllEqual( outputs["token_ids"][:, :5], prompt_ids["token_ids"][:, :5], ) self.assertAllEqual( outputs["padding_mask"][:, :5], prompt_ids["padding_mask"][:, :5], ) def test_early_stopping(self): causal_lm = GPTNeoXCausalLM(**self.init_kwargs) call_with_cache = causal_lm.call_with_cache def wrapper(*args, **kwargs): """Modify output logits to always favor end_token_id""" logits, hidden_states, cache = call_with_cache(*args, **kwargs) index = self.preprocessor.tokenizer.end_token_id update = ops.ones_like(logits)[:, :, index] * 1.0e9 update = ops.expand_dims(update, axis=-1) logits = ops.slice_update(logits, (0, 0, index), update) return logits, hidden_states, cache with patch.object(causal_lm, "call_with_cache", wraps=wrapper): prompt = [" airplane at airport", " airplane"] output = causal_lm.generate(prompt) # We should immediately abort and output the prompt. 
self.assertEqual(prompt, output) def test_generate_compilation(self): causal_lm = GPTNeoXCausalLM(**self.init_kwargs) # Assert we do not recompile with successive calls. causal_lm.generate(" airplane at airport") first_fn = causal_lm.generate_function causal_lm.generate(" airplane at airport") second_fn = causal_lm.generate_function self.assertEqual(first_fn, second_fn) # Assert we do recompile after compile is called. causal_lm.compile(sampler="greedy") self.assertIsNone(causal_lm.generate_function) @pytest.mark.large def test_saved_model(self): self.run_model_saving_test( cls=GPTNeoXCausalLM, init_kwargs=self.init_kwargs, input_data=self.input_data, )
keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py/0
{ "file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py", "repo_id": "keras-nlp", "token_count": 2219 }
145
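The `test_early_stopping` case above works by wrapping `call_with_cache` so the end token always wins. A standalone NumPy sketch of that logit-forcing trick follows; the batch size, vocabulary size, and end token id are invented for illustration.

```python
import numpy as np

logits = np.random.randn(2, 1, 7).astype("float32")  # (batch, 1, vocab)
end_token_id = 6

# Add a huge value at one vocabulary index so any sampler will pick it.
logits[:, :, end_token_id] = 1.0e9

# Softmax over the last axis; the forced index gets essentially all the mass.
probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)
assert (probs.argmax(axis=-1) == end_token_id).all()
```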
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone from keras_nlp.models.mistral.mistral_layer_norm import ( MistralLayerNormalization, ) from keras_nlp.models.mistral.mistral_presets import backbone_presets from keras_nlp.models.mistral.mistral_transformer_decoder import ( MistralTransformerDecoder, ) from keras_nlp.utils.python_utils import classproperty def _mistral_kernel_initializer(stddev=0.02): return keras.initializers.RandomNormal(stddev=stddev) @keras_nlp_export("keras_nlp.models.MistralBackbone") class MistralBackbone(Backbone): """ The Mistral Transformer core architecture with hyperparameters. This network implements a Transformer-based decoder network, Mistral, as described in ["Mistral 7B"](https://arxiv.org/pdf/2310.06825.pdf). It includes the embedding lookups and transformer layers. The default constructor gives a fully customizable, randomly initialized Mistral model with any number of layers, heads, and embedding dimensions. To load preset architectures and weights, use the `from_preset` constructor. Args: vocabulary_size (int): The size of the token vocabulary. num_layers (int): The number of transformer layers. num_query_heads (int): The number of query attention heads for each transformer. hidden_dim (int): The size of the transformer encoding and pooling layers. intermediate_dim (int): The output dimension of the first Dense layer in a three-layer feedforward network for each transformer. num_key_value_heads (int): The number of key and value attention heads for each transformer. rope_max_wavelength (int, optional): The maximum angular wavelength of the sine/cosine curves, for rotary embeddings. Defaults to `10000`. rope_scaling_factor (float, optional): The scaling factor for calculation of roatary embedding. Defaults to `1.0`. layer_norm_epsilon (float, optional): Epsilon for the layer normalization layers in the transformer decoder. Defaults to `1e-6`. sliding_window (int, optional): The sliding window for the mistral attention layers. This controls the maximum cache size for the attention layers in each transformer decoder. Only `sliding_window` number of tokens are saved in the cache and used to generate the next token. Defaults to `512`. dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use for model computations and weights. Note that some computations, such as softmax and layer normalization, will always be done at float32 precision regardless of dtype. Examples: ```python input_data = { "token_ids": np.ones(shape=(1, 12), dtype="int32"), "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]), } # Pretrained Mistral decoder. 
model = keras_nlp.models.MistralBackbone.from_preset("mistral7b_base_en") model(input_data) # Randomly initialized Mistral decoder with custom config. model = keras_nlp.models.MistralBackbone( vocabulary_size=10, hidden_dim=512, num_layers=2, num_query_heads=32, num_key_value_heads=8, intermediate_dim=1024, sliding_window=512, layer_norm_epsilon=1e-6, dtype="float32" ) model(input_data) ``` """ def __init__( self, vocabulary_size, num_layers, num_query_heads, hidden_dim, intermediate_dim, num_key_value_heads, rope_max_wavelength=10000, rope_scaling_factor=1.0, layer_norm_epsilon=1e-6, sliding_window=512, dropout=0, dtype=None, **kwargs, ): # === Layers === self.token_embedding = ReversibleEmbedding( input_dim=vocabulary_size, output_dim=hidden_dim, tie_weights=False, embeddings_initializer=_mistral_kernel_initializer(stddev=0.01), dtype=dtype, name="token_embedding", ) self.transformer_layers = [] for i in range(num_layers): layer = MistralTransformerDecoder( intermediate_dim=intermediate_dim, num_query_heads=num_query_heads, num_key_value_heads=num_key_value_heads, rope_max_wavelength=rope_max_wavelength, rope_scaling_factor=rope_scaling_factor, layer_norm_epsilon=layer_norm_epsilon, activation=ops.silu, kernel_initializer=_mistral_kernel_initializer(stddev=0.02), sliding_window=sliding_window, dropout=dropout, dtype=dtype, name=f"transformer_layer_{i}", ) self.transformer_layers.append(layer) self.layer_norm = MistralLayerNormalization( epsilon=layer_norm_epsilon, dtype=dtype, name="sequence_output_layernorm", ) # === Functional Model === token_id_input = keras.Input( shape=(None,), dtype="int32", name="token_ids" ) padding_mask_input = keras.Input( shape=(None,), dtype="int32", name="padding_mask" ) x = self.token_embedding(token_id_input) for transformer_layer in self.transformer_layers: x = transformer_layer(x, decoder_padding_mask=padding_mask_input) sequence_output = self.layer_norm(x) super().__init__( inputs={ "token_ids": token_id_input, "padding_mask": padding_mask_input, }, outputs=sequence_output, **kwargs, ) # === Config === self.vocabulary_size = vocabulary_size self.num_layers = num_layers self.num_query_heads = num_query_heads self.hidden_dim = hidden_dim self.intermediate_dim = intermediate_dim self.rope_max_wavelength = rope_max_wavelength self.num_key_value_heads = num_key_value_heads self.rope_scaling_factor = rope_scaling_factor self.sliding_window = sliding_window self.layer_norm_epsilon = layer_norm_epsilon self.dropout = dropout def get_config(self): config = super().get_config() config.update( { "vocabulary_size": self.vocabulary_size, "num_layers": self.num_layers, "num_query_heads": self.num_query_heads, "hidden_dim": self.hidden_dim, "intermediate_dim": self.intermediate_dim, "rope_max_wavelength": self.rope_max_wavelength, "rope_scaling_factor": self.rope_scaling_factor, "num_key_value_heads": self.num_key_value_heads, "sliding_window": self.sliding_window, "layer_norm_epsilon": self.layer_norm_epsilon, "dropout": self.dropout, } ) return config @classproperty def presets(cls): return copy.deepcopy(backbone_presets)
keras-nlp/keras_nlp/models/mistral/mistral_backbone.py/0
{ "file_path": "keras-nlp/keras_nlp/models/mistral/mistral_backbone.py", "repo_id": "keras-nlp", "token_count": 3485 }
146
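`MistralBackbone` above distinguishes `num_query_heads` from `num_key_value_heads`, i.e. grouped-query attention, where each key/value head serves a group of query heads. The NumPy sketch below only illustrates the head-sharing arithmetic with invented sizes; it is not the attention implementation itself.

```python
import numpy as np

num_query_heads, num_key_value_heads, head_dim, seq = 32, 8, 4, 3
group_size = num_query_heads // num_key_value_heads  # 4 query heads per KV head

# One key tensor with only num_key_value_heads heads...
k = np.random.randn(seq, num_key_value_heads, head_dim)

# ...is repeated so every query head has a key head to attend with.
k_for_queries = np.repeat(k, group_size, axis=1)
assert k_for_queries.shape == (seq, num_query_heads, head_dim)
```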
# Copyright 2022 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from absl import logging from keras_nlp.api_export import keras_nlp_export from keras_nlp.layers.preprocessing.masked_lm_mask_generator import ( MaskedLMMaskGenerator, ) from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor from keras_nlp.utils.keras_utils import pack_x_y_sample_weight @keras_nlp_export("keras_nlp.models.RobertaMaskedLMPreprocessor") class RobertaMaskedLMPreprocessor(RobertaPreprocessor): """RoBERTa preprocessing for the masked language modeling task. This preprocessing layer will prepare inputs for a masked language modeling task. It is primarily intended for use with the `keras_nlp.models.RobertaMaskedLM` task model. Preprocessing will occur in multiple steps. 1. Tokenize any number of input segments using the `tokenizer`. 2. Pack the inputs together with the appropriate `"<s>"`, `"</s>"` and `"<pad>"` tokens, i.e., adding a single `"<s>"` at the start of the entire sequence, `"</s></s>"` between each segment, and a `"</s>"` at the end of the entire sequence. 3. Randomly select non-special tokens to mask, controlled by `mask_selection_rate`. 4. Construct a `(x, y, sample_weight)` tuple suitable for training with a `keras_nlp.models.RobertaMaskedLM` task model. Args: tokenizer: A `keras_nlp.models.RobertaTokenizer` instance. sequence_length: int. The length of the packed inputs. truncate: string. The algorithm to truncate a list of batched segments to fit within `sequence_length`. The value can be either `round_robin` or `waterfall`: - `"round_robin"`: Available space is assigned one token at a time in a round-robin fashion to the inputs that still need some, until the limit is reached. - `"waterfall"`: The allocation of the budget is done using a "waterfall" algorithm that allocates quota in a left-to-right manner and fills up the buckets until we run out of budget. It supports an arbitrary number of segments. mask_selection_rate: float. The probability an input token will be dynamically masked. mask_selection_length: int. The maximum number of masked tokens in a given sample. mask_token_rate: float. The probability the a selected token will be replaced with the mask token. random_token_rate: float. The probability the a selected token will be replaced with a random token from the vocabulary. A selected token will be left as is with probability `1 - mask_token_rate - random_token_rate`. Call arguments: x: A tensor of single string sequences, or a tuple of multiple tensor sequences to be packed together. Inputs may be batched or unbatched. For single sequences, raw python inputs will be converted to tensors. For multiple sequences, pass tensors directly. y: Label data. Should always be `None` as the layer generates labels. sample_weight: Label weights. Should always be `None` as the layer generates label weights. Examples: Directly calling the layer on data. ```python # Load the preprocessor from a preset. 
preprocessor = keras_nlp.models.RobertaMaskedLMPreprocessor.from_preset( "roberta_base_en" ) # Tokenize and mask a single sentence. preprocessor("The quick brown fox jumped.") # Tokenize and mask a batch of single sentences. preprocessor(["The quick brown fox jumped.", "Call me Ishmael."]) # Tokenize and mask sentence pairs. # In this case, always convert input to tensors before calling the layer. first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) second = tf.constant(["The fox tripped.", "Oh look, a whale."]) preprocessor((first, second)) ``` Mapping with `tf.data.Dataset`. ```python preprocessor = keras_nlp.models.RobertaMaskedLMPreprocessor.from_preset( "roberta_base_en" ) first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) second = tf.constant(["The fox tripped.", "Oh look, a whale."]) # Map single sentences. ds = tf.data.Dataset.from_tensor_slices(first) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) # Map sentence pairs. ds = tf.data.Dataset.from_tensor_slices((first, second)) # Watch out for tf.data's default unpacking of tuples here! # Best to invoke the `preprocessor` directly in this case. ds = ds.map( lambda first, second: preprocessor(x=(first, second)), num_parallel_calls=tf.data.AUTOTUNE, ) ``` """ def __init__( self, tokenizer, sequence_length=512, truncate="round_robin", mask_selection_rate=0.15, mask_selection_length=96, mask_token_rate=0.8, random_token_rate=0.1, **kwargs, ): super().__init__( tokenizer, sequence_length=sequence_length, truncate=truncate, **kwargs, ) self.mask_selection_rate = mask_selection_rate self.mask_selection_length = mask_selection_length self.mask_token_rate = mask_token_rate self.random_token_rate = random_token_rate self.masker = None def build(self, input_shape): super().build(input_shape) # Defer packer creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( mask_selection_rate=self.mask_selection_rate, mask_selection_length=self.mask_selection_length, mask_token_rate=self.mask_token_rate, random_token_rate=self.random_token_rate, vocabulary_size=self.tokenizer.vocabulary_size(), mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ self.tokenizer.start_token_id, self.tokenizer.end_token_id, self.tokenizer.pad_token_id, ], ) self.built = True def call(self, x, y=None, sample_weight=None): if y is not None or sample_weight is not None: logging.warning( f"{self.__class__.__name__} generates `y` and `sample_weight` " "based on your input data, but your data already contains `y` " "or `sample_weight`. Your `y` and `sample_weight` will be " "ignored." ) x = super().call(x) token_ids, padding_mask = x["token_ids"], x["padding_mask"] masker_outputs = self.masker(token_ids) x = { "token_ids": masker_outputs["token_ids"], "padding_mask": padding_mask, "mask_positions": masker_outputs["mask_positions"], } y = masker_outputs["mask_ids"] sample_weight = masker_outputs["mask_weights"] return pack_x_y_sample_weight(x, y, sample_weight) def get_config(self): config = super().get_config() config.update( { "mask_selection_rate": self.mask_selection_rate, "mask_selection_length": self.mask_selection_length, "mask_token_rate": self.mask_token_rate, "random_token_rate": self.random_token_rate, } ) return config
keras-nlp/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py/0
{ "file_path": "keras-nlp/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py", "repo_id": "keras-nlp", "token_count": 3290 }
147
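`RobertaMaskedLMPreprocessor` above exposes `mask_token_rate` and `random_token_rate`. The NumPy sketch below illustrates that 80/10/10 substitution rule applied to already-selected positions; the token ids, mask id, and vocabulary size are invented rather than RoBERTa's real values.

```python
import numpy as np

rng = np.random.default_rng(1)
mask_token_rate, random_token_rate = 0.8, 0.1
mask_token_id, vocab_size = 50264, 50265   # hypothetical
selected = np.array([11, 87, 301])          # hypothetical ids at masked positions

draws = rng.random(len(selected))
out = selected.copy()

# ~80%: replace with the mask token.
out[draws < mask_token_rate] = mask_token_id
# ~10%: replace with a random vocabulary token.
in_random = (draws >= mask_token_rate) & (draws < mask_token_rate + random_token_rate)
out[in_random] = rng.integers(0, vocab_size, in_random.sum())
# Remaining ~10%: keep the original id.
print(out)
```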
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.transformer_layer_utils import ( compute_causal_mask, ) from keras_nlp.models.t5.t5_layer_norm import T5LayerNorm from keras_nlp.models.t5.t5_multi_head_attention import T5MultiHeadAttention class T5TransformerLayer(keras.layers.Layer): def __init__( self, is_decoder, hidden_dim, intermediate_dim, key_value_dim, dropout, activation, layer_norm_epsilon, num_heads, use_gated_activation=False, use_relative_attention_bias=False, **kwargs, ): super().__init__(**kwargs) self.is_decoder = is_decoder self.use_gated_activation = use_gated_activation self.self_attention = T5MultiHeadAttention( is_decoder=is_decoder, hidden_dim=hidden_dim, key_value_dim=key_value_dim, num_heads=num_heads, dropout=dropout, use_relative_attention_bias=use_relative_attention_bias, dtype=self.dtype_policy, name="self_attention", ) self.self_attention_layer_norm = T5LayerNorm( layer_norm_epsilon, dtype=self.dtype_policy, ) self.self_attention_dropout = keras.layers.Dropout( dropout, dtype=self.dtype_policy, ) if self.is_decoder: self.cross_attention = T5MultiHeadAttention( is_decoder=is_decoder, hidden_dim=hidden_dim, key_value_dim=key_value_dim, num_heads=num_heads, dropout=dropout, use_relative_attention_bias=False, dtype=self.dtype_policy, name="cross_attention", ) self.cross_attention_layer_norm = T5LayerNorm( layer_norm_epsilon, dtype=self.dtype_policy, ) self.cross_attention_dropout = keras.layers.Dropout( dropout, dtype=self.dtype_policy, ) self.input_projector = keras.layers.Dense( intermediate_dim, use_bias=False, activation=keras.activations.get(activation), kernel_initializer=keras.initializers.RandomNormal( mean=0, stddev=hidden_dim**-0.5 ), dtype=self.dtype_policy, name="input_projector", ) if self.use_gated_activation: self.gate_projector = keras.layers.Dense( intermediate_dim, use_bias=False, kernel_initializer=keras.initializers.RandomNormal( mean=0, stddev=hidden_dim**-0.5 ), dtype=self.dtype_policy, name="gate_projector", ) self.output_projector = keras.layers.Dense( hidden_dim, use_bias=False, kernel_initializer=keras.initializers.RandomNormal( mean=0, stddev=intermediate_dim**-0.5 ), dtype=self.dtype_policy, name="output_projector", ) self.layer_norm = T5LayerNorm( epsilon=layer_norm_epsilon, dtype=self.dtype_policy, ) self.dropout_layer = keras.layers.Dropout( dropout, dtype=self.dtype_policy, ) def call( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, use_causal_mask=False, training=False, ): if use_causal_mask: shape = ops.shape(hidden_states) batch_size, length = shape[0], shape[1] causal_mask = compute_causal_mask(batch_size, length, length) attention_mask = causal_mask & ops.cast(attention_mask, "bool") x = hidden_states # Intermediate result. 
residual = x x = self.self_attention_layer_norm(x) x, position_bias = self.self_attention( x, mask=attention_mask, position_bias=position_bias, training=training, ) x = self.self_attention_dropout(x, training=training) x = x + residual if self.is_decoder: residual = x x = self.cross_attention_layer_norm(x) x, _ = self.cross_attention( x, key_value_states=encoder_hidden_states, mask=encoder_attention_mask, training=training, ) x = self.cross_attention_dropout(x, training=training) x = x + residual residual = x x = self.layer_norm(x) if self.use_gated_activation: hidden_activation = self.input_projector(x) hidden_linear = self.gate_projector(x) x = hidden_activation * hidden_linear else: x = self.input_projector(x) x = self.dropout_layer(x, training=training) x = self.output_projector(x) x = self.dropout_layer(x, training=training) x = x + residual if position_bias is not None: return x, position_bias else: return x
keras-nlp/keras_nlp/models/t5/t5_transformer_layer.py/0
{ "file_path": "keras-nlp/keras_nlp/models/t5/t5_transformer_layer.py", "repo_id": "keras-nlp", "token_count": 3042 }
148
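When `use_gated_activation=True`, the layer above multiplies an activated input projection by a linear gate projection before the output projection. The sketch below reproduces just that gated feedforward with random weights; a tanh-approximated GELU stands in for whatever `activation` is configured, and dropout is omitted.

```python
import numpy as np

def gelu(x):
    # Tanh approximation of GELU, used here only for the sketch.
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

hidden_dim, intermediate_dim = 8, 16
x = np.random.randn(2, 5, hidden_dim)
w_in = np.random.randn(hidden_dim, intermediate_dim) * hidden_dim**-0.5
w_gate = np.random.randn(hidden_dim, intermediate_dim) * hidden_dim**-0.5
w_out = np.random.randn(intermediate_dim, hidden_dim) * intermediate_dim**-0.5

h = gelu(x @ w_in) * (x @ w_gate)   # activated projection gated by linear projection
y = h @ w_out                       # project back to hidden_dim
assert y.shape == x.shape
```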
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.models.backbone import Backbone from keras_nlp.models.xlnet.xlnet_content_and_query_embedding import ( ContentAndQueryEmbedding, ) from keras_nlp.models.xlnet.xlnet_encoder import XLNetAttentionMaskLayer from keras_nlp.models.xlnet.xlnet_encoder import XLNetEncoder from keras_nlp.models.xlnet.xlnet_encoder import XLNetSegmentMatrixLayer @keras_nlp_export("keras_nlp.models.XLNetBackbone") class XLNetBackbone(Backbone): """XLNet encoder network. This class implements a XLNet Transformer. The default constructor gives a fully customizable, randomly initialized XLNet encoder with any number of layers, heads, and embedding dimensions. To load preset architectures and weights, use the `from_preset` constructor. Disclaimer: Pre-trained models are provided on an "as is" basis, without warranties or conditions of any kind. Attributes: vocabulary_size: int. The size of the token vocabulary. num_layers: int. The number of transformer encoder layers. num_heads: int, the number of heads in the `keras.layers.TwoStreamRelativeAttention` layer. hidden_dim: int, the size hidden states. intermediate_dim: int, the hidden size of feedforward network. dropout: float, defaults to 0.0 the dropout value, shared by `keras.layers.TwoStreamRelativeAttention` and feedforward network. activation: string or `keras.activations`, defaults to "gelu". the activation function of feedforward network. kernel_initializer_range: int, defaults to 0.02. The kernel initializer range for the dense and relative attention layers. bias_initializer: string or `keras.initializers` initializer, defaults to "zeros". The bias initializer for the dense and multiheaded relative attention layers. dtype: string or `keras.mixed_precision.DTypePolicy`. The dtype to use for model computations and weights. Note that some computations, such as softmax and layer normalization, will always be done at float32 precision regardless of dtype. Call arguments: token_ids: Indices of input sequence tokens in the vocabulary of shape `[batch_size, sequence_length]`. segment_ids: Segment token indices to indicate first and second portions of the inputs of shape `[batch_size, sequence_length]`. padding_mask: Mask to avoid performing attention on padding token indices of shape `[batch_size, sequence_length]`. 
Examples: ```python import numpy as np from keras_nlp.models import XLNetBackbone input_data = { "token_ids": np.array( [460, 5272, 1758, 4905, 9, 4, 3], shape=(1, 7), ), "segment_ids": np.array( [0, 0, 0, 0, 0, 0, 2], shape=(1, 7), ), "padding_mask": np.array( [1, 1, 1, 1, 1, 1, 1], shape=(1, 7) ), } # Randomly initialized XLNet encoder with a custom config model = keras_nlp.models.XLNetBackbone( vocabulary_size=32000, num_layers=12, num_heads=12, hidden_dim=768, intermediate_dim=3072, ) output = model(input_data) ``` """ def __init__( self, vocabulary_size, num_layers, num_heads, hidden_dim, intermediate_dim, dropout=0.0, activation="gelu", kernel_initializer_range=0.02, bias_initializer="zeros", dtype=None, **kwargs, ): # === Layers === self.content_query_embedding = ContentAndQueryEmbedding( vocabulary_size=vocabulary_size, hidden_dim=hidden_dim, dropout=dropout, dtype=dtype, name="content_query_embedding", ) self.attn_mask_layer = XLNetAttentionMaskLayer( hidden_dim=hidden_dim, kernel_initializer_range=kernel_initializer_range, dtype=dtype, name="encoder_block_attn_mask_layer", ) self.seg_mat_layer = XLNetSegmentMatrixLayer( dtype=dtype, name="encoder_block_seg_mat_layer", ) head_dim = hidden_dim // num_heads self.transformer_layers = [] for i in range(num_layers): layer = XLNetEncoder( num_heads=num_heads, hidden_dim=hidden_dim, head_dim=head_dim, intermediate_dim=intermediate_dim, dropout=dropout, activation=activation, layer_norm_epsilon=1e-12, kernel_initializer_range=kernel_initializer_range, bias_initializer=bias_initializer, dtype=dtype, name=f"xlnet_encoder_{i}", ) self.transformer_layers.append(layer) self.dropout = keras.layers.Dropout( dropout, dtype=dtype, name="dropout", ) # === Functional Model === token_id_input = keras.Input( shape=(None,), dtype="int32", name="token_ids" ) padding_mask_input = keras.Input( shape=(None,), dtype="int32", name="padding_mask" ) segment_id_input = keras.Input( shape=(None,), dtype="int32", name="segment_ids" ) # Content and Query Embedding word_emb, pos_emb = self.content_query_embedding(token_id_input) # Apply XLNetAttentionMaskLayer and XLNetSegmentMatrixLayer Layers # to get the processed attention masks and segment matrix. 
attn_mask_content, attn_mask_query = self.attn_mask_layer( padding_mask_input ) seg_mat = self.seg_mat_layer(segment_id_input) output_content = word_emb for transformer_layer in self.transformer_layers: output_content, output_query = transformer_layer( output_content=output_content, attn_mask_content=attn_mask_content, attn_mask_query=attn_mask_query, pos_emb=pos_emb, seg_mat=seg_mat, ) output = self.dropout(output_content) super().__init__( inputs={ "token_ids": token_id_input, "padding_mask": padding_mask_input, "segment_ids": segment_id_input, }, outputs=output, **kwargs, ) # === Config === self.vocabulary_size = vocabulary_size self.num_layers = num_layers self.num_heads = num_heads self.hidden_dim = hidden_dim self.intermediate_dim = intermediate_dim self.dropout = dropout self.activation = activation self.kernel_initializer_range = kernel_initializer_range self.bias_initializer = bias_initializer def get_config(self): config = super().get_config() config.update( { "vocabulary_size": self.vocabulary_size, "num_layers": self.num_layers, "num_heads": self.num_heads, "hidden_dim": self.hidden_dim, "intermediate_dim": self.intermediate_dim, "dropout": self.dropout, "activation": self.activation, "kernel_initializer_range": self.kernel_initializer_range, "bias_initializer": self.bias_initializer, } ) return config @property def token_embedding(self): return self.get_layer("content_query_embedding").word_embed
keras-nlp/keras_nlp/models/xlnet/xlnet_backbone.py/0
{ "file_path": "keras-nlp/keras_nlp/models/xlnet/xlnet_backbone.py", "repo_id": "keras-nlp", "token_count": 3735 }
149
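Note that `np.array()` has no `shape` argument, so inputs like those shown in the docstring above are normally built from nested lists or `np.ones()`. A small sketch with the same token values, shaped `(batch_size, sequence_length) = (1, 7)`:

```python
import numpy as np

token_ids = np.array([[460, 5272, 1758, 4905, 9, 4, 3]], dtype="int32")
segment_ids = np.array([[0, 0, 0, 0, 0, 0, 2]], dtype="int32")
padding_mask = np.ones((1, 7), dtype="int32")

inputs = {
    "token_ids": token_ids,
    "segment_ids": segment_ids,
    "padding_mask": padding_mask,
}
assert all(v.shape == (1, 7) for v in inputs.values())
```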
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import ops from keras_nlp.backend import random from keras_nlp.samplers.sampler import Sampler @keras_nlp_export("keras_nlp.samplers.TopKSampler") class TopKSampler(Sampler): """Top-K Sampler class. This sampler implements top-k search algorithm. Briefly, top-k algorithm randomly selects a token from the tokens of top K probability, with selection chance determined by the probability. Args: k: int, the `k` value of top-k. seed: int. The random seed. Defaults to `None`. Call arguments: {{call_args}} Examples: ```python causal_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en") # Pass by name to compile. causal_lm.compile(sampler="top_k") causal_lm.generate(["Keras is a"]) # Pass by object to compile. sampler = keras_nlp.samplers.TopKSampler(k=5, temperature=0.7) causal_lm.compile(sampler=sampler) causal_lm.generate(["Keras is a"]) ``` """ def __init__( self, k=5, seed=None, **kwargs, ): super().__init__(**kwargs) self.k = k self.seed = seed self.seed_generator = random.SeedGenerator(seed) def get_next_token(self, probabilities): # Filter out top-k tokens. top_k_pred, top_k_indices = ops.top_k( probabilities, k=self.k, sorted=False, ) # Sample the next token from the probability distribution. sample_indices = random.categorical( # tf does not support half precision multinomial sampling, so make # sure we have full precision here. ops.cast(ops.log(top_k_pred), "float32"), 1, seed=self.seed_generator, dtype="int32", ) # Rearrange to get the next token idx from the original order. output = ops.take_along_axis(top_k_indices, sample_indices, axis=-1) return ops.squeeze(output, axis=-1) def get_config(self): config = super().get_config() config.update( { "k": self.k, "seed": self.seed, } ) return config
keras-nlp/keras_nlp/samplers/top_k_sampler.py/0
{ "file_path": "keras-nlp/keras_nlp/samplers/top_k_sampler.py", "repo_id": "keras-nlp", "token_count": 1200 }
150
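A NumPy-only sketch of the same top-k idea implemented above — keep the `k` most probable tokens, renormalize, sample, then map back to the original vocabulary index. The probability row and `k` are invented.

```python
import numpy as np

rng = np.random.default_rng(0)
probs = np.array([[0.05, 0.4, 0.1, 0.3, 0.15]])  # (batch=1, vocab=5)
k = 3

top_k_indices = np.argsort(probs, axis=-1)[:, -k:]           # ids of the k largest
top_k_probs = np.take_along_axis(probs, top_k_indices, -1)
top_k_probs = top_k_probs / top_k_probs.sum(-1, keepdims=True)

# Sample within the top-k slice, then recover the original vocab id.
sample = rng.choice(k, p=top_k_probs[0])
next_token = top_k_indices[0, sample]
print(next_token)
```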
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from keras_nlp.tests.test_case import TestCase from keras_nlp.tokenizers.unicode_codepoint_tokenizer import ( UnicodeCodepointTokenizer, ) class UnicodeCodepointTokenizerTest(TestCase): def test_tokenize(self): input_data = ["ninja", "samurai", "▀▁▂▃"] tokenizer = UnicodeCodepointTokenizer() call_output = tokenizer(input_data) tokenize_output = tokenizer.tokenize(input_data) exp_outputs = [ [110, 105, 110, 106, 97], [115, 97, 109, 117, 114, 97, 105], [9600, 9601, 9602, 9603], ] self.assertAllEqual(call_output, exp_outputs) self.assertAllEqual(tokenize_output, exp_outputs) def test_tokenize_scalar(self): input_data = "ninja" tokenizer = UnicodeCodepointTokenizer() call_output = tokenizer(input_data) tokenize_output = tokenizer.tokenize(input_data) self.assertAllEqual(call_output, [110, 105, 110, 106, 97]) self.assertAllEqual(tokenize_output, [110, 105, 110, 106, 97]) def test_dense_output(self): input_data = ["ninja", "samurai", "▀▁▂▃"] tokenizer = UnicodeCodepointTokenizer(sequence_length=10) call_output = tokenizer(input_data) self.assertAllEqual( call_output, [ [110, 105, 110, 106, 97, 0, 0, 0, 0, 0], [115, 97, 109, 117, 114, 97, 105, 0, 0, 0], [9600, 9601, 9602, 9603, 0, 0, 0, 0, 0, 0], ], ) def test_tokenize_scalar_with_vocabulary_size(self): input_data = "ninja" tokenizer = UnicodeCodepointTokenizer(vocabulary_size=105) call_output = tokenizer(input_data) tokenize_output = tokenizer.tokenize(input_data) self.assertAllEqual(call_output, [104, 104, 104, 104, 97]) self.assertAllEqual(tokenize_output, [104, 104, 104, 104, 97]) def test_tokenize_dense_with_vocabulary_size(self): input_data = ["ninja", "samurai", "▀▁▂▃"] tokenizer = UnicodeCodepointTokenizer( sequence_length=10, vocabulary_size=105 ) call_output = tokenizer(input_data) self.assertAllEqual( call_output, [ [104, 104, 104, 104, 97, 0, 0, 0, 0, 0], [104, 97, 104, 104, 104, 97, 104, 0, 0, 0], [104, 104, 104, 104, 0, 0, 0, 0, 0, 0], ], ) def test_tokenize_ragged_with_vocabulary_size(self): input_data = ["ninja", "samurai", "▀▁▂▃"] tokenizer = UnicodeCodepointTokenizer(vocabulary_size=105) call_output = tokenizer(input_data) tokenize_output = tokenizer.tokenize(input_data) exp_outputs = [ [104, 104, 104, 104, 97], [104, 97, 104, 104, 104, 97, 104], [104, 104, 104, 104], ] self.assertAllEqual(call_output, exp_outputs) self.assertAllEqual(tokenize_output, exp_outputs) def test_detokenize(self): input_data = [ [110, 105, 110, 106, 97], [115, 97, 109, 117, 114, 97, 105], [9600, 9601, 9602, 9603], ] tokenizer = UnicodeCodepointTokenizer() detokenize_output = tokenizer.detokenize(input_data) self.assertAllEqual( detokenize_output, [ b"ninja", b"samurai", b"\xe2\x96\x80\xe2\x96\x81\xe2\x96\x82\xe2\x96\x83", ], ) def test_detokenize_replace_error(self): # 10000000 is an invalid value input_data = tf.ragged.constant([[110, 105, 10000000, 110, 106, 97]]) tokenizer = UnicodeCodepointTokenizer( errors="replace", replacement_char=75 ) 
detokenize_output = tokenizer.detokenize(input_data) self.assertAllEqual(detokenize_output, [b"niKnja"]) def test_detokenize_ignore_error(self): input_data = tf.ragged.constant([[110, 105, 10000000, 110, 106, 97]]) tokenizer = UnicodeCodepointTokenizer(errors="ignore") detokenize_output = tokenizer.detokenize(input_data) self.assertAllEqual(detokenize_output, [b"ninja"]) def test_detokenize_strict_error(self): input_data = tf.ragged.constant([[110, 105, 10000000, 110, 106, 97]]) tokenizer = UnicodeCodepointTokenizer(errors="strict") with self.assertRaises(tf.errors.InvalidArgumentError): _ = tokenizer.detokenize(input_data) def test_normalization_without_UTF8_valueerror(self): with self.assertRaises(ValueError): _ = UnicodeCodepointTokenizer( errors="strict", input_encoding="UTF-16", normalization_form="NFC", ) def test_lowercase(self): input_data = tf.constant(["NiNJaS"]) tokenizer = UnicodeCodepointTokenizer() call_output = tokenizer(input_data) self.assertAllEqual( call_output, [[110, 105, 110, 106, 97, 115]], ) def test_skip_lowercase(self): input_data = tf.constant(["NiNJaS"]) tokenizer = UnicodeCodepointTokenizer(lowercase=False) call_output = tokenizer(input_data) self.assertAllEqual( call_output, [[78, 105, 78, 74, 97, 83]], ) def test_tokenize_first_batch_second(self): tokenizer = UnicodeCodepointTokenizer() ds = tf.data.Dataset.from_tensor_slices( ["ninja", "samurai", "▀▁▂▃", "keras", "tensorflow"] ) ds = ds.map(tokenizer) ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(5)) output = ds.take(1).get_single_element() exp_output = [ [110, 105, 110, 106, 97], [115, 97, 109, 117, 114, 97, 105], [9600, 9601, 9602, 9603], [107, 101, 114, 97, 115], [116, 101, 110, 115, 111, 114, 102, 108, 111, 119], ] self.assertAllEqual(output, exp_output) def test_tokenize_first_batch_second_with_sequence_length(self): tokenizer = UnicodeCodepointTokenizer(sequence_length=10) ds = tf.data.Dataset.from_tensor_slices( ["ninja", "samurai", "▀▁▂▃", "keras", "tensorflow"] ) ds = ds.map(tokenizer) ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(5)) output = ds.take(1).get_single_element() exp_output = [ [110, 105, 110, 106, 97, 0, 0, 0, 0, 0], [115, 97, 109, 117, 114, 97, 105, 0, 0, 0], [9600, 9601, 9602, 9603, 0, 0, 0, 0, 0, 0], [107, 101, 114, 97, 115, 0, 0, 0, 0, 0], [116, 101, 110, 115, 111, 114, 102, 108, 111, 119], ] self.assertAllEqual(output, exp_output) def test_batch_first_tokenize_second(self): tokenizer = UnicodeCodepointTokenizer() ds = tf.data.Dataset.from_tensor_slices( ["ninja", "samurai", "▀▁▂▃", "keras", "tensorflow"] ) ds = ds.batch(5).map(tokenizer) output = ds.take(1).get_single_element() exp_output = [ [110, 105, 110, 106, 97], [115, 97, 109, 117, 114, 97, 105], [9600, 9601, 9602, 9603], [107, 101, 114, 97, 115], [116, 101, 110, 115, 111, 114, 102, 108, 111, 119], ] self.assertAllEqual(output, exp_output) def test_batch_first_tokenize_second_with_sequence_length(self): tokenizer = UnicodeCodepointTokenizer(sequence_length=10) ds = tf.data.Dataset.from_tensor_slices( ["ninja", "samurai", "▀▁▂▃", "keras", "tensorflow"] ) ds = ds.batch(5).map(tokenizer) output = ds.take(1).get_single_element() exp_output = [ [110, 105, 110, 106, 97, 0, 0, 0, 0, 0], [115, 97, 109, 117, 114, 97, 105, 0, 0, 0], [9600, 9601, 9602, 9603, 0, 0, 0, 0, 0, 0], [107, 101, 114, 97, 115, 0, 0, 0, 0, 0], [116, 101, 110, 115, 111, 114, 102, 108, 111, 119], ] self.assertAllEqual(output, exp_output) def test_load_model_with_config(self): input_data = tf.constant(["hello"]) original_tokenizer = 
UnicodeCodepointTokenizer( lowercase=False, sequence_length=11, normalization_form="NFC", errors="strict", vocabulary_size=None, ) cloned_tokenizer = UnicodeCodepointTokenizer.from_config( original_tokenizer.get_config() ) self.assertAllEqual( original_tokenizer(input_data), cloned_tokenizer(input_data), ) decoded_input = [107, 101, 114, 97, 115] self.assertAllEqual( original_tokenizer.detokenize(decoded_input), cloned_tokenizer.detokenize(decoded_input), ) def test_config(self): tokenizer = UnicodeCodepointTokenizer( name="unicode_character_tokenizer_config_gen", lowercase=False, sequence_length=8, normalization_form="NFC", errors="ignore", replacement_char=0, vocabulary_size=100, ) exp_config = { "dtype": "int32", "errors": "ignore", "lowercase": False, "name": "unicode_character_tokenizer_config_gen", "normalization_form": "NFC", "replacement_char": 0, "sequence_length": 8, "input_encoding": "UTF-8", "output_encoding": "UTF-8", "trainable": True, "vocabulary_size": 100, } self.assertEqual(tokenizer.get_config(), exp_config) tokenize_different_encoding = UnicodeCodepointTokenizer( name="unicode_character_tokenizer_config_gen", lowercase=False, sequence_length=8, errors="ignore", replacement_char=0, input_encoding="UTF-16", output_encoding="UTF-16", vocabulary_size=None, ) exp_config_different_encoding = { "dtype": "int32", "errors": "ignore", "lowercase": False, "name": "unicode_character_tokenizer_config_gen", "normalization_form": None, "replacement_char": 0, "sequence_length": 8, "input_encoding": "UTF-16", "output_encoding": "UTF-16", "trainable": True, "vocabulary_size": None, } self.assertEqual( tokenize_different_encoding.get_config(), exp_config_different_encoding, )
keras-nlp/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py/0
{ "file_path": "keras-nlp/keras_nlp/tokenizers/unicode_codepoint_tokenizer_test.py", "repo_id": "keras-nlp", "token_count": 5608 }
151
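The test file above exercises `keras_nlp.tokenizers.UnicodeCodepointTokenizer` end to end. As a quick orientation, here is a minimal sketch of the round trip the tests rely on: tokenize strings into integer Unicode codepoints, pad to a fixed `sequence_length`, and detokenize codepoints back to UTF-8 byte strings. This is illustrative only and assumes `keras_nlp` and TensorFlow are installed; the expected values are taken from the tests above.

```python
# Minimal sketch, assuming keras_nlp and tensorflow are installed.
import tensorflow as tf
import keras_nlp

tokenizer = keras_nlp.tokenizers.UnicodeCodepointTokenizer(sequence_length=8)

# Tokenize: strings -> integer codepoints, padded with 0 up to sequence_length.
codepoints = tokenizer(tf.constant(["ninja", "▀▁▂▃"]))
print(codepoints)
# [[110, 105, 110, 106, 97, 0, 0, 0],
#  [9600, 9601, 9602, 9603, 0, 0, 0, 0]]

# Detokenize: codepoints -> UTF-8 byte strings.
print(tokenizer.detokenize(tf.ragged.constant([[110, 105, 110, 106, 97]])))
# [b"ninja"]
```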
<jupyter_start><jupyter_text>Install deps<jupyter_code>!pip install git+https://github.com/abheesht17/keras-nlp.git@bert-base-chinese tensorflow tf-models-official --upgrade --quiet import json import os import keras_nlp import tensorflow as tf import tensorflow_models as tfm from tensorflow import keras MODEL_TYPE = "bert_base" MODEL_SUFFIX = "multi_cased" MODEL_NAME = f"{MODEL_TYPE}_{MODEL_SUFFIX}" VOCAB_SIZE = 119547<jupyter_output><empty_output><jupyter_text>Load the model garden checkpoints and weights<jupyter_code># Model garden BERT paths. zip_path = f"""https://storage.googleapis.com/tf_model_garden/nlp/bert/v3/{MODEL_SUFFIX}_L-12_H-768_A-12.tar.gz""" zip_file = keras.utils.get_file( f"""/content/{MODEL_NAME}""", zip_path, extract=True, archive_format="tar", ) !tar -xvf """{MODEL_NAME}""" # Model garden BERT paths. extract_dir = "/content/tmp/temp_dir/raw/" vocab_path = os.path.join(extract_dir, "vocab.txt") checkpoint_path = os.path.join(extract_dir, "bert_model.ckpt") config_path = os.path.join(extract_dir, "bert_config.json") vars = tf.train.list_variables(checkpoint_path) weights = {} for name, shape in vars: print(name, shape) weight = tf.train.load_variable(checkpoint_path, name) weights[name] = weight<jupyter_output>_CHECKPOINTABLE_OBJECT_GRAPH [] encoder/layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE [119547, 768] encoder/layer_with_weights-1/embeddings/.ATTRIBUTES/VARIABLE_VALUE [512, 768] encoder/layer_with_weights-10/_attention_layer/_key_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64] encoder/layer_with_weights-10/_attention_layer/_key_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [768, 12, 64] encoder/layer_with_weights-10/_attention_layer/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [768] encoder/layer_with_weights-10/_attention_layer/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [12, 64, 768] encoder/layer_with_weights-10/_attention_layer/_query_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64] encoder/layer_with_weights-10/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE [768, 12, 64] encoder/layer_with_weights-10/_attention_layer/_value_dense/bias/.ATTRIBUTES/VARIABLE_VALUE [12, 64] encoder/layer_with_weights-10/_attention_layer/_value_dense/kernel/.ATTRIBUTES/VARIABL[...]<jupyter_text>Load BertBase model with KerasNLP.<jupyter_code>model = keras_nlp.models.BertBase(vocabulary_size=VOCAB_SIZE) model.summary()<jupyter_output>Model: "bert_custom" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== token_ids (InputLayer) [(None, None)] 0 [] token_embedding (Embedding) (None, None, 768) 91812096 ['token_ids[0][0]'] segment_ids (InputLayer) [(None, None)] 0 [] position_embedding (PositionEm (None, None, 768) 393216 ['token_embedding[0][0][...]<jupyter_text>Convert Weights<jupyter_code>model.get_layer("token_embedding").embeddings.assign( weights[ "encoder/layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer("position_embedding").position_embeddings.assign( weights[ "encoder/layer_with_weights-1/embeddings/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer("segment_embedding").embeddings.assign( weights[ "encoder/layer_with_weights-2/embeddings/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer("embeddings_layer_norm").gamma.assign( weights["encoder/layer_with_weights-3/gamma/.ATTRIBUTES/VARIABLE_VALUE"] ) 
model.get_layer("embeddings_layer_norm").beta.assign( weights["encoder/layer_with_weights-3/beta/.ATTRIBUTES/VARIABLE_VALUE"] ) for i in range(model.num_layers): model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._key_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_key_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._key_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_key_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._query_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._query_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_query_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._value_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_value_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._value_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_value_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._output_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer._output_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer_norm.gamma.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer_norm/gamma/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._self_attention_layer_norm.beta.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_attention_layer_norm/beta/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_intermediate_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_intermediate_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_intermediate_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_intermediate_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_output_dense.kernel.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_output_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_output_dense.bias.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_output_dense/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_layer_norm.gamma.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_output_layer_norm/gamma/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer( f"transformer_layer_{i}" )._feedforward_layer_norm.beta.assign( weights[ f"encoder/layer_with_weights-{i + 4}/_output_layer_norm/beta/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer("pooled_dense").kernel.assign( weights[ f"encoder/layer_with_weights-{model.num_layers + 4}/kernel/.ATTRIBUTES/VARIABLE_VALUE" ] ) model.get_layer("pooled_dense").bias.assign( weights[ 
f"encoder/layer_with_weights-{model.num_layers + 4}/bias/.ATTRIBUTES/VARIABLE_VALUE" ] ) pass<jupyter_output><empty_output><jupyter_text>Compare Output<jupyter_code>def preprocess(x): tokenizer = keras_nlp.tokenizers.WordPieceTokenizer( vocabulary=vocab_path, lowercase=False ) packer = keras_nlp.layers.MultiSegmentPacker( sequence_length=model.max_sequence_length, start_value=tokenizer.token_to_id("[CLS]"), end_value=tokenizer.token_to_id("[SEP]"), ) return packer(tokenizer(x)) token_ids, segment_ids = preprocess(["The झटपट brown लोमड़ी."]) encoder_config = tfm.nlp.encoders.EncoderConfig( type="bert", bert=json.load(tf.io.gfile.GFile(config_path)), ) mg_model = tfm.nlp.encoders.build_encoder(encoder_config) checkpoint = tf.train.Checkpoint(encoder=mg_model) checkpoint.read(checkpoint_path).assert_consumed() keras_nlp_output = model( { "token_ids": token_ids, "segment_ids": segment_ids, "padding_mask": token_ids != 0, } )["pooled_output"] mg_output = mg_model( { "input_word_ids": token_ids, "input_type_ids": segment_ids, "input_mask": token_ids != 0, } )["pooled_output"] keras_nlp_output[0, :10] mg_output[0, :10] # Very close! Though not 100% exact. tf.reduce_mean(keras_nlp_output - mg_output) # Save BertBase checkpoint model.save_weights(f"""{MODEL_NAME}.h5""") model2 = keras_nlp.models.BertBase(vocabulary_size=VOCAB_SIZE) model2.load_weights(f"""{MODEL_NAME}.h5""") # Same output from loaded checkpoint keras_nlp_output2 = model2( { "token_ids": token_ids, "segment_ids": segment_ids, "padding_mask": token_ids != 0, } )["pooled_output"] tf.reduce_mean(keras_nlp_output - keras_nlp_output2) # Save vocab file as well vocab_info = tf.io.gfile.GFile(vocab_path).read() f = open("vocab.txt", "w") f.write(vocab_info) # Get MD5 of model !md5sum """{MODEL_NAME}.h5""" # Upload model to drive # from google.colab import drive # drive.mount('/content/drive') # Check uploaded model once added to repo model_cloud = keras_nlp.models.BertBase(weights=MODEL_SUFFIX) # Same output from cloud model keras_nlp_output_cloud = model_cloud( { "token_ids": token_ids, "segment_ids": segment_ids, "padding_mask": token_ids != 0, } )["pooled_output"] tf.reduce_mean(keras_nlp_output - keras_nlp_output_cloud) keras_nlp_output_cloud[0, :10]<jupyter_output><empty_output>
keras-nlp/tools/checkpoint_conversion/bert_base_multi_cased.ipynb/0
{ "file_path": "keras-nlp/tools/checkpoint_conversion/bert_base_multi_cased.ipynb", "repo_id": "keras-nlp", "token_count": 5148 }
152
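The notebook above copies Model Garden weights into a KerasNLP `BertBase` layer by layer and then checks that the pooled outputs of the two models agree. The following is a generic sketch of that final numerical check, not the notebook's exact code: compare two output arrays within a tolerance rather than for exact equality, since small floating-point drift is expected. The arrays below are dummy stand-ins for the real pooled outputs.

```python
# Illustrative output comparison, similar in spirit to the notebook's final check.
import numpy as np

def outputs_close(a, b, atol=1e-4):
    """Return True if two output tensors agree within an absolute tolerance."""
    return np.allclose(np.asarray(a), np.asarray(b), atol=atol)

# Dummy data standing in for keras_nlp_output and mg_output:
keras_nlp_output = np.array([[0.12, -0.87, 0.33]])
mg_output = keras_nlp_output + 1e-6  # tiny numerical drift

print(outputs_close(keras_nlp_output, mg_output))          # True
print(float(np.mean(keras_nlp_output - mg_output)))        # mean difference, as in the notebook
```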
# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import requests import tensorflow as tf from transformers import AutoTokenizer from transformers import GPTNeoXModel from keras_nlp.models import GPTNeoXBackbone from keras_nlp.models import GPTNeoXTokenizer PRESET_NAME = "pythia-70m" BASE_MODEL = "EleutherAI/gpt-neox-20b" PRESET = "EleutherAI/pythia-70m-deduped" EXTRACT_DIR = "./{}" extract_dir = EXTRACT_DIR.format(PRESET_NAME) if not os.path.exists(extract_dir): os.makedirs(extract_dir) # Vocab vocab_path = os.path.join(extract_dir, "vocab.json") response = requests.get( f"https://huggingface.co/{BASE_MODEL}/raw/main/vocab.json" ) open(vocab_path, "wb").write(response.content) merges_path = os.path.join(extract_dir, "merges.txt") response = requests.get( f"https://huggingface.co/{BASE_MODEL}/raw/main/merges.txt" ) open(merges_path, "wb").write(response.content) # Config. config_path = os.path.join(extract_dir, "config.json") response = requests.get(f"https://huggingface.co/{PRESET}/raw/main/config.json") open(config_path, "wb").write(response.content) cfg = {} with open(config_path, "r") as pt_cfg_handler: pt_cfg = json.load(pt_cfg_handler) cfg["vocabulary_size"] = pt_cfg["vocab_size"] cfg["num_layers"] = pt_cfg["num_hidden_layers"] cfg["num_heads"] = pt_cfg["num_attention_heads"] cfg["hidden_dim"] = pt_cfg["hidden_size"] cfg["intermediate_dim"] = pt_cfg["intermediate_size"] cfg["max_sequence_length"] = pt_cfg["max_position_embeddings"] cfg["layer_norm_epsilon"] = pt_cfg["layer_norm_eps"] cfg["rotary_percentage"] = pt_cfg["rotary_pct"] cfg["rotary_max_wavelength"] = pt_cfg["rotary_emb_base"] hf_model = GPTNeoXModel.from_pretrained(PRESET) hf_model.eval() hf_wts = hf_model.state_dict() keras_model = GPTNeoXBackbone(**cfg) keras_model.get_layer("token_embedding").embeddings.assign( hf_model.embed_in.weight.detach().numpy() ) for layer_index in range(cfg["num_layers"]): # attention layer keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer._qkv_dense.kernel.assign( hf_wts[f"layers.{layer_index}.attention.query_key_value.weight"] .numpy() .T.reshape((cfg["hidden_dim"], cfg["num_heads"], -1)) ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer._qkv_dense.bias.assign( hf_wts[f"layers.{layer_index}.attention.query_key_value.bias"].reshape( (cfg["num_heads"], -1) ) ) # Attention Dense keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer._output_dense.kernel.assign( hf_wts[f"layers.{layer_index}.attention.dense.weight"].numpy().T ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer._output_dense.bias.assign( hf_wts[f"layers.{layer_index}.attention.dense.bias"] ) # LAYERNORM keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer_norm.gamma.assign( hf_wts[f"layers.{layer_index}.input_layernorm.weight"] ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._self_attention_layer_norm.beta.assign( 
hf_wts[f"layers.{layer_index}.input_layernorm.bias"] ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_layer_norm.gamma.assign( hf_wts[f"layers.{layer_index}.post_attention_layernorm.weight"] ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_layer_norm.beta.assign( hf_wts[f"layers.{layer_index}.post_attention_layernorm.bias"] ) # MLP keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_intermediate_dense.kernel.assign( hf_wts[f"layers.{layer_index}.mlp.dense_h_to_4h.weight"].numpy().T ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_intermediate_dense.bias.assign( hf_wts[f"layers.{layer_index}.mlp.dense_h_to_4h.bias"] ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_output_dense.kernel.assign( hf_wts[f"layers.{layer_index}.mlp.dense_4h_to_h.weight"].numpy().T ) keras_model.get_layer( f"transformer_layer_{layer_index}" )._feedforward_output_dense.bias.assign( hf_wts[f"layers.{layer_index }.mlp.dense_4h_to_h.bias"] ) keras_model.get_layer("layer_norm").gamma.assign( hf_wts["final_layer_norm.weight"] ) keras_model.get_layer("layer_norm").beta.assign(hf_wts["final_layer_norm.bias"]) hf_tokenizer = AutoTokenizer.from_pretrained(PRESET) sample_text = ["cricket is awesome, easily the best sport in the world!"] hf_inputs = hf_tokenizer(sample_text, return_tensors="pt") print("HF inputs", hf_inputs) keras_tokenizer = GPTNeoXTokenizer(vocabulary=vocab_path, merges=merges_path) keras_tokenized_inputs = keras_tokenizer(sample_text) print("Keras tok input", keras_tokenized_inputs) keras_inputs = { "token_ids": tf.convert_to_tensor(hf_inputs["input_ids"]), "padding_mask": tf.convert_to_tensor(hf_inputs["attention_mask"]), } keras_outputs = keras_model(keras_inputs) print("Keras output = ", keras_outputs.numpy()) hf_outputs = hf_model(**hf_inputs).last_hidden_state print("HF output = ", hf_outputs)
keras-nlp/tools/checkpoint_conversion/convert_gpt_neox_checkpoints.py/0
{ "file_path": "keras-nlp/tools/checkpoint_conversion/convert_gpt_neox_checkpoints.py", "repo_id": "keras-nlp", "token_count": 2561 }
153
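The trickiest step in the conversion script above is the fused query-key-value weight: the Hugging Face checkpoint stores it as a single `(3 * hidden_dim, hidden_dim)` matrix, and the script transposes it and reshapes it to the `(hidden_dim, num_heads, 3 * head_dim)` layout expected by the KerasNLP attention layer. Below is a small NumPy sketch of that shape manipulation with toy dimensions; it mirrors the `.T.reshape(...)` call in the script but is not tied to any real checkpoint.

```python
# Sketch of the fused QKV reshape used above, with toy dimensions.
import numpy as np

hidden_dim, num_heads = 8, 2           # toy sizes for illustration
head_dim = hidden_dim // num_heads

# Fused QKV projection as stored in the HF checkpoint: (3 * hidden, hidden).
hf_qkv = np.random.rand(3 * hidden_dim, hidden_dim).astype("float32")

# Transpose to (hidden, 3 * hidden), then split the last axis across heads,
# exactly as hf_weight.numpy().T.reshape((hidden_dim, num_heads, -1)) does.
keras_qkv = hf_qkv.T.reshape(hidden_dim, num_heads, -1)
print(keras_qkv.shape)  # (8, 2, 12) -> (hidden_dim, num_heads, 3 * head_dim)
```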
### Summary

### Related Issues

### PR Overview

- [ ] This PR requires new unit tests [y/n] (make sure tests are included)
- [ ] This PR requires to update the documentation [y/n] (make sure the docs are up-to-date)
- [ ] This PR is backwards compatible [y/n]
- [ ] This PR changes the current API [y/n] (all API changes need to be approved by fchollet)
keras-preprocessing/PULL_REQUEST_TEMPLATE.md/0
{ "file_path": "keras-preprocessing/PULL_REQUEST_TEMPLATE.md", "repo_id": "keras-preprocessing", "token_count": 107 }
154
import os import random import shutil import numpy as np import pandas as pd import pytest from PIL import Image from keras_preprocessing.image import dataframe_iterator, image_data_generator @pytest.fixture(scope='module') def all_test_images(): img_w = img_h = 20 rgb_images = [] rgba_images = [] gray_images = [] for n in range(8): bias = np.random.rand(img_w, img_h, 1) * 64 variance = np.random.rand(img_w, img_h, 1) * (255 - 64) imarray = np.random.rand(img_w, img_h, 3) * variance + bias im = Image.fromarray(imarray.astype('uint8')).convert('RGB') rgb_images.append(im) imarray = np.random.rand(img_w, img_h, 4) * variance + bias im = Image.fromarray(imarray.astype('uint8')).convert('RGBA') rgba_images.append(im) imarray = np.random.rand(img_w, img_h, 1) * variance + bias im = Image.fromarray( imarray.astype('uint8').squeeze()).convert('L') gray_images.append(im) return [rgb_images, rgba_images, gray_images] def test_dataframe_iterator(all_test_images, tmpdir): num_classes = 2 # save the images in the tmpdir count = 0 filenames = [] filepaths = [] filenames_without = [] for test_images in all_test_images: for im in test_images: filename = "image-{}.png".format(count) filename_without = "image-{}".format(count) filenames.append(filename) filepaths.append(os.path.join(str(tmpdir), filename)) filenames_without.append(filename_without) im.save(str(tmpdir / filename)) count += 1 df = pd.DataFrame({ "filename": filenames, "class": [str(random.randint(0, 1)) for _ in filenames], "filepaths": filepaths }) # create iterator iterator = dataframe_iterator.DataFrameIterator(df, str(tmpdir)) batch = next(iterator) assert len(batch) == 2 assert isinstance(batch[0], np.ndarray) assert isinstance(batch[1], np.ndarray) generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe(df, x_col='filepaths') df_iterator_dir = generator.flow_from_dataframe(df, str(tmpdir)) df_sparse_iterator = generator.flow_from_dataframe(df, str(tmpdir), class_mode="sparse") assert not np.isnan(df_sparse_iterator.classes).any() # check number of classes and images assert len(df_iterator.class_indices) == num_classes assert len(df_iterator.classes) == count assert set(df_iterator.filenames) == set(filepaths) assert len(df_iterator_dir.class_indices) == num_classes assert len(df_iterator_dir.classes) == count assert set(df_iterator_dir.filenames) == set(filenames) # test without shuffle _, batch_y = next(generator.flow_from_dataframe(df, str(tmpdir), shuffle=False, class_mode="sparse")) assert (batch_y == df['class'].astype('float')[:len(batch_y)]).all() # Test invalid use cases with pytest.raises(ValueError): generator.flow_from_dataframe(df, str(tmpdir), color_mode='cmyk') with pytest.raises(ValueError): generator.flow_from_dataframe(df, str(tmpdir), class_mode='output') with pytest.warns(DeprecationWarning): generator.flow_from_dataframe(df, str(tmpdir), has_ext=True) with pytest.warns(DeprecationWarning): generator.flow_from_dataframe(df, str(tmpdir), has_ext=False) def preprocessing_function(x): """This will fail if not provided by a Numpy array. Note: This is made to enforce backward compatibility. 
""" assert x.shape == (26, 26, 3) assert type(x) is np.ndarray return np.zeros_like(x) # Test usage as Sequence generator = image_data_generator.ImageDataGenerator( preprocessing_function=preprocessing_function) dir_seq = generator.flow_from_dataframe(df, str(tmpdir), target_size=(26, 26), color_mode='rgb', batch_size=3, class_mode='categorical') assert len(dir_seq) == np.ceil(count / 3) x1, y1 = dir_seq[1] assert x1.shape == (3, 26, 26, 3) assert y1.shape == (3, num_classes) x1, y1 = dir_seq[5] assert (x1 == 0).all() with pytest.raises(ValueError): x1, y1 = dir_seq[9] def test_dataframe_iterator_validate_filenames(all_test_images, tmpdir): # save the images in the paths count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 df = pd.DataFrame({"filename": filenames + ['test.jpp', 'test.jpg']}) generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe(df, str(tmpdir), class_mode="input") assert len(df_iterator.filenames) == len(df['filename']) - 2 df_iterator = generator.flow_from_dataframe(df, str(tmpdir), class_mode="input", validate_filenames=False) assert len(df_iterator.filenames) == len(df['filename']) def test_dataframe_iterator_sample_weights(all_test_images, tmpdir): # save the images in the paths count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 df = pd.DataFrame({"filename": filenames}) df['weight'] = ([2, 5] * len(df))[:len(df)] generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe(df, str(tmpdir), x_col="filename", y_col=None, shuffle=False, batch_size=5, weight_col='weight', class_mode="input") batch = next(df_iterator) assert len(batch) == 3 # (x, y, weights) # check if input and output have the same shape and they're the same assert(batch[0].all() == batch[1].all()) # check if the input and output images are not the same numpy array input_img = batch[0][0] output_img = batch[1][0] output_img[0][0][0] += 1 assert input_img[0][0][0] != output_img[0][0][0] assert np.array_equal(np.array([2, 5, 2, 5, 2]), batch[2]) # fail df['weight'] = (['2', '5'] * len(df))[:len(df)] with pytest.raises(TypeError): image_data_generator.ImageDataGenerator().flow_from_dataframe( df, weight_col='weight', class_mode="input" ) def test_dataframe_iterator_class_mode_input(all_test_images, tmpdir): # save the images in the paths count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 df = pd.DataFrame({"filename": filenames}) generator = image_data_generator.ImageDataGenerator() df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir), x_col="filename", y_col=None, class_mode="input") batch = next(df_autoencoder_iterator) # check if input and output have the same shape and they're the same assert np.allclose(batch[0], batch[1]) # check if the input and output images are not the same numpy array input_img = batch[0][0] output_img = batch[1][0] output_img[0][0][0] += 1 assert(input_img[0][0][0] != output_img[0][0][0]) df_autoencoder_iterator = generator.flow_from_dataframe(df, str(tmpdir), x_col="filename", y_col="class", class_mode="input") batch = next(df_autoencoder_iterator) # check if 
input and output have the same shape and they're the same assert(batch[0].all() == batch[1].all()) # check if the input and output images are not the same numpy array input_img = batch[0][0] output_img = batch[1][0] output_img[0][0][0] += 1 assert(input_img[0][0][0] != output_img[0][0][0]) def test_dataframe_iterator_class_mode_categorical_multi_label(all_test_images, tmpdir): # save the images in the paths filenames = [] count = 0 for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 label_opt = ['a', 'b', ['a'], ['b'], ['a', 'b'], ['b', 'a']] df = pd.DataFrame({ "filename": filenames, "class": [random.choice(label_opt) for _ in filenames[:-2]] + ['b', 'a'] }) generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe(df, str(tmpdir)) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, np.ndarray) assert batch_y.shape == (len(batch_x), 2) for labels in batch_y: assert all(label in {0, 1} for label in labels) # on first 3 batches df = pd.DataFrame({ "filename": filenames, "class": [['b', 'a']] + ['b'] + [['c']] + [random.choice(label_opt) for _ in filenames[:-3]] }) generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe(df, str(tmpdir), shuffle=False) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, np.ndarray) assert batch_y.shape == (len(batch_x), 3) for labels in batch_y: assert all(label in {0, 1} for label in labels) assert (batch_y[0] == np.array([1, 1, 0])).all() assert (batch_y[1] == np.array([0, 1, 0])).all() assert (batch_y[2] == np.array([0, 0, 1])).all() def test_dataframe_iterator_class_mode_multi_output(all_test_images, tmpdir): # save the images in the paths filenames = [] count = 0 for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 # fit both outputs are a single number df = pd.DataFrame({"filename": filenames}).assign( output_0=np.random.uniform(size=len(filenames)), output_1=np.random.uniform(size=len(filenames)) ) df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col=['output_0', 'output_1'], directory=str(tmpdir), batch_size=3, shuffle=False, class_mode='multi_output' ) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, list) assert len(batch_y) == 2 assert np.array_equal(batch_y[0], np.array(df['output_0'].tolist()[:3])) assert np.array_equal(batch_y[1], np.array(df['output_1'].tolist()[:3])) # if one of the outputs is a 1D array df['output_1'] = [np.random.uniform(size=(2, 2, 1)).flatten() for _ in range(len(df))] df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col=['output_0', 'output_1'], directory=str(tmpdir), batch_size=3, shuffle=False, class_mode='multi_output' ) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, list) assert len(batch_y) == 2 assert np.array_equal(batch_y[0], np.array(df['output_0'].tolist()[:3])) assert np.array_equal(batch_y[1], np.array(df['output_1'].tolist()[:3])) # if one of the outputs is a 2D array df['output_1'] = 
[np.random.uniform(size=(2, 2, 1)) for _ in range(len(df))] df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col=['output_0', 'output_1'], directory=str(tmpdir), batch_size=3, shuffle=False, class_mode='multi_output' ) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, list) assert len(batch_y) == 2 assert np.array_equal(batch_y[0], np.array(df['output_0'].tolist()[:3])) assert np.array_equal(batch_y[1], np.array(df['output_1'].tolist()[:3])) # fail if single column with pytest.raises(TypeError): image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col='output_0', directory=str(tmpdir), class_mode='multi_output' ) def test_dataframe_iterator_class_mode_raw(all_test_images, tmpdir): # save the images in the paths filenames = [] count = 0 for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 # case for 1D output df = pd.DataFrame({"filename": filenames}).assign( output_0=np.random.uniform(size=len(filenames)), output_1=np.random.uniform(size=len(filenames)) ) df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col='output_0', directory=str(tmpdir), batch_size=3, shuffle=False, class_mode='raw' ) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, np.ndarray) assert batch_y.shape == (3,) assert np.array_equal(batch_y, df['output_0'].values[:3]) # case with a 2D output df_iterator = image_data_generator.ImageDataGenerator().flow_from_dataframe( df, y_col=['output_0', 'output_1'], directory=str(tmpdir), batch_size=3, shuffle=False, class_mode='raw' ) batch_x, batch_y = next(df_iterator) assert isinstance(batch_x, np.ndarray) assert len(batch_x.shape) == 4 assert isinstance(batch_y, np.ndarray) assert batch_y.shape == (3, 2) assert np.array_equal(batch_y, df[['output_0', 'output_1']].values[:3]) @pytest.mark.parametrize('validation_split,num_training', [ (0.25, 18), (0.50, 12), (0.75, 6), ]) def test_dataframe_iterator_with_validation_split(all_test_images, validation_split, num_training, tmpdir): num_classes = 2 # save the images in the tmpdir count = 0 filenames = [] filenames_without = [] for test_images in all_test_images: for im in test_images: filename = "image-{}.png".format(count) filename_without = "image-{}".format(count) filenames.append(filename) filenames_without.append(filename_without) im.save(str(tmpdir / filename)) count += 1 df = pd.DataFrame({"filename": filenames, "class": [str(random.randint(0, 1)) for _ in filenames]}) # create iterator generator = image_data_generator.ImageDataGenerator( validation_split=validation_split ) df_sparse_iterator = generator.flow_from_dataframe(df, str(tmpdir), class_mode="sparse") if np.isnan(next(df_sparse_iterator)[:][1]).any(): raise ValueError('Invalid values.') with pytest.raises(ValueError): generator.flow_from_dataframe( df, tmpdir, subset='foo') train_iterator = generator.flow_from_dataframe(df, str(tmpdir), subset='training') assert train_iterator.samples == num_training valid_iterator = generator.flow_from_dataframe(df, str(tmpdir), subset='validation') assert valid_iterator.samples == count - num_training # check number of classes and images assert len(train_iterator.class_indices) == num_classes assert len(train_iterator.classes) == num_training assert 
len(set(train_iterator.filenames) & set(filenames)) == num_training def test_dataframe_iterator_with_custom_indexed_dataframe(all_test_images, tmpdir): num_classes = 2 # save the images in the tmpdir count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = "image-{}.png".format(count) filenames.append(filename) im.save(str(tmpdir / filename)) count += 1 # create dataframes classes = np.random.randint(num_classes, size=len(filenames)) classes = [str(c) for c in classes] df = pd.DataFrame({"filename": filenames, "class": classes}) df2 = pd.DataFrame({"filename": filenames, "class": classes}, index=np.arange(1, len(filenames) + 1)) df3 = pd.DataFrame({"filename": filenames, "class": classes}, index=filenames) # create iterators seed = 1 generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe( df, str(tmpdir), seed=seed) df2_iterator = generator.flow_from_dataframe( df2, str(tmpdir), seed=seed) df3_iterator = generator.flow_from_dataframe( df3, str(tmpdir), seed=seed) # Test all iterators return same pairs of arrays for _ in range(len(filenames)): a1, c1 = next(df_iterator) a2, c2 = next(df2_iterator) a3, c3 = next(df3_iterator) assert np.array_equal(a1, a2) assert np.array_equal(a1, a3) assert np.array_equal(c1, c2) assert np.array_equal(c1, c3) def test_dataframe_iterator_n(all_test_images, tmpdir): # save the images in the tmpdir count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = "image-{}.png".format(count) filenames.append(filename) im.save(str(tmpdir / filename)) count += 1 # exclude first two items n_files = len(filenames) input_filenames = filenames[2:] # create dataframes classes = np.random.randint(2, size=len(input_filenames)) classes = [str(c) for c in classes] df = pd.DataFrame({"filename": input_filenames}) df2 = pd.DataFrame({"filename": input_filenames, "class": classes}) # create iterators generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe( df, str(tmpdir), class_mode=None) df2_iterator = generator.flow_from_dataframe( df2, str(tmpdir), class_mode='binary') # Test the number of items in iterators assert df_iterator.n == n_files - 2 assert df2_iterator.n == n_files - 2 def test_dataframe_iterator_absolute_path(all_test_images, tmpdir): # save the images in the tmpdir count = 0 file_paths = [] for test_images in all_test_images: for im in test_images: filename = "image-{:0>5}.png".format(count) file_path = str(tmpdir / filename) file_paths.append(file_path) im.save(file_path) count += 1 # prepare an image with a forbidden extension. 
file_path_fbd = str(tmpdir / 'image-forbid.fbd') shutil.copy(file_path, file_path_fbd) # create dataframes classes = np.random.randint(2, size=len(file_paths)) classes = [str(c) for c in classes] df = pd.DataFrame({"filename": file_paths}) df2 = pd.DataFrame({"filename": file_paths, "class": classes}) df3 = pd.DataFrame({"filename": ['image-not-exist.png'] + file_paths}) df4 = pd.DataFrame({"filename": file_paths + [file_path_fbd]}) # create iterators generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe( df, None, class_mode=None, shuffle=False, batch_size=1) df2_iterator = generator.flow_from_dataframe( df2, None, class_mode='binary', shuffle=False, batch_size=1) df3_iterator = generator.flow_from_dataframe( df3, None, class_mode=None, shuffle=False, batch_size=1) df4_iterator = generator.flow_from_dataframe( df4, None, class_mode=None, shuffle=False, batch_size=1) validation_split = 0.2 generator_split = image_data_generator.ImageDataGenerator( validation_split=validation_split ) df_train_iterator = generator_split.flow_from_dataframe( df, None, class_mode=None, shuffle=False, subset='training', batch_size=1) df_val_iterator = generator_split.flow_from_dataframe( df, None, class_mode=None, shuffle=False, subset='validation', batch_size=1) # Test the number of items in iterators assert df_iterator.n == len(file_paths) assert df2_iterator.n == len(file_paths) assert df3_iterator.n == len(file_paths) assert df4_iterator.n == len(file_paths) assert df_val_iterator.n == int(validation_split * len(file_paths)) assert df_train_iterator.n == len(file_paths) - df_val_iterator.n # Test flow_from_dataframe for i in range(len(file_paths)): a1 = next(df_iterator) a2, _ = next(df2_iterator) a3 = next(df3_iterator) a4 = next(df4_iterator) if i < df_val_iterator.n: a5 = next(df_val_iterator) else: a5 = next(df_train_iterator) assert np.array_equal(a1, a2) assert np.array_equal(a1, a3) assert np.array_equal(a1, a4) assert np.array_equal(a1, a5) def test_dataframe_iterator_with_subdirs(all_test_images, tmpdir): num_classes = 2 # create folders and subfolders paths = [] for cl in range(num_classes): class_directory = 'class-{}'.format(cl) classpaths = [ class_directory, os.path.join(class_directory, 'subfolder-1'), os.path.join(class_directory, 'subfolder-2'), os.path.join(class_directory, 'subfolder-1', 'sub-subfolder') ] for path in classpaths: tmpdir.join(path).mkdir() paths.append(classpaths) # save the images in the paths count = 0 filenames = [] for test_images in all_test_images: for im in test_images: # rotate image class im_class = count % num_classes # rotate subfolders classpaths = paths[im_class] filename = os.path.join( classpaths[count % len(classpaths)], 'image-{}.png'.format(count)) filenames.append(filename) im.save(str(tmpdir / filename)) count += 1 # create dataframe classes = np.random.randint(num_classes, size=len(filenames)) classes = [str(c) for c in classes] df = pd.DataFrame({"filename": filenames, "class": classes}) # create iterator generator = image_data_generator.ImageDataGenerator() df_iterator = generator.flow_from_dataframe( df, str(tmpdir), class_mode='binary') # Test the number of items in iterator assert df_iterator.n == len(filenames) assert set(df_iterator.filenames) == set(filenames) def test_dataframe_iterator_classes_indices_order(all_test_images, tmpdir): # save the images in the paths count = 0 filenames = [] for test_images in all_test_images: for im in test_images: filename = 'image-{}.png'.format(count) 
im.save(str(tmpdir / filename)) filenames.append(filename) count += 1 # Test the class_indices without classes input generator = image_data_generator.ImageDataGenerator() label_opt = ['a', 'b', ['a'], ['b'], ['a', 'b'], ['b', 'a']] df_f = pd.DataFrame({ "filename": filenames, "class": ['a', 'b'] + [random.choice(label_opt) for _ in filenames[:-2]] }) flow_forward_iter = generator.flow_from_dataframe(df_f, str(tmpdir)) label_rev = ['b', 'a', ['b'], ['a'], ['b', 'a'], ['a', 'b']] df_r = pd.DataFrame({ "filename": filenames, "class": ['b', 'a'] + [random.choice(label_rev) for _ in filenames[:-2]] }) flow_backward_iter = generator.flow_from_dataframe(df_r, str(tmpdir)) # check class_indices assert flow_forward_iter.class_indices == flow_backward_iter.class_indices # Test the class_indices with classes input generator_2 = image_data_generator.ImageDataGenerator() df_f2 = pd.DataFrame([['data/A.jpg', 'A'], ['data/B.jpg', 'B']], columns=['filename', 'class']) flow_forward = generator_2.flow_from_dataframe(df_f2, classes=['A', 'B']) df_b2 = pd.DataFrame([['data/A.jpg', 'A'], ['data/B.jpg', 'B']], columns=['filename', 'class']) flow_backward = generator_2.flow_from_dataframe(df_b2, classes=['B', 'A']) # check class_indices assert flow_forward.class_indices != flow_backward.class_indices if __name__ == '__main__': pytest.main([__file__])
keras-preprocessing/tests/image/dataframe_iterator_test.py/0
{ "file_path": "keras-preprocessing/tests/image/dataframe_iterator_test.py", "repo_id": "keras-preprocessing", "token_count": 12511 }
155
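The tests above cover `ImageDataGenerator.flow_from_dataframe` in many configurations. For orientation, here is a self-contained minimal sketch of the basic workflow the fixtures set up: write a few dummy PNGs, build a DataFrame of filenames and classes, and iterate image/label batches. It assumes `keras_preprocessing`, `pandas`, `numpy`, and Pillow are installed; all paths and sizes are arbitrary.

```python
# Minimal flow_from_dataframe sketch (assumes keras_preprocessing, pandas, PIL).
import os
import tempfile

import numpy as np
import pandas as pd
from PIL import Image
from keras_preprocessing.image import ImageDataGenerator

# Write a few dummy PNGs so the sketch is self-contained.
tmpdir = tempfile.mkdtemp()
filenames, classes = [], []
for i in range(4):
    name = f"image-{i}.png"
    img = np.random.randint(0, 255, (20, 20, 3), dtype="uint8")
    Image.fromarray(img).save(os.path.join(tmpdir, name))
    filenames.append(name)
    classes.append(str(i % 2))

df = pd.DataFrame({"filename": filenames, "class": classes})
generator = ImageDataGenerator(rescale=1.0 / 255)
iterator = generator.flow_from_dataframe(
    df,
    directory=tmpdir,
    x_col="filename",
    y_col="class",
    target_size=(26, 26),
    batch_size=2,
    class_mode="categorical",
)
batch_x, batch_y = next(iterator)
print(batch_x.shape, batch_y.shape)  # (2, 26, 26, 3) (2, 2)
```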
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytest

from keras_tuner.backend import config
from keras_tuner.backend import keras


@pytest.fixture(autouse=True)
def set_seeds_before_tests():
    """Test wrapper to set the seed before each test.

    This wrapper runs for all the tests in the test suite.
    """
    keras.utils.set_random_seed(0)
    # Use channels_first for torch backend.
    if config.backend() == "torch":
        keras.backend.set_image_data_format("channels_first")
    else:
        keras.backend.set_image_data_format("channels_last")
    yield
keras-tuner/keras_tuner/conftest.py/0
{ "file_path": "keras-tuner/keras_tuner/conftest.py", "repo_id": "keras-tuner", "token_count": 354 }
156
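The autouse fixture above reseeds Keras before every test, which makes things like weight initialization reproducible across test runs. A tiny sketch of what that buys, under the assumption that a Keras backend is installed for `keras_tuner.backend`; the layer sizes are arbitrary.

```python
# Sketch of the reproducibility provided by keras.utils.set_random_seed.
from keras_tuner.backend import keras

def first_kernel_value():
    keras.utils.set_random_seed(0)
    layer = keras.layers.Dense(4)
    layer.build((None, 3))
    return layer.get_weights()[0][0][0]

# With the same seed, weight initialization is identical across calls.
print(first_kernel_value() == first_kernel_value())  # True
```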
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from keras_tuner import protos from keras_tuner.api_export import keras_tuner_export from keras_tuner.engine import conditions as conditions_mod from keras_tuner.engine.hyperparameters import hyperparameter @keras_tuner_export("keras_tuner.engine.hyperparameters.Boolean") class Boolean(hyperparameter.HyperParameter): """Choice between True and False. Args: name: A string. the name of parameter. Must be unique for each `HyperParameter` instance in the search space. default: Boolean, the default value to return for the parameter. If unspecified, the default value will be False. """ def __init__(self, name, default=False, **kwargs): super().__init__(name=name, default=default, **kwargs) if default not in {True, False}: raise ValueError( "`default` must be a Python boolean. " f"You passed: default={default}" ) def __repr__(self): return f'Boolean(name: "{self.name}", default: {self.default})' @property def values(self): return (True, False) def prob_to_value(self, prob): return bool(prob >= 0.5) def value_to_prob(self, value): # Center the value in its probability bucket. return 0.75 if value else 0.25 @classmethod def from_proto(cls, proto): conditions = [ conditions_mod.Condition.from_proto(c) for c in proto.conditions ] return cls( name=proto.name, default=proto.default, conditions=conditions ) def to_proto(self): return protos.get_proto().Boolean( name=self.name, default=self.default, conditions=[c.to_proto() for c in self.conditions], )
keras-tuner/keras_tuner/engine/hyperparameters/hp_types/boolean_hp.py/0
{ "file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/boolean_hp.py", "repo_id": "keras-tuner", "token_count": 893 }
157
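`Boolean` is typically consumed through `hp.Boolean(...)` inside a model-building function. Below is a minimal sketch of that usage, assuming `keras_tuner` and a Keras backend are installed; the layer sizes and the `use_dropout` name are arbitrary choices for illustration.

```python
# Sketch of hp.Boolean in a model-building function.
import keras_tuner
from keras_tuner.backend import keras

def build_model(hp):
    inputs = keras.Input(shape=(10,))
    x = keras.layers.Dense(16, activation="relu")(inputs)
    if hp.Boolean("use_dropout", default=False):
        x = keras.layers.Dropout(0.5)(x)
    outputs = keras.layers.Dense(1)(x)
    model = keras.Model(inputs, outputs)
    model.compile("adam", "mse")
    return model

hp = keras_tuner.HyperParameters()
model = build_model(hp)  # uses the default, False
print(hp.values)         # {'use_dropout': False}
```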
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from keras_tuner import protos from keras_tuner.backend import keras from keras_tuner.engine import hyperparameters as hp_module def test_hyperparameters(): hp = hp_module.HyperParameters() assert hp.values == {} assert hp.space == [] hp.Choice("choice", [1, 2, 3], default=2) assert hp.values == {"choice": 2} assert len(hp.space) == 1 assert hp.space[0].name == "choice" hp.values["choice"] = 3 assert hp.get("choice") == 3 hp = hp.copy() assert hp.values == {"choice": 3} assert len(hp.space) == 1 assert hp.space[0].name == "choice" with pytest.raises(KeyError, match="does not exist"): hp.get("wrong") def test_name_collision(): # TODO: figure out how name collision checks # should work. pass def test_name_scope(): hp = hp_module.HyperParameters() hp.Choice("choice", [1, 2, 3], default=2) with hp.name_scope("scope1"): hp.Choice("choice", [4, 5, 6], default=5) with hp.name_scope("scope2"): hp.Choice("choice", [7, 8, 9], default=8) hp.Int("range", min_value=0, max_value=10, step=1, default=0) assert hp.values == { "choice": 2, "scope1/choice": 5, "scope1/scope2/choice": 8, "scope1/range": 0, } def test_parent_name(): hp = hp_module.HyperParameters() hp.Choice("a", [1, 2, 3], default=2) b1 = hp.Int("b", 0, 10, parent_name="a", parent_values=1, default=5) b2 = hp.Int("b", 0, 100, parent_name="a", parent_values=2, default=4) assert b1 is None assert b2 == 4 # Only active values appear in `values`. assert hp.values == {"a": 2, "b": 4} def test_conditional_scope(): hp = hp_module.HyperParameters() hp.Choice("choice", [1, 2, 3], default=2) # Assignment to a non-active conditional hyperparameter returns `None`. with hp.conditional_scope("choice", [1, 3]): child1 = hp.Choice("child_choice", [4, 5, 6]) assert child1 is None # Retrieve a non-active hp, still none. with hp.conditional_scope("choice", [1, 3]): child1 = hp.Choice("child_choice", [4, 5, 6]) assert child1 is None # Assignment to an active conditional hyperparameter returns the value. with hp.conditional_scope("choice", 2): child2 = hp.Choice("child_choice", [7, 8, 9]) assert child2 == 7 # Retrieve the value, still same value. with hp.conditional_scope("choice", 2): child2 = hp.Choice("child_choice", [7, 8, 9]) assert child2 == 7 # Only active values appear in `values`. 
assert hp.values == {"choice": 2, "child_choice": 7} with pytest.raises(ValueError, match="not defined"): with hp.conditional_scope("not_defined_hp", 2): hp.Choice("child_choice", [7, 8, 9]) def test_to_proto_unrecognized_hp_type(): hps = hp_module.HyperParameters() hps._space.append(None) hps.Fixed("d", "3") with pytest.raises(ValueError, match="Unrecognized HP"): hp_module.HyperParameters.from_proto(hps.to_proto()) def test_to_proto_unrecognized_value_type(): hps = hp_module.HyperParameters() hps.Fixed("d", "3") hps.values["d"] = None with pytest.raises(ValueError, match="Unrecognized value type"): hp_module.HyperParameters.from_proto(hps.to_proto()) def test_is_active_with_hp_name_and_hp(): hp = hp_module.HyperParameters() hp.Choice("choice", [1, 2, 3], default=3) with hp.conditional_scope("choice", [1, 3]): hp.Choice("child_choice", [4, 5, 6]) with hp.conditional_scope("choice", 2): hp.Choice("child_choice2", [7, 8, 9]) # Custom oracle populates value for an inactive hp. hp.values["child_choice2"] = 7 assert hp.is_active("child_choice") assert hp.is_active(hp._hps["child_choice"][0]) assert not hp.is_active("child_choice2") assert not hp.is_active(hp._hps["child_choice2"][0]) def test_build_with_conditional_scope(): def build_model(hp): model = hp.Choice("model", ["v1", "v2"]) with hp.conditional_scope("model", "v1"): v1_params = { "layers": hp.Int("layers", 1, 3), "units": hp.Int("units", 16, 32), } with hp.conditional_scope("model", "v2"): v2_params = { "layers": hp.Int("layers", 2, 4), "units": hp.Int("units", 32, 64), } params = v1_params if model == "v1" else v2_params inputs = keras.Input(shape=(10,)) x = inputs for _ in range(params["layers"]): x = keras.layers.Dense(params["units"])(x) outputs = keras.layers.Dense(1)(x) model = keras.Model(inputs, outputs) model.compile("sgd", "mse") return model hp = hp_module.HyperParameters() build_model(hp) assert hp.values == { "model": "v1", "layers": 1, "units": 16, } def test_error_when_hp_same_name_as_condition(): hp = hp_module.HyperParameters() hp.Choice("a", [1, 2, 3], default=3) with pytest.raises(ValueError, match="cannot have the same name"): with hp.conditional_scope("a", [1, 3]): hp.Choice("a", [4, 5, 6], default=6) def test_nested_conditional_scopes_and_name_scopes(): hp = hp_module.HyperParameters() a = hp.Choice("a", [1, 2, 3], default=3) with hp.conditional_scope("a", [1, 3]): b = hp.Choice("b", [4, 5, 6], default=6) with hp.conditional_scope("b", 6): c = hp.Choice("c", [7, 8, 9]) with hp.name_scope("d"): e = hp.Choice("e", [10, 11, 12]) with hp.conditional_scope("a", 2): f = hp.Choice("f", [13, 14, 15]) with hp.name_scope("g"): h = hp.Int("h", 0, 10) assert hp.values == { "a": 3, "b": 6, "c": 7, "d/e": 10, } # Assignment to an active conditional hyperparameter returns the value. assert a == 3 assert b == 6 assert c == 7 assert e == 10 # Assignment to a non-active conditional hyperparameter returns `None`. assert f is None assert h is None def test_get_with_conditional_scopes(): hp = hp_module.HyperParameters() hp.Choice("a", [1, 2, 3], default=2) assert hp.get("a") == 2 with hp.conditional_scope("a", 2): hp.Fixed("b", 4) assert hp.get("b") == 4 assert hp.get("a") == 2 with hp.conditional_scope("a", 3): hp.Fixed("b", 5) assert hp.get("b") == 4 # Value corresponding to the currently active condition is returned. 
assert hp.get("b") == 4 def test_merge_inactive_hp_with_conditional_scopes(): hp = hp_module.HyperParameters() hp.Choice("a", [1, 2, 3], default=3) assert hp.get("a") == 3 with hp.conditional_scope("a", 2): hp.Fixed("b", 4) hp2 = hp_module.HyperParameters() hp2.merge(hp) # only active hp should be included to values assert "a" in hp2.values assert "b" not in hp2.values def test_merge(): hp = hp_module.HyperParameters() hp.Int("a", 0, 100) hp.Fixed("b", 2) hp2 = hp_module.HyperParameters() hp2.Fixed("a", 3) hp.Int("c", 10, 100, default=30) hp.merge(hp2) assert hp.get("a") == 3 assert hp.get("b") == 2 assert hp.get("c") == 30 hp3 = hp_module.HyperParameters() hp3.Fixed("a", 5) hp3.Choice("d", [1, 2, 3], default=1) hp.merge(hp3, overwrite=False) assert hp.get("a") == 3 assert hp.get("b") == 2 assert hp.get("c") == 30 assert hp.get("d") == 1 def _sort_space(hps): space = hps.get_config()["space"] return sorted(space, key=lambda hp: hp["config"]["name"]) def test_hyperparameters_proto(): hps = hp_module.HyperParameters() hps.Int("a", 1, 10, sampling="reverse_log", default=3) hps.Float("b", 2, 8, sampling="linear", default=4) hps.Choice("c", [1, 5, 10], ordered=False, default=5) hps.Fixed("d", "3") hps.Fixed("e", 3) hps.Fixed("f", 3.1) hps.Fixed("g", True) hps.Boolean("h") with hps.name_scope("d"): hps.Choice("e", [2.0, 4.5, 8.5], default=2.0) hps.Choice("f", ["1", "2"], default="1") with hps.conditional_scope("f", "1"): hps.Int("g", -10, 10, step=2, default=-2) new_hps = hp_module.HyperParameters.from_proto(hps.to_proto()) assert _sort_space(hps) == _sort_space(new_hps) assert hps.values == new_hps.values def test_hyperparameters_values_proto(): values = protos.get_proto().HyperParameters.Values( values={ "a": protos.get_proto().Value(int_value=1), "b": protos.get_proto().Value(float_value=2.0), "c": protos.get_proto().Value(string_value="3"), } ) # When only values are provided, each param is created as `Fixed`. hps = hp_module.HyperParameters.from_proto(values) assert hps.values == {"a": 1, "b": 2.0, "c": "3"} def test_dict_methods(): hps = hp_module.HyperParameters() hps.Int("a", 0, 10, default=3) hps.Choice("b", [1, 2], default=2) with hps.conditional_scope("b", 1): hps.Float("c", -10, 10, default=3) # Don't allow access of a non-active param within its scope. with pytest.raises(ValueError, match="is currently inactive"): hps["c"] with hps.conditional_scope("b", 2): hps.Float("c", -30, -20, default=-25) assert hps["a"] == 3 assert hps["b"] == 2 # Ok to access 'c' here since there is an active 'c'. assert hps["c"] == -25 with pytest.raises(KeyError, match="does not exist"): hps["d"] assert "a" in hps assert "b" in hps assert "c" in hps assert "d" not in hps def test_return_populated_value_for_new_hp(): hp = hp_module.HyperParameters() hp.values["hp_name"] = "hp_value" assert ( hp.Choice( "hp_name", ["hp_value", "hp_value_default"], default="hp_value_default", ) == "hp_value" ) def test_return_default_value_if_not_populated(): hp = hp_module.HyperParameters() assert ( hp.Choice( "hp_name", ["hp_value", "hp_value_default"], default="hp_value_default", ) == "hp_value_default" ) def test_serialize_deserialize_hyperparameters(): hp = hp_module.HyperParameters() hp.Int("temp", 1, 5) hp = hp_module.deserialize(hp_module.serialize(hp)) assert len(hp.space) == 1 def test_int_log_without_step_random_sample(): hp = hp_module.HyperParameters() hp.Int("rg", min_value=2, max_value=32, sampling="log") hp.space[0].random_sample()
keras-tuner/keras_tuner/engine/hyperparameters/hyperparameters_test.py/0
{ "file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hyperparameters_test.py", "repo_id": "keras-tuner", "token_count": 4913 }
158
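The tests above show the key behavior of conditional scopes: a child hyperparameter returns its value only when the parent currently takes one of the listed values, and only active values appear in `hp.values`. A short sketch of that behavior, assuming `keras_tuner` is installed; the hyperparameter names and ranges are made up.

```python
# Conditional-scope sketch mirroring the tests above.
import keras_tuner

hp = keras_tuner.HyperParameters()
model_type = hp.Choice("model", ["cnn", "mlp"], default="cnn")

with hp.conditional_scope("model", "cnn"):
    filters = hp.Int("filters", 16, 64, step=16)   # active: "model" is "cnn"
with hp.conditional_scope("model", "mlp"):
    units = hp.Int("units", 32, 128, step=32)      # inactive: returns None

print(model_type, filters, units)  # cnn 16 None
print(hp.values)                   # only active values: {'model': 'cnn', 'filters': 16}
```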
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import keras_tuner from keras_tuner.backend import keras def get_data(): """Create random but repetitive dummy MNIST data.""" x = np.random.randint(0, 255, size=(1000, 28, 28)) y = np.random.randint(0, 9, size=(1000,)) train_x = np.repeat(x, repeats=10, axis=0) train_y = np.repeat(y, repeats=10, axis=0) val_x, val_y = x, y rng_state = np.random.get_state() np.random.shuffle(train_x) np.random.set_state(rng_state) np.random.shuffle(train_y) return (train_x, train_y), (val_x, val_y) def build_model(hp): inputs = keras.Input(shape=(28, 28)) x = keras.layers.Reshape((28 * 28,))(inputs) for i in range(hp.Int("num_layers", 1, 4)): x = keras.layers.Dense( units=hp.Int(f"units_{str(i)}", 128, 512, 32, default=256), activation="relu", )(x) x = keras.layers.Dropout(hp.Float("dp", 0.0, 0.6, 0.1, default=0.5))(x) outputs = keras.layers.Dense(10, activation="softmax")(x) model = keras.Model(inputs, outputs) model.compile( optimizer=keras.optimizers.Adam( hp.Choice("learning_rate", [1e-2, 2e-3, 5e-4]) ), loss="sparse_categorical_crossentropy", metrics=["accuracy"], ) return model def test_end_to_end_workflow(tmp_path): (x, y), (val_x, val_y) = get_data() x = x.astype("float32") / 255.0 val_x = val_x.astype("float32") / 255.0 tuner = keras_tuner.tuners.RandomSearch( build_model, objective="val_accuracy", max_trials=20, directory=tmp_path, ) tuner.search_space_summary() tuner.search( x=x, y=y, epochs=10, batch_size=128, callbacks=[keras.callbacks.EarlyStopping(patience=2)], validation_data=(val_x, val_y), ) tuner.results_summary() best_model = tuner.get_best_models(1)[0] val_loss, val_acc = best_model.evaluate(val_x, val_y) assert val_acc > 0.955 if __name__ == "__main__": test_end_to_end_workflow("test_dir", None)
keras-tuner/keras_tuner/integration_tests/end_to_end_test.py/0
{ "file_path": "keras-tuner/keras_tuner/integration_tests/end_to_end_test.py", "repo_id": "keras-tuner", "token_count": 1146 }
159
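The integration test above runs a full `RandomSearch` over an MNIST-like dataset. For a faster orientation, here is a hedged, scaled-down sketch of the same workflow on synthetic data; the data shapes, trial count, and directory name are arbitrary, and it assumes `keras_tuner`, NumPy, and a Keras backend are installed.

```python
# Minimal RandomSearch sketch on synthetic data.
import numpy as np
import keras_tuner
from keras_tuner.backend import keras

def build_model(hp):
    inputs = keras.Input(shape=(4,))
    x = keras.layers.Dense(hp.Int("units", 8, 32, step=8), activation="relu")(inputs)
    outputs = keras.layers.Dense(1)(x)
    model = keras.Model(inputs, outputs)
    model.compile(optimizer="adam", loss="mse")
    return model

x = np.random.rand(64, 4).astype("float32")
y = np.random.rand(64, 1).astype("float32")

tuner = keras_tuner.tuners.RandomSearch(
    build_model,
    objective="val_loss",
    max_trials=2,
    overwrite=True,
    directory="tuner_demo_dir",   # arbitrary scratch directory
)
tuner.search(x, y, epochs=1, validation_split=0.25, verbose=0)
best_model = tuner.get_best_models(1)[0]
```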
# Copyright 2019 The KerasTuner Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from keras_tuner.protos.v4 import ( service_pb2 as keras__tuner_dot_protos_dot_service__pb2, ) class OracleStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetSpace = channel.unary_unary( "/keras_tuner.Oracle/GetSpace", request_serializer=keras__tuner_dot_protos_dot_service__pb2.GetSpaceRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetSpaceResponse.FromString, ) self.UpdateSpace = channel.unary_unary( "/keras_tuner.Oracle/UpdateSpace", request_serializer=keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceResponse.FromString, ) self.CreateTrial = channel.unary_unary( "/keras_tuner.Oracle/CreateTrial", request_serializer=keras__tuner_dot_protos_dot_service__pb2.CreateTrialRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.CreateTrialResponse.FromString, ) self.UpdateTrial = channel.unary_unary( "/keras_tuner.Oracle/UpdateTrial", request_serializer=keras__tuner_dot_protos_dot_service__pb2.UpdateTrialRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.UpdateTrialResponse.FromString, ) self.EndTrial = channel.unary_unary( "/keras_tuner.Oracle/EndTrial", request_serializer=keras__tuner_dot_protos_dot_service__pb2.EndTrialRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.EndTrialResponse.FromString, ) self.GetBestTrials = channel.unary_unary( "/keras_tuner.Oracle/GetBestTrials", request_serializer=keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsResponse.FromString, ) self.GetTrial = channel.unary_unary( "/keras_tuner.Oracle/GetTrial", request_serializer=keras__tuner_dot_protos_dot_service__pb2.GetTrialRequest.SerializeToString, response_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetTrialResponse.FromString, ) class OracleServicer(object): """Missing associated documentation comment in .proto file.""" def GetSpace(self, request, context): """Return the HyperParameter search space.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateSpace(self, request, context): """Updates the HyperParameter search space.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateTrial(self, request, context): """Creates a Trial.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateTrial(self, request, context): """Updates a Trial with metrics and a step.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def EndTrial(self, request, context): """Ends a Trial.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetBestTrials(self, request, context): """Gets the best Trials.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetTrial(self, request, context): """Gets a Trial by ID.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def add_OracleServicer_to_server(servicer, server): rpc_method_handlers = { "GetSpace": grpc.unary_unary_rpc_method_handler( servicer.GetSpace, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetSpaceRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.GetSpaceResponse.SerializeToString, ), "UpdateSpace": grpc.unary_unary_rpc_method_handler( servicer.UpdateSpace, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceResponse.SerializeToString, ), "CreateTrial": grpc.unary_unary_rpc_method_handler( servicer.CreateTrial, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.CreateTrialRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.CreateTrialResponse.SerializeToString, ), "UpdateTrial": grpc.unary_unary_rpc_method_handler( servicer.UpdateTrial, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.UpdateTrialRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.UpdateTrialResponse.SerializeToString, ), "EndTrial": grpc.unary_unary_rpc_method_handler( servicer.EndTrial, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.EndTrialRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.EndTrialResponse.SerializeToString, ), "GetBestTrials": grpc.unary_unary_rpc_method_handler( servicer.GetBestTrials, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsResponse.SerializeToString, ), "GetTrial": grpc.unary_unary_rpc_method_handler( servicer.GetTrial, request_deserializer=keras__tuner_dot_protos_dot_service__pb2.GetTrialRequest.FromString, response_serializer=keras__tuner_dot_protos_dot_service__pb2.GetTrialResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( "keras_tuner.Oracle", rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class Oracle(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetSpace( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/GetSpace", keras__tuner_dot_protos_dot_service__pb2.GetSpaceRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.GetSpaceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def UpdateSpace( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/UpdateSpace", keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.UpdateSpaceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def CreateTrial( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/CreateTrial", keras__tuner_dot_protos_dot_service__pb2.CreateTrialRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.CreateTrialResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def UpdateTrial( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/UpdateTrial", keras__tuner_dot_protos_dot_service__pb2.UpdateTrialRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.UpdateTrialResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def EndTrial( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/EndTrial", keras__tuner_dot_protos_dot_service__pb2.EndTrialRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.EndTrialResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def GetBestTrials( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/GetBestTrials", keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.GetBestTrialsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, ) @staticmethod def GetTrial( request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None, ): return 
grpc.experimental.unary_unary( request, target, "/keras_tuner.Oracle/GetTrial", keras__tuner_dot_protos_dot_service__pb2.GetTrialRequest.SerializeToString, keras__tuner_dot_protos_dot_service__pb2.GetTrialResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata, )
keras-tuner/keras_tuner/protos/v4/service_pb2_grpc.py/0
{ "file_path": "keras-tuner/keras_tuner/protos/v4/service_pb2_grpc.py", "repo_id": "keras-tuner", "token_count": 6228 }
160
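The generated `OracleStub` above is how KerasTuner's distributed tuning talks to the chief Oracle process. Purely as an illustration (the server address and the idea of driving the stub by hand are assumptions; the tuner normally wires this up internally), a client-side sketch might look like:

```python
import grpc

from keras_tuner.protos.v4 import service_pb2
from keras_tuner.protos.v4 import service_pb2_grpc

# Hypothetical server address, for illustration only.
channel = grpc.insecure_channel("localhost:8000")
stub = service_pb2_grpc.OracleStub(channel)

# Each RPC takes the matching request message from service_pb2;
# GetSpace returns the current hyperparameter search space.
space_response = stub.GetSpace(service_pb2.GetSpaceRequest())
print(space_response)
```

The same pattern applies to the other RPCs (`CreateTrial`, `UpdateTrial`, `EndTrial`, ...), each with its own request/response message pair.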
sudo pip install --upgrade pip
sudo pip install -r requirements.txt
echo "bash shell/lint.sh" > .git/hooks/pre-commit
chmod a+x .git/hooks/pre-commit
keras/.devcontainer/setup.sh/0
{ "file_path": "keras/.devcontainer/setup.sh", "repo_id": "keras", "token_count": 53 }
161
from keras.api_export import keras_export from keras.backend.common import global_state @keras_export("keras.StatelessScope") class StatelessScope: """Scope to prevent any update to Keras Variables. The values of variables to be used inside the scope should be passed via the `state_mapping` argument, a list of tuples `(k, v)` where `k` is a `KerasVariable` and `v` is the intended value for this variable (a backend tensor). Updated values can be collected on scope exit via `value = scope.get_current_value(variable)`. No updates will be applied in-place to any variables for the duration of the scope. Example: ```python state_mapping = [(k, ops.ones(k.shape, k.dtype)) for k in model.weights] with keras.StatelessScope(state_mapping) as scope: outputs = model.some_function(inputs) # All model variables remain unchanged. Their new values can be # collected via: for k in model.weights: new_value = scope.get_current_value(k) print(f"New value for {k}: {new_value}) ``` """ def __init__( self, state_mapping=None, collect_losses=False, initialize_variables=True, ): from keras import backend from keras.backend.common.variables import KerasVariable self.collect_losses = collect_losses self.initialize_variables = initialize_variables self.losses = [] self.state_mapping = {} state_mapping = state_mapping or {} for k, v in state_mapping: if not isinstance(k, KerasVariable): raise ValueError( "Invalid reference variable in StatelessScope: " "all keys in argument `mapping` must be KerasVariable " f"instances. Received instead: {k}" ) v = backend.convert_to_tensor(v, dtype=k.dtype) if k.shape != v.shape: raise ValueError( "Invalid variable value in StatelessScope: " "all values in argument `mapping` must be tensors with " "a shape that matches the corresponding variable shape. " f"For variable {k}, received invalid value {v} with shape " f"{v.shape}." ) self.state_mapping[id(k)] = v def __enter__(self): self.original_scope = get_stateless_scope() global_state.set_global_attribute("stateless_scope", self) return self def add_loss(self, loss): self.losses.append(loss) def add_update(self, update): variable, value = update self.state_mapping[id(variable)] = value def get_current_value(self, variable): return self.state_mapping.get(id(variable), None) def __exit__(self, *args, **kwargs): global_state.set_global_attribute( "stateless_scope", self.original_scope ) if self.original_scope is None and self.initialize_variables: # We're back in eager scope; # if any variables were created within the stateless # scope, we initialize them here. from keras.backend.common.variables import initialize_all_variables initialize_all_variables() def in_stateless_scope(): return global_state.get_global_attribute("stateless_scope") is not None def get_stateless_scope(): return global_state.get_global_attribute("stateless_scope")
keras/keras/backend/common/stateless_scope.py/0
{ "file_path": "keras/keras/backend/common/stateless_scope.py", "repo_id": "keras", "token_count": 1473 }
162
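To complement the docstring above, here is a small runnable sketch (assuming the public Keras 3 API) of how an assignment made inside a `StatelessScope` is staged rather than applied, and later retrieved with `get_current_value`:

```python
import keras
from keras import ops

v = keras.Variable(ops.zeros((2,)), name="v")

with keras.StatelessScope() as scope:
    # Inside the scope, the assignment is recorded via add_update(),
    # not written into the variable.
    v.assign(ops.ones((2,)))
    staged = scope.get_current_value(v)

print(v)       # still zeros: no in-place update happened
print(staged)  # the staged value collected by the scope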
import jax from jax import numpy as jnp from keras.optimizers import base_optimizer class JaxOptimizer(base_optimizer.BaseOptimizer): """A class for JAX specific optimizer logic. Its purpose is to route around statelessness requirements in cond ops used for EMA handling and gradient accumulation handling. We do this by skipping conditionals entirely. """ def _backend_apply_gradients(self, grads, trainable_variables): if self.gradient_accumulation_steps: is_update_step = ( self.iterations + 1 ) % self.gradient_accumulation_steps == 0 steps = self.gradient_accumulation_steps current_trainable_vars_value = [ v.value for v in trainable_variables ] current_optimizer_vars_value = [v.value for v in self.variables] new_g_accs = jax.lax.cond( is_update_step, lambda: [ jnp.zeros(x.shape, dtype=x.dtype) for x in self._accumulated_gradients ], lambda: [ grads[i] + self._accumulated_gradients[i] for i in range(len(grads)) ], ) grads = jax.lax.cond( is_update_step, lambda: [ (grads[i] + self._accumulated_gradients[i]) / steps for i in range(len(grads)) ], lambda: list(grads), ) self._backend_update_step( grads, trainable_variables, self.learning_rate ) new_trainable_vars = jax.lax.cond( is_update_step, lambda: [v.value for v in trainable_variables], lambda: current_trainable_vars_value, ) new_opt_vars = jax.lax.cond( is_update_step, lambda: [v.value for v in self.variables], lambda: current_optimizer_vars_value, ) for value, v in zip(new_trainable_vars, trainable_variables): v.assign(value) for value, v in zip(new_opt_vars, self.variables): v.assign(value) for n_g_acc, g_acc in zip(new_g_accs, self._accumulated_gradients): g_acc.assign(n_g_acc) else: self._backend_update_step( grads, trainable_variables, self.learning_rate ) if self.use_ema: self._update_model_variables_moving_average( self._trainable_variables ) if self.ema_overwrite_frequency is not None: should_overwrite_model_vars = ( self.iterations + 1 ) % self.ema_overwrite_frequency == 0 should_overwrite_model_vars_int = ( should_overwrite_model_vars.astype("int32") ) should_not_overwrite_model_vars_int = jnp.logical_not( should_overwrite_model_vars ).astype("int32") current_trainable_vars_value = [ v.value for v in self._trainable_variables ] for var, average_var in zip( self._trainable_variables, self._model_variables_moving_average, ): var.assign( average_var * should_overwrite_model_vars_int + var.value * should_not_overwrite_model_vars_int ) self.iterations.assign_add(1)
keras/keras/backend/jax/optimizer.py/0
{ "file_path": "keras/keras/backend/jax/optimizer.py", "repo_id": "keras", "token_count": 2039 }
163
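The optimizer above leans on `jax.lax.cond` so that the "accumulate vs. apply" decision stays traceable under `jit` instead of branching in Python. A standalone sketch of that pattern, with made-up shapes and a hypothetical step count (not the optimizer itself), is below:

```python
import jax
import jax.numpy as jnp

STEPS = 4  # hypothetical gradient_accumulation_steps


def accumulate_or_apply(iteration, grad, acc):
    """Return (new_accumulator, gradient_to_apply) for this iteration."""
    is_update_step = (iteration + 1) % STEPS == 0
    new_acc = jax.lax.cond(
        is_update_step,
        lambda: jnp.zeros_like(acc),   # reset after an update step
        lambda: acc + grad,            # otherwise keep accumulating
    )
    applied = jax.lax.cond(
        is_update_step,
        lambda: (acc + grad) / STEPS,  # averaged gradient is applied
        lambda: jnp.zeros_like(grad),  # no-op on accumulation-only steps
    )
    return new_acc, applied


acc = jnp.zeros(3)
grad = jnp.ones(3)
for it in range(8):
    acc, applied = accumulate_or_apply(it, grad, acc)
    print(it, applied)
```

Both branches return arrays of identical shape and dtype, which is what lets `lax.cond` replace a Python `if` inside traced code.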
from keras.backend.tensorflow import core
from keras.backend.tensorflow import distribution_lib
from keras.backend.tensorflow import image
from keras.backend.tensorflow import linalg
from keras.backend.tensorflow import math
from keras.backend.tensorflow import nn
from keras.backend.tensorflow import numpy
from keras.backend.tensorflow import random
from keras.backend.tensorflow import tensorboard
from keras.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
from keras.backend.tensorflow.core import Variable
from keras.backend.tensorflow.core import cast
from keras.backend.tensorflow.core import compute_output_spec
from keras.backend.tensorflow.core import cond
from keras.backend.tensorflow.core import convert_to_numpy
from keras.backend.tensorflow.core import convert_to_tensor
from keras.backend.tensorflow.core import device_scope
from keras.backend.tensorflow.core import is_tensor
from keras.backend.tensorflow.core import name_scope
from keras.backend.tensorflow.core import scatter
from keras.backend.tensorflow.core import shape
from keras.backend.tensorflow.core import stop_gradient
from keras.backend.tensorflow.core import vectorized_map
from keras.backend.tensorflow.rnn import cudnn_ok
from keras.backend.tensorflow.rnn import gru
from keras.backend.tensorflow.rnn import lstm
from keras.backend.tensorflow.rnn import rnn
keras/keras/backend/tensorflow/__init__.py/0
{ "file_path": "keras/keras/backend/tensorflow/__init__.py", "repo_id": "keras", "token_count": 442 }
164
import functools import tensorflow as tf ones_bool = functools.partial(tf.ones, dtype=tf.bool) ones_int8 = functools.partial(tf.ones, dtype=tf.int8) zeros_int8 = functools.partial(tf.zeros, dtype=tf.int8) ones_like_int8 = functools.partial(tf.ones_like, dtype=tf.int8) zeros_like_int8 = functools.partial(tf.zeros_like, dtype=tf.int8) def empty_tensor(shape, dtype): return tf.reshape(tf.convert_to_tensor((), dtype=dtype), shape=shape) def sparse_to_dense(x, default_value=None): x_shape = x.shape x = tf.sparse.to_dense(x, default_value=default_value) x.set_shape(x_shape) return x def sparse_with_values(x, values): x_shape = x.shape x = tf.SparseTensor(x.indices, values, x.dense_shape) x.set_shape(x_shape) return x def broadcast_scalar_to_sparse_shape(scalar, sparse): output = tf.broadcast_to(scalar, sparse.dense_shape) output.set_shape(sparse.shape) return output def sparse_subtract(x1, x2): """Subtraction for `tf.SparseTensor`s. Either `x1` or `x2` or both can be `tf.SparseTensor`s. Args: x1: fist tensor to add. x2: second tensor to add. Returns: The sum of `x1` and `x2`, which is a `tf.SparseTensor` if and only if both `x1` or `x2` are `tf.SparseTensor`s. """ if isinstance(x2, tf.SparseTensor): return tf.sparse.add(x1, tf.sparse.map_values(tf.negative, x2)) else: return tf.sparse.add(x1, tf.negative(x2)) def sparse_union_indices_and_values(x1, x2_indices, x2_values=None): """Compute the indices for the union of the indices of the provided `tf.SparseTensor`s and another set of indices and return the modified values for these indices. Args: x: a `tf.SparseTensor`. indices: another set of indices in the `tf.SparseTensor` format. Returns: A tuple containing: - the indices for the union - `x1` values for the union indices (some zeros were added) - `x2` values for the union indices (some zeros were added) or `None` if `x2_values` was `None`. """ # Add zeros at the x2 indices to x1 to create the union. zeros2 = tf.SparseTensor( x2_indices, tf.zeros((tf.shape(x2_indices)[0],), x1.values.dtype), x1.dense_shape, ) x1_for_union = tf.sparse.add(x1, zeros2) if x2_values is not None: # Add zeros at the x1 indices to x2 to create the union. x2 = tf.SparseTensor(x2_indices, x2_values, x1.dense_shape) zeros1 = tf.sparse.map_values(tf.zeros_like, x1) x2_for_union = tf.sparse.add(x2, zeros1) return x1_for_union.indices, x1_for_union.values, x2_for_union.values else: return x1_for_union.indices, x1_for_union.values, None def indexed_slices_union_indices_and_values(x1, x2_indices, x2_values=None): """Compute the indices for the union of two `tf.IndexedSlices` and modify the values for these indices. Args: x1: the first `tf.IndexedSlices`. x2_indices: the indices for the second `tf.IndexedSlices`. x2_value: (optional) the values for the second `tf.IndexedSlices`. Returns: A tuple containing: - the indices for the union - `x1` values for the union indices (some zeros were added) - `x2` values for the union indices (some zeros were added) or `None` if `x2_values` was `None`. """ # Compute the union of the indices by doing a logical or between the one-hot # encoded indices for x1 and x2. 
dim_0 = x1.dense_shape[0] x1_indices_expanded = tf.expand_dims(x1.indices, axis=1) x2_indices_expanded = tf.expand_dims(x2_indices, axis=1) x1_indices_count = tf.shape(x1_indices_expanded)[0] x2_indices_count = tf.shape(x2_indices_expanded)[0] x1_indices_one_hot = tf.scatter_nd( x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,), ) x2_indices_one_hot = tf.scatter_nd( x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,), ) union_indices = tf.squeeze( tf.where(tf.math.logical_or(x1_indices_one_hot, x2_indices_one_hot)), axis=-1, ) union_indices_count = tf.shape(union_indices)[0] # Re-gather the values with extra zeros added at indices that are part of # the union but were not in x1 or x2. def values_for_union(indices_expanded, indices_count, values): indices_indices = tf.scatter_nd( indices_expanded, tf.range(1, indices_count + 1), (dim_0,), ) to_union_indices = tf.gather(indices_indices, union_indices) values_with_leading_zeros = tf.concat( [tf.zeros((1,) + values.shape[1:], values.dtype), values], axis=0 ) return tf.gather(values_with_leading_zeros, to_union_indices) # Only recompute values if some indices were added. x1_values_for_union_indices = tf.cond( tf.equal(x1_indices_count, union_indices_count), lambda: x1.values, lambda: values_for_union( x1_indices_expanded, x1_indices_count, x1.values ), ) if x2_values is not None: x2_values_for_union_indices = tf.cond( tf.equal(x2_indices_count, union_indices_count), lambda: x2_values, lambda: values_for_union( x2_indices_expanded, x2_indices_count, x2_values ), ) else: x2_values_for_union_indices = None return ( union_indices, x1_values_for_union_indices, x2_values_for_union_indices, ) def sparse_intersection_indices_and_values(x1, x2): """Compute the indices for the intersection of two `tf.SparseTensor`s and modify the values for these indices. Args: x1: the first `tf.SparseTensor`. x2: the second `tf.SparseTensor`. Returns: A tuple containing: - the indices for the intersection - `x1` values for the intersection indices (some values were removed) - `x2` values for the intersection indices (some values were removed) """ # Compute the intersection of indices in the form of a sparse # tensor containing ones as values. ones1 = tf.sparse.map_values(ones_like_int8, x1) ones2 = tf.sparse.map_values(ones_like_int8, x2) # tf.sets.intersection ignores the last dimension when, so we # need to add a dummy extra dimension and then remove it. intersection_extra_dim = tf.sets.intersection( tf.sparse.expand_dims(ones1, axis=-1), tf.sparse.expand_dims(ones2, axis=-1), ) def empty_intersection(): return ( empty_tensor((0, x1.shape.rank), dtype=tf.int64), empty_tensor((0,), dtype=x1.values.dtype), empty_tensor((0,), dtype=x2.values.dtype), ) def non_empty_intersection(): intersection = tf.sparse.reshape(intersection_extra_dim, x1.dense_shape) # Compute the masks to remove indices in x1 and x2 that are not # in the intersection, then trim x1 and x2. 
zeros1 = tf.sparse.map_values(zeros_like_int8, x1) zeros2 = tf.sparse.map_values(zeros_like_int8, x2) mask1 = tf.sparse.add(zeros1, intersection) mask2 = tf.sparse.add(zeros2, intersection) return ( intersection.indices, tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values, tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values, ) return tf.cond( tf.equal(tf.size(intersection_extra_dim), 0), empty_intersection, non_empty_intersection, ) def indexed_slices_intersection_indices_and_values(x1, x2): """Compute the indices for the intersection of two `tf.IndexedSlices` and modify the values for these indices. Args: x1: the first `tf.IndexedSlices`. x2: the second `tf.IndexedSlices`. Returns: A tuple containing: - the indices for the intersection - `x1` values for the intersection indices (some values were removed) - `x2` values for the intersection indices (some values were removed) """ # Compute the intersection of the indices by doing a logical # and between the one hot encoded indices for x1 and x2. dim_0 = x1.dense_shape[0] x1_indices_expanded = tf.expand_dims(x1.indices, axis=1) x2_indices_expanded = tf.expand_dims(x2.indices, axis=1) x1_indices_count = x1_indices_expanded.shape[0] x2_indices_count = x2_indices_expanded.shape[0] x1_indices_one_hot = tf.scatter_nd( x1_indices_expanded, ones_bool((x1_indices_count,)), (dim_0,), ) x2_indices_one_hot = tf.scatter_nd( x2_indices_expanded, ones_bool((x2_indices_count,)), (dim_0,), ) intersection_indices = tf.squeeze( tf.where(tf.math.logical_and(x1_indices_one_hot, x2_indices_one_hot)), axis=-1, ) intersection_indices_count = tf.shape(intersection_indices)[0] def empty_intersection(): return ( intersection_indices, empty_tensor((0,) + x1.values.shape[1:], x1.dtype), empty_tensor((0,) + x2.values.shape[1:], x2.dtype), ) def non_empty_intersection(): # Re-gather sub parts of the values that are part of the intersection. def values_for_intersection(indices_expanded, indices_count, values): indices_indices = tf.scatter_nd( indices_expanded, tf.range(indices_count), (dim_0,), ) to_intersection_indices = tf.gather( indices_indices, intersection_indices ) return tf.gather(values, to_intersection_indices) # Only recompute values if some indices were removed. x1_values_for_intersection = tf.cond( tf.equal(x1_indices_count, intersection_indices_count), lambda: x1.values, lambda: values_for_intersection( x1_indices_expanded, x1_indices_count, x1.values ), ) x2_values_for_intersection = tf.cond( tf.equal(x2_indices_count, intersection_indices_count), lambda: x2.values, lambda: values_for_intersection( x2_indices_expanded, x2_indices_count, x2.values ), ) return ( intersection_indices, x1_values_for_intersection, x2_values_for_intersection, ) return tf.cond( tf.equal(intersection_indices_count, 0), empty_intersection, non_empty_intersection, ) def densifying_unary(default_value): """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to a non-zero-preserving element-wise unary operator. There are requirements on the operator for this decorator to work correctly: - The operator must be element-wise - The operator must be unary (one input tensor and one output tensor) - The operator must return a tensor of the same shape. Additional arguments to the function (besides the input tensor) are supported. The returned result is a dense tensor and contains `default_value` outside of the indices of the input tensor. Args: default_value: The value to use outside of indices. It must be the value that the operator returns for zero values. 
Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`. """ def wrap_densifying_unary(func): @functools.wraps(func) def sparse_wrapper(x, *args, **kwargs): if isinstance(x, tf.SparseTensor): sparse_output = sparse_with_values( x, func(x.values, *args, **kwargs) ) return sparse_to_dense( sparse_output, tf.cast(default_value, sparse_output.values.dtype), ) elif isinstance(x, tf.IndexedSlices): sparse_output_values = func(x.values, *args, **kwargs) output = tf.fill( x.dense_shape, tf.cast(default_value, sparse_output_values.dtype), ) return tf.tensor_scatter_nd_update( output, tf.expand_dims(x.indices, 1), sparse_output_values ) return func(x, *args, **kwargs) return sparse_wrapper return wrap_densifying_unary def elementwise_unary(func): """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to a zero-preserving element-wise unary operator. There are requirements on the operator for this decorator to work correctly: - The operator must be element-wise - The operator must be unary (one input tensor and one output tensor) - The operator must return a tensor of the same shape, and if it is a `tf.SparseTensor` or `tf.IndexedSlices`, the indices of the result must be the same. Therefore: - Reduction operations are not supported (e.g. `mean`). - Operations for which the result may be dense (e.g. `reciprocal`), or the sparse indices depend on the inputs are not supported (e.g. `clip`). This implies that `func(0)` must be 0. Additional arguments to the function (besides the input tensor) are supported as long as they cannot change the indices of the result. For instance,`round` is supported, but `clip` is not supported as `clip(x, 1.0, 2.0)` would always return a dense tensor. Note that if an input sparse tensor contains zero values, the indices and the zero values are preserved. Args: func: The function to wrap. Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`. """ @functools.wraps(func) def sparse_wrapper(x, *args, **kwargs): if isinstance(x, tf.SparseTensor): return sparse_with_values(x, func(x.values, *args, **kwargs)) elif isinstance(x, tf.IndexedSlices): return tf.IndexedSlices( func(x.values, *args, **kwargs), x.indices, x.dense_shape ) else: return func(x, *args, **kwargs) return sparse_wrapper def elementwise_binary_union(sparse_op, densify_mixed=False): """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to an element-wise binary operator such that the indices present in the result are the union of the indices in the two operand. The primary use case for this is the `add` and `subtract` operators. There are requirements on the operator for this decorator to work correctly: - The operator must be element-wise. - The operator must be binary (two input tensors and one output tensor). - Both inputs must be of the same shape or one input must be a scalar. - The output must be of the same shape as the (non scalar) inputs. - The indices of the output must be the union of the indices of the inputs. This implies that func(0, 0) must be 0. As a result, if one operand is dense or a scalar, then the result will be dense. Additional arguments to the function (besides the input tensors) are not supported. Note that if the result of the operation is zero at some indices, including because the operands were zero at these indices, the zeros and indices are preserved. Args: sparse_op: implementation of the operation for `tf.SparseTensor`. 
Must work if both of the operands are `tf.SparseTensor`s and can optionally work if one of the operand is a `tf.SparseTensor` and the other one is dense tensor, see `densify_mixed`. densify_mixed: if `True`, `sparse_op` does not support a mix of `tf.SparseTensor` and dense tensor or dense tensor with `tf.SparseTensor` and the `tf.SparseTensor` tensor is densified. Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`. """ def wrap_elementwise_binary_union(func): @functools.wraps(func) def sparse_wrapper(x1, x2): if isinstance(x1, tf.SparseTensor): if isinstance(x2, tf.SparseTensor): # x1 is a SparseTensor and x2 is a SparseTensor. if x1.indices is x2.indices: return sparse_with_values( x1, func(x1.values, x2.values) ) else: output = sparse_op(x1, x2) output.set_shape(x1.shape) return output else: # x1 is a SparseTensor. if densify_mixed: x1 = sparse_to_dense(x1) else: if not hasattr(x2, "shape") or len(x2.shape) == 0: # x2 is a scalar, broadcast. x2 = broadcast_scalar_to_sparse_shape(x2, x1) return sparse_op(x1, x2) elif isinstance(x2, tf.SparseTensor): # x2 is a SparseTensor. if densify_mixed: x2 = sparse_to_dense(x2) else: if not hasattr(x1, "shape") or len(x1.shape) == 0: # x1 is a scalar, broadcast. x1 = broadcast_scalar_to_sparse_shape(x1, x2) return sparse_op(x1, x2) elif isinstance(x1, tf.IndexedSlices): if isinstance(x2, tf.IndexedSlices): # x1 is an IndexedSlices and x2 is an IndexedSlices. if x1.indices is x2.indices: return tf.IndexedSlices( func(x1.values, x2.values), x1.indices, x1.dense_shape, ) else: # Compute the union of indices. ( union_indices, x1_values_for_union, x2_values_for_union, ) = indexed_slices_union_indices_and_values( x1, x2.indices, x2.values ) # Now, it is an element-wise operation on the union. return tf.IndexedSlices( func( x1_values_for_union, x2_values_for_union, ), union_indices, x1.dense_shape, ) else: # x1 is an IndexedSlices, densify. x1 = tf.convert_to_tensor(x1) elif isinstance(x2, tf.IndexedSlices): # x2 is an IndexedSlices, densify. x2 = tf.convert_to_tensor(x2) return func(x1, x2) return sparse_wrapper return wrap_elementwise_binary_union def elementwise_binary_intersection(func): """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to an element-wise binary operator such that the indices present in the result are the intersection of the indices in the two operand. The primary use case for this is the `multiply` operator. There are requirements on the operator for this decorator to work correctly: - The operator must be element-wise. - The operator must be binary (two input tensors and one output tensor). - Both inputs must be of the same shape or one input must be a scalar. - The output must be of the same shape as the (non scalar) inputs. - The indices of the output must be the intersection of the indices of the inputs. This implies that func(0, x) and func(x, 0) must be 0 for any x. As a result, if one operand is dense or a scalar, then the indices are the ones from the other operand. Additional arguments to the function (besides the input tensors) are not supported. Note that if the operands contains zero values at some common indices, the indices and the zero values are preserved. Args: func: The function to wrap. Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`. """ @functools.wraps(func) def sparse_wrapper(x1, x2): if isinstance(x1, tf.SparseTensor): if isinstance(x2, tf.SparseTensor): # x1 is a SparseTensor and x2 is a SparseTensor. 
if x1.indices is x2.indices: return sparse_with_values(x1, func(x1.values, x2.values)) else: # Compute the intersection of indices. ( intersection_indices, x1_values_for_intersection, x2_values_for_intersection, ) = sparse_intersection_indices_and_values(x1, x2) # Now, it is an element-wise operation on the intersection. output = tf.SparseTensor( intersection_indices, func( x1_values_for_intersection, x2_values_for_intersection, ), x1.dense_shape, ) output.set_shape(x1.shape) return output else: # x1 is a SparseTensor. if not hasattr(x2, "shape") or len(x2.shape) == 0: # x2 is a scalar, apply func element-wise. return sparse_with_values(x1, func(x1.values, x2)) else: # x2 is dense, gather values from x1 indices. return sparse_with_values( x1, func(x1.values, tf.gather_nd(x2, x1.indices)) ) elif isinstance(x2, tf.SparseTensor): # x2 is a SparseTensor. if not hasattr(x1, "shape") or len(x1.shape) == 0: # x1 is a scalar, apply func element-wise. return sparse_with_values(x2, func(x1, x2.values)) else: # x1 is dense, gather values from x2 indices. return sparse_with_values( x2, func(tf.gather_nd(x1, x2.indices), x2.values) ) elif isinstance(x1, tf.IndexedSlices): if isinstance(x2, tf.IndexedSlices): # x1 is an IndexedSlices and x2 is an IndexedSlices. if x1.indices is x2.indices: return tf.IndexedSlices( func(x1.values, x2.values), x1.indices, x1.dense_shape ) else: # Compute the intersection of indices. ( intersection_indices, x1_values_for_intersection, x2_values_for_intersection, ) = indexed_slices_intersection_indices_and_values(x1, x2) # Now, it is an element-wise operation on the intersection. return tf.IndexedSlices( func( x1_values_for_intersection, x2_values_for_intersection, ), intersection_indices, x1.dense_shape, ) else: # x1 is an IndexedSlices. if not hasattr(x2, "shape") or len(x2.shape) == 0: # x2 is a scalar, apply func element-wise. return tf.IndexedSlices( func(x1.values, x2), x1.indices, x1.dense_shape ) else: # x2 is dense, gather values from x1 indices. return tf.IndexedSlices( func(x1.values, tf.gather(x2, x1.indices)), x1.indices, x1.dense_shape, ) elif isinstance(x2, tf.IndexedSlices): # x2 is an IndexedSlices. if not hasattr(x1, "shape") or len(x1.shape) == 0: # x1 is a scalar, apply func element-wise. return tf.IndexedSlices( func(x1, x2.values), x2.indices, x2.dense_shape ) else: # x1 is dense, gather values from x2 indices. return tf.IndexedSlices( func(tf.gather(x1, x2.indices), x2.values), x2.indices, x2.dense_shape, ) # Default case, no SparseTensor and no IndexedSlices. return func(x1, x2) return sparse_wrapper def elementwise_division(func): """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to element-wise binary division and related operators. This decorator is designed for operations related to the division of two operands (e.g. `divide`). It accepts `tf.SparseTensor` and `tf.IndexedSlices` for both the dividend and the divisor, but handles them differently based on whether they are the dividend or the divisor. - If the divisor is a `tf.SparseTensor` or `tf.IndexedSlices`, it is densified and the result is dense because the result contains Inf or Nan outside of the indices of the dividend. - If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the divisor is dense, it finds occurrences of zeros and NaNs in the divisor. The result may therefore have more indices than there were in the dividend to return correct values where the divisor was zero or NaN. 
- If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the divisor is a scalar, it does the division element-wise. Note that the result is incorrectly sparse if the scalar divisor is zero. Args: func: The function to wrap. Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`. """ @functools.wraps(func) def sparse_wrapper(x1, x2): if isinstance(x1, tf.SparseTensor): if isinstance(x2, tf.SparseTensor): # x1 is a SparseTensor and x2 is a SparseTensor. # Divisor is sparse, meaning we're doing divisions by zero # outside of x2.indices, so the result is dense. Densify both. x1 = sparse_to_dense(x1) x2 = sparse_to_dense(x2) else: # x1 is a SparseTensor. if not hasattr(x2, "shape") or len(x2.shape) == 0: # x2 is a scalar, apply func element-wise. return sparse_with_values(x1, func(x1.values, x2)) else: # x2 is dense. x2_zeros_and_nans = tf.equal(x2, 0) if not tf.as_dtype(x2.dtype).is_integer: x2_zeros_and_nans = tf.math.logical_or( x2_zeros_and_nans, tf.math.is_nan(x2) ) def func_for_x1_indices(): # Gather values from x1 indices. return sparse_with_values( x1, func(x1.values, tf.gather_nd(x2, x1.indices)) ) def func_for_union_indices(): # Compute the union of indices to keep zeros and NaNs. x2_zeros_and_nan_indices = tf.where(x2_zeros_and_nans) ( union_indices, x1_values_for_union, _, ) = sparse_union_indices_and_values( x1, x2_zeros_and_nan_indices ) output = tf.SparseTensor( union_indices, func( x1_values_for_union, tf.gather_nd(x2, union_indices), ), x1.dense_shape, ) output.set_shape(x1.shape) return output return tf.cond( tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices, ) elif isinstance(x2, tf.SparseTensor): # x2 is a SparseTensor. # Divisor is sparse, densify to do the divisions by zero correctly. x2 = sparse_to_dense(x2) elif isinstance(x1, tf.IndexedSlices): if isinstance(x2, tf.IndexedSlices): # x1 is an IndexedSlices and x2 is an IndexedSlices. # Divisor is slices, meaning we're doing divisions by zero # outside of x2.indices, so the result is dense. Densify both. x1 = tf.convert_to_tensor(x1) x2 = tf.convert_to_tensor(x2) else: # x1 is a IndexedSlices. if not hasattr(x2, "shape") or len(x2.shape) == 0: # x2 is a scalar, apply func element-wise. return tf.IndexedSlices( func(x1.values, x2), x1.indices, x1.dense_shape ) else: # x2 is dense. x2_zeros_and_nans = tf.equal(x2, 0) if not tf.as_dtype(x2.dtype).is_integer: x2_zeros_and_nans = tf.math.logical_or( x2_zeros_and_nans, tf.math.is_nan(x2) ) x2_zeros_and_nans = tf.reduce_any( x2_zeros_and_nans, axis=tuple(range(1, x2.shape.rank)) ) def func_for_x1_indices(): # Gather values from x1 indices. return tf.IndexedSlices( func(x1.values, tf.gather(x2, x1.indices)), x1.indices, x1.dense_shape, ) def func_for_union_indices(): x2_zeros_and_nan_indices = tf.squeeze( tf.where(x2_zeros_and_nans), axis=-1 ) # Compute the union of indices to keep zeros and NaNs. ( union_indices, x1_values_for_union, _, ) = indexed_slices_union_indices_and_values( x1, x2_zeros_and_nan_indices ) return tf.IndexedSlices( func( x1_values_for_union, tf.gather(x2, union_indices), ), union_indices, x1.dense_shape, ) return tf.cond( tf.reduce_any(x2_zeros_and_nans), func_for_union_indices, func_for_x1_indices, ) elif isinstance(x2, tf.IndexedSlices): # x2 is a IndexedSlices. # Divisor is slices, densify to do the divisions by zero correctly. x2 = tf.convert_to_tensor(x2) # Default case, no SparseTensor and no IndexedSlices. return func(x1, x2) return sparse_wrapper
keras/keras/backend/tensorflow/sparse.py/0
{ "file_path": "keras/keras/backend/tensorflow/sparse.py", "repo_id": "keras", "token_count": 16588 }
165
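As a quick illustration of the decorators defined above, the hedged sketch below wraps a zero-preserving unary op (`tf.sign`) with `elementwise_unary` so a `tf.SparseTensor` input stays sparse with unchanged indices. The import path simply mirrors the file's location and is otherwise an assumption about how you would reach these internal helpers.

```python
import tensorflow as tf

from keras.backend.tensorflow.sparse import elementwise_unary


@elementwise_unary
def sign(x):
    # Element-wise, unary, and zero-preserving: sign(0) == 0.
    return tf.sign(x)


st = tf.SparseTensor(
    indices=[[0, 1], [2, 3]], values=[-5.0, 7.0], dense_shape=[3, 4]
)
out = sign(st)  # still a tf.SparseTensor with the same indices
print(tf.sparse.to_dense(out))
```

A non-zero-preserving op such as `exp` would instead use `densifying_unary(1.0)`, which returns a dense result filled with the default value outside the sparse indices.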
import torch from keras import ops from keras import optimizers from keras.backend.torch.optimizers import torch_parallel_optimizer class Adagrad( torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adagrad ): def _parallel_update_step( self, grads, variables, learning_rate, ): keras_variables = variables variables = [v.value for v in variables] dtype = variables[0].dtype lr = ops.cast(learning_rate, dtype) accumulators = [ self._accumulators[self._get_variable_index(variable)].value for variable in keras_variables ] torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads)) torch._foreach_add_( variables, torch._foreach_div( torch._foreach_mul(grads, lr), torch._foreach_sqrt( torch._foreach_add(accumulators, self.epsilon) ), ), alpha=-1, )
keras/keras/backend/torch/optimizers/torch_adagrad.py/0
{ "file_path": "keras/keras/backend/torch/optimizers/torch_adagrad.py", "repo_id": "keras", "token_count": 504 }
166
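The `torch._foreach_*` primitives above update every parameter in one fused call instead of looping in Python. Below is a self-contained sketch of the same Adagrad arithmetic on made-up tensors; note these are private PyTorch APIs, used here only to mirror the code above.

```python
import torch

lr, eps = 0.1, 1e-7
params = [torch.ones(3), torch.ones(2)]
grads = [torch.full((3,), 0.5), torch.full((2,), 2.0)]
accumulators = [torch.zeros(3), torch.zeros(2)]

# accumulator += grad * grad
torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads))
# param += -lr * grad / sqrt(accumulator + eps)
torch._foreach_add_(
    params,
    torch._foreach_div(
        torch._foreach_mul(grads, lr),
        torch._foreach_sqrt(torch._foreach_add(accumulators, eps)),
    ),
    alpha=-1,
)
print(params)
```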
from keras import backend from keras.api_export import keras_export @keras_export("keras.callbacks.Callback") class Callback: """Base class used to build new callbacks. Callbacks can be passed to keras methods such as `fit()`, `evaluate()`, and `predict()` in order to hook into the various stages of the model training, evaluation, and inference lifecycle. To create a custom callback, subclass `keras.callbacks.Callback` and override the method associated with the stage of interest. Example: >>> training_finished = False >>> class MyCallback(Callback): ... def on_train_end(self, logs=None): ... global training_finished ... training_finished = True >>> model = Sequential([ ... layers.Dense(1, input_shape=(1,))]) >>> model.compile(loss='mean_squared_error') >>> model.fit(np.array([[1.0]]), np.array([[1.0]]), ... callbacks=[MyCallback()]) >>> assert training_finished == True If you want to use `Callback` objects in a custom training loop: 1. You should pack all your callbacks into a single `callbacks.CallbackList` so they can all be called together. 2. You will need to manually call all the `on_*` methods at the appropriate locations in your loop. Like this: Example: ```python callbacks = keras.callbacks.CallbackList([...]) callbacks.append(...) callbacks.on_train_begin(...) for epoch in range(EPOCHS): callbacks.on_epoch_begin(epoch) for i, data in dataset.enumerate(): callbacks.on_train_batch_begin(i) batch_logs = model.train_step(data) callbacks.on_train_batch_end(i, batch_logs) epoch_logs = ... callbacks.on_epoch_end(epoch, epoch_logs) final_logs=... callbacks.on_train_end(final_logs) ``` Attributes: params: Dict. Training parameters (eg. verbosity, batch size, number of epochs...). model: Instance of `Model`. Reference of the model being trained. The `logs` dictionary that callback methods take as argument will contain keys for quantities relevant to the current batch or epoch (see method-specific docstrings). """ def __init__(self): self.validation_data = None self._model = None def set_params(self, params): self.params = params def set_model(self, model): self._model = model @property def model(self): if backend.backend() == "jax" and hasattr( self._model, "jax_state_sync" ): # With JAX, by default the model state is not # attached to the model in the middle of an # epoch. We have to force a sync before # accessing model state for e.g. checkpointing. self._model.jax_state_sync() return self._model def on_batch_begin(self, batch, logs=None): """A backwards compatibility alias for `on_train_batch_begin`.""" def on_batch_end(self, batch, logs=None): """A backwards compatibility alias for `on_train_batch_end`.""" def on_epoch_begin(self, epoch, logs=None): """Called at the start of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_epoch_end(self, epoch, logs=None): """Called at the end of an epoch. Subclasses should override for any actions to run. This function should only be called during TRAIN mode. Args: epoch: Integer, index of epoch. logs: Dict, metric results for this training epoch, and for the validation epoch if validation is performed. Validation result keys are prefixed with `val_`. For training epoch, the values of the `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy': 0.7}`. 
""" def on_train_batch_begin(self, batch, logs=None): """Called at the beginning of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ # For backwards compatibility. self.on_batch_begin(batch, logs=logs) def on_train_batch_end(self, batch, logs=None): """Called at the end of a training batch in `fit` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ # For backwards compatibility. self.on_batch_end(batch, logs=logs) def on_test_batch_begin(self, batch, logs=None): """Called at the beginning of a batch in `evaluate` methods. Also called at the beginning of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_test_batch_end(self, batch, logs=None): """Called at the end of a batch in `evaluate` methods. Also called at the end of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ def on_predict_batch_begin(self, batch, logs=None): """Called at the beginning of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_predict_batch_end(self, batch, logs=None): """Called at the end of a batch in `predict` methods. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch. """ def on_train_begin(self, logs=None): """Called at the beginning of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_train_end(self, logs=None): """Called at the end of training. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_epoch_end()` is passed to this argument for this method but that may change in the future. 
""" def on_test_begin(self, logs=None): """Called at the beginning of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_test_end(self, logs=None): """Called at the end of evaluation or validation. Subclasses should override for any actions to run. Args: logs: Dict. Currently the output of the last call to `on_test_batch_end()` is passed to this argument for this method but that may change in the future. """ def on_predict_begin(self, logs=None): """Called at the beginning of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """ def on_predict_end(self, logs=None): """Called at the end of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future. """
keras/keras/callbacks/callback.py/0
{ "file_path": "keras/keras/callbacks/callback.py", "repo_id": "keras", "token_count": 3811 }
167
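A compact, runnable example of the subclassing pattern described above; the tiny model and random data are placeholders just to exercise the hook.

```python
import numpy as np

import keras
from keras import callbacks, layers


class LossLogger(callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print(f"epoch {epoch}: loss={logs.get('loss'):.4f}")


model = keras.Sequential([layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")
x = np.random.rand(32, 4).astype("float32")
y = np.random.rand(32, 1).astype("float32")
model.fit(x, y, epochs=2, callbacks=[LossLogger()], verbose=0)
```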
import pytest from keras import callbacks from keras import layers from keras import optimizers from keras import testing from keras.models import Sequential from keras.testing import test_utils from keras.utils import io_utils from keras.utils import numerical_utils class ReduceLROnPlateauTest(testing.TestCase): def setUp(self): (x_train, y_train), (x_test, y_test) = test_utils.get_test_data( train_samples=10, test_samples=10, input_shape=(3,), num_classes=2, ) y_test = numerical_utils.to_categorical(y_test) y_train = numerical_utils.to_categorical(y_train) model = Sequential([layers.Dense(5), layers.Dense(2)]) model.compile( loss="mse", optimizer=optimizers.Adam(0.1), ) self.model = model self.x_train = x_train self.x_test = x_test self.y_train = y_train self.y_test = y_test @pytest.mark.requires_trainable_backend def test_reduces_lr_with_model_fit(self): reduce_lr = callbacks.ReduceLROnPlateau( patience=1, factor=0.1, monitor="val_loss", min_delta=100 ) self.model.fit( self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), callbacks=[reduce_lr], epochs=2, ) self.assertEqual(self.model.optimizer.learning_rate.value, 0.01) @pytest.mark.requires_trainable_backend def test_throws_when_optimizer_has_schedule(self): reduce_lr = callbacks.ReduceLROnPlateau( patience=1, factor=0.1, monitor="val_loss", min_delta=100 ) self.model.compile( loss="mse", optimizer=optimizers.Adam( optimizers.schedules.PolynomialDecay( initial_learning_rate=0.1, decay_steps=10 ) ), ) with self.assertRaisesRegex( TypeError, "This optimizer was created with a `LearningRateSchedule`", ): self.model.fit( self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), callbacks=[reduce_lr], epochs=2, ) @pytest.mark.requires_trainable_backend def test_verbose_logging(self): reduce_lr = callbacks.ReduceLROnPlateau( patience=1, factor=0.1, monitor="val_loss", min_delta=100, verbose=1 ) io_utils.disable_interactive_logging() io_utils.set_logging_verbosity("INFO") with self.assertLogs() as logs: self.model.fit( self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), callbacks=[reduce_lr], epochs=2, ) expected_log = "ReduceLROnPlateau reducing learning rate to 0.01" self.assertTrue(any(expected_log in log for log in logs.output)) @pytest.mark.requires_trainable_backend def test_honors_min_lr(self): reduce_lr = callbacks.ReduceLROnPlateau( patience=1, factor=0.1, monitor="val_loss", min_delta=10, min_lr=0.005, ) self.model.fit( self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), callbacks=[reduce_lr], epochs=4, ) self.assertEqual(self.model.optimizer.learning_rate.value, 0.005) @pytest.mark.requires_trainable_backend def test_cooldown(self): reduce_lr = callbacks.ReduceLROnPlateau( patience=1, factor=0.1, monitor="val_loss", min_delta=100, cooldown=2, ) self.model.fit( self.x_train, self.y_train, validation_data=(self.x_test, self.y_test), callbacks=[reduce_lr], epochs=4, ) # With a cooldown of 2 epochs, we should only reduce the LR every other # epoch, so after 4 epochs we will have reduced 2 times. self.assertAllClose(self.model.optimizer.learning_rate.value, 0.001)
keras/keras/callbacks/reduce_lr_on_plateau_test.py/0
{ "file_path": "keras/keras/callbacks/reduce_lr_on_plateau_test.py", "repo_id": "keras", "token_count": 2220 }
168
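Translated out of the test harness, here is a hedged sketch of the behaviour these tests pin down: with an artificially large `min_delta`, every epoch counts as a plateau, so the learning rate is multiplied by `factor` on each reduction until `min_lr` clips it. Model and data are synthetic placeholders.

```python
import numpy as np

import keras
from keras import callbacks, layers, optimizers

model = keras.Sequential([layers.Dense(2)])
model.compile(loss="mse", optimizer=optimizers.Adam(0.1))

x = np.random.rand(16, 3).astype("float32")
y = np.random.rand(16, 2).astype("float32")

reduce_lr = callbacks.ReduceLROnPlateau(
    monitor="val_loss", factor=0.1, patience=1, min_delta=100, min_lr=0.005
)
model.fit(
    x, y, validation_data=(x, y), epochs=4, callbacks=[reduce_lr], verbose=0
)
print(model.optimizer.learning_rate.value)  # reduced, but never below 0.005
```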
from keras import ops from keras.api_export import keras_export from keras.backend import standardize_dtype from keras.initializers.initializer import Initializer from keras.saving import serialization_lib @keras_export(["keras.initializers.Constant", "keras.initializers.constant"]) class Constant(Initializer): """Initializer that generates tensors with constant values. Only scalar values are allowed. The constant value provided must be convertible to the dtype requested when calling the initializer. Examples: >>> # Standalone usage: >>> initializer = Constant(10.) >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = Constant(10.) >>> layer = Dense(3, kernel_initializer=initializer) Args: value: A Python scalar. """ def __init__(self, value=0.0): self.value = value def __call__(self, shape, dtype=None): dtype = standardize_dtype(dtype) return ops.cast(self.value, dtype=dtype) * ops.ones( shape=shape, dtype=dtype ) def get_config(self): return {"value": serialization_lib.serialize_keras_object(self.value)} @classmethod def from_config(cls, config): value = serialization_lib.deserialize_keras_object(config["value"]) return cls(value) @keras_export(["keras.initializers.Zeros", "keras.initializers.zeros"]) class Zeros(Initializer): """Initializer that generates tensors initialized to 0. Examples: >>> # Standalone usage: >>> initializer = Zeros() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = Zeros() >>> layer = Dense(units=3, kernel_initializer=initializer) """ def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `keras.backend.set_floatx(float_dtype)`). """ dtype = standardize_dtype(dtype) return ops.zeros(shape, dtype=dtype) @keras_export(["keras.initializers.Ones", "keras.initializers.ones"]) class Ones(Initializer): """Initializer that generates tensors initialized to 1. Also available via the shortcut function `ones`. Examples: >>> # Standalone usage: >>> initializer = Ones() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = Ones() >>> layer = Dense(3, kernel_initializer=initializer) """ def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `keras.backend.set_floatx(float_dtype)`). """ dtype = standardize_dtype(dtype) return ops.ones(shape, dtype=dtype) @keras_export( [ "keras.initializers.IdentityInitializer", "keras.initializers.Identity", "keras.initializers.identity", ] ) class Identity(Initializer): """Initializer that generates the identity matrix. Only usable for generating 2D matrices. Examples: >>> # Standalone usage: >>> initializer = Identity() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = Identity() >>> layer = Dense(3, kernel_initializer=initializer) Args: gain: Multiplicative factor to apply to the identity matrix. 
""" def __init__(self, gain=1.0): self.gain = gain def __call__(self, shape, dtype=None): """Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. If not specified, `keras.backend.floatx()` is used, which default to `float32` unless you configured it otherwise (via `keras.backend.set_floatx(float_dtype)`). """ if len(shape) != 2: raise ValueError( "Identity matrix initializer can only be used for 2D matrices. " f"Received: shape={shape} of rank {len(shape)}." ) dtype = standardize_dtype(dtype) return self.gain * ops.eye(*shape, dtype=dtype)
keras/keras/initializers/constant_initializers.py/0
{ "file_path": "keras/keras/initializers/constant_initializers.py", "repo_id": "keras", "token_count": 1918 }
169
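A short usage sketch for the initializers defined above, both standalone and passed to a layer; shapes are arbitrary.

```python
from keras import initializers, layers

print(initializers.Constant(10.0)(shape=(2, 2)))      # all 10s
print(initializers.Zeros()(shape=(2, 3)))             # all 0s
print(initializers.Identity(gain=2.0)(shape=(3, 3)))  # 2 * identity matrix

# Typical use: hand the initializer to a layer.
layer = layers.Dense(4, kernel_initializer=initializers.Ones())
```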
"""Keras abstract base layer for separable convolution.""" from keras import activations from keras import constraints from keras import initializers from keras import ops from keras import regularizers from keras.backend import standardize_data_format from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer from keras.ops.operation_utils import compute_conv_output_shape from keras.utils.argument_validation import standardize_padding from keras.utils.argument_validation import standardize_tuple class BaseSeparableConv(Layer): """Abstract base layer for separable convolution. This layer performs a depthwise convolution that acts separately on channels, followed by a pointwise convolution that mixes channels. If `use_bias` is True and a bias initializer is provided, it adds a bias vector to the output. Args: rank: int, the rank of the convolution, e.g. 2 for 2D convolution. depth_multiplier: The number of depthwise convolution output channels for each input channel. The total number of depthwise convolution output channels will be equal to `input_channel * depth_multiplier`. filters: int, the dimensionality of the output space (i.e. the number of filters in the pointwise convolution). kernel_size: int or tuple/list of `rank` integers, specifying the size of the depthwise convolution window. strides: int or tuple/list of `rank` integers, specifying the stride length of the depthwise convolution. If only one int is specified, the same stride size will be used for all dimensions. `stride value != 1` is incompatible with `dilation_rate != 1`. padding: string, either `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"` results in padding evenly to the left/right or up/down of the input. When `padding="same"` and `strides=1`, the output has the same size as the input. data_format: string, either `"channels_last"` or `"channels_first"`. The ordering of the dimensions in the inputs. `"channels_last"` corresponds to inputs with shape `(batch, steps, features)` while `"channels_first"` corresponds to inputs with shape `(batch, features, steps)`. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be `"channels_last"`. dilation_rate: int or tuple/list of `rank` integers, specifying the dilation rate to use for dilated convolution. If only one int is specified, the same dilation rate will be used for all dimensions. activation: Activation function. If `None`, no activation is applied. use_bias: bool, if `True`, bias will be added to the output. depthwise_initializer: An initializer for the depthwise convolution kernel. If None, then the default initializer (`"glorot_uniform"`) will be used. pointwise_initializer: An initializer for the pointwise convolution kernel. If None, then the default initializer (`"glorot_uniform"`) will be used. bias_initializer: An initializer for the bias vector. If None, the default initializer ('"zeros"') will be used. depthwise_regularizer: Optional regularizer for the depthwise convolution kernel. pointwise_regularizer: Optional regularizer for the pointwise convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. depthwise_constraint: Optional projection function to be applied to the depthwise kernel after being updated by an `Optimizer` (e.g. used for norm constraints or value constraints for layer weights). 
The function must take as input the unprojected variable and must
            return the projected variable (which must have the same shape).
        pointwise_constraint: Optional projection function to be applied to
            the pointwise kernel after being updated by an `Optimizer`.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    """

    def __init__(
        self,
        rank,
        depth_multiplier,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs,
    ):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs,
        )
        self.rank = rank
        self.depth_multiplier = depth_multiplier
        self.filters = filters
        self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
        self.strides = standardize_tuple(strides, rank, "strides")
        self.dilation_rate = standardize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.padding = standardize_padding(padding)
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.pointwise_initializer = initializers.get(pointwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.pointwise_constraint = constraints.get(pointwise_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        if self.depth_multiplier is not None and self.depth_multiplier <= 0:
            raise ValueError(
                "Invalid value for argument `depth_multiplier`. Expected a "
                "strictly positive value. Received "
                f"depth_multiplier={self.depth_multiplier}."
            )
        if self.filters is not None and self.filters <= 0:
            raise ValueError(
                "Invalid value for argument `filters`. Expected a strictly "
                f"positive value. Received filters={self.filters}."
            )
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0. Received: "
                f"kernel_size={self.kernel_size}."
            )
        if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0. Received: "
                f"strides={self.strides}"
            )
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(
                "`strides > 1` not supported in conjunction with "
                f"`dilation_rate > 1`. Received: strides={self.strides} and "
                f"dilation_rate={self.dilation_rate}"
            )

    def build(self, input_shape):
        if self.data_format == "channels_last":
            channel_axis = -1
            input_channel = input_shape[-1]
        else:
            channel_axis = 1
            input_channel = input_shape[1]
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        depthwise_kernel_shape = self.kernel_size + (
            input_channel,
            self.depth_multiplier,
        )
        pointwise_kernel_shape = (1,) * self.rank + (
            self.depth_multiplier * input_channel,
            self.filters,
        )

        self.depthwise_kernel = self.add_weight(
            name="depthwise_kernel",
            shape=depthwise_kernel_shape,
            initializer=self.depthwise_initializer,
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        self.pointwise_kernel = self.add_weight(
            name="pointwise_kernel",
            shape=pointwise_kernel_shape,
            initializer=self.pointwise_initializer,
            regularizer=self.pointwise_regularizer,
            constraint=self.pointwise_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        self.built = True

    def call(self, inputs):
        outputs = ops.separable_conv(
            inputs,
            self.depthwise_kernel,
            self.pointwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )

        if self.use_bias:
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs += bias

        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        return compute_conv_output_shape(
            input_shape,
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "depth_multiplier": self.depth_multiplier,
                "filters": self.filters,
                "kernel_size": self.kernel_size,
                "strides": self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate,
                "activation": activations.serialize(self.activation),
                "use_bias": self.use_bias,
                "depthwise_initializer": initializers.serialize(
                    self.depthwise_initializer
                ),
                "pointwise_initializer": initializers.serialize(
                    self.pointwise_initializer
                ),
                "bias_initializer": initializers.serialize(
                    self.bias_initializer
                ),
                "depthwise_regularizer": regularizers.serialize(
                    self.depthwise_regularizer
                ),
                "pointwise_regularizer": regularizers.serialize(
                    self.pointwise_regularizer
                ),
                "bias_regularizer": regularizers.serialize(
                    self.bias_regularizer
                ),
                "activity_regularizer": regularizers.serialize(
                    self.activity_regularizer
                ),
                "depthwise_constraint": constraints.serialize(
                    self.depthwise_constraint
                ),
                "pointwise_constraint": constraints.serialize(
                    self.pointwise_constraint
                ),
                "bias_constraint": constraints.serialize(self.bias_constraint),
            }
        )
        return config
keras/keras/layers/convolutional/base_separable_conv.py/0
{ "file_path": "keras/keras/layers/convolutional/base_separable_conv.py", "repo_id": "keras", "token_count": 5676 }
170
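As a quick cross-check of the docstring and `build()` logic in the file above, the arithmetic below works through a hypothetical 2D configuration (all numbers are invented for illustration) and compares parameter counts with a regular convolution. It only mirrors the shape rules stated in `build()`; it does not import Keras.

```python
# Hypothetical 2D separable convolution (illustrative numbers only).
rank = 2
kernel_size = (3, 3)
input_channel = 8
depth_multiplier = 2
filters = 16

# Shape rules copied from BaseSeparableConv.build().
depthwise_kernel_shape = kernel_size + (input_channel, depth_multiplier)
pointwise_kernel_shape = (1,) * rank + (depth_multiplier * input_channel, filters)
print(depthwise_kernel_shape)  # (3, 3, 8, 2)
print(pointwise_kernel_shape)  # (1, 1, 16, 16)

# Parameter count (ignoring bias) vs. a regular conv with the same filters.
separable_params = 3 * 3 * 8 * 2 + 1 * 1 * 16 * 16  # 144 + 256 = 400
regular_params = 3 * 3 * 8 * 16  # 1152
print(separable_params, regular_params)
```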
import numpy as np from keras import activations from keras import constraints from keras import initializers from keras import ops from keras import regularizers from keras.api_export import keras_export from keras.layers.input_spec import InputSpec from keras.layers.layer import Layer @keras_export("keras.layers.Dense") class Dense(Layer): """Just your regular densely-connected NN layer. `Dense` implements the operation: `output = activation(dot(input, kernel) + bias)` where `activation` is the element-wise activation function passed as the `activation` argument, `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only applicable if `use_bias` is `True`). Note: If the input to the layer has a rank greater than 2, `Dense` computes the dot product between the `inputs` and the `kernel` along the last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`). For example, if input has dimensions `(batch_size, d0, d1)`, then we create a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2 of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are `batch_size * d0` such sub-tensors). The output in this case will have shape `(batch_size, d0, units)`. Args: units: Positive integer, dimensionality of the output space. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation"). kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. lora_rank: Optional integer. If set, the layer's forward pass will implement LoRA (Low-Rank Adaptation) with the provided rank. LoRA sets the layer's kernel to non-trainable and replaces it with a delta over the original kernel, obtained via multiplying two lower-rank trainable matrices. This can be useful to reduce the computation cost of fine-tuning large dense layers. You can also enable LoRA on an existing `Dense` layer by calling `layer.enable_lora(rank)`. Input shape: N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common situation would be a 2D input with shape `(batch_size, input_dim)`. Output shape: N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`. 
""" def __init__( self, units, activation=None, use_bias=True, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, lora_rank=None, **kwargs, ): super().__init__(activity_regularizer=activity_regularizer, **kwargs) self.units = units self.activation = activations.get(activation) self.use_bias = use_bias self.kernel_initializer = initializers.get(kernel_initializer) self.bias_initializer = initializers.get(bias_initializer) self.kernel_regularizer = regularizers.get(kernel_regularizer) self.bias_regularizer = regularizers.get(bias_regularizer) self.kernel_constraint = constraints.get(kernel_constraint) self.bias_constraint = constraints.get(bias_constraint) self.lora_rank = lora_rank self.lora_enabled = False self.input_spec = InputSpec(min_ndim=2) self.supports_masking = True def build(self, input_shape): input_dim = input_shape[-1] self._kernel = self.add_weight( name="kernel", shape=(input_dim, self.units), initializer=self.kernel_initializer, regularizer=self.kernel_regularizer, constraint=self.kernel_constraint, ) if self.use_bias: self.bias = self.add_weight( name="bias", shape=(self.units,), initializer=self.bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint, ) else: self.bias = None self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim}) self.built = True if self.lora_rank: self.enable_lora(self.lora_rank) @property def kernel(self): if not self.built: raise AttributeError( "You must build the layer before accessing `kernel`." ) if self.lora_enabled: return self._kernel + ops.matmul( self.lora_kernel_a, self.lora_kernel_b ) return self._kernel def call(self, inputs): x = ops.matmul(inputs, self.kernel) if self.bias is not None: x = x + self.bias if self.activation is not None: x = self.activation(x) return x def compute_output_shape(self, input_shape): output_shape = list(input_shape) output_shape[-1] = self.units return tuple(output_shape) def enable_lora( self, rank, a_initializer="he_uniform", b_initializer="zeros" ): if self.kernel_constraint: raise ValueError( "Lora is incompatible with kernel constraints. " "In order to enable lora on this layer, remove the " "`kernel_constraint` argument." ) if not self.built: raise ValueError( "Cannot enable lora on a layer that isn't yet built." ) if self.lora_enabled: raise ValueError( "lora is already enabled. " "This can only be done once per layer." 
) self._tracker.unlock() self.lora_kernel_a = self.add_weight( name="lora_kernel_a", shape=(self.kernel.shape[0], rank), initializer=initializers.get(a_initializer), regularizer=self.kernel_regularizer, ) self.lora_kernel_b = self.add_weight( name="lora_kernel_b", shape=(rank, self.kernel.shape[1]), initializer=initializers.get(b_initializer), regularizer=self.kernel_regularizer, ) self.kernel.trainable = False self._tracker.lock() self.lora_enabled = True def save_own_variables(self, store): if not self.lora_enabled: return super().save_own_variables(store) kernel_value = self.kernel store["0"] = kernel_value if self.use_bias: store["1"] = self.bias def load_own_variables(self, store): if not self.lora_enabled: return super().load_own_variables(store) self._kernel.assign(store["0"]) if self.use_bias: self.bias.assign(store["1"]) self.lora_kernel_a.assign(np.zeros(self.lora_kernel_a.shape)) self.lora_kernel_b.assign(np.zeros(self.lora_kernel_b.shape)) def get_config(self): base_config = super().get_config() config = { "units": self.units, "activation": activations.serialize(self.activation), "use_bias": self.use_bias, "kernel_initializer": initializers.serialize( self.kernel_initializer ), "bias_initializer": initializers.serialize(self.bias_initializer), "kernel_regularizer": regularizers.serialize( self.kernel_regularizer ), "bias_regularizer": regularizers.serialize(self.bias_regularizer), "kernel_constraint": constraints.serialize(self.kernel_constraint), "bias_constraint": constraints.serialize(self.bias_constraint), } if self.lora_rank: config["lora_rank"] = self.lora_rank return {**base_config, **config}
keras/keras/layers/core/dense.py/0
{ "file_path": "keras/keras/layers/core/dense.py", "repo_id": "keras", "token_count": 3866 }
171
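A minimal sketch of the LoRA path implemented by the `Dense` layer above, assuming a working Keras 3 environment; the layer width (16), input size (32), and rank (2) are arbitrary. It only inspects the weights that `enable_lora()` creates.

```python
from keras import layers

# Build a Dense layer on a known input size, then enable LoRA with rank 2.
layer = layers.Dense(units=16)
layer.build((None, 32))
layer.enable_lora(2)

# The base (32, 16) kernel is frozen; only the bias and the low-rank factors train.
print(layer.lora_kernel_a.shape)  # (32, 2)
print(layer.lora_kernel_b.shape)  # (2, 16)
print([w.name for w in layer.trainable_weights])  # bias plus the two LoRA factors
```

Per the `kernel` property above, the kernel used in `call()` is then the frozen base kernel plus the delta `lora_kernel_a @ lora_kernel_b`.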
import tree from keras import backend from keras.api_export import keras_export @keras_export(["keras.InputSpec", "keras.layers.InputSpec"]) class InputSpec: """Specifies the rank, dtype and shape of every input to a layer. Layers can expose (if appropriate) an `input_spec` attribute: an instance of `InputSpec`, or a nested structure of `InputSpec` instances (one per input tensor). These objects enable the layer to run input compatibility checks for input structure, input rank, input shape, and input dtype for the first argument of `Layer.__call__`. A `None` entry in a shape is compatible with any dimension. Args: dtype: Expected dtype of the input. shape: Shape tuple, expected shape of the input (may include `None` for dynamic axes). Includes the batch size. ndim: Integer, expected rank of the input. max_ndim: Integer, maximum rank of the input. min_ndim: Integer, minimum rank of the input. axes: Dictionary mapping integer axes to a specific dimension value. allow_last_axis_squeeze: If `True`, allow inputs of rank N+1 as long as the last axis of the input is 1, as well as inputs of rank N-1 as long as the last axis of the spec is 1. name: Expected key corresponding to this input when passing data as a dictionary. Example: ```python class MyLayer(Layer): def __init__(self): super().__init__() # The layer will accept inputs with # shape (*, 28, 28) & (*, 28, 28, 1) # and raise an appropriate error message otherwise. self.input_spec = InputSpec( shape=(None, 28, 28, 1), allow_last_axis_squeeze=True) ``` """ def __init__( self, dtype=None, shape=None, ndim=None, max_ndim=None, min_ndim=None, axes=None, allow_last_axis_squeeze=False, name=None, ): self.dtype = ( backend.standardize_dtype(dtype) if dtype is not None else None ) if shape is not None: self.shape = backend.standardize_shape(shape) self.ndim = len(shape) else: self.ndim = ndim self.shape = None self.max_ndim = max_ndim self.min_ndim = min_ndim self.name = name self.allow_last_axis_squeeze = allow_last_axis_squeeze try: axes = axes or {} self.axes = {int(k): axes[k] for k in axes} except (ValueError, TypeError): raise TypeError( "Argument `axes` must be a dict with integer keys. " f"Received: axes={axes}" ) if self.axes and (self.ndim is not None or self.max_ndim is not None): max_dim = (self.ndim if self.ndim else self.max_ndim) - 1 max_axis = max(self.axes) if max_axis > max_dim: raise ValueError( "Axis {} is greater than the maximum " "allowed value: {}".format(max_axis, max_dim) ) def __repr__(self): spec = [ ("dtype=" + str(self.dtype)) if self.dtype else "", ("shape=" + str(self.shape)) if self.shape else "", ("ndim=" + str(self.ndim)) if self.ndim else "", ("max_ndim=" + str(self.max_ndim)) if self.max_ndim else "", ("min_ndim=" + str(self.min_ndim)) if self.min_ndim else "", ("axes=" + str(self.axes)) if self.axes else "", ] return f"InputSpec({', '.join(x for x in spec if x)})" def get_config(self): return { "dtype": self.dtype, "shape": self.shape, "ndim": self.ndim, "max_ndim": self.max_ndim, "min_ndim": self.min_ndim, "axes": self.axes, } @classmethod def from_config(cls, config): return cls(**config) def assert_input_compatibility(input_spec, inputs, layer_name): """Checks compatibility between the layer and provided inputs. This checks that the tensor(s) `inputs` verify the input assumptions of a layer (if any). If not, a clear and actional exception gets raised. Args: input_spec: An InputSpec instance, list of InputSpec instances, a nested structure of InputSpec instances, or None. 
inputs: Input tensor, list of input tensors, or a nested structure of input tensors. layer_name: String, name of the layer (for error message formatting). Raises: ValueError: in case of mismatch between the provided inputs and the expectations of the layer. """ if not input_spec: return input_spec = tree.flatten(input_spec) if isinstance(inputs, dict): # Flatten `inputs` by reference order if input spec names are provided names = [spec.name for spec in input_spec] if all(names): list_inputs = [] for name in names: if name not in inputs: raise ValueError( f'Missing data for input "{name}". ' "You passed a data dictionary with keys " f"{list(inputs.keys())}. " f"Expected the following keys: {names}" ) list_inputs.append(inputs[name]) inputs = list_inputs inputs = tree.flatten(inputs) if len(input_spec) != len(inputs): raise ValueError( f"Layer '{layer_name}' expected {len(input_spec)} input(s). " f"Received {len(inputs)} instead." ) for x in inputs: # Having a shape/dtype is the only commonality of the various # tensor-like objects that may be passed. The most common kind of # invalid type we are guarding for is a Layer instance (Functional API), # which does not have a `shape` attribute. if not hasattr(x, "shape"): raise ValueError( f"Inputs to a layer should be tensors. Got '{x}' " f"(of type {type(x)}) as input for layer '{layer_name}'." ) if len(inputs) != len(input_spec): raise ValueError( f'Layer "{layer_name}" expects {len(input_spec)} input(s),' f" but it received {len(inputs)} input tensors. " f"Inputs received: {inputs}" ) for input_index, (x, spec) in enumerate(zip(inputs, input_spec)): if spec is None: continue shape = backend.standardize_shape(x.shape) ndim = len(shape) # Check ndim. if spec.ndim is not None and not spec.allow_last_axis_squeeze: if ndim != spec.ndim: raise ValueError( f'Input {input_index} of layer "{layer_name}" ' "is incompatible with the layer: " f"expected ndim={spec.ndim}, found ndim={ndim}. " f"Full shape received: {shape}" ) if spec.max_ndim is not None: if ndim is not None and ndim > spec.max_ndim: raise ValueError( f'Input {input_index} of layer "{layer_name}" ' "is incompatible with the layer: " f"expected max_ndim={spec.max_ndim}, " f"found ndim={ndim}" ) if spec.min_ndim is not None: if ndim is not None and ndim < spec.min_ndim: raise ValueError( f'Input {input_index} of layer "{layer_name}" ' "is incompatible with the layer: " f"expected min_ndim={spec.min_ndim}, " f"found ndim={ndim}. " f"Full shape received: {shape}" ) # Check dtype. if spec.dtype is not None: dtype = backend.standardize_dtype(x.dtype) if dtype != spec.dtype: raise ValueError( f'Input {input_index} of layer "{layer_name}" ' "is incompatible with the layer: " f"expected dtype={spec.dtype}, " f"found dtype={dtype}" ) # Check specific shape axes. if spec.axes: for axis, value in spec.axes.items(): if value is not None and shape[axis] not in { value, None, }: raise ValueError( f'Input {input_index} of layer "{layer_name}" is ' f"incompatible with the layer: expected axis {axis} " f"of input shape to have value {value}, " "but received input with " f"shape {shape}" ) # Check shape. 
if spec.shape is not None: spec_shape = spec.shape if spec.allow_last_axis_squeeze: if shape and shape[-1] == 1: shape = shape[:-1] if spec_shape and spec_shape[-1] == 1: spec_shape = spec_shape[:-1] for spec_dim, dim in zip(spec_shape, shape): if spec_dim is not None and dim is not None: if spec_dim != dim: raise ValueError( f'Input {input_index} of layer "{layer_name}" is ' "incompatible with the layer: " f"expected shape={spec.shape}, " f"found shape={shape}" )
keras/keras/layers/input_spec.py/0
{ "file_path": "keras/keras/layers/input_spec.py", "repo_id": "keras", "token_count": 4864 }
172
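A small, self-contained sketch of how a layer can rely on `InputSpec` plus `assert_input_compatibility` (invoked from `Layer.__call__`) to validate its inputs. The layer name `LastDimFour` and the expected axis size are made up for illustration; assumes a working Keras 3 environment.

```python
import numpy as np
from keras import layers
from keras.layers import InputSpec


class LastDimFour(layers.Layer):
    """Toy layer that only accepts inputs whose last axis has size 4."""

    def __init__(self):
        super().__init__()
        self.input_spec = InputSpec(min_ndim=2, axes={-1: 4})

    def call(self, inputs):
        return inputs


layer = LastDimFour()
print(layer(np.ones((2, 4))).shape)  # accepted: last axis is 4

try:
    layer(np.ones((2, 3)))  # rejected: axis -1 has value 3, not 4
except ValueError as e:
    print(e)
```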
import numpy as np import pytest from absl.testing import parameterized from keras import backend from keras import layers from keras import testing from keras.losses import MeanSquaredError from keras.models import Model class BatchNormalizationTest(testing.TestCase, parameterized.TestCase): @pytest.mark.requires_trainable_backend def test_bn_basics(self): # vector case self.run_layer_test( layers.BatchNormalization, init_kwargs={ "center": True, "scale": True, }, call_kwargs={"training": True}, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=2, expected_num_non_trainable_weights=2, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=True, ) self.run_layer_test( layers.BatchNormalization, init_kwargs={ "center": False, "scale": False, }, call_kwargs={"training": True}, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=2, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=True, ) # image case, with regularizers self.run_layer_test( layers.BatchNormalization, init_kwargs={ "center": True, "scale": True, "beta_regularizer": "l2", "gamma_regularizer": "l2", }, call_kwargs={"training": True}, input_shape=(2, 4, 4, 3), expected_output_shape=(2, 4, 4, 3), expected_num_trainable_weights=2, expected_num_non_trainable_weights=2, expected_num_seed_generators=0, expected_num_losses=2, # we have 2 regularizers. supports_masking=True, ) @parameterized.product( axis=(-1, 1), input_shape=((5, 2, 3), (5, 3, 3, 2)), moving_mean_initializer=("zeros", "ones"), moving_variance_initializer=("zeros", "ones"), ) def test_correctness( self, axis, input_shape, moving_mean_initializer, moving_variance_initializer, ): # Training layer = layers.BatchNormalization( axis=axis, momentum=0, moving_mean_initializer=moving_mean_initializer, moving_variance_initializer=moving_variance_initializer, ) # Random data centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=input_shape) out = x for _ in range(3): out = layer(out, training=True) # Assert the normalization is correct. broadcast_shape = [1] * len(input_shape) broadcast_shape[axis] = input_shape[axis] out = backend.convert_to_numpy(out) out = out - np.reshape( backend.convert_to_numpy(layer.beta), broadcast_shape ) out = out / np.reshape( backend.convert_to_numpy(layer.gamma), broadcast_shape ) reduction_axes = list(range(len(input_shape))) del reduction_axes[axis] reduction_axes = tuple(reduction_axes) self.assertAllClose(np.mean(out, axis=reduction_axes), 0.0, atol=1e-3) self.assertAllClose(np.std(out, axis=reduction_axes), 1.0, atol=1e-3) self.assertAllClose(layer.moving_mean, 0.0, atol=1e-3) self.assertAllClose(layer.moving_variance, 1.0, atol=1e-3) # Inference done before training shouldn't match. inference_out = layer(x, training=False) training_out = layer(x, training=True) self.assertNotAllClose(inference_out, training_out) # Since momentum is zero, inference after training should match. 
training_out = layer(x, training=True)
        inference_out = layer(x, training=False)
        self.assertAllClose(inference_out, training_out)

        # Masked result with no training should not differ
        x[:, 1, :] = 0.0
        unmasked_out = layer(x, training=False)
        masked = layers.Masking()(x)
        masked_out = layer(masked, training=False)
        self.assertAllClose(unmasked_out, masked_out)

        # Masked result should differ from unmasked result
        unmasked_out = layer(x, training=False)
        x[:, 1, :] = 0.0
        masked = layers.Masking()(x)
        masked_out = layer(masked, training=True)
        self.assertNotAllClose(unmasked_out, masked_out)

    @parameterized.product(
        synchronized=(
            (False, True) if backend.backend() == "tensorflow" else (False,)
        ),
    )
    def test_input_fully_masked(self, synchronized):
        norm = layers.BatchNormalization(
            scale=False,
            center=False,
            synchronized=synchronized,
        )
        x = np.zeros((4, 5))
        mask = np.zeros((4,), dtype=np.float32)
        y = norm(x, mask=mask, training=True)
        self.assertAllClose(y, np.zeros_like(x, dtype=np.float32))

    @parameterized.product(run_eagerly=(True, False), mask_value=(0.0, 0.1, 1))
    @pytest.mark.requires_trainable_backend
    def test_batchnorm_ignore_masked_values(self, run_eagerly, mask_value):
        padded_data = np.array(
            [
                [
                    [1, 5],
                    [2, 5],
                    [mask_value, mask_value],
                    [mask_value, mask_value],
                ]
                for _ in range(10)
            ],
            dtype="float32",
        )
        inputs = layers.Input((None, 2))
        masked = layers.Masking(mask_value=mask_value)(inputs)
        normed = layers.BatchNormalization(momentum=0.0)(masked)
        model = Model(inputs, normed)
        loss = MeanSquaredError()
        model.compile(
            "rmsprop",
            loss=loss,
            run_eagerly=run_eagerly,
        )
        model.fit(x=padded_data, y=padded_data, batch_size=10, epochs=5)
        self.assertAllClose(model.layers[2].moving_mean.numpy(), [1.5, 5.0])
        self.assertAllClose(
            model.layers[2].moving_variance.numpy(), [0.25, 0.0]
        )

    def test_trainable_behavior(self):
        layer = layers.BatchNormalization(axis=-1, momentum=0.8, epsilon=1e-7)
        layer.build((1, 4, 4, 3))
        layer.trainable = False
        self.assertEqual(len(layer.weights), 4)
        self.assertEqual(len(layer.trainable_weights), 0)
        self.assertEqual(len(layer.non_trainable_weights), 4)

        # Random data centered on 5.0, variance 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(200, 4, 4, 3))
        out = layer(x, training=True)
        self.assertAllClose(out, x)

        layer.trainable = True
        self.assertEqual(len(layer.weights), 4)
        self.assertEqual(len(layer.trainable_weights), 2)
        self.assertEqual(len(layer.non_trainable_weights), 2)

        for _ in range(10):
            out = layer(x, training=True)

        out = backend.convert_to_numpy(out)
        out = out - np.reshape(
            backend.convert_to_numpy(layer.beta), (1, 1, 1, 3)
        )
        out = out / np.reshape(
            backend.convert_to_numpy(layer.gamma), (1, 1, 1, 3)
        )

        self.assertAllClose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-3)
        self.assertAllClose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-3)
keras/keras/layers/normalization/batch_normalization_test.py/0
{ "file_path": "keras/keras/layers/normalization/batch_normalization_test.py", "repo_id": "keras", "token_count": 3790 }
173
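The moving statistics asserted in the masked-values test above (`moving_mean == [1.5, 5.0]`, `moving_variance == [0.25, 0.0]`) can be reproduced by hand: with `momentum=0.0`, the moving mean and variance end up equal to the statistics of the unmasked timesteps only. A short NumPy check:

```python
import numpy as np

# Every sample in the test has two unmasked timesteps, [1, 5] and [2, 5];
# the two padded timesteps are masked out and must not contribute.
unmasked = np.array([[1.0, 5.0], [2.0, 5.0]])

print(unmasked.mean(axis=0))  # [1.5  5. ]  -> expected moving_mean
print(unmasked.var(axis=0))   # [0.25 0. ]  -> expected moving_variance
```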
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.layers.pooling.base_global_pooling import BaseGlobalPooling


@keras_export(
    [
        "keras.layers.GlobalAveragePooling1D",
        "keras.layers.GlobalAvgPool1D",
    ]
)
class GlobalAveragePooling1D(BaseGlobalPooling):
    """Global average pooling operation for temporal data.

    Args:
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        keepdims: A boolean, whether to keep the temporal dimension or not.
            If `keepdims` is `False` (default), the rank of the tensor is
            reduced for spatial dimensions. If `keepdims` is `True`, the
            temporal dimension is retained with length 1. The behavior is
            the same as for `tf.reduce_mean` or `np.mean`.

    Call arguments:
        inputs: A 3D tensor.
        mask: Binary tensor of shape `(batch_size, steps)` indicating whether
            a given step should be masked (excluded from the average).

    Input shape:

    - If `data_format='channels_last'`:
        3D tensor with shape:
        `(batch_size, steps, features)`
    - If `data_format='channels_first'`:
        3D tensor with shape:
        `(batch_size, features, steps)`

    Output shape:

    - If `keepdims=False`:
        2D tensor with shape `(batch_size, features)`.
    - If `keepdims=True`:
        - If `data_format="channels_last"`:
            3D tensor with shape `(batch_size, 1, features)`
        - If `data_format="channels_first"`:
            3D tensor with shape `(batch_size, features, 1)`

    Example:

    >>> x = np.random.rand(2, 3, 4)
    >>> y = keras.layers.GlobalAveragePooling1D()(x)
    >>> y.shape
    (2, 4)
    """

    def __init__(self, data_format=None, keepdims=False, **kwargs):
        super().__init__(
            pool_dimensions=1,
            data_format=data_format,
            keepdims=keepdims,
            **kwargs,
        )
        self.supports_masking = True

    def call(self, inputs, mask=None):
        steps_axis = 1 if self.data_format == "channels_last" else 2
        if mask is not None:
            mask = backend.cast(mask, inputs[0].dtype)
            mask = ops.expand_dims(
                mask, 2 if self.data_format == "channels_last" else 1
            )
            inputs *= mask
            return ops.sum(
                inputs, axis=steps_axis, keepdims=self.keepdims
            ) / ops.sum(mask, axis=steps_axis, keepdims=self.keepdims)
        else:
            return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims)

    def compute_mask(self, inputs, mask=None):
        return None
keras/keras/layers/pooling/global_average_pooling1d.py/0
{ "file_path": "keras/keras/layers/pooling/global_average_pooling1d.py", "repo_id": "keras", "token_count": 1374 }
174
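The masked branch of `call()` above is just a weighted average over the unmasked steps. A NumPy sketch of the same arithmetic, with made-up values:

```python
import numpy as np

# (batch, steps, features), with the last step of the second sample masked out.
x = np.array(
    [
        [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
        [[1.0, 1.0], [9.0, 9.0], [0.0, 0.0]],
    ]
)
mask = np.array([[1.0, 1.0, 1.0], [1.0, 1.0, 0.0]])  # 1 = keep, 0 = masked

# Zero out masked steps, then divide by the number of unmasked steps.
masked_x = x * mask[..., None]
pooled = masked_x.sum(axis=1) / mask[..., None].sum(axis=1)
print(pooled)
# [[3. 4.]
#  [5. 5.]]
```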
import numpy as np import pytest from absl.testing import parameterized from tensorflow import data as tf_data from keras import backend from keras import layers from keras import testing class CenterCropTest(testing.TestCase, parameterized.TestCase): def np_center_crop(self, img, h_new, w_new, data_format="channels_last"): img = np.array(img) if img.ndim == 4: if data_format == "channels_last": _, h, w = img.shape[:3] else: _, h, w = img.shape[1:] else: if data_format == "channels_last": h, w = img.shape[:2] else: h, w = img.shape[1:] h_start = (h - h_new) // 2 w_start = (w - w_new) // 2 if data_format == "channels_last": return img[ ..., h_start : h_start + h_new, w_start : w_start + w_new, : ] else: return img[ ..., h_start : h_start + h_new, w_start : w_start + w_new ] @pytest.mark.requires_trainable_backend def test_center_crop_basics(self): self.run_layer_test( layers.CenterCrop, init_kwargs={ "height": 6, "width": 6, "data_format": "channels_last", }, input_shape=(2, 12, 12, 3), expected_output_shape=(2, 6, 6, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, ) self.run_layer_test( layers.CenterCrop, init_kwargs={ "height": 7, "width": 7, "data_format": "channels_first", }, input_shape=(2, 3, 13, 13), expected_output_shape=(2, 3, 7, 7), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, ) @parameterized.parameters( [ ((5, 7), "channels_first"), ((5, 7), "channels_last"), ((4, 9), "channels_first"), ((9, 4), "channels_last"), ] ) def test_center_crop_correctness(self, size, data_format): # batched case if data_format == "channels_first": img = np.random.random((2, 3, 9, 11)) else: img = np.random.random((2, 9, 11, 3)) out = layers.CenterCrop( size[0], size[1], data_format=data_format, )(img) if data_format == "channels_first": img_transpose = np.transpose(img, (0, 2, 3, 1)) ref_out = np.transpose( self.np_center_crop(img_transpose, size[0], size[1]), (0, 3, 1, 2), ) else: ref_out = self.np_center_crop(img, size[0], size[1]) self.assertAllClose(ref_out, out) # unbatched case if data_format == "channels_first": img = np.random.random((3, 9, 11)) else: img = np.random.random((9, 11, 3)) out = layers.CenterCrop( size[0], size[1], data_format=data_format, )(img) if data_format == "channels_first": img_transpose = np.transpose(img, (1, 2, 0)) ref_out = np.transpose( self.np_center_crop( img_transpose, size[0], size[1], ), (2, 0, 1), ) else: ref_out = self.np_center_crop( img, size[0], size[1], ) self.assertAllClose(ref_out, out) @parameterized.parameters( [ ((15, 10), "channels_first"), ((10, 17), "channels_last"), ] ) def test_input_smaller_than_crop_box(self, size, data_format): """Output should equal resizing with crop_to_aspect ratio.""" # batched case if data_format == "channels_first": img = np.random.random((2, 3, 9, 11)) else: img = np.random.random((2, 9, 11, 3)) out = layers.CenterCrop( size[0], size[1], data_format=data_format, )(img) ref_out = layers.Resizing( size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True )(img) self.assertAllClose(ref_out, out) # unbatched case if data_format == "channels_first": img = np.random.random((3, 9, 11)) else: img = np.random.random((9, 11, 3)) out = layers.CenterCrop( size[0], size[1], data_format=data_format, )(img) ref_out = layers.Resizing( size[0], size[1], data_format=data_format, crop_to_aspect_ratio=True )(img) 
self.assertAllClose(ref_out, out) def test_tf_data_compatibility(self): if backend.config.image_data_format() == "channels_last": input_shape = (2, 10, 12, 3) output_shape = (2, 8, 9, 3) else: input_shape = (2, 3, 10, 12) output_shape = (2, 3, 8, 9) layer = layers.CenterCrop(8, 9) input_data = np.random.random(input_shape) ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output = output.numpy() self.assertEqual(tuple(output.shape), output_shape) def test_list_compatibility(self): if backend.config.image_data_format() == "channels_last": images = [ np.random.rand(10, 10, 3), np.random.rand(10, 10, 3), ] output_shape = (2, 6, 5, 3) else: images = [ np.random.rand(3, 10, 10), np.random.rand(3, 10, 10), ] output_shape = (2, 3, 6, 5) output = layers.CenterCrop(height=6, width=5)(images) ref_output = self.np_center_crop( images, 6, 5, data_format=backend.config.image_data_format() ) self.assertEqual(tuple(output.shape), output_shape) self.assertAllClose(ref_output, output)
keras/keras/layers/preprocessing/center_crop_test.py/0
{ "file_path": "keras/keras/layers/preprocessing/center_crop_test.py", "repo_id": "keras", "token_count": 3590 }
175
import numpy as np import pytest from tensorflow import data as tf_data from keras import backend from keras import layers from keras import testing class RandomBrightnessTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_layer(self): self.run_layer_test( layers.RandomBrightness, init_kwargs={ "factor": 0.75, "value_range": (20, 200), "seed": 1, }, input_shape=(8, 3, 4, 3), supports_masking=False, expected_output_shape=(8, 3, 4, 3), ) def test_random_brightness_inference(self): seed = 3481 layer = layers.RandomBrightness([0, 1.0]) np.random.seed(seed) inputs = np.random.randint(0, 255, size=(224, 224, 3)) output = layer(inputs, training=False) self.assertAllClose(inputs, output) def test_output(self): seed = 2390 # Always scale up, but randomly between 0 ~ 255 layer = layers.RandomBrightness([0, 1.0]) np.random.seed(seed) inputs = np.random.randint(0, 255, size=(224, 224, 3)) output = backend.convert_to_numpy(layer(inputs)) diff = output - inputs diff = backend.convert_to_numpy(diff) self.assertTrue(np.amin(diff) >= 0) self.assertTrue(np.mean(diff) > 0) # Always scale down, but randomly between 0 ~ 255 layer = layers.RandomBrightness([-1.0, 0.0]) np.random.seed(seed) inputs = np.random.randint(0, 255, size=(224, 224, 3)) output = backend.convert_to_numpy(layer(inputs)) diff = output - inputs self.assertTrue(np.amax(diff) <= 0) self.assertTrue(np.mean(diff) < 0) def test_tf_data_compatibility(self): layer = layers.RandomBrightness(factor=0.5, seed=1337) input_data = np.random.random((2, 8, 8, 3)) ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output.numpy()
keras/keras/layers/preprocessing/random_brightness_test.py/0
{ "file_path": "keras/keras/layers/preprocessing/random_brightness_test.py", "repo_id": "keras", "token_count": 956 }
176
import numpy as np import pytest from absl.testing import parameterized from tensorflow import data as tf_data from keras import Sequential from keras import backend from keras import layers from keras import testing class ResizingTest(testing.TestCase, parameterized.TestCase): def test_resizing_basics(self): self.run_layer_test( layers.Resizing, init_kwargs={ "height": 6, "width": 6, "data_format": "channels_last", "interpolation": "bicubic", "crop_to_aspect_ratio": True, }, input_shape=(2, 12, 12, 3), expected_output_shape=(2, 6, 6, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) self.run_layer_test( layers.Resizing, init_kwargs={ "height": 6, "width": 6, "data_format": "channels_first", "interpolation": "bilinear", "crop_to_aspect_ratio": True, }, input_shape=(2, 3, 12, 12), expected_output_shape=(2, 3, 6, 6), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) self.run_layer_test( layers.Resizing, init_kwargs={ "height": 6, "width": 6, "data_format": "channels_last", "interpolation": "nearest", "crop_to_aspect_ratio": False, }, input_shape=(2, 12, 12, 3), expected_output_shape=(2, 6, 6, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) @pytest.mark.skipif( backend.backend() == "torch", reason="Torch does not support lanczos." ) def test_resizing_basics_lanczos5(self): self.run_layer_test( layers.Resizing, init_kwargs={ "height": 6, "width": 6, "data_format": "channels_first", "interpolation": "lanczos5", "crop_to_aspect_ratio": False, }, input_shape=(2, 3, 12, 12), expected_output_shape=(2, 3, 6, 6), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, expected_num_losses=0, supports_masking=False, run_training_check=False, ) @parameterized.parameters([("channels_first",), ("channels_last",)]) def test_down_sampling_numeric(self, data_format): img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype(np.float32) if data_format == "channels_first": img = img.transpose(0, 3, 1, 2) out = layers.Resizing( height=2, width=2, interpolation="nearest", data_format=data_format )(img) ref_out = ( np.asarray([[5, 7], [13, 15]]) .astype(np.float32) .reshape((1, 2, 2, 1)) ) if data_format == "channels_first": ref_out = ref_out.transpose(0, 3, 1, 2) self.assertAllClose(ref_out, out) @parameterized.parameters([("channels_first",), ("channels_last",)]) def test_up_sampling_numeric(self, data_format): img = np.reshape(np.arange(0, 4), (1, 2, 2, 1)).astype(np.float32) if data_format == "channels_first": img = img.transpose(0, 3, 1, 2) out = layers.Resizing( height=4, width=4, interpolation="nearest", data_format=data_format, )(img) ref_out = ( np.asarray([[0, 0, 1, 1], [0, 0, 1, 1], [2, 2, 3, 3], [2, 2, 3, 3]]) .astype(np.float32) .reshape((1, 4, 4, 1)) ) if data_format == "channels_first": ref_out = ref_out.transpose(0, 3, 1, 2) self.assertAllClose(ref_out, out) @parameterized.parameters([("channels_first",), ("channels_last",)]) def test_crop_to_aspect_ratio(self, data_format): img = np.reshape(np.arange(0, 16), (1, 4, 4, 1)).astype("float32") if data_format == "channels_first": img = img.transpose(0, 3, 1, 2) out = layers.Resizing( height=4, width=2, 
interpolation="nearest", data_format=data_format, crop_to_aspect_ratio=True, )(img) ref_out = ( np.asarray( [ [1, 2], [5, 6], [9, 10], [13, 14], ] ) .astype("float32") .reshape((1, 4, 2, 1)) ) if data_format == "channels_first": ref_out = ref_out.transpose(0, 3, 1, 2) self.assertAllClose(ref_out, out) @parameterized.parameters([("channels_first",), ("channels_last",)]) def test_unbatched_image(self, data_format): img = np.reshape(np.arange(0, 16), (4, 4, 1)).astype("float32") if data_format == "channels_first": img = img.transpose(2, 0, 1) out = layers.Resizing( 2, 2, interpolation="nearest", data_format=data_format )(img) ref_out = ( np.asarray( [ [5, 7], [13, 15], ] ) .astype("float32") .reshape((2, 2, 1)) ) if data_format == "channels_first": ref_out = ref_out.transpose(2, 0, 1) self.assertAllClose(ref_out, out) def test_tf_data_compatibility(self): if backend.config.image_data_format() == "channels_last": input_shape = (2, 10, 12, 3) output_shape = (2, 8, 9, 3) else: input_shape = (2, 3, 10, 12) output_shape = (2, 3, 8, 9) layer = layers.Resizing(8, 9) input_data = np.random.random(input_shape) ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output = output.numpy() self.assertEqual(tuple(output.shape), output_shape) @pytest.mark.skipif( backend.backend() != "tensorflow", reason="Sequential + tf.data only works with TF backend", ) def test_tf_data_compatibility_sequential(self): # Test compatibility when wrapping in a Sequential # https://github.com/keras-team/keras/issues/347 if backend.config.image_data_format() == "channels_last": input_shape = (2, 10, 12, 3) output_shape = (2, 8, 9, 3) else: input_shape = (2, 3, 10, 12) output_shape = (2, 3, 8, 9) layer = layers.Resizing(8, 9) input_data = np.random.random(input_shape) ds = ( tf_data.Dataset.from_tensor_slices(input_data) .batch(2) .map(Sequential([layer])) ) for output in ds.take(1): output = output.numpy() self.assertEqual(tuple(output.shape), output_shape)
keras/keras/layers/preprocessing/resizing_test.py/0
{ "file_path": "keras/keras/layers/preprocessing/resizing_test.py", "repo_id": "keras", "token_count": 4178 }
177
import numpy as np import pytest from keras import backend from keras import layers from keras import testing class GaussianNoiseTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_gaussian_noise_basics(self): self.run_layer_test( layers.GaussianNoise, init_kwargs={ "stddev": 0.2, }, input_shape=(2, 3), expected_output_shape=(2, 3), expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_seed_generators=1, expected_num_losses=0, supports_masking=True, ) def test_gaussian_noise_correctness(self): inputs = np.ones((20, 500)) layer = layers.GaussianNoise(0.3, seed=1337) outputs = layer(inputs, training=True) self.assertAllClose( np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02 )
keras/keras/layers/regularization/gaussian_noise_test.py/0
{ "file_path": "keras/keras/layers/regularization/gaussian_noise_test.py", "repo_id": "keras", "token_count": 474 }
178
from keras import ops from keras.api_export import keras_export from keras.backend.common.keras_tensor import KerasTensor from keras.layers.layer import Layer from keras.ops import operation_utils @keras_export("keras.layers.Reshape") class Reshape(Layer): """Layer that reshapes inputs into the given shape. Args: target_shape: Target shape. Tuple of integers, does not include the samples dimension (batch size). Input shape: Arbitrary, although all dimensions in the input shape must be known/fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples/batch size axis) when using this layer as the first layer in a model. Output shape: `(batch_size, *target_shape)` Example: >>> x = keras.Input(shape=(12,)) >>> y = keras.layers.Reshape((3, 4))(x) >>> y.shape (None, 3, 4) >>> # also supports shape inference using `-1` as dimension >>> y = keras.layers.Reshape((-1, 2, 2))(x) >>> y.shape (None, 3, 2, 2) """ def __init__(self, target_shape, **kwargs): super().__init__(**kwargs) self.target_shape = tuple(target_shape) def compute_output_shape(self, input_shape): return ( input_shape[0], *operation_utils.compute_reshape_output_shape( input_shape[1:], self.target_shape, "target_shape" ), ) def compute_output_spec(self, inputs): output_shape = self.compute_output_shape(inputs.shape) return KerasTensor( shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse ) def build(self, input_shape): sample_output_shape = operation_utils.compute_reshape_output_shape( input_shape[1:], self.target_shape, "target_shape" ) self._resolved_target_shape = tuple( -1 if d is None else d for d in sample_output_shape ) self.built = True def call(self, inputs): return ops.reshape( inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape ) def get_config(self): config = {"target_shape": self.target_shape} base_config = super().get_config() return {**base_config, **config}
keras/keras/layers/reshaping/reshape.py/0
{ "file_path": "keras/keras/layers/reshaping/reshape.py", "repo_id": "keras", "token_count": 973 }
179
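A short usage sketch of the `-1` inference described in the `Reshape` docstring above; assumes a working Keras 3 environment, and the shapes are purely illustrative.

```python
import numpy as np
from keras import layers

x = np.arange(24).reshape((2, 12)).astype("float32")  # batch of 2, 12 features each

# The target shape excludes the batch axis; -1 is resolved from the remaining size.
layer = layers.Reshape((-1, 2, 2))
print(layer(x).shape)                          # (2, 3, 2, 2)
print(layer.compute_output_shape((None, 12)))  # (None, 3, 2, 2)
```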
import numpy as np import pytest from keras import initializers from keras import layers from keras import testing class SimpleRNNTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.Bidirectional, init_kwargs={"layer": layers.SimpleRNN(4)}, input_shape=(3, 2, 4), expected_output_shape=(3, 8), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, supports_masking=True, ) self.run_layer_test( layers.Bidirectional, init_kwargs={ "layer": layers.SimpleRNN(4), "backward_layer": layers.SimpleRNN(4, go_backwards=True), "merge_mode": "sum", }, input_shape=(3, 2, 4), expected_output_shape=(3, 4), expected_num_trainable_weights=6, expected_num_non_trainable_weights=0, supports_masking=True, ) def test_correctness(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") forward_layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) layer = layers.Bidirectional( layer=forward_layer, ) output = layer(sequence) self.assertAllClose( np.array( [ [0.39687276, 0.39687276, 0.10004295, 0.10004295], [0.7237238, 0.7237238, 0.53391594, 0.53391594], ] ), output, ) layer = layers.Bidirectional(layer=forward_layer, merge_mode="ave") output = layer(sequence) self.assertAllClose( np.array([[0.24845785, 0.24845785], [0.6288199, 0.6288199]]), output, ) layer = layers.Bidirectional(layer=forward_layer, merge_mode=None) output1, output2 = layer(sequence) self.assertAllClose( np.array([[0.39687276, 0.39687276], [0.7237238, 0.7237238]]), output1, ) self.assertAllClose( np.array([[0.10004295, 0.10004295], [0.53391594, 0.53391594]]), output2, ) backward_layer = layers.SimpleRNN( 2, kernel_initializer=initializers.Constant(0.03), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.01), go_backwards=True, ) layer = layers.Bidirectional( layer=forward_layer, backward_layer=backward_layer, merge_mode="mul" ) output = layer(sequence) self.assertAllClose( np.array([[0.08374989, 0.08374989], [0.6740834, 0.6740834]]), output, ) forward_layer = layers.GRU( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_sequences=True, ) layer = layers.Bidirectional(layer=forward_layer, merge_mode="sum") output = layer(sequence) self.assertAllClose( np.array( [ [ [0.20937867, 0.20937867], [0.34462988, 0.34462988], [0.40290534, 0.40290534], ], [ [0.59829646, 0.59829646], [0.6734641, 0.6734641], [0.6479671, 0.6479671], ], ] ), output, ) def test_statefulness(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") forward_layer = layers.LSTM( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), stateful=True, ) layer = layers.Bidirectional(layer=forward_layer) layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.26234663, 0.26234663, 0.16959146, 0.16959146], [0.6137073, 0.6137073, 0.5381646, 0.5381646], ] ), output, ) layer.reset_state() layer(sequence) output = layer(sequence) self.assertAllClose( np.array( [ [0.26234663, 0.26234663, 0.16959146, 0.16959146], [0.6137073, 0.6137073, 0.5381646, 0.5381646], ] ), output, ) def test_pass_initial_state(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") initial_state = [ np.arange(4).reshape((2, 
2)).astype("float32") * 1, np.arange(4).reshape((2, 2)).astype("float32") * 2, np.arange(4).reshape((2, 2)).astype("float32") * 3, np.arange(4).reshape((2, 2)).astype("float32") * 4, ] forward_layer = layers.LSTM( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) layer = layers.Bidirectional( layer=forward_layer, ) output = layer(sequence, initial_state=initial_state) self.assertAllClose( np.array( [ [0.20794602, 0.4577124, 0.14046375, 0.48191673], [0.6682636, 0.6711909, 0.60943645, 0.60950446], ] ), output, ) def test_masking(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") forward_layer = layers.GRU( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), ) layer = layers.Bidirectional(layer=forward_layer) mask = np.array([[True, True, False, True], [True, False, False, True]]) output = layer(sequence, mask=mask) self.assertAllClose( np.array( [ [0.19393763, 0.19393763, 0.11669192, 0.11669192], [0.30818558, 0.30818558, 0.28380975, 0.28380975], ] ), output, ) def test_return_state(self): sequence = np.arange(24).reshape((2, 4, 3)).astype("float32") forward_layer = layers.LSTM( 2, kernel_initializer=initializers.Constant(0.01), recurrent_initializer=initializers.Constant(0.02), bias_initializer=initializers.Constant(0.03), return_state=True, ) layer = layers.Bidirectional(layer=forward_layer) output, h1, c1, h2, c2 = layer(sequence) self.assertAllClose( np.array( [ [0.1990008, 0.1990008, 0.12659755, 0.12659755], [0.52335435, 0.52335435, 0.44717982, 0.44717982], ] ), output, ) self.assertAllClose( np.array([[0.1990008, 0.1990008], [0.52335435, 0.52335435]]), h1, ) self.assertAllClose( np.array([[0.35567185, 0.35567185], [1.0492687, 1.0492687]]), c1, ) self.assertAllClose( np.array([[0.12659755, 0.12659755], [0.44717982, 0.44717982]]), h2, ) self.assertAllClose( np.array([[0.2501858, 0.2501858], [0.941473, 0.941473]]), c2, )
keras/keras/layers/rnn/bidirectional_test.py/0
{ "file_path": "keras/keras/layers/rnn/bidirectional_test.py", "repo_id": "keras", "token_count": 4597 }
180
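For reference, a sketch of how `merge_mode` changes the output structure, consistent with the expectations in the tests above (assumes a working Keras 3 environment; the input shape is arbitrary):

```python
import numpy as np
from keras import layers

sequence = np.random.random((3, 2, 4)).astype("float32")

concat_out = layers.Bidirectional(layers.SimpleRNN(4))(sequence)  # default merge_mode="concat"
sum_out = layers.Bidirectional(layers.SimpleRNN(4), merge_mode="sum")(sequence)
fwd_out, bwd_out = layers.Bidirectional(layers.SimpleRNN(4), merge_mode=None)(sequence)

print(concat_out.shape)              # (3, 8): forward and backward features stacked
print(sum_out.shape)                 # (3, 4)
print(fwd_out.shape, bwd_out.shape)  # (3, 4) (3, 4): two separate tensors
```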
import numpy as np import pytest from keras import layers from keras import ops from keras import testing class OneStateRNNCell(layers.Layer): def __init__(self, units, state_size=None, **kwargs): super().__init__(**kwargs) self.units = units self.state_size = state_size if state_size else units def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="ones", name="kernel", ) self.recurrent_kernel = self.add_weight( shape=(self.units, self.units), initializer="ones", name="recurrent_kernel", ) self.built = True def call(self, inputs, states): prev_output = states[0] h = ops.matmul(inputs, self.kernel) output = h + ops.matmul(prev_output, self.recurrent_kernel) return output, [output] class TwoStatesRNNCell(layers.Layer): def __init__(self, units, state_size=None, **kwargs): super().__init__(**kwargs) self.units = units self.state_size = state_size if state_size else [units, units] self.output_size = units def build(self, input_shape): self.kernel = self.add_weight( shape=(input_shape[-1], self.units), initializer="ones", name="kernel", ) self.recurrent_kernel_1 = self.add_weight( shape=(self.units, self.units), initializer="ones", name="recurrent_kernel_1", ) self.recurrent_kernel_2 = self.add_weight( shape=(self.units, self.units), initializer="ones", name="recurrent_kernel_2", ) self.built = True def call(self, inputs, states): prev_1 = states[0] prev_2 = states[0] h = ops.matmul(inputs, self.kernel) output_1 = h + ops.matmul(prev_1, self.recurrent_kernel_1) output_2 = h + ops.matmul(prev_2, self.recurrent_kernel_2) output = output_1 + output_2 return output, [output_1, output_2] class RNNTest(testing.TestCase): @pytest.mark.requires_trainable_backend def test_basics(self): self.run_layer_test( layers.RNN, init_kwargs={"cell": OneStateRNNCell(5, state_size=5)}, input_shape=(3, 2, 4), expected_output_shape=(3, 5), expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": OneStateRNNCell(5, state_size=[5])}, input_shape=(3, 2, 4), expected_output_shape=(3, 5), expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": OneStateRNNCell(5, state_size=(5,))}, input_shape=(3, 2, 4), expected_output_shape=(3, 5), expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": OneStateRNNCell(5), "return_sequences": True}, input_shape=(3, 2, 4), expected_output_shape=(3, 2, 5), expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={ "cell": OneStateRNNCell(5), "go_backwards": True, "unroll": True, }, input_shape=(3, 2, 4), expected_output_shape=(3, 5), expected_num_trainable_weights=2, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": TwoStatesRNNCell(5, state_size=[5, 5])}, input_shape=(3, 2, 4), expected_output_shape=(3, 5), expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": TwoStatesRNNCell(5, state_size=(5, 5))}, input_shape=(3, 2, 4), 
expected_output_shape=(3, 5), expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) self.run_layer_test( layers.RNN, init_kwargs={"cell": TwoStatesRNNCell(5), "return_sequences": True}, input_shape=(3, 2, 4), expected_output_shape=(3, 2, 5), expected_num_trainable_weights=3, expected_num_non_trainable_weights=0, expected_num_seed_generators=0, supports_masking=True, ) def test_compute_output_shape_single_state(self): sequence = np.ones((3, 4, 5)) layer = layers.RNN(OneStateRNNCell(8), return_sequences=False) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape, (3, 8)) layer = layers.RNN(OneStateRNNCell(8), return_sequences=True) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape, (3, 4, 8)) layer = layers.RNN( OneStateRNNCell(8), return_sequences=False, return_state=True ) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape[0], (3, 8)) self.assertEqual(output_shape[1], (3, 8)) layer = layers.RNN( OneStateRNNCell(8), return_sequences=True, return_state=True ) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape[0], (3, 4, 8)) self.assertEqual(output_shape[1], (3, 8)) def test_compute_output_shape_two_states(self): sequence = np.ones((3, 4, 5)) layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=False) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape, (3, 8)) layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=True) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape, (3, 4, 8)) layer = layers.RNN( TwoStatesRNNCell(8), return_sequences=False, return_state=True ) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape[0], (3, 8)) self.assertEqual(output_shape[1], (3, 8)) self.assertEqual(output_shape[2], (3, 8)) layer = layers.RNN( TwoStatesRNNCell(8), return_sequences=True, return_state=True ) output_shape = layer.compute_output_shape(sequence.shape) self.assertEqual(output_shape[0], (3, 4, 8)) self.assertEqual(output_shape[1], (3, 8)) self.assertEqual(output_shape[2], (3, 8)) def test_dynamic_shapes(self): sequence_shape = (None, None, 3) layer = layers.RNN(OneStateRNNCell(8), return_sequences=False) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape, (None, 8)) layer = layers.RNN(OneStateRNNCell(8), return_sequences=True) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape, (None, None, 8)) layer = layers.RNN( OneStateRNNCell(8), return_sequences=False, return_state=True ) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape[0], (None, 8)) self.assertEqual(output_shape[1], (None, 8)) layer = layers.RNN( OneStateRNNCell(8), return_sequences=True, return_state=True ) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape[0], (None, None, 8)) self.assertEqual(output_shape[1], (None, 8)) layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=False) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape, (None, 8)) layer = layers.RNN(TwoStatesRNNCell(8), return_sequences=True) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape, (None, None, 8)) layer = layers.RNN( TwoStatesRNNCell(8), return_sequences=False, return_state=True ) output_shape = 
layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape[0], (None, 8)) self.assertEqual(output_shape[1], (None, 8)) self.assertEqual(output_shape[2], (None, 8)) layer = layers.RNN( TwoStatesRNNCell(8), return_sequences=True, return_state=True ) output_shape = layer.compute_output_shape(sequence_shape) self.assertEqual(output_shape[0], (None, None, 8)) self.assertEqual(output_shape[1], (None, 8)) self.assertEqual(output_shape[2], (None, 8)) def test_forward_pass_single_state(self): sequence = np.ones((1, 2, 3)) layer = layers.RNN(OneStateRNNCell(2), return_sequences=False) output = layer(sequence) self.assertAllClose(np.array([[9.0, 9.0]]), output) layer = layers.RNN(OneStateRNNCell(2), return_sequences=True) output = layer(sequence) self.assertAllClose(np.array([[[3.0, 3.0], [9.0, 9.0]]]), output) layer = layers.RNN( OneStateRNNCell(2), return_sequences=False, return_state=True ) output, state = layer(sequence) self.assertAllClose(np.array([[9.0, 9.0]]), output) self.assertAllClose(np.array([[9.0, 9.0]]), state) layer = layers.RNN( OneStateRNNCell(2), return_sequences=True, return_state=True ) output, state = layer(sequence) self.assertAllClose(np.array([[[3.0, 3.0], [9.0, 9.0]]]), output) self.assertAllClose(np.array([[9.0, 9.0]]), state) def test_forward_pass_two_states(self): sequence = np.ones((1, 2, 3)) layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False) output = layer(sequence) self.assertAllClose(np.array([[18.0, 18.0]]), output) layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=True) output = layer(sequence) self.assertAllClose(np.array([[[6.0, 6.0], [18.0, 18.0]]]), output) layer = layers.RNN( TwoStatesRNNCell(2), return_sequences=False, return_state=True ) output, state1, state2 = layer(sequence) self.assertAllClose(np.array([[18.0, 18.0]]), output) self.assertAllClose(np.array([[9.0, 9.0]]), state1) self.assertAllClose(np.array([[9.0, 9.0]]), state2) layer = layers.RNN( TwoStatesRNNCell(2), return_sequences=True, return_state=True ) output, state1, state2 = layer(sequence) self.assertAllClose(np.array([[[6.0, 6.0], [18.0, 18.0]]]), output) self.assertAllClose(np.array([[9.0, 9.0]]), state1) self.assertAllClose(np.array([[9.0, 9.0]]), state2) def test_passing_initial_state_single_state(self): sequence = np.ones((2, 3, 2)) state = np.ones((2, 2)) layer = layers.RNN(OneStateRNNCell(2), return_sequences=False) output = layer(sequence, initial_state=state) self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), output) layer = layers.RNN( OneStateRNNCell(2), return_sequences=False, return_state=True ) output, state = layer(sequence, initial_state=state) self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), output) self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state) def test_passing_initial_state_two_states(self): sequence = np.ones((2, 3, 2)) state = [np.ones((2, 2)), np.ones((2, 2))] layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False) output = layer(sequence, initial_state=state) self.assertAllClose(np.array([[44.0, 44.0], [44.0, 44.0]]), output) layer = layers.RNN( TwoStatesRNNCell(2), return_sequences=False, return_state=True ) output, state_1, state_2 = layer(sequence, initial_state=state) self.assertAllClose(np.array([[44.0, 44.0], [44.0, 44.0]]), output) self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state_1) self.assertAllClose(np.array([[22.0, 22.0], [22.0, 22.0]]), state_2) def test_statefulness_single_state(self): sequence = np.ones((1, 2, 3)) layer = layers.RNN(OneStateRNNCell(2), 
stateful=True) layer(sequence) output = layer(sequence) self.assertAllClose(np.array([[45.0, 45.0]]), output) layer = layers.RNN(OneStateRNNCell(2), stateful=True, return_state=True) layer(sequence) output, state = layer(sequence) self.assertAllClose(np.array([[45.0, 45.0]]), output) self.assertAllClose(np.array([[45.0, 45.0]]), state) def test_statefulness_two_states(self): sequence = np.ones((1, 2, 3)) layer = layers.RNN(TwoStatesRNNCell(2), stateful=True) layer(sequence) output = layer(sequence) self.assertAllClose(np.array([[90.0, 90.0]]), output) layer = layers.RNN( TwoStatesRNNCell(2), stateful=True, return_state=True ) layer(sequence) output, state_1, state_2 = layer(sequence) self.assertAllClose(np.array([[90.0, 90.0]]), output) self.assertAllClose(np.array([[45.0, 45.0]]), state_1) self.assertAllClose(np.array([[45.0, 45.0]]), state_2) def test_go_backwards(self): sequence = np.arange(24).reshape((2, 3, 4)).astype("float32") layer = layers.RNN(OneStateRNNCell(2), go_backwards=True) layer(sequence) output = layer(sequence) self.assertAllClose(np.array([[202.0, 202.0], [538.0, 538.0]]), output) layer = layers.RNN(OneStateRNNCell(2), stateful=True, return_state=True) layer(sequence) output, state = layer(sequence) self.assertAllClose( np.array([[954.0, 954.0], [3978.0, 3978.0]]), output ) self.assertAllClose(np.array([[954.0, 954.0], [3978.0, 3978.0]]), state) def test_serialization(self): layer = layers.RNN(TwoStatesRNNCell(2), return_sequences=False) self.run_class_serialization_test(layer) layer = layers.RNN(OneStateRNNCell(2), return_sequences=False) self.run_class_serialization_test(layer) # TODO: test masking
keras/keras/layers/rnn/rnn_test.py/0
{ "file_path": "keras/keras/layers/rnn/rnn_test.py", "repo_id": "keras", "token_count": 7348 }
181
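The `[[9.0, 9.0]]` expectation in `test_forward_pass_single_state` can be traced by hand: with all-ones weights and all-ones inputs of shape `(1, 2, 3)`, each step adds the input row-sum (3) to the previous state times a ones matrix. A NumPy re-derivation that mirrors `OneStateRNNCell` without using the Keras cell itself:

```python
import numpy as np

kernel = np.ones((3, 2))            # input projection, as in OneStateRNNCell.build()
recurrent_kernel = np.ones((2, 2))  # state projection

state = np.zeros((1, 2))
for _ in range(2):                  # two timesteps of all-ones input
    x_t = np.ones((1, 3))
    state = x_t @ kernel + state @ recurrent_kernel

print(state)  # [[9. 9.]] -- step 1 gives [[3. 3.]], step 2 gives 3 + (3 + 3) = 9
```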