# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras_cv.models import DeepLabV3Plus
from keras_cv.models import ImageClassifier
from keras_cv.models import RetinaNet
from keras_cv.models import YOLOV8Detector
from keras_cv.tests.test_case import TestCase
from keras_cv.utils import preset_utils
class PresetUtilsTest(TestCase):
@parameterized.parameters(
(ImageClassifier, "resnet50_v2_imagenet_classifier", "classification"),
(
ImageClassifier,
"efficientnetv2_s_imagenet_classifier",
"classification",
),
(
ImageClassifier,
"mobilenet_v3_large_imagenet_classifier",
"classification",
),
(YOLOV8Detector, "yolo_v8_m_pascalvoc", "detection"),
(RetinaNet, "retinanet_resnet50_pascalvoc", "detection"),
(DeepLabV3Plus, "deeplab_v3_plus_resnet50_pascalvoc", "segmentation"),
)
@pytest.mark.large
def test_preset_saving(self, cls, preset_name, task_type):
save_dir = self.get_temp_dir()
if task_type == "detection":
model = cls.from_preset(preset_name, bounding_box_format="xywh")
else:
model = cls.from_preset(preset_name)
preset_utils.save_to_preset(model, save_dir)
# Check existence of files
self.assertTrue(os.path.exists(os.path.join(save_dir, "config.json")))
self.assertTrue(
os.path.exists(os.path.join(save_dir, "model.weights.h5"))
)
self.assertTrue(os.path.exists(os.path.join(save_dir, "metadata.json")))
# Check the model config (`config.json`)
with open(os.path.join(save_dir, "config.json"), "r") as f:
config_json = f.read()
self.assertTrue(
"build_config" not in config_json
) # Test on raw json to include nested keys
self.assertTrue(
"compile_config" not in config_json
) # Test on raw json to include nested keys
config = json.loads(config_json)
self.assertEqual(config["weights"], "model.weights.h5")
# Try loading the model from preset directory
restored_model = preset_utils.load_from_preset(save_dir)
input_batch = np.ones(shape=(2, 224, 224, 3))
expected_output = model(input_batch)
restored_output = restored_model(input_batch)
self.assertAllClose(expected_output, restored_output)
def test_preset_errors(self):
with self.assertRaisesRegex(ValueError, "must be a string"):
ImageClassifier.from_preset(ImageClassifier)
with self.assertRaisesRegex(ValueError, "Unknown preset identifier"):
ImageClassifier.from_preset("taggle://rednet/rednet/rednet")
# End of file: keras-cv/keras_cv/utils/preset_utils_test.py
import json
import os
import sys
import tensorboard as tb
from absl import flags
flags.DEFINE_string(
"model_name", None, "The name of the KerasCV.model that was trained"
)
flags.DEFINE_string(
"tensorboard_logs_path", None, "Path to tensorboard logs to load"
)
flags.DEFINE_string("training_script_path", None, "Path to the training script")
flags.DEFINE_string(
"script_version",
None,
"commit hash of the latest commit in KerasCV/master "
"for the training script",
)
flags.DEFINE_string(
"weights_version",
None,
"The version of the training script used to produce the latest weights. "
"For example, v0",
)
flags.DEFINE_string(
"contributor",
None,
"The GitHub username of the contributor of these results",
)
flags.DEFINE_string(
"accelerators", None, "The number of accelerators used for training."
)
FLAGS = flags.FLAGS
FLAGS(sys.argv)
model_name = FLAGS.model_name or input(
"Input the name of the KerasCV.model that was trained\n"
)
weights_version = FLAGS.weights_version or input(
"Input the weights version for your script\n"
)
training_script_path = FLAGS.training_script_path or input(
"Input the path to your training script\n"
)
full_training_script_path = os.path.abspath(training_script_path)
# Build an experiment name.
# This will be structured as task/training_script_name/model_name-version
training_script_rooted_at_training = full_training_script_path[
full_training_script_path.index("keras-cv/examples/training/") + 27 :
]
training_script_dirs = training_script_rooted_at_training.split("/")
tensorboard_experiment_name = f"{training_script_dirs[0]}/{'/'.join(training_script_dirs[1:])[:-3]}/{model_name}-{weights_version}" # noqa: E501
training_script_json_path = full_training_script_path[
: full_training_script_path.index("keras-cv/examples/training/") + 27
] + "/".join(training_script_dirs[:2] + ["training_history.json"])
script_version = FLAGS.script_version or input(
"Input the commit hash of the latest commit in KerasCV/master "
"for the training script used for training."
)
tensorboard_logs_path = FLAGS.tensorboard_logs_path or input(
"Input the path to the TensorBoard logs\n"
)
tensorboard_experiment_id = (
os.popen(
f"python3 -m tensorboard.main dev upload "
f"--logdir {tensorboard_logs_path} "
f"--name {tensorboard_experiment_name} "
f"--one_shot --verbose 0"
)
.read()
.split("/")[-2]
)
tensorboard_experiment = tb.data.experimental.ExperimentFromDev(
tensorboard_experiment_id
)
tensorboard_results = tensorboard_experiment.get_scalars()
training_epochs = max(
tensorboard_results[tensorboard_results.run == "train"].step
)
results_tags = tensorboard_results.tag.unique()
# Validation accuracy won't exist in all logs (e.g for OD tasks).
# We capture the max validation accuracy if it exists, but otherwise omit it.
max_validation_accuracy = None
if (
"epoch_categorical_accuracy" in results_tags
or "epoch_sparse_categorical_accuracy" in results_tags
):
max_validation_accuracy = max(
tensorboard_results[
(tensorboard_results.run == "validation")
& (
(tensorboard_results.tag == "epoch_categorical_accuracy")
| (
tensorboard_results.tag
== "epoch_sparse_categorical_accuracy"
)
)
].value
)
max_validation_accuracy = f"{max_validation_accuracy:.4f}"
# Mean IOU won't exist in all logs (e.g for classification tasks).
# We capture the max IOU if it exists, but otherwise omit it.
max_mean_iou = None
if "epoch_mean_io_u" in results_tags:
max_mean_iou = max(
tensorboard_results[
(tensorboard_results.run == "validation")
& (tensorboard_results.tag == "epoch_mean_io_u")
].value
)
max_mean_iou = f"{max_mean_iou:.4f}"
contributor = FLAGS.contributor or input(
"Input your GitHub username "
"(or the username of the contributor, if it's not you)\n"
)
accelerators = FLAGS.accelerators or input(
"Input the number of accelerators used during training.\n"
)
args = input(
"Input any training arguments used for the training script.\n"
"Use comma-separate, colon-split key-value pairs. For example:\n"
"arg1:value, arg2:value\n"
)
args_dict = {}
for arg in args.split(","):
if len(arg.strip()) == 0:
continue
key_value_pair = [s.strip() for s in arg.split(":")]
args_dict[key_value_pair[0]] = key_value_pair[1]
new_results = {
"script": {
"name": "/".join(training_script_dirs[2:]),
"version": script_version,
},
"epochs_trained": training_epochs,
"tensorboard_logs": f"https://tensorboard.dev/experiment/{tensorboard_experiment_id}/", # noqa: E501
"contributor": contributor,
"args": args_dict,
"accelerators": int(accelerators),
}
if max_validation_accuracy is not None:
new_results["validation_accuracy"] = max_validation_accuracy
if max_mean_iou is not None:
new_results["validation_mean_iou"] = max_mean_iou
# Check if the JSON file already exists
results_file = open(training_script_json_path, "r")
results_string = results_file.read()
results = json.loads(results_string) if results_string != "" else {}
results_file.close()
# If we've never run this script on this model, insert a record for it
if model_name not in results:
results[model_name] = {}
# Add this run's results to the model's record
model_results = results[model_name]
model_results[weights_version] = new_results
# Save the updated results
results_file = open(training_script_json_path, "w")
json.dump(results, results_file, indent=4, sort_keys=True)
results_file.close()
# End of file: keras-cv/shell/weights/update_training_history.py
# Keras backends
## "バックエンド"とは?
Kerasはモデルレベルのライブラリで,深層学習モデルを開発するための高水準な構成要素を提供します.テンソル積,畳み込みなどのような低水準の操作をKeras自身で扱うことはありません.その代わりに,Kerasの"バックエンドエンジン"としての役割を果たし,そのような操作を行うために特化し,また最適化されたテンソルを取り扱うライブラリに依存しています.唯一のテンソルのライブラリを選び,そのライブラリに束縛されたKerasの実装を行うのではなく,Kerasはモジュール方式でこの問題を扱い,いくつかの異なるバックエンドエンジンをKerasにシームレスに接続できます.
現在は,Kerasは3つのバックエンドが利用可能で,それは**TensorFlow**バックエンドと**Theano**バックエンド,そして**CNTK**バックエンドです.
- [TensorFlow](http://www.tensorflow.org/) はGoogle, Inc.により開発されたオープンソースで,テンソルをシンボリックに操作ができるフレームワークです.
- [Theano](http://deeplearning.net/software/theano/) はモントリオール大学のLISA/MILA Labにより開発されたオープンソースで,テンソルをシンボリックに操作ができるフレームワークです.
- [CNTK](https://www.microsoft.com/en-us/cognitive-toolkit/) はMicrosoftによって開発された深層学習のためのcommercial-grade toolkitというオープンソースです.
将来的に,さらにバックエンドを追加する予定です.
----
## バックエンドの切り替え
少なくとも一度Kerasを実行したら,以下にあるKerasの設定ファイルを見つけるでしょう.
`$HOME/.keras/keras.json`
もしこのファイルがなければ,自分で作成できます.
__Windows ユーザへ注意__: `$HOME` を `%USERPROFILE%` に変更してください.
デフォルトの設定ファイルはおそらく以下のように見えるでしょう:
```json
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
`backend`フィールドを`"theano"`か`"tensorflow"`,`"cntk"`に変えるだけで,次回の実行時から新しい設定を利用します.
環境変数`KERAS_BACKEND`も定義することができて,かつあなたの設定ファイルで定義されているものを上書きします:
```bash
KERAS_BACKEND=tensorflow python -c "from keras import backend"
Using TensorFlow backend.
```
----
## keras.json の詳細
`keras.json`構成ファイルは次の設定を含みます:
```
{
"image_data_format": "channels_last",
"epsilon": 1e-07,
"floatx": "float32",
"backend": "tensorflow"
}
```
`$HOME/.keras/keras.json`を編集することでこれらの設定を変更できます.
* `image_data_format`: 文字列,`"channels_last"` か `"channels_first"` のいずれか.Kerasが従うデータのフォーマット規則を指定します. (`keras.backend.image_data_format()` がこれを返します.)
* 2次元データ (例えば画像) に対しては, `"channels_last"` は `(rows, cols, channels)` とみなし,`"channels_first"` は `(channels, rows, cols)`とみなします.
* 3次元データに対しては, `"channels_last"` は `(conv_dim1, conv_dim2, conv_dim3, channels)` とみなし, `"channels_first"` は `(channels, conv_dim1, conv_dim2, conv_dim3)` とみなします.
* `epsilon`: float,いくつかの操作で0除算を避けるために使う微小量定数.
* `floatx`: 文字列,`"float16"`,`"float32"`,か `"float64"`.デフォルトの浮動小数点精度.
* `backend`: 文字列,`"tensorflow"` か `"theano"` か `"cntk"`.
----
## 新しいコードを書くための抽象的なKerasバックエンドの利用
もし,あなたがTheano(`th`)とTensorFlow(`tf`)の両方で互換性があるように記述できるKerasモジュールが欲しいときは,抽象的なKerasバックエンドAPIを通じて書く必要があります.以下は導入部になります.
あなたは以下を通じてバックエンドモジュールをインポートできます:
```python
from keras import backend as K
```
以下のコードは入力のプレースホルダーのインスタンスを作成します.
これは`tf.placeholder()`,`th.tensor.matrix()`,または`th.tensor.tensor3()`,などと同じです.
```python
input = K.placeholder(shape=(2, 4, 5))
# 以下も動作します:
input = K.placeholder(shape=(None, 4, 5))
# 以下も動作します:
input = K.placeholder(ndim=3)
```
以下のコードは共有変数のインスタンスを作成します.
これは`tf.variable()`,または`th.shared()`と同じことです.
```python
import numpy as np
val = np.random.random((3, 4, 5))
var = K.variable(value=val)
# すべて0の変数:
var = K.zeros(shape=(3, 4, 5))
# すべて1の変数:
var = K.ones(shape=(3, 4, 5))
```
あなたが必要とするであろう大抵のテンソルの操作はTensorFlowやTheanoにおいて行うように実行できます:
```python
# Initializing Tensors with Random Numbers
b = K.random_uniform_variable(shape=(3, 4), low=0, high=1) # Uniform distribution
c = K.random_normal_variable(shape=(3, 4), mean=0, scale=1) # Gaussian distribution
d = K.random_normal_variable(shape=(3, 4), mean=0, scale=1)
# Tensor Arithmetic
a = b + c * K.abs(d)
c = K.dot(a, K.transpose(b))
a = K.sum(b, axis=1)
a = K.softmax(b)
a = K.concatenate([b, c], axis=-1)
# etc...
```
----
## バックエンド関数
### epsilon
```python
keras.backend.epsilon()
```
数値演算で使われる微小量を返します.
__戻り値__
浮動小数点数.
__例__
```python
>>> keras.backend.epsilon()
1e-07
```
----
### set_epsilon
```python
keras.backend.set_epsilon(e)
```
数値演算で使われる微小量をセットします.
__引数__
- __e__: 浮動小数点数,新たな微小量(epsilon).
__例__
```python
>>> from keras import backend as K
>>> K.epsilon()
1e-07
>>> K.set_epsilon(1e-05)
>>> K.epsilon()
1e-05
```
----
### floatx
```python
keras.backend.floatx()
```
デフォルトのfloat型を文字列で返します(e.g. 'float16', 'float32', 'float64').
__戻り値__
文字列,現在のデフォルトのfloat型.
__例__
```python
>>> keras.backend.floatx()
'float32'
```
----
### set_floatx
```python
keras.backend.set_floatx(floatx)
```
デフォルトのfloat型をセットします.
__引数__
- __floatx__: 'float16','float32',または'float64'の文字列.
__例__
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> K.set_floatx('float16')
>>> K.floatx()
'float16'
```
----
### cast_to_floatx
```python
keras.backend.cast_to_floatx(x)
```
Numpy配列をデフォルトのKerasのfloat型にキャストします.
__引数__
- __x__: Numpy 配列
__戻り値__
新しい型にキャストされた同じNumpy 配列.
__例__
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
----
### image_data_format
```python
keras.backend.image_data_format()
```
画像におけるデフォルトのフォーマット規則('channels_first' か 'channels_last')を返します.
__戻り値__
`'channels_first'`,または`'channels_last'`のどちらかの文字列.
__例__
```python
>>> keras.backend.image_data_format()
'channels_first'
```
----
### set_image_data_format
```python
keras.backend.set_image_data_format(data_format)
```
デフォルトのフォーマット規則をセットします.
__引数__
- __data_format__: `'channels_first'`,または`'channels_last'`の文字列.
__例__
```python
>>> from keras import backend as K
>>> K.image_data_format()
'channels_first'
>>> K.set_image_data_format('channels_last')
>>> K.image_data_format()
'channels_last'
```
----
### get_uid
```python
keras.backend.get_uid(prefix='')
```
デフォルトのグラフにおけるuidを取得します.
__引数__
- __prefix__: グラフにおける任意の接頭語.
__戻り値__
グラフにおける唯一の識別子.
----
### reset_uids
```python
keras.backend.reset_uids()
```
グラフの識別子をリセットします.
----
### clear_session
```python
keras.backend.clear_session()
```
現在のTFグラフを壊し,新たなものを作成します.
古いモデルやレイヤが散らかってしまうのを避けるのに役立ちます.
----
### manual_variable_initialization
```python
keras.backend.manual_variable_initialization(value)
```
変数を手動で初期化するかどうかのフラグをセットします.
この真理値は,変数をインスタンス化と同時に初期化すべきか(デフォルト),利用者側で初期化を制御すべきか(例えば`tf.initialize_all_variables()`を通じて)を決定します.
__引数__
- __value__: 真理値.
----
### learning_phase
```python
keras.backend.learning_phase()
```
学習フェーズのフラグを返します.
学習フェーズのフラグは学習期間とテスト期間で異なる振る舞いをする任意のKeras関数への入力として渡される真理値のテンソル (0 = test, 1 = train) です.
__戻り値__
学習フェーズ(テンソルのスカラーにおける整数か,Pythonの整数).
----
### set_learning_phase
```python
keras.backend.set_learning_phase(value)
```
学習フェーズを固定値にセットします.
__引数__
- __value__: 学習フェーズの値.0,または1の整数.
__Raises__
- __ValueError__: もし`value`が`0`,または`1`ではなかった場合.
----
### is_sparse
```python
keras.backend.is_sparse(tensor)
```
テンソルがスパースかどうかを返します.
__引数__
- __tensor__: テンソルのインスタンス.
__戻り値__
真理値.
__例__
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
----
### to_dense
```python
keras.backend.to_dense(tensor)
```
スパースなテンソルを密なテンソルに変換し,それを返します.
__引数__
- __tensor__: テンソルのインスタンス(潜在的にスパースであること).
__戻り値__
密なテンソル.
__例__
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
----
### variable
```python
keras.backend.variable(value, dtype=None, name=None, constraint=None)
```
変数をインスタンス化し,それを返します.
__引数__
- __value__: テンソルの初期値が含まれたNumpy 配列.
- __dtype__: テンソルの型.
- __name__: テンソルに対する任意の名前を表す文字列.
- __constraint__: オプティマイザの更新後に変数に適用するオプションの射影関数。
__戻り値__
変数のインスタンス(Kerasのメタ情報が含まれています).
__例__
```python
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]])
```
----
### constant
```python
keras.backend.constant(value, dtype=None, shape=None, name=None)
```
__引数__
- __value__: 定数,またはリスト.
- __dtype__: 返されたテンソルに対する要素の型.
- __shape__: 返されたテンソルに対する任意の次元.
- __name__: テンソルの任意の名前.
__戻り値__
不変のテンソル.
----
### is_keras_tensor
```python
keras.backend.is_keras_tensor(x)
```
`x`がKerasのテンソルかどうかを返します.
「Kerasのテンソル」とはKerasのレイヤー(`Layer`クラス)や`Input`から返されたテンソルです。
__引数__
- __x__: 潜在的なテンソル.
__戻り値__
真理値: 引数がKerasのテンソルかどうか.
__Raises__
- __ValueError__: `x`がシンボリックなテンソルでない場合。
__例__
```python
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a Keras tensor.
True
```
----
### placeholder
```python
keras.backend.placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None)
```
プレースホルダーのテンソルをインスタンス化し,それを返します.
__引数__
- __shape__: プレースホルダーのshape(整数のタプル,`None`を含んでいても構いません).
- __ndim__: テンソルの軸の数.少なくとも{`shape`, `ndim`}から一つ指定する必要があります.両方が指定されると,`shape`が使われます.
- __dtype__: プレースホルダーの型.
- __sparse__: プレースホルダーがスパースの型を持つべきかどうかの真理値.
- __name__: このプレースホルダーに対する任意の名前を表す文字列.
__戻り値__
テンソルのインスタンス(Kerasのメタ情報が含まれています).
__例__
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph._keras_shape
(2, 4, 5)
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
----
### is_placeholder
```python
keras.backend.is_placeholder(x)
```
`x`がプレースホルダか否かを返します。
__引数__
- __x__: プレースホルダの候補
__戻り値__
真理値。
----
### shape
```python
keras.backend.shape(x)
```
テンソル,または変数のshapeを返します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソルで表されたshape.
__例__
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> inputs = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(inputs)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(inputs).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
----
### int_shape
```python
keras.backend.int_shape(x)
```
整数,またはNoneからなるタプルとしての変数,またはテンソルのshapeを返します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
整数のタプル(またはNone).
__例__
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(inputs)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
----
### ndim
```python
keras.backend.ndim(x)
```
テンソルの軸の数を整数で返します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
軸の数を表す整数(スカラー).
__例__
```python
>>> from keras import backend as K
>>> inputs = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(inputs)
3
>>> K.ndim(kvar)
2
```
----
### dtype
```python
keras.backend.dtype(x)
```
Kerasのテンソル,または変数のdtypeを文字列で返します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
`x`のdtypeを表す文字列.
__例__
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32_ref'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32_ref'
```
----
### eval
```python
keras.backend.eval(x)
```
テンソルの変数値を評価します.
__引数__
- __x__: 変数.
__戻り値__
Numpy 配列.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
----
### zeros
```python
keras.backend.zeros(shape, dtype=None, name=None)
```
全要素が0の変数をインスタンス化し,それを返します.
__引数__
- __shape__: 整数のタプル.返されたKerasの変数に対するshape.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
__戻り値__
`0.0`で埋まった変数(Kerasのメタ情報が含まれています).`shape`がシンボリックだった場合,変数を返せず,その代わりに動的な形のテンソルを返すことに注意してください.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
----
### ones
```python
keras.backend.ones(shape, dtype=None, name=None)
```
全要素が1の変数をインスタンス化し,それを返します.
__引数__
- __shape__: 整数のタプル.返されたKerasの変数に対するshape.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
__戻り値__
`1.0`で埋まった変数.`shape`がシンボリックだった場合,変数を返せず,その代わりに動的な形のテンソルを返すことに注意してください.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
----
### eye
```python
keras.backend.eye(size, dtype=None, name=None)
```
単位行列をインスタンス化し,それを返します.
__引数__
- __size__: 整数.行数および列数.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
__戻り値__
単位行列を表すKerasの変数.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
----
### zeros_like
```python
keras.backend.zeros_like(x, dtype=None, name=None)
```
別のテンソルと同じshapeを持つ全要素が0の変数のインスタンスを作成します.
__引数__
- __x__: Kerasのテンソル,または変数.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
__戻り値__
xのshapeを持つ全要素が0のKerasの変数.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
----
### ones_like
```python
keras.backend.ones_like(x, dtype=None, name=None)
```
別のテンソルと同じshapeを持つ全要素が1の変数のインスタンスを作成します.
__引数__
- __x__: Kerasのテンソル,または変数.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
__戻り値__
xのshapeを持つ全要素が1のKerasの変数.
__例__
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
----
### identity
```python
keras.backend.identity(x, name=None)
```
入力されたテンソルと同じ内容を持つテンソルを返します.
__引数__
- __x__: 入力テンソル.
- __name__: 文字列、作る変数の名前。
__戻り値__
同じshape,型,及び内容を持つテンソル.
----
### random_uniform_variable
```python
keras.backend.random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None)
```
一様分布からサンプリングされた値を持つ変数のインスタンスを作成します.
__引数__
- __shape__: 整数のタプル.返されたKerasの変数に対するshape.
- __low__: 浮動小数点数.出力の区間における下限.
- __high__: 浮動小数点数.出力の区間における上限.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
- __seed__: 整数.ランダムシード値.
__戻り値__
サンプリング値で埋まったKerasの変数.
__例__
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
----
### random_normal_variable
```python
keras.backend.random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None)
```
ガウス分布からサンプリングされた値を持つ変数のインスタンスを作成します.
__引数__
- __shape__: 整数のタプル.返されたKerasの変数に対するshape.
- __mean__: 浮動小数点数.ガウス分布の平均.
- __scale__: 浮動小数点数.ガウス分布の標準偏差.
- __dtype__: 文字列.返されたKerasの変数に対するデータの型.
- __name__: 文字列.返されたKerasの変数に対する名前.
- __seed__: 整数.ランダムシード値.
__戻り値__
サンプリング値で埋まったKerasの変数.
__例__
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
----
### count_params
```python
keras.backend.count_params(x)
```
Kerasの変数におけるスカラーの数を返します.
__引数__
- __x__: Kerasの変数.
__戻り値__
`x` におけるスカラーの数を表す整数.
__例__
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
----
### cast
```python
keras.backend.cast(x, dtype)
```
テンソルを異なる型にキャストします.
Kerasの変数をキャストできますが,Kerasのテンソルが返されます.
__引数__
- __x__: Kerasのテンソル(または変数).
- __dtype__: 文字列.`'float16'`,`'float32'`,または`'float64'`のいずれか.
__戻り値__
`dtype`を持つKerasのテンソル.
__例__
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# It doesn't work in-place as below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
----
### update
```python
keras.backend.update(x, new_x)
```
`x`の値を`new_x`のものに更新する.
__引数__
- __x__: 変数.
- __new_x__: `x`と同じshapeを持つテンソル.
__戻り値__
更新された`x`.
----
### update_add
```python
keras.backend.update_add(x, increment)
```
`x`の値を`increment`で加算することで更新する.
__引数__
- __x__: 変数.
- __increment__: `x`と同じshapeを持つテンソル.
__戻り値__
更新された`x`.
----
### update_sub
```python
keras.backend.update_sub(x, decrement)
```
`x`の値を`decrement`で減算することで更新する.
__引数__
- __x__: 変数.
- __decrement__: `x`と同じshapeを持つテンソル.
__戻り値__
更新された`x`.
----
### moving_average_update
```python
keras.backend.moving_average_update(x, value, momentum)
```
変数における移動平均を計算します.
__引数__
- __x__: `Variable`
- __value__: `x`と同じshapeを持つテンソル.
- __momentum__: 移動平均のモーメンタム.
__戻り値__
変数を更新するための命令.
----
### dot
```python
keras.backend.dot(x, y)
```
2つのテンソル(かつ/または変数)を掛け合わせ,テンソルを返します.
n階テンソルにn階テンソルを掛ける場合,Theanoの振る舞いを再現します(例`(2, 3) * (4, 3, 5) -> (2, 4, 5)`).
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
`x`と`y`でドット積を行なったテンソル.
__例__
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
----
### batch_dot
```python
keras.backend.batch_dot(x, y, axes=None)
```
バッチ式のドット積.
`batch_dot`は`x`と`y`がバッチに含まれる,すなわち`(batch_size, :)`のshapeの中で,`x`と`y`のドット積を計算するために使われます.`batch_dot`の結果は入力より小さい次元を持つテンソルになります.次元数が1になれば,ndimが少なくとも2であることを保証するために`expand_dims`を利用します.
__引数__
- __x__: `ndim >= 2`のKerasのテンソル.
- __y__: `ndim >= 2`のKerasのテンソル.
- __axes__: 目標となる次元を持つ整数のリスト(もしくは整数単体).`axes[0]`と`axes[1]`の長さは同じにすべきです.
__戻り値__
(総和をとった次元を除いた)`x`のshapeと(バッチの次元と総和をとった次元を除いた)`y`のshapeを連結したshapeに等しいテンソル.もし最後のランクが1なら,`(batch_size, 1)`に整形します.
__例__
`x = [[1, 2], [3,4]]`, `y = [[5, 6], [7, 8]]`と仮定すると,非対角成分を計算しなくても,`x.dot(y.T)`の主対角成分である`batch_dot(x, y, axes=1) = [[17, 53]]`が得られます.
shapeの推定: `x`と`y`のshapeがそれぞれ`(100, 20)`,`(100, 30, 20)`としましょう.`axes`が(1, 2)の場合,出力されたテンソルのshapeを見つけるために,`x`と`y`のshapeにおけるそれぞれの次元でループさせることになります.
- `x.shape[0]`: 100: 出力されるshapeに付加されます.
- `x.shape[1]`: 20: 出力されるshapeには付加されず,`x`の次元1は総和が取られています(`dot_axes[0]` = 1).
- `y.shape[0]`: 100: 出力されるshapeには付加されず,`y`の最初の次元はいつも無視されます.
- `y.shape[1]`: 30: 出力されるshapeに付加されます.
- `y.shape[2]`: 20: 出力されるshapeには付加されず,`y`の次元1は総和が取られています(`dot_axes[1]` = 2)`output_shape` = `(100, 30)`.
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
----
### transpose
```python
keras.backend.transpose(x)
```
行列を転置します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
__例__
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> inputs = K.placeholder((2, 3))
>>> inputs
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(inputs)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
----
### gather
```python
keras.backend.gather(reference, indices)
```
テンソルの`reference`における添字の要素`indices`を探索します.
__引数__
- __reference__: テンソル.
- __indices__: 添字の整数テンソル.
__戻り値__
`reference`と同じ型を持つテンソル.
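参考までに,TensorFlowバックエンドを想定した簡単な使用例のスケッチを示します(値は説明用の一例です):
```python
from keras import backend as K
import numpy as np

# 語彙数4,次元3の埋め込み行列を参照テンソルとする例
reference = K.variable(np.arange(12).reshape(4, 3))
indices = K.constant([0, 2], dtype='int32')
gathered = K.gather(reference, indices)
print(K.eval(gathered))
# 行0と行2が取り出される:
# [[0. 1. 2.]
#  [6. 7. 8.]]
```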
----
### max
```python
keras.backend.max(x, axis=None, keepdims=False)
```
テンソル内の最大値.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数,最大値を探すため軸.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の中の最大値を持ったテンソル.
----
### min
```python
keras.backend.min(x, axis=None, keepdims=False)
```
テンソル内の最小値.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数,最小値を探すため軸.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の中の最小値を持ったテンソル.
----
### sum
```python
keras.backend.sum(x, axis=None, keepdims=False)
```
テンソルに対して,指定した軸に沿って和を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.和を計算する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の和をとったテンソル.
----
### prod
```python
keras.backend.prod(x, axis=None, keepdims=False)
```
テンソルに対して,指定した軸に沿って積を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.積を計算する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の積をとったテンソル.
----
### cumsum
```python
keras.backend.cumsum(x, axis=0)
```
テンソルに対して,指定した軸に沿って累積和を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.和を計算する軸方向.
__戻り値__
`x`を`axis`に沿って累積和をとったテンソル.
----
### cumprod
```python
keras.backend.cumprod(x, axis=0)
```
テンソルに対して,指定した軸に沿って累積積を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.積を計算する軸方向.
__戻り値__
`x`を`axis`に沿って累積積をとったテンソル.
----
### var
```python
keras.backend.var(x, axis=None, keepdims=False)
```
指定した軸に沿ったテンソルの分散を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.分散を計算する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の要素の分散を持つテンソル.
----
### std
```python
std(x, axis=None, keepdims=False)
```
指定した軸に沿ったテンソルの標準偏差を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.標準偏差を計算する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の要素の標準偏差を持つテンソル.
----
### mean
```python
keras.backend.mean(x, axis=None, keepdims=False)
```
指定した軸に沿ったテンソルの平均を計算します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.平均を計算する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
`x`の要素の平均を持つテンソル.
----
### any
```python
keras.backend.any(x, axis=None, keepdims=False)
```
ビット単位の縮約(論理OR).
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.縮約する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
uint8のテンソル.
----
### all
```python
keras.backend.all(x, axis=None, keepdims=False)
```
ビット単位の縮約(論理AND).
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.縮約する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
uint8のテンソル.
----
### argmax
```python
keras.backend.argmax(x, axis=-1)
```
テンソルの軸に沿った最大値の添字を返します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.縮約する軸方向.
__戻り値__
テンソル.
----
### argmin
```python
keras.backend.argmin(x, axis=-1)
```
テンソルの軸に沿った最小値の添字を返します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.縮約する軸方向.
__戻り値__
テンソル.
----
### square
```python
keras.backend.square(x)
```
要素ごとの二乗.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### abs
```python
keras.backend.abs(x)
```
要素ごとの絶対値.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### sqrt
```python
keras.backend.sqrt(x)
```
要素ごとの平方根.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### exp
```python
keras.backend.exp(x)
```
要素ごとの指数関数値.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### log
```python
keras.backend.log(x)
```
要素ごとの対数.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### logsumexp
```python
keras.backend.logsumexp(x, axis=None, keepdims=False)
```
log(sum(exp(テンソルの次元を横断した要素)))を計算します.
この関数はlog(sum(exp(x)))よりも計算上安定します.小さい入力に対して対数をとることで発生するアンダーフローと,大きな入力に対して指数関数にかけることで発生するオーバーフローを回避します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 整数.縮約する軸方向.
- __keepdims__: 次元を保つかどうかの真理値.`keepdims`が`False`の場合,テンソルのランクは1に削減します.`keepdims`が`True`の場合,縮小された次元は1の長さにとどめます.
__戻り値__
縮約されたテンソル.
----
### round
```python
keras.backend.round(x)
```
要素ごとの最も近い整数への丸め.
同点であれば偶数よりに丸め込まれます。
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### sign
```python
keras.backend.sign(x)
```
要素ごとの符号.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### pow
```python
keras.backend.pow(x, a)
```
要素ごとの指数乗.
__引数__
- __x__: テンソル,または変数.
- __a__: Pythonの整数.
__戻り値__
テンソル.
----
### clip
```python
keras.backend.clip(x, min_value, max_value)
```
要素ごとのクリッピング.
__引数__
- __x__: テンソル,または変数.
- __min_value__: Pythonの浮動小数点数,または整数.
- __max_value__: Pythonの浮動小数点数,または整数.
__戻り値__
テンソル.
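参考までに,動作を確認するための簡単なスケッチを示します(TensorFlowバックエンドを想定しています):
```python
from keras import backend as K
import numpy as np

x = K.variable(np.array([-2.0, -0.5, 0.3, 1.7]))
clipped = K.clip(x, min_value=-1.0, max_value=1.0)
print(K.eval(clipped))  # おおよそ [-1.  -0.5  0.3  1. ]
```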
----
### equal
```python
keras.backend.equal(x, y)
```
2つのテンソル間の要素ごとの等値性.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### not_equal
```python
keras.backend.not_equal(x, y)
```
2つのテンソル間の要素ごとの不等性.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### greater
```python
keras.backend.greater(x, y)
```
要素ごとの(x > y)の真理値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### greater_equal
```python
keras.backend.greater_equal(x, y)
```
要素ごとの(x >= y)の真理値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### less
```python
keras.backend.less(x, y)
```
要素ごとの(x < y)の真理値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### less_equal
```python
keras.backend.less_equal(x, y)
```
要素ごとの(x <= y)の真理値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
真理値からなるテンソル.
----
### maximum
```python
keras.backend.maximum(x, y)
```
2つのテンソルの要素ごとの最大値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
テンソル.
----
### minimum
```python
keras.backend.minimum(x, y)
```
2つのテンソルの要素ごとの最小値.
__引数__
- __x__: テンソル,または変数.
- __y__: テンソル,または変数.
__戻り値__
テンソル.
----
### sin
```python
keras.backend.sin(x)
```
要素ごとにxのsinを計算します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### cos
```python
keras.backend.cos(x)
```
要素ごとにxのcosを計算します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### normalize_batch_in_training
```python
keras.backend.normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001)
```
平均と標準偏差を計算したのちに,バッチとしてbatch_normalizationを適用します.
__引数__
- __x__: テンソル,または変数.
- __gamma__: 入力をスケールするためのテンソル.
- __beta__: 入力を補正するためのテンソル.
- __reduction_axes__: 繰り返し可能な整数,軸上の値すべてにわたって正規化を行う.
- __epsilon__: 微小量.
__戻り値__
3つの要素`(normalize_tensor, mean, variance)`からなるタプル.
----
### batch_normalization
```python
keras.backend.batch_normalization(x, mean, var, beta, gamma, epsilon=0.001)
```
与えられたmean,var,beta,gammaを使ってxにbatch normalizationを適用します.
すなわち,`output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` が返されます.
__引数__
- __x__: テンソル,または変数.
- __mean__: バッチにおける平均.
- __var__: バッチにおける分散.
- __gamma__: 入力をスケールするためのテンソル.
- __beta__: 入力を補正するためのテンソル.
- __epsilon__: 微小量.
__戻り値__
テンソル.
----
### concatenate
```python
keras.backend.concatenate(tensors, axis=-1)
```
指定した軸に沿ってテンソルのリストを連結します.
__引数__
- __tensor__: 連結するためのテンソルのリスト.
- __axis__: 連結する軸方向.
__戻り値__
テンソル.
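参考までに,連結後のshapeを確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

a = K.variable(np.array([[1, 2], [3, 4]]))
b = K.variable(np.array([[5, 6], [7, 8]]))
print(K.int_shape(K.concatenate([a, b], axis=-1)))  # (2, 4) 最後の軸で連結
print(K.int_shape(K.concatenate([a, b], axis=0)))   # (4, 2) 先頭の軸で連結
```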
----
### reshape
```python
keras.backend.reshape(x, shape)
```
指定したshapeにテンソルを整形します.
__引数__
- __x__: テンソル,または変数.
- __shape__: shapeのタプル.
__戻り値__
テンソル.
----
### permute_dimensions
```python
keras.backend.permute_dimensions(x, pattern)
```
テンソルにおける軸の順序を変更します.
__引数__
- __x__: テンソル,または変数.
- __pattern__: 次元の添字かなるタプル,e.g. `(0, 2, 1)`.
__戻り値__
テンソル.
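参考までに,軸の入れ替えをshapeで確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K

x = K.placeholder(shape=(2, 3, 4))
y = K.permute_dimensions(x, (0, 2, 1))  # 軸1と軸2を入れ替える
print(K.int_shape(y))  # (2, 4, 3)
```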
----
### resize_images
```python
keras.backend.resize_images(x, height_factor, width_factor, data_format)
```
4階テンソルに含まれる画像をリサイズします.
__引数__
- __x__: リサイズのためのテンソル,または変数.
- __height_factor__: 自然数.
- __width_factor__: 自然数.
- __data_format__: `channels_first`,または`channels_last`のどちらか.
__戻り値__
テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### resize_volumes
```python
keras.backend.resize_volumes(x, depth_factor, height_factor, width_factor, data_format)
```
5階テンソルに含まれるvolumeをリサイズします.
__引数__
- __x__: リサイズのためのテンソル,または変数.
- __depth_factor__: 自然数.
- __height_factor__: 自然数.
- __width_factor__: 自然数.
- __data_format__: `channels_first`,または`channels_last`のどちらか.
__戻り値__
テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### repeat_elements
```python
keras.backend.repeat_elements(x, rep, axis)
```
`np.repeat`のように,軸に沿ってテンソルの要素を繰り返します.
`x`がshape`(s1, s2, s3)`を持ち,`axis`が`1`の場合,この出力はshape`(s1, s2 * rep, s3)`を持ちます.
__引数__
- __x__: テンソル,または変数.
- __rep__: Pythonの整数,繰り返す回数.
- __axis__: 繰り返す軸方向.
__Raises__
- __ValueError__: `x.shape[axis]`が定義されていない場合.
__戻り値__
テンソル.
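参考までに,shapeの変化を確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K

x = K.placeholder(shape=(2, 3, 5))
y = K.repeat_elements(x, rep=2, axis=1)  # 軸1に沿って各要素を2回繰り返す
print(K.int_shape(y))  # (2, 6, 5)
```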
----
### repeat
```python
keras.backend.repeat(x, n)
```
2階テンソルを繰り返します.
`x`がshape (samples, dim)を持ち`n`が`2`であれば,この出力はshape`(samples, 2, dim)`を持ちます.
__引数__
- __x__: テンソル,または変数.
- __n__: Pythonの整数,繰り返す回数.
__戻り値__
テンソル.
----
### arange
```python
keras.backend.arange(start, stop=None, step=1, dtype='int32')
```
整数の並びからなる1階テンソルを作成します.
関数の引数はTheanoのarangeの慣例と同じです: 唯一の引数が与えられた場合,実際には"stop"の引数です.
返されたテンソルのデフォルトの型は`'int32'`でTensorFlowのデフォルトと一致します.
__引数__
- __start__: 始めの値.
- __stop__: 終わりの値.
- __step__: 2つの連続値の差分.
- __dtype__: 整数のデータ型.
__戻り値__
整数のテンソル.
----
### tile
```python
tile(x, n)
```
`x`を`n`でタイル状に配置したテンソルを作成します.
__引数__
- __x__: テンソル,または変数.
- __n__: 整数のリスト.`x`の次元数と同じでなければなりません.
__戻り値__
タイル状に配置されたテンソル.
----
### flatten
```python
keras.backend.flatten(x)
```
テンソルを平滑化します.
__引数__
- __x__: テンソル,または変数.
__戻り値__
1次元に整形されたテンソル.
----
### batch_flatten
```python
keras.backend.batch_flatten(x)
```
n階テンソルを0番目の次元が保たれるように2階テンソルに変換します.
言い換えると,バッチのそれぞれのサンプルに対して平滑化を行います.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### expand_dims
```python
keras.backend.expand_dims(x, axis=-1)
```
添字"axis"でのサイズ1の次元を加えます.
__引数__
- __x__: テンソル,または変数.
- __axis__: 新しい軸を追加する場所.
__戻り値__
次元が拡張されたテンソル.
----
### squeeze
```python
keras.backend.squeeze(x, axis)
```
テンソルから添字"axis"での1次元を除きます.
__引数__
- __x__: テンソル,または変数.
- __axis__: 削除する軸.
__戻り値__
`x`と同じデータで,次元が削除されたテンソル.
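参考までに,`expand_dims`と`squeeze`を組み合わせた簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K

x = K.placeholder(shape=(3, 4))
expanded = K.expand_dims(x, axis=1)      # サイズ1の軸を追加
print(K.int_shape(expanded))             # (3, 1, 4)
squeezed = K.squeeze(expanded, axis=1)   # 追加した軸を削除して元のshapeに戻す
print(K.int_shape(squeezed))             # (3, 4)
```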
----
### temporal_padding
```python
keras.backend.temporal_padding(x, padding=(1, 1))
```
3階テンソルの真ん中の次元に対してパディングを行います.
__引数__
- __x__: テンソル,または変数.
- __padding__: 2つの整数からなるタプル.次元1の始めと終わりにいくつ0をパディングするか.
__戻り値__
パディングされた3階テンソル.
----
### spatial_2d_padding
```python
keras.backend.spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None)
```
4階テンソルの2番目と3番目の次元に対してパディングを行います.
__引数__
- __x__: テンソル,または変数.
- __padding__: 2つのタプルのタプル.パディングのパターン.
- __data_format__: `channels_last`か`channels_first`のどちらか.
__戻り値__
パディングされた4階テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### spatial_3d_padding
```python
keras.backend.spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None)
```
5階テンソルに対して深さ,高さ,幅を表す次元に沿って0パディングを行います.
"padding[0]","padding[1]",かつ"padding[2]"それぞれの次元に対して左右を0パディングします.
'channels_last'のdata_formatに対して,2,3,4番目の次元がパディングされます.'channels_first'のdata_formatに対して,3,4,5番目の次元がパディングされます.
__引数__
- __x__: テンソル,または変数.
- __padding__: 3つのタプルのタプル.パディングのパターン.
- __data_format__: `channels_last`か`channels_first`のどちらか.
__戻り値__
パディングされた5階テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### stack
```python
keras.backend.stack(x, axis=0)
```
ランク`R`のテンソルのリストをランク`R+1`のテンソルに積み上げます.
__引数__
- __x__: テンソルのリスト.
- __axis__: 積み上げる軸方向.
__戻り値__
テンソル.
---
### one_hot
```python
keras.backend.one_hot(indices, num_classes)
```
整数のテンソルのone-hot表現を導出します.
__引数__
- __indices__: `(batch_size, dim1, dim2, ... dim(n-1))`のshapeを持つn階テンソル.
- __num_classes__: 整数.いくつのクラスを考慮するか.
__戻り値__
`(batch_size, dim1, dim2, ... dim(n-1), num_classes)`のshapeを持つ(n + 1)次元のone-hot表現が含まれたテンソル.
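参考までに,クラス番号をone-hot表現に変換する簡単なスケッチを示します(TensorFlowバックエンドを想定した説明用の例です):
```python
from keras import backend as K

indices = K.constant([0, 1, 2, 1], dtype='int32')  # batch_size=4のクラス番号
one_hot = K.one_hot(indices, num_classes=3)
print(K.eval(one_hot))
# [[1. 0. 0.]
#  [0. 1. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```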
----
### reverse
```python
keras.backend.reverse(x, axes)
```
指定した軸に沿ってテンソルを逆順にする.
__引数__
- __x__: 逆順にするテンソル.
- __axes__: 整数,または繰り返し可能な整数.逆順にする軸.
__戻り値__
テンソル.
----
### get_value
```python
keras.backend.get_value(x)
```
変数の値を返します.
__引数__
- __x__: 入力変数.
__戻り値__
Numpy 配列.
----
### batch_get_value
```python
keras.backend.batch_get_value(ops)
```
1つ以上のテンソルの変数の値を返します.
__引数__
- __ops__: 実行する命令のリスト.
__戻り値__
Numpy 配列のリスト.
----
### set_value
```python
keras.backend.set_value(x, value)
```
Numpy 配列から,変数の値を設定します.
__引数__
- __x__: 新しい値をセットするテンソル.
- __value__: Numpy 配列(同じshapeを持ちます)テンソルにセットする値.
----
### batch_set_value
```python
keras.backend.batch_set_value(tuples)
```
複数のテンソルの変数の値を一度にセットします.
__引数__
- __tuples__: `(tensor, value)`のタプルのリスト.`value`はNumpy 配列であるべきです.
----
### print_tensor
```python
keras.backend.print_tensor(x, message='')
```
`message`と評価されたテンソルの値を表示します.
`print_tensor`は次のコードのように使われると`x`と等価な新しいテンソルを返すことに留意してください。
そうしないと表示処理は評価中に考慮されません。
__例__
```python
>>> x = K.print_tensor(x, message="x is: ")
```
__引数__
- __x__: 表示するテンソル.
- __message__: テンソルと一緒に表示するメッセージ.
__戻り値__
`x`と同じテンソル.
----
### function
```python
function(inputs, outputs, updates=None, **kwargs)
```
Kerasの関数のインスタンスを作成します.
__引数__
- __inputs__: プレースホルダーテンソルのリスト.
- __outputs__: 出力のテンソルのリスト.
- __updates__: 更新する命令のリスト.
- __**kwargs__: TensorFlowでは利用されません.
__戻り値__
Numpy 配列.
----
### gradients
```python
gradients(loss, variables)
```
`variables`の`loss`に関しての勾配を返します.
__引数__
- __loss__: 最小化するためのスカラーからなるテンソル.
- __variables__: 変数のリスト.
__戻り値__
勾配からなるテンソル.
----
### stop_gradient
```python
stop_gradient(variables)
```
全ての変数に関して,0の勾配を持つ`variable`を返します.
__引数__
- __variables__: 変数のリスト.
__戻り値__
同様の変数のリスト.
----
### rnn
```python
rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None)
```
テンソルの時間次元にわたって反復します.
__引数__
- __step_function__: RNN のステップ関数
- __Parameters__:
- __input__: shape`(samples, ...)` (時間次元はありません)を持つテンソルで,ある時間ステップでのサンプルのバッチに対する入力を表します.
- __states__: テンソルのリスト.
- __戻り値__:
- __output__: shape`(samples, output_dim)` を持つテンソル(時間次元はありません).
- __new_states__: 'states'と同じ長さとshapeを持つテンソルのリスト.リストの中の最初のステートは前回の時間ステップでの出力されたテンソルでなければなりません.
- __inputs__: shape`(samples, time, ...)` を持つ一時的なテンソル(少なくとも3次元です).
- __initial_states__: ステップ関数で利用される状態の初期値を含む,shape`(samples, output_dim)`を持つテンソル(時間次元はありません).
- __go_backwards__: 真理値.真ならば,逆順で時間次元にわたって反復します.
- __mask__: マスクされたすべての要素に対して0となるような,shape`(samples, time, 1)`を持つバイナリ型のテンソル.
- __constants__: 各ステップで渡される定数値のリスト.
- __unroll__: RNNをアンロールするか,シンボリックループ(バックエンドに応じた`while_loop`,または`scan`)を使うかどうか.
- __input_length__: TensorFlowの実装では関係ありません.Theanoでアンロールを利用するときは指定する必要があります.
__戻り値__
`(last_output, outputs, new_states)`のタプル.
- __last_output__: shape`(samples, ...)`を持つRNNの最新の出力.
- __outputs__: 各`output[s, t]`がサンプル`s`に対する時刻`t`でのステップ関数の出力であるような,shape`(samples, time, ...)`を持つテンソル
- __new_states__: shape`(samples, ...)`を持つ,ステップ関数で返される最新の状態を表すテンソルのリスト.
__Raises__
- __ValueError__: 入力の次元が3未満の場合.
- __ValueError__: `unroll`が`True`だが,入力の時間ステップが固定値ではない場合.
- __ValueError__: `None`ではない`mask`が与えられたが,statesが与えられていない(`len(states)` == 0)場合.
----
### switch
```python
switch(condition, then_expression, else_expression)
```
スカラー値に応じて2つの命令を入れ替えます.
`then_expression`と`else_expression`はともに*同じshape*を持つシンボリックなテンソルであるべきであることに注意してください.
__引数__
- __condition__: スカラーからなるテンソル(`整数`,または`真理値`).
- __then_expression__: テンソル,またはテンソルを返すcallable.
- __else_expression__: テンソル,またはテンソルを返すcallable.
__戻り値__
選択されたテンソル.
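参考までに,スカラー条件による分岐を確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

cond = K.greater(K.variable(0.7), 0.5)             # スカラーの真理値テンソル
then_expr = K.variable(np.array([[1.0, 1.0]]))
else_expr = K.variable(np.array([[-1.0, -1.0]]))
out = K.switch(cond, then_expr, else_expr)
print(K.eval(out))  # 条件が真なのでthen_expressionの値 [[1. 1.]] が返る
```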
----
### in_train_phase
```python
in_train_phase(x, alt, training=None)
```
学習フェーズでは`x`を選択し,それ以外では`alt`を選択します.
`alt`は`x`と*同じshape*を持つべきであることに注意してください.
__引数__
- __x__: 学習フェーズにおいて何を返すか(テンソル,またはテンソルを返すcallable).
- __alt__: 学習フェーズ以外において何を返すか(テンソル,またはテンソルを返すcallable).
- __training__: 学習フェーズを指定した任意のスカラーからなるテンソル(またはPythonの真理値,整数).
__戻り値__
`training`のフラグに基づいた`x`,または`alt`のどちらか.`training`のフラグは`K.learning_phase()`をデフォルトにします.
----
### in_test_phase
```python
in_test_phase(x, alt, training=None)
```
テストフェーズでは`x`を選択し,それ以外では`alt`を選択します.
`alt`は`x`と*同じshape*を持つべきであることに注意してください.
__引数__
- __x__: テストフェーズにおいて何を返すか(テンソル,またはテンソルを返すcallable).
- __alt__: テストフェーズ以外において何を返すか(テンソル,またはテンソルを返すcallable).
- __training__: 学習フェーズを指定した任意のスカラーからなるテンソル(またはPythonの真理値,整数).
__戻り値__
`K.learning_phase`のフラグに基づいた`x`,または`alt`のどちらか.
----
### relu
```python
relu(x, alpha=0.0, max_value=None)
```
Rectified linear unit.
デフォルトは,要素ごとに`max(x, 0)`を返します.
__引数__
- __x__: テンソル,または変数.
- __alpha__: スカラー値.負の領域における関数の傾き(デフォルトは`0.`).
- __max_value__: 飽和度の閾値.
__戻り値__
テンソル.
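参考までに,`alpha`と`max_value`の効果を確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

x = K.variable(np.array([-3.0, -1.0, 0.0, 2.0, 5.0]))
print(K.eval(K.relu(x)))                 # [0. 0. 0. 2. 5.]
print(K.eval(K.relu(x, alpha=0.1)))      # 負側に0.1の傾きを持つ(leaky ReLU)
print(K.eval(K.relu(x, max_value=3.0)))  # 上限3.0で飽和する
```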
----
### elu
```python
elu(x, alpha=1.0)
```
Exponential linear unit.
__引数__
- __x__: テンソル,または変数.
- __alpha__: スカラー値.負の領域における関数の傾き.
__戻り値__
テンソル.
----
### softmax
```python
softmax(x)
```
Softmax.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### softplus
```python
softplus(x)
```
Softplus.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### softsign
```python
softsign(x)
```
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### categorical_crossentropy
```python
categorical_crossentropy(output, target, from_logits=False)
```
出力テンソルと目標テンソルの間のカテゴリカルクロスエントロピー.
__引数__
- __output__: softmaxの出力テンソル(`from_logits`がTrueの場合は,`output`はロジットであることが期待されます).
- __target__: `output`と同じshapeからなるテンソル.
- __from_logits__: 真理値.`output`がsoftmaxの結果,またはロジット値からなるテンソルかどうか.
__戻り値__
出力のテンソル.
----
### sparse_categorical_crossentropy
```python
sparse_categorical_crossentropy(output, target, from_logits=False)
```
整数の目標におけるカテゴリカルクロスエントロピー.
__引数__
- __output__: softmaxの出力テンソル(`from_logits`がTrueの場合は,`output`はロジットであることが期待されます).
- __target__: 整数のテンソル.
- __from_logits__: 真理値.`output`がsoftmaxの結果,またはロジット値からなるテンソルかどうか.
__戻り値__
出力のテンソル.
----
### binary_crossentropy
```python
binary_crossentropy(output, target, from_logits=False)
```
出力テンソルと目標テンソルの間のバイナリクロスエントロピー.
__引数__
- __output__: 出力テンソル(`from_logits`がTrueの場合は,ロジットであることが期待されます).
- __target__: `output`と同じshapeからなるテンソル.
- __from_logits__: 真理値.`output`がsoftmaxの結果,またはロジット値からなるテンソルかどうか.
__戻り値__
テンソル.
----
### sigmoid
```python
sigmoid(x)
```
要素ごとのシグモイド.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### hard_sigmoid
```python
hard_sigmoid(x)
```
セグメントごとのシグモイドの線形近似.
シグモイドよりも高速.`x < -2.5`の場合,`0.`,`x > 2.5`の場合,`1.`,`-2.5 <= x <= 2.5`の場合,`0.2 * x + 0.5`が返される.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### tanh
```python
tanh(x)
```
要素ごとのtanh.
__引数__
- __x__: テンソル,または変数.
__戻り値__
テンソル.
----
### dropout
```python
dropout(x, level, noise_shape=None, seed=None)
```
`x`の要素をランダムに0にセットし,その上,テンソル全体をスケールさせます.
__引数__
- __x__: テンソル
- __level__: 0に設定されるテンソルにおける要素の割合
- __noise_shape__: ランダムに生成された保持/棄却のフラグのshapeで,`x`のshapeにブロードキャスト可能でなければなりません.
- __seed__: 決定論を保証するランダムシード.
__戻り値__
テンソル.
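参考までに,動作のイメージをつかむための簡単なスケッチを示します(TensorFlowバックエンドを想定し,結果は乱数に依存します):
```python
from keras import backend as K

x = K.ones((4, 5))
# 約50%の要素を0にし,残りの要素を1 / (1 - 0.5) = 2倍にスケールする
dropped = K.dropout(x, level=0.5, seed=1)
print(K.eval(dropped))  # 0.と2.がランダムに混ざったテンソル
```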
----
### l2_normalize
```python
l2_normalize(x, axis)
```
指定した軸に沿って,L2ノルムでテンソルを正規化します.
__引数__
- __x__: テンソル,または変数.
- __axis__: 正規化する軸方向.
__戻り値__
テンソル.
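参考までに,行ごとにL2正規化する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

x = K.variable(np.array([[3.0, 4.0]]))
normalized = K.l2_normalize(x, axis=-1)
print(K.eval(normalized))  # おおよそ [[0.6 0.8]] (各行のL2ノルムが1になる)
```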
----
### in_top_k
```python
in_top_k(predictions, targets, k)
```
`targets`が`predictions`の上位`k`に含まれているかどうか,を返します.
__引数__
- __predictions__: shape`(batch_size, classes)`で`float32`型のテンソル.
- __targets__: 長さ`batch_size`で`int32`,または`int64`の1階テンソル.
- __k__: 整数.上位何件を考慮するかの数.
__戻り値__
`batch_size`の長さで真理値からなる1階テンソル.`predictions[i]`が上位`k`に含まれていたら`output[i]`は`True`.
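参考までに,上位k件の判定を確認する簡単なスケッチを示します(TensorFlowバックエンドを想定した説明用の例です):
```python
from keras import backend as K
import numpy as np

predictions = K.variable(np.array([[0.1, 0.7, 0.2],
                                   [0.6, 0.3, 0.1]]))
targets = K.constant([0, 0], dtype='int32')
print(K.eval(K.in_top_k(predictions, targets, k=2)))
# [False  True] (サンプル0ではクラス0は上位2件に入らず,サンプル1では入る)
```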
----
### conv1d
```python
conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1)
```
1次元の畳み込み.
__引数__
- __x__: テンソル,または変数.
- __kernel__: カーネルを表すテンソル.
- __strides__: ストライドの整数.
- __padding__: 文字列.`same`,`causal`,または`valid`.
- __data_format__: 文字列`channels_last`,または`channels_first`のどちらか.
- __dilation_rate__: 整数.ディレーションを行う割合.
__戻り値__
1次元の畳み込みの結果からなるテンソル.
----
### conv2d
```python
conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
```
2次元の畳み込み.
__引数__
- __x__: テンソル,または変数.
- __kernel__: カーネルを表すテンソル.
- __strides__: ストライドの整数.
- __padding__: 文字列.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.入力/カーネル/出力でTheanoもしくはTensorFlowのデータ形式を利用するかどうか.
- __dilation_rate__: 整数のタプル.
__戻り値__
2次元の畳み込みの結果からなるテンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### conv2d_transpose
```python
conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None)
```
2次元の逆畳み込み(すなわち,転置畳み込み).
__引数__
- __x__: テンソル,または変数.
- __kernel__: カーネルを表すテンソル.
- __output_shape__: 出力するshapeに対する整数の1階テンソル.
- __strides__: ストライドの整数.
- __padding__: 文字列.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.入力/カーネル/出力でTheanoもしくはTensorFlowのデータ形式を利用するかどうか.
__戻り値__
2次元の転置畳み込みの結果からなるテンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### separable_conv2d
```python
separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
```
separableフィルタ込みで2次元の畳み込み.
__引数__
- __x__: テンソル,または変数.
- __depthwise_kernel__: 深さごとの畳み込みに対するカーネル.
- __pointwise_kernel__: 1x1の畳み込みに対するカーネル.
- __strides__: ストライドのタプル(長さ2).
- __padding__: パディングのモード.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.
- __dilation_rate__: 整数のタプル.ディレーションを行う割合.
__戻り値__
出力テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### conv3d
```python
conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1))
```
3次元の畳み込み.
__引数__
- __x__: テンソル,または変数.
- __kernel__: カーネルのテンソル.
- __strides__: ストライドのタプル.
- __padding__: 文字列.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.入力/カーネル/出力でTheanoもしくはTensorFlowのデータ形式を利用するかどうか.
- __dilation_rate__: 3つの整数からなるタプル
__戻り値__
3次元の畳み込みの結果からなるテンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
----
### pool2d
```python
pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max')
```
2次元のプーリング.
__引数__
- __x__: テンソル,または変数.
- __pool_size__: 2つの整数からなるタプル.
- __strides__: 2つの整数からなるタプル.
- __padding__: 文字列.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.
- __pool_mode__: `max`,`avg`のどちらか.
__戻り値__
2次元のプーリングの結果からなるテンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
- __ValueError__: `pool_mode`が`max`,または`avg`ではない場合.
----
### pool3d
```python
pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max')
```
3次元のプーリング.
__引数__
- __x__: テンソル,または変数.
- __pool_size__: 3つの整数からなるタプル.
- __strides__: 3つの整数からなるタプル.
- __padding__: 文字列.`same`,または`valid`.
- __data_format__: 文字列.`channels_last`,または`channels_first`のどちらか.
- __pool_mode__: `max`,`avg`のどちらか.
__戻り値__
3次元のプーリングの結果からなるテンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`,または`channels_first`ではない場合.
- __ValueError__: `pool_mode`が`max`,または`avg`ではない場合.
----
### bias_add
```python
bias_add(x, bias, data_format=None)
```
テンソルにバイアスベクトルを付加します.
__引数__
- __x__: テンソル,または変数.
- __bias__: 付加するバイアスを表すテンソル.
- __data_format__: 3,4,5階テンソルに対するデータの形式: "channels_last",または"channels_first"のどちらか.
__戻り値__
出力テンソル.
__Raises__
- __ValueError__: 以下の2つの場合の一方:
1. 不正な`data_format`が与えられた場合.
2. 不正なbiasのshape.biasはベクトルかndim(x) - 1のテンソルにすべきです.
----
### random_normal
```python
random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```
ガウス分布の値を持つテンソルを返します.
__引数__
- __shape__: 整数のタプル.作成するテンソルのshape.
- __mean__: 浮動小数点数.サンプリングするためのガウス分布の平均.
- __stddev__: 浮動小数点数.サンプリングするためのガウス分布の標準偏差.
- __dtype__: 文字列.返されるテンソルのデータ型.
- __seed__: 整数.ランダムシード.
__戻り値__
テンソル.
----
### random_uniform
```python
random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None)
```
一様分布の値を持つテンソルを返します.
__引数__
- __shape__: 整数のタプル.作成するテンソルのshape.
- __minval__: 浮動小数点数.サンプリングするための一様分布の下限.
- __maxval__: 浮動小数点数.サンプリングするための一様分布の上限.
- __dtype__: 文字列.返されるテンソルのデータ型.
- __seed__: 整数.ランダムシード.
__戻り値__
テンソル.
----
### random_binomial
```python
random_binomial(shape, p=0.0, dtype=None, seed=None)
```
二項分布の値を持つテンソルを返します.
__引数__
- __shape__: 整数のタプル.作成するテンソルのshape.
- __p__: 浮動小数点数.`0. <= p <= 1`,二項分布の確率.
- __dtype__: 文字列.返されるテンソルのデータ型.
- __seed__: 整数.ランダムシード.
__戻り値__
テンソル.
----
### truncated_normal
```python
truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
```
切断ガウス分布の値を持つテンソルを返します.
生成される値は,指定した平均と標準偏差を持つガウス分布に従いますが,平均から標準偏差の2倍を超えて離れた値は棄却され,再サンプリングされます.
__引数__
- __shape__: 整数のタプル.作成するテンソルのshape.
- __mean__: 浮動小数点数.値の平均.
- __stddev__: 浮動小数点数.値の標準偏差.
- __dtype__: 文字列.返されるテンソルのデータ型.
- __seed__: 整数.ランダムシード.
__戻り値__
テンソル.
----
### ctc_label_dense_to_sparse
```python
ctc_label_dense_to_sparse(labels, label_lengths)
```
CTCのラベルを密からスパースなものに変換します.
__引数__
- __labels__: 密なCTCのラベル.
- __label_length__: ラベルの長さ.
__戻り値__
ラベルにおけるスパース表現からなるテンソル.
----
### ctc_batch_cost
```python
ctc_batch_cost(y_true, y_pred, input_length, label_length)
```
各バッチ要素に対してCTCのlossアルゴリズムを実行.
__引数__
- __y_true__: 真のラベルを含むテンソル`(samples, max_string_length)`.
- __y_pred__: 予測値かsoftmaxの出力を含むテンソル`(samples, time_steps, num_categories)`.
- __input_length__: `y_pred`の各バッチの系列長を含むテンソル`(samples,1)`.
- __label_length__: `y_true`の各バッチの系列長を含むテンソル`(samples,1)`.
__戻り値__
各要素のCTCの損失値を含んだshape(samples, 1)のテンソル.
----
### ctc_decode
```python
ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1)
```
softmaxの出力をデコードします.
(best pathとしても知られる)貪欲法か,制限付き辞書探索(constrained dictionary search)を使います.
__引数__
- __y_pred__: 予測値かsoftmaxの出力を含むテンソル`(samples, time_steps, num_categories)`.
- __input_length__: y_predの各バッチの系列長を含むテンソル`(samples,1)`.
- __greedy__: `true`なら,より高速なbest-path探索を行います.このとき辞書は使われません.
- __beam_width__: `greedy`が`False`の場合,この幅を使ったビームサーチを行います.
- __top_paths__: `greedy`が`False`の場合,可能性の高いパスをいくつ返すかを指定します.
__戻り値__
- __Tuple__:
- __List__: `greedy`が`true`の場合,デコードされたシーケンスを含む1要素のリストを返します.`false`の場合,可能性の高い上位`top_paths`個のデコードされたシーケンスを返します.重要: 空白のラベルは`-1`として返されます.
- __Tensor__: 各デコードされたシーケンスの対数確率を含むshape`(top_paths, )`のテンソル.
----
### map_fn
```python
map_fn(fn, elems, name=None, dtype=None)
```
関数fnをelemsの要素全てに対して当てはめ,その出力を返します.
__引数__
- __fn__: elemsの各要素に対して呼び出されるCallable.
- __elems__: テンソル.
- __name__: グラフの中のmapのノードに対する文字列の名前.
- __dtype__: 出力のデータ型.
__戻り値__
データ型`dtype`を持つテンソル.
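参考までに,先頭の軸に沿って関数を適用する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

elems = K.variable(np.array([[1.0, 2.0], [3.0, 4.0]]))
row_sums = K.map_fn(lambda row: K.sum(row), elems)  # 各行ごとに合計を計算
print(K.eval(row_sums))  # [3. 7.]
```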
----
### foldl
```python
foldl(fn, elems, initializer=None, name=None)
```
fnを使って左から右にelemsの要素を結合させることでelemsを縮約します.
__引数__
- __fn__: elemsの各要素に対して呼び出されるCallable.例えば,`lambda acc, x: acc + x`
- __elems__: テンソル.
- __initializer__: 使用される最初の値.(Noneの場合は`elems[0]`を指す)
- __name__: グラフの中のfoldlのノードに対する文字列の名前.
__戻り値__
`initializer`の同じ型とshapeを持つテンソル.
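参考までに,左からの累積加算を確認する簡単なスケッチを示します(TensorFlowバックエンドを想定):
```python
from keras import backend as K
import numpy as np

elems = K.variable(np.array([1.0, 2.0, 3.0, 4.0]))
# initializerがNoneの場合はelems[0]が初期値になる
total = K.foldl(lambda acc, x: acc + x, elems)
print(K.eval(total))  # 10.0
```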
----
### foldr
```python
foldr(fn, elems, initializer=None, name=None)
```
fnを使って右から左にelemsの要素を結合させることでelemsを縮約します.
__引数__
- __fn__: elemsの各要素に対して呼び出されるCallable.例えば,`lambda acc, x: acc + x`
- __elems__: テンソル.
- __initializer__: 使用される最初の値.(Noneの場合は`elems[-1]`を指す)
- __name__: グラフの中のfoldrのノードに対する文字列の名前.
__戻り値__
`initializer`の同じ型とshapeを持つテンソル.
----
### local_conv1d
``` python
local_conv1d(inputs, kernel, kernel_size, strides, data_format=None)
```
重みを共有しない1次元畳み込みの適用.
__引数__
- __inputs__: (batch_size, steps, input_dim)のshapeをもつ3階テンソル
- __kernel__: (output_length, feature_dim, filters)のshapeをもつ畳み込みのため共有なしの重み
- __kernel_size__: 1次元の畳み込みにおけるwindowの長さを指定する整数1つをもつタプル
- __strides__: 畳み込みのstrideの長さを指定する整数1つをもつタプル
- __data_format__: channels_first か channels_last のデータフォーマット
__戻り値__
重みを共有しない1次元の畳み込みを適用した (batch_size, output_length, filters) のshapeをもつテンソル
__Raises__
- __ValueError__: `data_format`が`channels_last`か`channels_first`でないとき.
---
### local_conv2d
``` python
local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None)
```
重みを共有しない2次元畳み込みの適用.
__引数__
- __inputs__: 4階テンソル: data_format='channels_first'なら (batch_size, filters, new_rows, new_cols),data_format='channels_last'なら (batch_size, new_rows, new_cols, filters)
- __kernel__: (output_items, feature_dim, filters)のshapeをもつ畳み込みのため共有なしの重み
- __kernel_size__: 2次元の畳み込みにおけるwindowの幅と高さを指定する整数2つをもつタプル
- __strides__: 幅と高さにそった畳み込みのstrideを指定する整数2つをもつタプル
- __output_shape__: (output_row, output_col) のタプル
- __data_format__: channels_first か channels_last のデータフォーマット
__戻り値__
4階テンソル: data_format='channels_first'なら(batch_size, filters, new_rows, new_cols)のshapeの4階テンソル,data_format='channels_last'なら(batch_size, new_rows, new_cols, filters)のshapeの4階テンソル.
__Raises__
- __ValueError__: `data_format`が`channels_last`か`channels_first`でないとき.
| keras-docs-ja/sources/backend.md/0 | {
"file_path": "keras-docs-ja/sources/backend.md",
"repo_id": "keras-docs-ja",
"token_count": 38936
} | 85 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L193)</span>
### Add
```python
keras.layers.Add()
```
入力のリスト同士を足し合わせるレイヤー.
入力はすべて同じshapeをもったテンソルのリストで,1つのテンソルを返す(shapeは同じ).
__例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
added = keras.layers.Add()([x1, x2]) # equivalent to added = keras.layers.add([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L223)</span>
### Subtract
```python
keras.layers.Subtract()
```
2つの入力の引き算をするレイヤー.
入力は同じshapeをもつ2つのテンソルのリストで,1つのテンソル(inputs[0] - inputs[1])を返します.
返すテンソルも同じshapeです.
__例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
# Equivalent to subtracted = keras.layers.subtract([x1, x2])
subtracted = keras.layers.Subtract()([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L260)</span>
### Multiply
```python
keras.layers.Multiply()
```
入力のリストの要素同士の積のレイヤー.
入力はすべて同じshapeをもったテンソルのリストで,1つのテンソルを返す(shapeは同じ).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L275)</span>
### Average
```python
keras.layers.Average()
```
入力のリストを平均するレイヤー.
入力はすべて同じshapeをもったテンソルのリストで,1つのテンソルを返す(shapeは同じ).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L290)</span>
### Maximum
```python
keras.layers.Maximum()
```
入力のリストの要素間の最大値を求めるレイヤー.
入力はすべて同じshapeをもったテンソルのリストで,1つのテンソルを返す(shapeは同じ).
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L320)</span>
### Concatenate
```python
keras.layers.Concatenate(axis=-1)
```
入力のリストをconcatenateするレイヤー.
入力は,concatenateする際のaxisを除き,すべて同じshapeをもったテンソルのリストで,全入力をconcatenateした1つのテンソルを返す.
__引数__
- __axis__: concatenateする際のaxis.
- __**kwargs__: 標準的なレイヤーのキーワード引数.
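以下は`Concatenate`レイヤーの簡単な使用例のスケッチです(shapeは説明用の仮のものです).
```python
import keras

input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
concatenated = keras.layers.Concatenate(axis=-1)([x1, x2])  # shapeは(None, 16)
out = keras.layers.Dense(4)(concatenated)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```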
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/merge.py#L408)</span>
### Dot
```python
keras.layers.Dot(axes, normalize=False)
```
2つのテンソルのサンプル間でdot積を計算するレイヤー.
例えば,shapeが`(batch_size, n)`の2つのテンソル`a`と`b`に適用する場合,出力されるテンソルのshapeは`(batch_size, 1)`となり,出力の要素`i`は`a[i]`と`b[i]`のdot積です.
__引数__
- __axes__: 整数か整数のタプル.dot積をとる際にaxisかaxesのどちらを使うか.
- __normalize__: dot積をとる前にdot積のaxisでサンプルをL2正規化するかどうか.
Trueなら,dot積の出力は,2つのサンプルのcosine.
- __**kwargs__: 標準的なレイヤーのキーワード引数.
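以下は`Dot`レイヤーの動作を確認する簡単なスケッチです(値は説明用の仮のものです).
```python
import numpy as np
import keras

x1 = keras.layers.Input(shape=(3,))
x2 = keras.layers.Input(shape=(3,))
dotted = keras.layers.Dot(axes=1)([x1, x2])  # 出力のshapeは(None, 1)
model = keras.models.Model(inputs=[x1, x2], outputs=dotted)

a = np.array([[1., 2., 3.]])
b = np.array([[4., 5., 6.]])
print(model.predict([a, b]))  # [[32.]] = 1*4 + 2*5 + 3*6
```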
----
### add
```python
keras.layers.add(inputs)
```
`Add`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力の総和のテンソル.
__例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
added = keras.layers.add([x1, x2])
out = keras.layers.Dense(4)(added)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
### subtract
```python
keras.layers.subtract(inputs)
```
`Subtract`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力の差のテンソル.
__例__
```python
import keras
input1 = keras.layers.Input(shape=(16,))
x1 = keras.layers.Dense(8, activation='relu')(input1)
input2 = keras.layers.Input(shape=(32,))
x2 = keras.layers.Dense(8, activation='relu')(input2)
subtracted = keras.layers.subtract([x1, x2])
out = keras.layers.Dense(4)(subtracted)
model = keras.models.Model(inputs=[input1, input2], outputs=out)
```
----
### multiply
```python
keras.layers.multiply(inputs)
```
`Multiply`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力の要素同士の積のテンソル.
----
### average
```python
keras.layers.average(inputs)
```
`Average`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力の平均のテンソル.
----
### maximum
```python
keras.layers.maximum(inputs)
```
`Maximum`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力の要素間の最大値のテンソル.
----
### concatenate
```python
keras.layers.concatenate(inputs, axis=-1)
```
`Concatenate`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __axis__: Concatenation axis.
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力を`axis`の方向でconcateしたテンソル.
----
### dot
```python
keras.layers.dot(inputs, axes, normalize=False)
```
`Dot`レイヤーの関数インターフェース.
__引数__
- __inputs__: 入力テンソルのリスト(最低2つ).
- __axes__: 整数か整数のタプル.dot積をとる際にaxisかaxesのどちらを使うか.
- __normalize__: dot積をとる前にdot積のaxisでサンプルをL2正規化するかどうか. Trueなら,dot積の出力は,2つのサンプルのcosine.
- __**kwargs__: 標準的なレイヤーのキーワード引数.
__戻り値__
入力のdot積をとったテンソル.
| keras-docs-ja/sources/layers/merge.md/0 | {
"file_path": "keras-docs-ja/sources/layers/merge.md",
"repo_id": "keras-docs-ja",
"token_count": 3480
} | 86 |
## 正則化の利用方法
正則化によって,最適化中にレイヤーパラメータあるいはレイヤーの出力に制約を課すことができます.これらの正則化はネットワークが最適化する損失関数に組み込まれます.
この正則化はレイヤー毎に適用されます.厳密なAPIはレイヤーに依存しますが,`Dense`,`Conv1D`,`Conv2D`,`Conv3D`レイヤーは統一的なAPIを持っています.
これらのレイヤーは3つの引数を取ります:
- `kernel_regularizer`: `keras.regularizers.Regularizer` のインスタンス
- `bias_regularizer`: `keras.regularizers.Regularizer` のインスタンス
- `activity_regularizer`: `keras.regularizers.Regularizer` のインスタンス
## 例
```python
from keras import regularizers
model.add(Dense(64, input_dim=64,
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l1(0.01)))
```
## 利用可能な正則化
```python
keras.regularizers.l1(0.)
keras.regularizers.l2(0.)
keras.regularizers.l1_l2(l1=0.01, l2=0.01)
```
## 新しい正則化の定義
重み行列から損失関数に寄与するテンソルを返す任意の関数は,正則化として利用可能です,例:
```python
from keras import backend as K
def l1_reg(weight_matrix):
return 0.01 * K.sum(K.abs(weight_matrix))
model.add(Dense(64, input_dim=64,
kernel_regularizer=l1_reg))
```
また,オブジェクト指向的に正則化を定義できます.[keras/regularizers.py](https://github.com/keras-team/keras/blob/master/keras/regularizers.py)モジュールの例を見てください.
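以下は,`Regularizer`クラスを継承して上記のL1正則化をオブジェクト指向的に書き直したスケッチです(クラス名と係数は説明用の仮のものです).
```python
from keras import backend as K
from keras.regularizers import Regularizer

class MyL1Regularizer(Regularizer):
    """重み行列の絶対値の総和にペナルティを課す例."""

    def __init__(self, l1=0.01):
        self.l1 = l1

    def __call__(self, weight_matrix):
        return self.l1 * K.sum(K.abs(weight_matrix))

    def get_config(self):
        return {'l1': float(self.l1)}

# model.add(Dense(64, input_dim=64, kernel_regularizer=MyL1Regularizer(0.01)))
```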
| keras-docs-ja/sources/regularizers.md/0 | {
"file_path": "keras-docs-ja/sources/regularizers.md",
"repo_id": "keras-docs-ja",
"token_count": 810
} | 87 |
# Korean translation of the Keras documentation
This is the repository for the Korean-language `.md` sources files of [keras.io](https://keras.io).
Existing files in `sources/` should be edited in-line.
---
# 케라스 공식 문서 한국어판
케라스 공식 문서의 한국어판입니다. 이미 딥러닝에 익숙한 연구자 및 개발자 외에도 처음 딥러닝을 접하는 사용자들이 최대한 쉽게 이해하고 사용할 수 있도록 그 의미와 용법, 용례가 정확하고 명료하게 그리고 최대한 자연스러운 문장으로 나타나도록 작성되었습니다. :open_book::writing_hand::full_moon_with_face:
## 번역 가이드라인
- 모든 번역문은 **한국어 정서법**을 준수합니다.
- 번역은 문서화 내에 있는 본문 내용과 코드 주석들을 대상으로 합니다.
- 번역시 문장 끝에 붙는 격식체는 '-ㅂ니다'체를 따르며 비속어나 반말은 쓰지 않습니다.
- 큰 따옴표나 작은 따옴표는(',") 특수문자를 사용하지 않고 기본적으로 제공된 것을 사용합니다.
- 코드 강조(syntax highlight) 뒤에 조사가 붙는 경우, 공백을 넣지 않습니다(e.g. `model.fit()`을 실행하면).
- 키워드를 번역할 때 아래에 있는 **작성 규칙** 및 **용어 통일안**을 최우선으로 사용합니다.
- 과한 복문의 경우 단문으로 나누어서 씁니다.
- 원문 내용이 불충분한 경우 원문이 전달하고자 하는 내용을 충실히 전달하는 범위 내에서 사용자가 이해할 수 있도록 간략한 설명을 보충합니다.
- 번역은 다른 언어로 된 문서의 의미를 이해하고 한국어로 다시 표현하는 것이니 번역체는 자제해 주시기 바랍니다(~~우리는 한다 번역을~~).
---
## 작성 규칙
- 용어 번역의 경우 문서 내에서 처음 나온 경우에 한해 subscript로 원어를 병행표기합니다. (예: 층<sub>layer</sub>)
- 발음만 한글로 옮긴 경우 subscript는 생략합니다. (예: 스트라이드)
- 특수한 경우를 제외하면 subscript는 소문자로 작성합니다. (특수한 경우: 1. 대문자 고유명사 및 대문자 약칭, 2. 제목의 경우 관사와 접속사, 전치사를 제외한 단어와 제목 첫 단어의 첫글자는 대문자로 작성)
- list, dict 등 파이썬 기본 자료형의 경우 발음대로 표기하고 원어는 병기하지 않습니다.
- int, float, integer 등 자료형 키워드/단어의 경우
- 문장 내에 등장하는 경우 한국어로 번역합니다. (예: "~ is tuple of integers" → "~는 정수형 튜플입니다.")
- argument등 변수 설명에서 입력값의 자료형을 나타내는 경우 highlight로 표시하고 파이썬 자료형 표기대로 적습니다. (예: __X__: Integer, → `int`.)
- 문장 끝의 colon(:)은 마침표로 대체합니다.
- 문장 끝의 semicolon(;)은 문장을 두 개로 분리하고 필요한 경우 적합한 접속사를 추가합니다.
- Keras를 제외한 모든 API 및 서비스 등의 이름(TensorFlow, NumPy, CNTK, Amazon, Google 등)은 원문 그대로 사용합니다
- 함수 인자 설명시 [__인자__: `data type`, 설명 내용, 기본값 ]의 형식을 따릅니다. (예: __batch_size__: `int` 혹은 `None`. 손실로부터 그래디언트를 구하고 가중치를 업데이트하는 과정 한 번에 사용할 표본의 개수입니다. 기본값은 `32`입니다.)
- **Raises**란의 경우 **오류**로 번역하며, 본문은 "(~하는 경우, ~하면, ~가) 발생합니다."로 정리합니다.
---
## 용어 통일안
- 이하 통일안은 케라스 코리아 번역팀의 논의를 거쳐 합의된 표현의 목록입니다.
- 통일안 선정은 다음과 같은 리소스를 참고하였습니다.
- [국립국어원 언어정보 나눔터 언어 정보화 용어](https://ithub.korean.go.kr/user/word/wordTermManager.do)
- [국립국어원 표준국어대사전](https://stdict.korean.go.kr/main/main.do) / [우리말샘](https://opendict.korean.go.kr/main)
- [한국정보통신기술협회 정보통신용어사전](http://word.tta.or.kr/main.do)
- [대한수학회 수학용어집](http://www.kms.or.kr/mathdict/list.html?key=ename&keyword=norm)
- [한국어 위키백과](https://ko.wikipedia.org)
- 참조대상이 없는 어휘의 경우 의미를 살리되 보편적으로 사용되는 표현을 우선 선정하였습니다. 서로 다른 대상을 가리키는 번역 어휘가 중복되어 쓰이던 경우 최대한 가까운 새로운 어휘로 대체하였습니다.
- 용어집은 새로 도출한 합의안과 함께 개정됩니다.
| English | 한국어 |
|:-------------------|:-----------------------|
| -er| ~화 함수 / 함수|
| 1--9| 1--9|
| accuracy| 정확도|
| argument| 인자|
| (artificial) neural network| (인공) 신경망|
| augmenter| 증강 함수|
| Average Pooling| 평균 풀링|
| axis| 축|
| batch| 배치|
| bias| 편향|
| binary classification| 이진 분류|
| cache| 캐시|
| callback| 콜백|
| cell state| 셀 상태|
| channel| 채널|
| checkpoint| 체크포인트|
| class| 클래스|
| classification| 분류|
| compile| 컴파일|
| constraint| 제약|
| convolutional neural network (CNN)| 합성곱 신경망|
| corpus| 말뭉치|
| dense layer| 완전연결층|
| dimension| 차원|
| dot product| 내적|
| dropout| 드롭아웃|
| element-wise | 원소별|
| embedding| 임베딩|
| encoding| 인코딩|
| epoch| 에폭 (서술적으로 쓸 때는 'n회 반복')|
| factor| 값/요인/요소|
| fully-connected, densely connected| 완전 연결|
| global| 전역|
| generator| 제너레이터|
| gradient| 그래디언트|
| gradient ascent| 경사상승법|
| gradient descent| 경사하강법|
| hidden unit| 은닉 유닛|
| hidden layer| 은닉 층|
| hidden state| 은닉 상태|
| hyperparameter| 하이퍼파라미터|
| identity matrix| 단위 행렬|
| index| 인덱스 (개별 index의 묶음 전체를 가리킬 때는 '목록')|
| input| 입력/입력값|
| instance| 인스턴스|
| initialization| 초기값 생성|
| initializer| 초기화 함수|
| keras| 케라스|
| kernel| 커널|
| label| 레이블|
| layer| 층|
| learning rate| 학습률|
| learning rate decay| 학습률 감소|
| locally| 부분 연결|
| loss function| 손실 함수|
| LSTM| LSTM|
| MaxPooling| 최댓값 풀링|
| mean squared error (MSE)| 평균 제곱 오차(법)|
| metric| (평가) 지표 (문맥에 따라 유연하게 사용)|
| mini-batch| 미니 배치|
| model| 모델|
| momentum| 모멘텀|
| multi-class classification| 다중 분류|
| multilayer perceptron (MLP)| 다층 퍼셉트론|
| neuron| 뉴런|
| node| 노드|
| noise| 노이즈|
| non-negativity| 음이 아닌 ~|
| norm| 노름|
| normalization| 정규화|
| normalize | 정규화하다|
| note| 참고|
| objective function| 목적 함수|
| one-hot encoding| 원-핫 인코딩|
| optimizer| 최적화 함수|
| output| 출력(값)|
| padding| 패딩|
| parameter| (함수의)매개변수|
| parameter| (모델의)파라미터 (가중치와 편향을 함께 이르는 말)|
| placeholder| 플레이스홀더|
| penalty| 페널티|
| pooling| 풀링|
| precision| 정밀도|
| queue| 대기열|
| recurrent neural network (RNN)| 순환 신경망|
| reference| 참고|
| regression| 회귀 분석|
| regression(-ive) model| 회귀 모델|
| regularize(-er)| 규제화/규제 함수|
| repository| 저장소|
| reshape| 형태바꾸기|
| return| 반환값|
| root mean squared error (RMSE)| 평균 제곱근 오차(법)|
| sample| 표본|
| sequence (-tial)| 순서형|
| set| 세트|
| shape| 형태|
| stack| 층을 쌓다|
| stateful| 상태 저장|
| stochastic gradient descent| 확률적 경사하강법|
| stride| 스트라이드|
| target| 목표(값)|
| temporal| 시계열|
| tensor| 텐서|
| test| 시험|
| text| 텍스트|
| timestep| 시간 단계/순서|
| token| 토큰|
| train| (데이터의 경우) 훈련 세트 / (동작의 경우) 학습시키다|
| utility| 도구|
| validation| 검증|
| weight| 가중치|
| wrapper| 래퍼|
| keras-docs-ko/README.md/0 | {
"file_path": "keras-docs-ko/README.md",
"repo_id": "keras-docs-ko",
"token_count": 5771
} | 88 |
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L241)</span>
### Conv1D
```python
keras.layers.Conv1D(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
1D 합성곱 층<sub>convolution layer</sub>(예: 시계열<sub>temporal</sub> 합성곱).
이 층은 하나의 공간적(혹은 시간적) 차원에서 입력 텐서와 합성곱되어 출력 텐서를 만드는 합성곱 커널을 생성합니다. `use_bias`가 `True`인 경우, 편향<sub>bias</sub>벡터를 생성해 출력 텐서에 더합니다. `activation`이 `None`이 아닌 경우 이 또한 출력 텐서에 적용됩니다.
모델의 가장 처음에 올 경우 `input_shape` 인자를 통해 입력값의 형태를 함께 지정해야 합니다. `input_shape`는 `None` 또는 정수로 된 튜플로 배치 축<sub>axis</sub>은 포함시키지 않습니다. 예를 들어, 128개의 요인<sub>feature</sub>과 10개의 시간 단계로 이루어진 시계열 데이터에 `Conv1D`를 적용하고자 하는 경우 `input_shape=(10, 128)`, `data_format='channels_last'`로 지정합니다. `data_format` 인자를 `'channels_last'`로 정하는 이유는 일반적으로 합성곱 신경망에서 다루는 채널의 위치를 가장 마지막에 두도록 함으로써 층이 입력값의 마지막에 있는 요인 차원을 일종의 채널처럼 취급하게끔 하고 합성곱의 필터가 시계열 차원을 따라서 적용되게끔 하기 위함입니다. 만약 시계열의 길이가 표본 또는 배치별로 다를 경우 `input_shape=(None, 128)`로 지정합니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 개수를 나타냅니다.
- __kernel_size__: `int` 또는 `int`로 이루어진 튜플/리스트. 1D 합성곱 필터의 크기를 지정합니다.
- __strides__: `int` 또는 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. 기본값은 `1`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'`, `'causal'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'causal'`은 인과적<sub>causal</sub> 혹은 팽창 인과적<sub>dilated causal</sub> 합성곱을 수행하게끔 합니다. 인과적 합성곱은 `t`시점의 결과값이 오직 `t`보다 이전 시점의 입력값에만 영향을 받도록 하는 합성곱 방식입니다. 이 경우 입출력의 길이를 맞추기 위해 `0`값으로 패딩을 처리하게 되며, 시간 순서를 지켜야 하는 데이터를 모델링해야 하는 경우에 유용합니다. 참고: [WaveNet: A Generative Model for Raw Audio, section 2.1](https://arxiv.org/abs/1609.03499)
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, steps, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, steps)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다.
- __dilation_rate__: `int` 또는 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`(batch, steps, channels)` 형태의 3D 텐서.
__출력 형태__
`(batch, new_steps, filters)` 형태의 3D 텐서. 패딩이나 스트라이드로 인해 `steps`값이 바뀔 수 있습니다.
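아래는 가상의 입력 형태를 가정한 `Conv1D`의 간단한 사용 예시입니다(형태와 값은 설명을 위한 가정입니다).
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Conv1D

# (batch, steps, channels) = (4, 10, 128) 형태의 가상 입력
x = np.random.rand(4, 10, 128)

model = Sequential()
model.add(Conv1D(32, 3, activation='relu', input_shape=(10, 128)))
print(model.output_shape)      # (None, 8, 32): padding='valid'이므로 steps가 줄어듭니다.
print(model.predict(x).shape)  # (4, 8, 32)
```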
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L368)</span>
### Conv2D
```python
keras.layers.Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
2D 합성곱 층(예: 이미지 데이터에서 이루어지는 공간 차원의 합성곱).
이 층은 입력 텐서와 합성곱되어 출력 텐서를 만드는 합성곱 커널을 생성합니다. `use_bias`가 `True`인 경우, 편향 벡터를 생성해 출력 텐서에 더합니다. `activation`이 `None`이 아닌 경우 이 또한 출력 텐서에 적용됩니다.
모델의 가장 처음에 올 경우 `input_shape`인자를 통해 입력값의 형태를 함께 지정해야 합니다. `input_shape`는 `None`또는 정수로 된 튜플로 배치 축은 포함시키지 않습니다. 예를 들어 `data_format='channels_last'`인 128x128 RGB 이미지의 경우 `input_shape=(128, 128, 3)`이 됩니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 개수를 나타냅니다.
- __kernel_size__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 2D 합성곱 필터의 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 열방향, 행방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'`, `'causal'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다([참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860)).
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __dilation_rate__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)` 형태의 4D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_rows, new_cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_rows, new_cols, filters)` 형태의 4D 텐서.
패딩으로 인해 `rows`와 `cols` 값이 바뀔 수 있습니다.
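아래는 가상의 RGB 이미지 입력을 가정한 `Conv2D`의 간단한 사용 예시입니다(형태와 값은 설명을 위한 가정입니다).
```python
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D

# (batch, rows, cols, channels) = (2, 128, 128, 3) 형태의 가상 입력
x = np.random.rand(2, 128, 128, 3)

model = Sequential()
model.add(Conv2D(16, (3, 3), padding='same', activation='relu',
                 input_shape=(128, 128, 3)))
model.add(Conv2D(32, (3, 3), strides=(2, 2), padding='valid'))
print(model.output_shape)      # (None, 63, 63, 32)
print(model.predict(x).shape)  # (2, 63, 63, 32)
```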
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1421)</span>
### SeparableConv1D
```python
keras.layers.SeparableConv1D(filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None)
```
깊이별 분리 1D 합성곱<sub>Depthwise Separable 1D Convolution</sub>.
먼저 입력값의 각 채널(깊이)별로 따로 합성곱<sub>depthwise convolution</sub>을 한 뒤에, 다시 합쳐진 출력값의 각 위치<sub>point</sub>에서 채널 차원에 대한 합성곱<sub>pointwise convolution</sub>을 하여 앞의 결과를 하나로 묶습니다. 이때 `depth_multiplier`인자는 채널별 합성곱 단계에서 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다. 직관적으로 볼 때 분리 합성곱은 합성곱 필터를 두 개의 작은 필터로 분해하여 수행하는 합성곱 또는 극단적 형태의 인셉션 블록으로 이해할 수 있습니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 갯수를 나타냅니다.
- __kernel_size__: `int` 또는 `int`로 이루어진 튜플/리스트. 1D 합성곱 필터의 크기를 지정합니다.
- __strides__: `int` 또는 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, steps, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, steps)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다.
- __dilation_rate__: `int` 또는 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __depth_multiplier__: 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다. 출력 채널의 총 개수는 `filters_in * depth_multiplier`가 됩니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __depthwise_initializer__: 깊이별 필터 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __pointwise_initializer__: 위치별 필터 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __depthwise_regularizer__: 깊이별 필터 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __pointwise_regularizer__: 위치별 필터 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __depthwise_constraint__: 깊이별 필터 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __pointwise_constraint__: 위치별 필터 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, steps)` 형태의 3D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, steps, channels)` 형태의 3D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_steps)` 형태의 3D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_steps, filters)` 형태의 3D 텐서.
패딩이나 스트라이드로 인해 `new_steps` 값이 바뀔 수 있습니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1553)</span>
### SeparableConv2D
```python
keras.layers.SeparableConv2D(filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None)
```
깊이별 분리 2D 합성곱<sub>Depthwise Separable 2D Convolution</sub>.
먼저 입력값의 각 채널(깊이)별로 따로 공간 차원의 합성곱을 한 뒤에, 다시 합쳐진 출력값의 각 위치에서 채널 차원에 대한 합성곱을 하여 앞의 결과를 하나로 묶습니다. 이때 `depth_multiplier`인자는 채널별 합성곱 단계에서 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다. 직관적으로 볼 때 분리 합성곱은 합성곱 필터를 두 개의 작은 필터로 분해하여 수행하는 합성곱 또는 극단적 형태의 인셉션 블록으로 이해할 수 있습니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 갯수를 나타냅니다.
- __kernel_size__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 2D 합성곱 필터의 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 열방향, 행방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'`, `'causal'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다 [참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860).
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __dilation_rate__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __depth_multiplier__: 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다. 출력 채널의 총 개수는 `filters_in * depth_multiplier`가 됩니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __depthwise_initializer__: 깊이별 필터 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __pointwise_initializer__: 위치별 필터 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __depthwise_regularizer__: 깊이별 필터 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __pointwise_regularizer__: 위치별 필터 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __depthwise_constraint__: 깊이별 필터 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __pointwise_constraint__: 위치별 필터 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)` 형태의 4D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_rows, new_cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_rows, new_cols, filters)` 형태의 4D 텐서.
패딩으로 인해 `rows`와 `cols` 값이 바뀔 수 있습니다.
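아래는 `SeparableConv2D`와 일반 `Conv2D`의 파라미터 수를 비교하는 간단한 예시입니다(형태는 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import SeparableConv2D

model = Sequential()
model.add(SeparableConv2D(64, (3, 3), padding='same',
                          input_shape=(32, 32, 16)))
model.summary()
# 깊이별 필터: 3*3*16 = 144, 위치별 필터: 1*1*16*64 = 1024, 편향: 64
# 총 1,232개 파라미터 (같은 설정의 일반 Conv2D라면 3*3*16*64 + 64 = 9,280개)
```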
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1694)</span>
### DepthwiseConv2D
```python
keras.layers.DepthwiseConv2D(kernel_size, strides=(1, 1), padding='valid', depth_multiplier=1, data_format=None, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None)
```
깊이별 2D 합성곱.
깊이별 분리 합성곱의 첫 단계만을 수행하는 층입니다. 입력값의 각 채널(깊이)별로 따로 공간 차원의 합성곱을 수행합니다. 이때 `depth_multiplier`인자는 채널별 합성곱 단계에서 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다.
__인자__
- __kernel_size__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 2D 합성곱 필터의 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 열방향, 행방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'`, `'causal'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다 [참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860).
- __depth_multiplier__: 각 입력 채널당 몇 개의 출력 채널을 생성할 것인지 결정합니다. 출력 채널의 총 개수는 `filters_in * depth_multiplier`가 됩니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __depthwise_initializer__: 깊이별 필터 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __depthwise_regularizer__: 깊이별 필터 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __depthwise_constraint__: 깊이별 필터 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)` 형태의 4D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_rows, new_cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_rows, new_cols, filters)` 형태의 4D 텐서.
패딩으로 인해 `rows`와 `cols` 값이 바뀔 수 있습니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L628)</span>
### Conv2DTranspose
```python
keras.layers.Conv2DTranspose(filters, kernel_size, strides=(1, 1), padding='valid', output_padding=None, data_format=None, dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
전치된<sub>transposed</sub> 합성곱 층(디컨볼루션<sub>deconvolution</sub>으로 불리기도 합니다).
일반적인 컨볼루션의 역방향으로 변환을 하고자 할 때 전치 합성곱을 사용합니다. 다시 말해, 어떤 특정한 합성곱 연산이 있을 때 그 출력 형태로부터 입력 형태를 향해 합성곱의 연결구조를 유지하면서 거슬러 올라가고자 할 때 쓸 수 있습니다.
모델의 가장 처음에 올 경우 `input_shape` 인자를 통해 입력값의 형태를 함께 지정해야 합니다. `input_shape`는 정수로 된 튜플로 배치 축은 포함시키지 않습니다. 예를 들어 `data_format='channels_last'`인 128x128 RGB 이미지의 경우 `input_shape=(128, 128, 3)`이 됩니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 개수를 나타냅니다.
- __kernel_size__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 2D 합성곱 필터의 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 행방향과 열방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다 [참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860).
- __output_padding__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 출력 텐서의 행방향과 열방향 패딩을 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다. 각 차원에 주어진 출력 패딩 값의 크기는 같은 차원에서의 스트라이드 값보다 작아야 합니다. 기본값인 `None`으로 설정할 경우 출력 크기는 자동으로 유추됩니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __dilation_rate__: `int` 또는 2개의 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)` 형태의 4D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_rows, new_cols)` 형태의 4D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_rows, new_cols, filters)` 형태의 4D 텐서.
패딩으로 인해 `rows`와 `cols`값이 바뀔 수 있습니다.
`output_padding`을 지정한 경우는 다음 식을 따릅니다.
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0]
- 2 * padding[0] + output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1]
- 2 * padding[1] + output_padding[1])
```
__참고__
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
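아래는 `Conv2D`로 줄어든 공간 차원을 `Conv2DTranspose`로 다시 키우는 간단한 예시입니다(형태는 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose

model = Sequential()
model.add(Conv2D(16, (3, 3), strides=(2, 2), padding='same',
                 input_shape=(28, 28, 1)))          # (None, 14, 14, 16)
model.add(Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same'))
print(model.output_shape)  # (None, 28, 28, 1)
```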
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L499)</span>
### Conv3D
```python
keras.layers.Conv3D(filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
3D 합성곱 층 (예: 부피에 대한 공간적 합성곱)
이 층은 입력 텐서와 합성곱되어 출력 텐서를 만드는 합성곱 커널을 생성합니다. `use_bias`가 `True`인 경우, 편향 벡터를 생성해 출력 텐서에 더합니다. `activation`이 `None`이 아닌 경우 이 또한 출력 텐서에 적용됩니다.
모델의 가장 처음에 올 경우 `input_shape` 인자를 통해 입력값의 형태를 함께 지정해야 합니다. `input_shape`는 정수로 된 튜플로 배치 축은 포함시키지 않습니다. 예를 들어 `data_format='channels_last'`이며 채널이 한 개인 128x128x128 입체의 경우 `input_shape=(128, 128, 128, 1)`이 됩니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 갯수를 나타냅니다.
- __kernel_size__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 3D 합성곱 필터의 깊이 및 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다 [참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860).
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __dilation_rate__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 원소 사이를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, conv_dim1, conv_dim2, conv_dim3)` 형태의 5D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, conv_dim1, conv_dim2, conv_dim3, channels)` 형태의 5D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_conv_dim1, new_conv_dim2, new_conv_dim3)` 형태의 5D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_conv_dim1, new_conv_dim2, new_conv_dim3, filters)` 형태의 5D 텐서.
패딩으로 인해 `new_conv_dim1`, `new_conv_dim2`와 `new_conv_dim3`값이 바뀔 수 있습니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L901)</span>
### Conv3DTranspose
```python
keras.layers.Conv3DTranspose(filters, kernel_size, strides=(1, 1, 1), padding='valid', output_padding=None, data_format=None, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None)
```
전치된 합성곱 층(디컨볼루션으로 불리기도 합니다).
일반적인 컨볼루션의 역방향으로 변환을 하고자 할 때 전치 합성곱을 사용합니다. 다시 말해, 어떤 특정한 합성곱 연산이 있을 때 그 출력 형태로부터 입력 형태를 향해 합성곱의 연결구조를 유지하면서 거슬러 올라가고자 할 때 쓸 수 있습니다.
모델의 가장 처음에 올 경우 `input_shape` 인자를 통해 입력값의 형태를 함께 지정해야 합니다. `input_shape`는 정수로 된 튜플로 배치 축은 포함시키지 않습니다. 예를 들어 `data_format='channels_last'`이며 채널이 3개인 128x128x128 입체의 경우 `input_shape=(128, 128, 128, 3)`이 됩니다.
__인자__
- __filters__: `int`. 출력할 결과값의 차원으로 합성곱 필터의 갯수를 나타냅니다.
- __kernel_size__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 3D 합성곱 필터의 깊이 및 행방향과 열방향 크기를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다.
- __strides__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 합성곱 필터의 스트라이드를 지정합니다. `int` 하나를 입력할 경우 모든 방향의 스트라이드를 동일하게 지정합니다. 기본값은 `(1, 1)`입니다. 만약 팽창 합성곱을 사용하고자 할 때 스트라이드의 크기를 `1`보다 크게 지정했다면 `dilation_rate` 인자는 반드시 `1`로 맞춰야 합니다.
- __padding__: `str`. 입력값의 패딩처리 여부를 `'valid'` 또는 `'same'` 가운데 하나로 지정합니다(대소문자 무관). `'valid'`는 패딩이 없는 경우, `'same'`은 출력의 형태를 입력과 같게 맞추고자 하는 경우에 사용합니다. `'same'`의 경우 `strides`가 `1`이 아닐 때, 사용하는 백엔드에 따라 값이 조금씩 달라질 수 있습니다 [참고](https://github.com/keras-team/keras/pull/9473#issuecomment-372166860).
- __output_padding__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 출력 텐서의 깊이 및 행방향과 열방향 패딩을 지정합니다. `int` 하나를 입력할 경우 모든 방향의 크기를 동일하게 지정합니다. 각 차원에 주어진 출력 패딩 값의 크기는 같은 차원에서의 스트라이드 값보다 작아야 합니다. 기본값인 `None`으로 설정할 경우 출력 크기는 자동으로 유추됩니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, depth, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, depth, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __dilation_rate__: `int` 또는 3개의 `int`로 이루어진 튜플/리스트. 팽창 합성곱 필터의 팽창비율을 결정합니다. 팽창 합성곱은 원래 조밀한 형태 그대로 입력에 적용되는 합성곱 필터를 각 원소 사이를 각 방향으로 원소 사이의 간격을 띄우는 방식으로 팽창시켜 성긴 대신 보다 넓은 영역에 적용될 수 있도록 변형한 합성곱입니다. 자세한 내용은 [Multi-Scale Context Aggregation by Dilated Convolutions](https://arxiv.org/abs/1511.07122v3)을 참고하십시오. 기본값은 `(1, 1)`이며, 현재 버전에서는 `dilation_rate`가 `1`보다 큰 경우 `1`보다 큰 `strides`를 지정할 수 없습니다.
- __activation__: 사용할 활성화 함수입니다. 기본값은 `None`으로, 별도로 지정하지 않으면 전달할 경우 활성화 함수가 적용되지 않습니다(`a(x) = x`). 참고: [활성화 함수](../activations.md)
- __use_bias__: `bool`. 층의 연산에 편향을 적용할지 여부를 결정합니다.
- __kernel_initializer__: `kernel` 가중치 행렬의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __bias_initializer__: 편향 벡터의 초기화 함수를 결정합니다. 참고: [초기화 함수](../initializers.md)
- __kernel_regularizer__: `kernel` 가중치 행렬에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __bias_regularizer__: 편향 벡터에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __activity_regularizer__: 층의 출력값에 적용할 규제 함수를 결정합니다. 참고: [규제 함수](../regularizers.md)
- __kernel_constraint__: `kernel` 가중치 행렬에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
- __bias_constraint__: 편향 벡터에 적용할 제약을 결정합니다. 참고: [제약](../constraints.md)
__입력 형태__
`data_format`이 `'channels_first'`이면 `(batch, channels, depth, rows, cols)` 형태의 5D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, depth, rows, cols, channels)` 형태의 5D 텐서.
__출력 형태__
`data_format`이 `'channels_first'`이면 `(batch, filters, new_depth, new_rows, new_cols)` 형태의 5D 텐서.
`data_format`이 `'channels_last'`이면 `(batch, new_depth, new_rows, new_cols, filters)` 형태의 5D 텐서.
패딩으로 인해, `depth`, `rows`와 `cols`값이 바뀔 수 있습니다.
`output_padding`을 지정한 경우는 다음 식을 따릅니다.
```
new_depth = ((depth - 1) * strides[0] + kernel_size[0]
- 2 * padding[0] + output_padding[0])
new_rows = ((rows - 1) * strides[1] + kernel_size[1]
- 2 * padding[1] + output_padding[1])
new_cols = ((cols - 1) * strides[2] + kernel_size[2]
- 2 * padding[2] + output_padding[2])
```
__참고__
- [A guide to convolution arithmetic for deep learning](
https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional Networks](
https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2376)</span>
### Cropping1D
```python
keras.layers.Cropping1D(cropping=(1, 1))
```
1D 입력(예: 시계열 데이터)용 크롭핑 층.
시간 차원(축 1)을 따라서 잘라냅니다.
__인자__
- __cropping__: `int` 혹은 2개의 `int`로 이루어진 튜플. 각각 크롭핑할 차원(첫번째 축)의 시작과 끝에서 얼마만큼을 잘라낼 것인지 정합니다. `int` 하나만 입력할 경우 시작과 끝 모두 같은 크기가 적용됩니다. 기본값은 `(1, 1)`입니다.
__입력 형태__
`(batch, axis_to_crop, features)` 형태의 3D 텐서.
__출력 형태__
`(batch, cropped_axis, features)` 형태의 3D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2408)</span>
### Cropping2D
```python
keras.layers.Cropping2D(cropping=((0, 0), (0, 0)), data_format=None)
```
2D 입력(예: 이미지)용 크롭핑 층.
공간 차원(높이와 넓이)을 따라서 잘라냅니다.
__인자__
- __cropping__: `int`, 2개의 `int`로 이루어진 튜플 혹은 2개의 2개의 `int`로 이루어진 튜플.
- 하나의 `int`인 경우 행방향과 열방향에 동일한 크기의 대칭 크롭핑이 적용됩니다.
- 2개 `int`의 튜플인 경우 각각 상하, 좌우에 똑같이 잘라낼 행과 열 값을 나타냅니다 `(symmetric_height_crop, symmetric_width_crop)`.
- 2개 `int`의 튜플 2개로 이루어진 튜플인 경우 `((top_crop, bottom_crop), (left_crop, right_crop))`을 나타냅니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
다음과 같은 형태의 4D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)`.
__출력 형태__
다음 형태의 4D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, cropped_rows, cropped_cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, cropped_rows, cropped_cols)`.
__예시__
```python
# 2D 인풋 이미지 혹은 특성 맵을 잘라냅니다.
model = Sequential()
model.add(Cropping2D(cropping=((2, 2), (4, 4)),
input_shape=(28, 28, 3)))
# 현재 model.output_shape == (None, 24, 20, 3)
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Cropping2D(cropping=((2, 2), (2, 2))))
# 현재 model.output_shape == (None, 20, 16, 64)
```
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2491)</span>
### Cropping3D
```python
keras.layers.Cropping3D(cropping=((1, 1), (1, 1), (1, 1)), data_format=None)
```
3D 입력(예: 공간적 혹은 시공간적)용 크롭핑 층.
__인자__
- __cropping__: `int`, 3개의 `int`로 이루어진 튜플 혹은 3개 2개의 `int`로 이루어진 튜플.
- 하나의 `int`인 경우 깊이, 행방향, 열방향에 동일한 크기의 크롭핑이 적용됩니다.
- 3개 `int`의 튜플인 경우 각각 깊이, 행방향, 열방향에 적용될 값을 나타냅니다. `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- 2개 `int`의 튜플 3개로 이루어진 튜플인 경우 `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop), (left_dim3_crop, right_dim3_crop))`을 나타냅니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
다음 형태의 5D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop, depth)`.
- `data_format`이 `'channels_first'`이면 `(batch, depth, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop)`.
__출력 형태__
다음 형태의 5D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, first_cropped_axis, second_cropped_axis, third_cropped_axis, depth)`.
- `data_format`이 `'channels_first'`이면 `(batch, depth, first_cropped_axis, second_cropped_axis, third_cropped_axis)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1944)</span>
### UpSampling1D
```python
keras.layers.UpSampling1D(size=2)
```
1D 입력용 업샘플링 층.
시간 축을 따라 각 시간 단계를 `size`로 지정한 만큼 반복합니다.
__인자__
- __size__: `int`. 반복할 횟수입니다.
__입력 형태__
`(batch, steps, features)` 형태의 3D 텐서를 입력합니다.
__출력 형태__
`(batch, upsampled_steps, features)` 형태의 3D 텐서를 출력합니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L1974)</span>
### UpSampling2D
```python
keras.layers.UpSampling2D(size=(2, 2), data_format=None, interpolation='nearest')
```
2D 입력용 업샘플링 층.
데이터의 행과 열을 각각 `size[0]`과 `size[1]`회씩 반복합니다.
__인자__
- __size__: `int`, 혹은 2개의 `int`로 이루어진 튜플. 행과 열에서 반복할 횟수입니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
- __interpolation__: `str`. `nearest` 또는 `bilinear`를 지정합니다. `bilinear` 업스케일링의 경우 CNTK는 아직 지원하지 않으며, Theano의 경우 `size=(2, 2)`에 한해서 지원합니다.
__입력 형태__
다음 형태의 4D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)`.
__출력 형태__
다음 형태의 4D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, upsampled_rows, upsampled_cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, upsampled_rows, upsampled_cols)`.
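아래는 `UpSampling2D`가 행과 열을 반복하여 크기를 키우는 것을 확인하는 간단한 예시입니다(형태는 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import UpSampling2D

model = Sequential()
model.add(UpSampling2D(size=(2, 2), input_shape=(8, 8, 3)))
print(model.output_shape)  # (None, 16, 16, 3)
```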
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2032)</span>
### UpSampling3D
```python
keras.layers.UpSampling3D(size=(2, 2, 2), data_format=None)
```
3D 입력용 업샘플링 층.
데이터의 첫 번째, 두 번째, 세 번째 차원을 각각 `size[0]`, `size[1]`, `size[2]`회씩 반복합니다.
__인자__
- __size__: `int`, 혹은 3개의 `int`로 이루어진 튜플. 각각 첫 번째, 두 번째, 세 번째 차원에서 반복할 횟수입니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
다음 형태의 5D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, dim1, dim2, dim3, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, dim1, dim2, dim3)`.
__출력 형태__
다음 형태의 5D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, upsampled_dim1, upsampled_dim2, upsampled_dim3, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, upsampled_dim1, upsampled_dim2, upsampled_dim3)`.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2124)</span>
### ZeroPadding1D
```python
keras.layers.ZeroPadding1D(padding=1)
```
1D 입력(예: 시계열 데이터)용 제로 패딩 층.
__인자__
- __padding__: `int`, 혹은 2개의 `int`로 이루어진 튜플/딕셔너리.
- 하나의 `int`인 경우 적용할 차원(축 1)의 시작과 끝에 채울 `0` 값의 개수.
- 2개의 `int` 튜플인 경우 각각 시작과 끝에(`(left_pad, right_pad)`) 채울 `0` 값의 개수.
__입력 형태__
`(batch, axis_to_pad, features)` 형태의 3D 텐서.
__출력 형태__
`(batch, padded_axis, features)` 형태의 3D 텐서.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2159)</span>
### ZeroPadding2D
```python
keras.layers.ZeroPadding2D(padding=(1, 1), data_format=None)
```
2D 입력(예: 이미지 데이터)용 제로-패딩 층.
이 레이어는 이미지 텐서의 상하좌우에 `0`으로 이루어진 행과 열을 더할 수 있습니다.
__인자__
- __padding__: `int`, 2개의 `int`로 이루어진 튜플, 혹은 2개의 `int`로 이루어진 튜플 2개로 이루어진 튜플.
- 하나의 `int`인 경우 행방향과 열방향에 같은 크기의 패딩이 적용됩니다.
- 2개의 `int` 튜플인 경우 각각 행방향, 열방향에 적용될 값을 나타냅니다. `(symmetric_height_pad, symmetric_width_pad)`.
- 2개 정수의 튜플 2개로 이루어진 튜플인 경우 `((top_pad, bottom_pad), (left_pad, right_pad))`에 적용될 값을 나타냅니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
다음 형태의 4D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, rows, cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, rows, cols)`.
__출력 형태__
다음 형태의 4D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, padded_rows, padded_cols, channels)`.
- `data_format`이 `'channels_first'`이면 `(batch, channels, padded_rows, padded_cols)`.
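아래는 상하에 1행씩, 좌우에 2열씩 0을 채우는 간단한 예시입니다(형태는 설명을 위한 가정입니다).
```python
from keras.models import Sequential
from keras.layers import ZeroPadding2D

model = Sequential()
model.add(ZeroPadding2D(padding=((1, 1), (2, 2)), input_shape=(28, 28, 3)))
print(model.output_shape)  # (None, 30, 32, 3)
```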
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/layers/convolutional.py#L2235)</span>
### ZeroPadding3D
```python
keras.layers.ZeroPadding3D(padding=(1, 1, 1), data_format=None)
```
3D 입력(예: 공간적, 시공간적 데이터)용 제로 패딩 층.
__인자__
- __padding__: `int`, 3개의 `int`로 튜플, 혹은 2개 정수의 튜플 3개로 이루어진 튜플.
- 하나의 `int`인 경우 깊이, 행방향, 열방향에 동일한 크기의 패딩이 적용됩니다.
- 3개 `int`의 튜플인 경우 각각 깊이, 행방향, 열방향에 적용될 값을 나타냅니다. `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.
- 2개 `int`의 튜플 3개로 이루어진 튜플인 경우 `((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim3_pad))`를 나타냅니다.
- __data_format__: `str`. 입력 데이터의 차원 순서를 정의하는 인자로 `'channels_last'`(기본값) 또는 `'channels_first'` 가운데 하나를 지정합니다. 입력 형태가 `(batch, height, width, channels)`로 채널 정보가 마지막에 올 경우 `'channels_last'`를, `(batch, channels, height, width)`로 채널 정보가 먼저 올 경우 `'channels_first'`를 선택합니다. 케라스 설정 `~/.keras/keras.json`파일에 있는 `image_data_format`값을 기본값으로 사용하며, 해당 값이 없는 경우 자동으로 `'channels_last'`를 기본값으로 적용합니다.
__입력 형태__
다음 형태의 5D 텐서를 입력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad, depth)`.
- `data_format`이 `'channels_first'`이면 `(batch, depth, first_axis_to_pad, second_axis_to_pad, third_axis_to_pad)`.
__출력 형태__
다음 형태의 5D 텐서를 출력합니다.
- `data_format`이 `'channels_last'`이면 `(batch, first_padded_axis, second_padded_axis, third_padded_axis, depth)`.
- `data_format`이 `'channels_first'`이면 `(batch, depth, first_padded_axis, second_padded_axis, third_padded_axis)`.
| keras-docs-ko/sources/layers/convolutional.md/0 | {
"file_path": "keras-docs-ko/sources/layers/convolutional.md",
"repo_id": "keras-docs-ko",
"token_count": 42010
} | 89 |
## 옵티마이저의 사용법
옵티마이저는 Keras 모델을 컴파일하기 위해 필요한 두 개의 매개변수(parameter) 중 하나입니다.
```python
from keras import optimizers
model = Sequential()
model.add(Dense(64, kernel_initializer='uniform', input_shape=(10,)))
model.add(Activation('softmax'))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd)
```
옵티마이저는 위의 예제와 같이 객체를 만들어 `model.compile()`의 인자로 전달하거나, 아래와 같이 이름으로 사용할 수도 있습니다. 후자의 경우에는 해당 옵티마이저의 기본 설정이 사용됩니다.
```python
# 옵티마이저의 이름을 사용하는 경우에는
# 기본 설정이 사용됩니다.
model.compile(loss='mean_squared_error', optimizer='sgd')
```
---
## 모든 Keras 옵티마이저에 공통적인 매개변수
모든 옵티마이저는 `clipnorm`과 `clipvalue` 매개변수를 통해 그래디언트 클리핑(gradient clipping)을 조절할 수 있습니다.
```python
from keras import optimizers
# All parameter gradients will be clipped to
# a maximum norm of 1.
sgd = optimizers.SGD(lr=0.01, clipnorm=1.)
```
```python
from keras import optimizers
# All parameter gradients will be clipped to
# a maximum value of 0.5 and
# a minimum value of -0.5.
sgd = optimizers.SGD(lr=0.01, clipvalue=0.5)
```
---
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L157)</span>
### SGD
```python
keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
```
확률적 경사 하강법(Stochastic Gradient Descent, SGD) 옵티마이저.
모멘텀과 네스테로프 모멘텀(Nesterov momentum), 그리고 학습률 감소 기법(learning rate decay)을 지원합니다.
__인자__
- __lr__: 0보다 크거나 같은 float 값. 학습률.
- __momentum__: 0보다 크거나 같은 float 값.
SGD를 적절한 방향으로 가속화하며, 흔들림(진동)을 줄여주는 매개변수입니다.
- __decay__: 0보다 크거나 같은 float 값. 업데이트마다 적용되는 학습률의 감소율입니다.
- __nesterov__: 불리언. 네스테로프 모멘텀의 적용 여부를 설정합니다.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L220)</span>
### RMSprop
```python
keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
```
RMSProp 옵티마이저.
RMSProp을 사용할 때는 학습률을 제외한 모든 인자의 기본값을 사용하는 것이 권장됩니다.
일반적으로 순환 신경망(Recurrent Neural Networks)의 옵티마이저로 많이 사용됩니다.
__인자__
- __lr__: 0보다 크거나 같은 float 값. 학습률.
- __rho__: 0보다 크거나 같은 float 값.
- __epsilon__: 0보다 크거나 같은 float형 fuzz factor.
`None`인 경우 `K.epsilon()`이 사용됩니다.
- __decay__: 0보다 크거나 같은 float 값. 업데이트마다 적용되는 학습률의 감소율입니다.
__참고__
- [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L288)</span>
### Adagrad
```python
keras.optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
```
Adagrad 옵티마이저.
Adagrad는 모델 파라미터별 학습률을 사용하는 옵티마이저로,
파라미터의 값이 업데이트되는 빈도에 의해 학습률이 결정됩니다.
파라미터가 더 자주 업데이트될수록, 더 작은 학습률이 사용됩니다.
Adagrad를 사용할 때는 모든 인자의 기본값을 사용하는 것이 권장됩니다.
__인자__
- __lr__: 0보다 크거나 같은 float 값. 학습률.
- __epsilon__: 0보다 크거나 같은 float 값.
`None`인 경우 `K.epsilon()`이 사용됩니다.
- __decay__: 0보다 크거나 같은 float 값. 업데이트마다 적용되는 학습률의 감소율입니다.
__참고__
- [Adaptive Subgradient Methods for Online Learning and Stochastic
Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L353)</span>
### Adadelta
```python
keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
```
Adadelta 옵티마이저.
Adadelta는 Adagrad를 확장한 보다 견고한 옵티마이저로
과거의 모든 그래디언트를 축적하는 대신, 그래디언트 업데이트의 이동창(moving window)에 기반하여 학습률을 조절합니다.
이 방법을 사용하면 Adadelta는 많은 업데이트가 이뤄진 후 일지라도 학습을 계속할 수 있습니다.
Adagrad와 비교해 볼 때, Adadelta의 기존 버전에서는 초기 학습률을 설정할 필요가 없었습니다.
하지만 현재의 버전에서는 다른 Keras 옵티마이저들처럼 초기 학습률과 감소율을 설정할 수 있습니다.
Adadelta를 사용할 때는 모든 인자의 기본값을 사용하는 것이 권장됩니다.
__인자__
- __lr__: 0보다 크거나 같은 float 값. 초기 학습률로, 기본값은 1입니다.
기본값을 사용하는 것이 권장됩니다.
- __rho__: 0보다 크거나 같은 float 값.
학습률 감소에 쓰이는 인자로, 각 시점에 유지되는 그래디언트의 비율에 해당합니다.
- __epsilon__: 0보다 크거나 같은 float형 fuzz factor.
`None`인 경우 `K.epsilon()`이 사용됩니다.
- __decay__: 0보다 크거나 같은 float 값. 초기 학습률 감소율입니다.
__참고__
- [Adadelta - an adaptive learning rate method](
https://arxiv.org/abs/1212.5701)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L436)</span>
### Adam
```python
keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
```
Adam 옵티마이저.
매개변수들의 기본값은 논문에서 언급된 내용을 따릅니다.
__인자__
- __lr__: 0보다 크거나 같은 float 값. 학습률.
- __beta_1__: 0보다 크고 1보다 작은 float 값. 일반적으로 1에 가깝게 설정됩니다.
- __beta_2__: 0보다 크고 1보다 작은 float 값. 일반적으로 1에 가깝게 설정됩니다.
- __epsilon__: 0보다 크거나 같은 float형 fuzz factor.
`None`인 경우 `K.epsilon()`이 사용됩니다.
- __decay__: 0보다 크거나 같은 float 값. 업데이트마다 적용되는 학습률의 감소율입니다.
- __amsgrad__: 불리언. Adam의 변형인 AMSGrad의 적용 여부를 설정합니다.
AMSGrad는 "On the Convergence of Adam and Beyond" 논문에서 소개되었습니다.
__참조__
- [Adam - A Method for Stochastic Optimization](
https://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](
https://openreview.net/forum?id=ryQu7f-RZ)
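A short sketch showing how the `amsgrad` variant is switched on (the learning rate values are illustrative assumptions):
```python
from keras import optimizers

# Standard Adam with the defaults from the paper.
adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)

# The AMSGrad variant only differs by the `amsgrad` flag.
amsgrad = optimizers.Adam(lr=0.001, amsgrad=True)

# Either instance can then be passed to `model.compile(optimizer=...)`.
```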
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L527)</span>
### Adamax
```python
keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0)
```
Adamax optimizer, from Section 7 of the Adam paper.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __beta_1__: float, 0 < beta < 1. Generally close to 1.
- __beta_2__: float, 0 < beta < 1. Generally close to 1.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
- __decay__: float >= 0. Learning rate decay over each update.
__References__
- [Adam - A Method for Stochastic Optimization](
   https://arxiv.org/abs/1412.6980v8)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/optimizers.py#L605)</span>
### Nadam
```python
keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)
```
Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum, Nadam is RMSprop with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer at their default values.
__Arguments__
- __lr__: float >= 0. Learning rate.
- __beta_1__: float, 0 < beta < 1. Generally close to 1.
- __beta_2__: float, 0 < beta < 1. Generally close to 1.
- __epsilon__: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
__References__
- [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
- [On the importance of initialization and momentum in deep learning](
   http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
| keras-docs-ko/sources/optimizers.md/0 | {
"file_path": "keras-docs-ko/sources/optimizers.md",
"repo_id": "keras-docs-ko",
"token_count": 6194
} | 90 |
# Trains two recurrent neural networks based upon a story and a question.
The merging of the two resulting vectors is then used to answer a range of bAbI tasks.
The results are comparable to those of the LSTM model provided by Weston et al.: [Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks](http://arxiv.org/abs/1502.05698).
Task Number | FB LSTM Baseline | Keras QA
--- | --- | ---
QA1 - Single Supporting Fact | 50 | 52.1
QA2 - Two Supporting Facts | 20 | 37.0
QA3 - Three Supporting Facts | 20 | 20.5
QA4 - Two Arg. Relations | 61 | 62.9
QA5 - Three Arg. Relations | 70 | 61.9
QA6 - Yes/No Questions | 48 | 50.7
QA7 - Counting | 49 | 78.9
QA8 - Lists/Sets | 45 | 77.2
QA9 - Simple Negation | 64 | 64.0
QA10 - Indefinite Knowledge | 44 | 47.7
QA11 - Basic Coreference | 72 | 74.9
QA12 - Conjunction | 74 | 76.4
QA13 - Compound Coreference | 94 | 94.4
QA14 - Time Reasoning | 27 | 34.8
QA15 - Basic Deduction | 21 | 32.4
QA16 - Basic Induction | 23 | 50.6
QA17 - Positional Reasoning | 51 | 49.1
QA18 - Size Reasoning | 52 | 90.8
QA19 - Path Finding | 8 | 9.0
QA20 - Agent's Motivations | 91 | 90.7
For resources related to the bAbI project, refer to: https://research.facebook.com/researchers/1543934539189348
### Notes
- With the default word, sentence, and query vector sizes, the GRU model achieves:
  - 52.1% test accuracy on QA1 in 20 epochs (2 seconds per epoch on CPU);
  - 37.0% test accuracy on QA2 in 20 epochs (16 seconds per epoch on CPU).
In comparison, the LSTM baseline in the Facebook paper reaches 50% and 20%, respectively.
- The task does not traditionally parse the question separately. This likely improves accuracy and is a good example of merging two RNNs.
- The word vector embeddings are not shared between the story and question RNNs.
- See how the accuracy changes with 10,000 training samples (en-10k) instead of only 1000. 1000 was used in order to be comparable to the original paper.
- Experiment with GRU, LSTM, and JZS1-3, as they give subtly different results.
- The length and noise (i.e. "useless" story components) impact the ability of LSTMs/GRUs to provide the correct answer. Given only the supporting facts, these RNNs can achieve 100% accuracy on many tasks. Memory networks and neural networks that use attentional processes can efficiently search through this noise to find the relevant statements, improving performance significantly. This becomes especially obvious on QA2 and QA3, both of which are far longer than QA1.
```python
from __future__ import print_function
from functools import reduce
import re
import tarfile
import numpy as np
from keras.utils.data_utils import get_file
from keras.layers.embeddings import Embedding
from keras import layers
from keras.layers import recurrent
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
def tokenize(sent):
    '''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
    '''Parse stories provided in the bAbi tasks format.
    If only_supporting is true,
    only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
if only_supporting:
                # Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
                # Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
    '''Given a file name, read the file, retrieve the stories,
    and then convert the sentences into a single story.
    If max_length is supplied,
    any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data
if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
xs = []
xqs = []
ys = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
        # let's not forget that index 0 is reserved
y = np.zeros(len(word_idx) + 1)
y[word_idx[answer]] = 1
xs.append(x)
xqs.append(xq)
ys.append(y)
return (pad_sequences(xs, maxlen=story_maxlen),
pad_sequences(xqs, maxlen=query_maxlen), np.array(ys))
RNN = recurrent.LSTM
EMBED_HIDDEN_SIZE = 50
SENT_HIDDEN_SIZE = 100
QUERY_HIDDEN_SIZE = 100
BATCH_SIZE = 32
EPOCHS = 20
print('RNN / Embed / Sent / Query = {}, {}, {}, {}'.format(RNN,
EMBED_HIDDEN_SIZE,
SENT_HIDDEN_SIZE,
QUERY_HIDDEN_SIZE))
try:
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/'
'babi_tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2'
'.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
# Default QA1 task, 1000 samples
# challenge = 'tasks_1-20_v1-2/en/qa1_single-supporting-fact_{}.txt'
# QA1 task with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt'
# QA2 task, 1000 samples
challenge = 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt'
# QA2 task with 10,000 samples
# challenge = 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt'
with tarfile.open(path) as tar:
train = get_stories(tar.extractfile(challenge.format('train')))
test = get_stories(tar.extractfile(challenge.format('test')))
vocab = set()
for story, q, answer in train + test:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
story_maxlen = max(map(len, (x for x, _, _ in train + test)))
query_maxlen = max(map(len, (x for _, x, _ in train + test)))
x, xq, y = vectorize_stories(train, word_idx, story_maxlen, query_maxlen)
tx, txq, ty = vectorize_stories(test, word_idx, story_maxlen, query_maxlen)
print('vocab = {}'.format(vocab))
print('x.shape = {}'.format(x.shape))
print('xq.shape = {}'.format(xq.shape))
print('y.shape = {}'.format(y.shape))
print('story_maxlen, query_maxlen = {}, {}'.format(story_maxlen, query_maxlen))
print('Build model...')
sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
encoded_sentence = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(sentence)
encoded_sentence = RNN(SENT_HIDDEN_SIZE)(encoded_sentence)
question = layers.Input(shape=(query_maxlen,), dtype='int32')
encoded_question = layers.Embedding(vocab_size, EMBED_HIDDEN_SIZE)(question)
encoded_question = RNN(QUERY_HIDDEN_SIZE)(encoded_question)
merged = layers.concatenate([encoded_sentence, encoded_question])
preds = layers.Dense(vocab_size, activation='softmax')(merged)
model = Model([sentence, question], preds)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
print('Training')
model.fit([x, xq], y,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_split=0.05)
print('Evaluation')
loss, acc = model.evaluate([tx, txq], ty,
batch_size=BATCH_SIZE)
print('Test loss / test accuracy = {:.4f} / {:.4f}'.format(loss, acc))
```
| keras-docs-zh/sources/examples/babi_rnn.md/0 | {
"file_path": "keras-docs-zh/sources/examples/babi_rnn.md",
"repo_id": "keras-docs-zh",
"token_count": 4756
} | 91 |
# How to use a stateful LSTM model, and comparison of stateful vs. stateless LSTM performance
[More documentation about the Keras LSTM model](/layers/recurrent/#lstm)
The model is trained on input/output pairs, where the input is a generated uniformly distributed random sequence of length `input_len`, and the output is a moving average of the input with window length `tsteps`. Both `input_len` and `tsteps` are defined in the "editable parameters" section below.
A larger `tsteps` value means that the LSTM needs more memory to figure out the input-output relationship. This memory length is controlled by the `lahead` variable (more details below).
The remaining parameters are:
- `input_len`: the length of the generated input sequence
- `lahead`: the input sequence length that the LSTM is trained on for each output point
- `batch_size`, `epochs`: same as the parameters of the `model.fit(...)` function
When `lahead > 1`, the model input is preprocessed into a "rolling window view" of the data, with the window length being `lahead`. This is similar to scikit-image's `view_as_windows` with `window_shape` [being a single number.](http://scikit-image.org/docs/0.10.x/api/skimage.util.html#view-as-windows)
When `lahead < tsteps`, only the stateful LSTM converges, because its statefulness lets it see beyond the capability that `lahead` gives it to fit the n-point average. The stateless LSTM does not have this capability, and so it is limited by its `lahead` parameter, which is not sufficient to see the n-point average.
When `lahead >= tsteps`, both the stateful and stateless LSTM converge.
```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM
# ----------------------------------------------------------
# editable parameters
# read the documentation in the script head for more details
# ----------------------------------------------------------
# length of input
input_len = 1000
# The window length of the moving average used to generate
# the output from the input in the input/output pairs used to train the LSTM
# e.g. if tsteps=2 and input=[1, 2, 3, 4, 5],
#      then output=[1.5, 2.5, 3.5, 4.5]
tsteps = 2
# The input sequence length that the LSTM is trained on for each output point
lahead = 1
# training parameters passed to "model.fit(...)"
batch_size = 1
epochs = 10
# ------------
# main program
# ------------
print("*" * 33)
if lahead >= tsteps:
print("STATELESS LSTM WILL ALSO CONVERGE")
else:
print("STATELESS LSTM WILL NOT CONVERGE")
print("*" * 33)
np.random.seed(1986)
print('Generating Data...')
def gen_uniform_amp(amp=1, xn=10000):
"""生成 -amp 和 +amp 之间且长度为 xn 的均匀随机数据
# 参数
amp: 统一数据的最大/最小范围
xn: 系列长度
"""
data_input = np.random.uniform(-1 * amp, +1 * amp, xn)
data_input = pd.DataFrame(data_input)
return data_input
# Since the output is a moving average of the input,
# the first few points of the output will be NaN
# and will be dropped from the generated data
# before training the LSTM.
# Also, when lahead > 1, the "rolling window view"
# preprocessing step below will cause some points to be lost.
# For aesthetic reasons, in order to keep the generated data length
# equal to input_len after preprocessing, add some points
# to account for the values that will be lost.
to_drop = max(tsteps - 1, lahead - 1)
data_input = gen_uniform_amp(amp=0.1, xn=input_len + to_drop)
# set the target to be an N-point average of the input
expected_output = data_input.rolling(window=tsteps, center=False).mean()
# when lahead > 1, need to convert the input to a "rolling window view"
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html
if lahead > 1:
data_input = np.repeat(data_input.values, repeats=lahead, axis=1)
data_input = pd.DataFrame(data_input)
for i, c in enumerate(data_input.columns):
data_input[c] = data_input[c].shift(i)
# drop the nan
expected_output = expected_output[to_drop:]
data_input = data_input[to_drop:]
print('Input shape:', data_input.shape)
print('Output shape:', expected_output.shape)
print('Input head: ')
print(data_input.head())
print('Output head: ')
print(expected_output.head())
print('Input tail: ')
print(data_input.tail())
print('Output tail: ')
print(expected_output.tail())
print('Plotting input and expected output')
plt.plot(data_input[0][:10], '.')
plt.plot(expected_output[0][:10], '-')
plt.legend(['Input', 'Expected output'])
plt.title('Input')
plt.show()
def create_model(stateful):
model = Sequential()
model.add(LSTM(20,
input_shape=(lahead, 1),
batch_size=batch_size,
stateful=stateful))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
print('Creating Stateful Model...')
model_stateful = create_model(stateful=True)
# split train/test data
def split_data(x, y, ratio=0.8):
to_train = int(input_len * ratio)
    # tweak to match with batch_size
to_train -= to_train % batch_size
x_train = x[:to_train]
y_train = y[:to_train]
x_test = x[to_train:]
y_test = y[to_train:]
    # tweak to match with batch_size
to_drop = x.shape[0] % batch_size
if to_drop > 0:
x_test = x_test[:-1 * to_drop]
y_test = y_test[:-1 * to_drop]
    # some reshaping
reshape_3 = lambda x: x.values.reshape((x.shape[0], x.shape[1], 1))
x_train = reshape_3(x_train)
x_test = reshape_3(x_test)
reshape_2 = lambda x: x.values.reshape((x.shape[0], 1))
y_train = reshape_2(y_train)
y_test = reshape_2(y_test)
return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = split_data(data_input, expected_output)
print('x_train.shape: ', x_train.shape)
print('y_train.shape: ', y_train.shape)
print('x_test.shape: ', x_test.shape)
print('y_test.shape: ', y_test.shape)
print('Training')
for i in range(epochs):
print('Epoch', i + 1, '/', epochs)
    # Note that the last state for sample i in batch k will be used as
    # the initial state for sample i in batch k+1.
    # Thus we are simultaneously training on batch_size series, each at a lower
    # resolution than the original series contained in data_input.
    # Each of these series is offset by one step and can be
    # extracted with data_input[i::batch_size].
model_stateful.fit(x_train,
y_train,
batch_size=batch_size,
epochs=1,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
model_stateful.reset_states()
print('Predicting')
predicted_stateful = model_stateful.predict(x_test, batch_size=batch_size)
print('Creating Stateless Model...')
model_stateless = create_model(stateful=False)
print('Training')
model_stateless.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
print('Predicting')
predicted_stateless = model_stateless.predict(x_test, batch_size=batch_size)
# ----------------------------
print('Plotting Results')
plt.subplot(3, 1, 1)
plt.plot(y_test)
plt.title('Expected')
plt.subplot(3, 1, 2)
# drop the first "tsteps-1" points because it is not possible to predict them,
# since the "previous" timesteps needed to do so do not exist
plt.plot((y_test - predicted_stateful).flatten()[tsteps - 1:])
plt.title('Stateful: Expected - Predicted')
plt.subplot(3, 1, 3)
plt.plot((y_test - predicted_stateless).flatten())
plt.title('Stateless: Expected - Predicted')
plt.show()
``` | keras-docs-zh/sources/examples/lstm_stateful.md/0 | {
"file_path": "keras-docs-zh/sources/examples/lstm_stateful.md",
"repo_id": "keras-docs-zh",
"token_count": 3983
} | 92 |
# Trains and evaluates a simple MLP on the Reuters newswire topic classification task.
```python
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 32
epochs = 5
print('Loading data...')
(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_words,
test_split=0.2)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
num_classes = np.max(y_train) + 1
print(num_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(num_words=max_words)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Convert class vector to binary class matrix '
'(for use with categorical_crossentropy)')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
print('Building model...')
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split=0.1)
score = model.evaluate(x_test, y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
``` | keras-docs-zh/sources/examples/reuters_mlp.md/0 | {
"file_path": "keras-docs-zh/sources/examples/reuters_mlp.md",
"repo_id": "keras-docs-zh",
"token_count": 859
} | 93 |
## Usage of initializers
Initializations define the way to set the initial random weights of Keras layers.
The keyword arguments used for passing initializers to layers depend on the layer. Usually, they are simply `kernel_initializer` and `bias_initializer`:
```python
model.add(Dense(64,
kernel_initializer='random_uniform',
bias_initializer='zeros'))
```
## Available initializers
The following built-in initializers are available as part of the `keras.initializers` module:
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L14)</span>
### Initializer
```python
keras.initializers.Initializer()
```
Initializer base class: all initializers inherit from this class.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L33)</span>
### Zeros
```python
keras.initializers.Zeros()
```
Initializer that generates tensors initialized to 0.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L41)</span>
### Ones
```python
keras.initializers.Ones()
```
Initializer that generates tensors initialized to 1.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L49)</span>
### Constant
```python
keras.initializers.Constant(value=0)
```
Initializer that generates tensors initialized to a constant value.
__Arguments__
- __value__: float; the value of the generated tensors.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L66)</span>
### RandomNormal
```python
keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
```
Initializer that generates tensors with a normal distribution.
__Arguments__
- __mean__: a python scalar or a scalar tensor. Mean of the random values to generate.
- __stddev__: a python scalar or a scalar tensor. Standard deviation of the random values to generate.
- __seed__: A Python integer. Used to seed the random generator.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L97)</span>
### RandomUniform
```python
keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None)
```
Initializer that generates tensors with a uniform distribution.
__Arguments__
- __minval__: a python scalar or a scalar tensor. Lower bound of the range of random values to generate.
- __maxval__: a python scalar or a scalar tensor. Upper bound of the range of random values to generate. Defaults to 1 for float types.
- __seed__: A Python integer. Used to seed the random generator.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L128)</span>
### TruncatedNormal
```python
keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None)
```
Initializer that generates a truncated normal distribution.
These values are similar to values from a `RandomNormal`, except that values more than two standard deviations from the mean are discarded and redrawn. This is the recommended initializer for neural network weights and filters.
__Arguments__
- __mean__: a python scalar or a scalar tensor. Mean of the random values to generate.
- __stddev__: a python scalar or a scalar tensor. Standard deviation of the random values to generate.
- __seed__: A Python integer. Used to seed the random generator.
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L164)</span>
### VarianceScaling
```python
keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)
```
Initializer capable of adapting its scale to the shape of the weights.
With `distribution="normal"`, samples are drawn from a truncated normal distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
- the number of input units in the weight tensor, if mode = "fan_in"
- the number of output units, if mode = "fan_out"
- the average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
__Arguments__
- __scale__: Scaling factor (positive float).
- __mode__: One of "fan_in", "fan_out", "fan_avg".
- __distribution__: Random distribution to use. One of "normal", "uniform".
- __seed__: A Python integer. Used to seed the random generator.
__Raises__
- __ValueError__: In case of an invalid value for the "scale", "mode" or "distribution" arguments.
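A brief usage sketch (the fan mode and scale values are illustrative assumptions):
```python
from keras import initializers
from keras.layers import Dense

# Similar in spirit to He-style initialization: scale=2.0, fan_in, normal.
init = initializers.VarianceScaling(scale=2.0, mode='fan_in', distribution='normal')

layer = Dense(64, kernel_initializer=init)
```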
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L241)</span>
### Orthogonal
```python
keras.initializers.Orthogonal(gain=1.0, seed=None)
```
Initializer that generates a random orthogonal matrix.
__Arguments__
- __gain__: Multiplicative factor to apply to the orthogonal matrix.
- __seed__: A Python integer. Used to seed the random generator.
__References__
- [Exact solutions to the nonlinear dynamics of learning in deep linear neural networks](http://arxiv.org/abs/1312.6120)
----
<span style="float:right;">[[source]](https://github.com/keras-team/keras/blob/master/keras/initializers.py#L281)</span>
### Identity
```python
keras.initializers.Identity(gain=1.0)
```
Initializer that generates the identity matrix.
Only usable for generating 2D square matrices.
__Arguments__
- __gain__: Multiplicative factor to apply to the identity matrix.
----
### lecun_uniform
```python
keras.initializers.lecun_uniform(seed=None)
```
LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit], where `limit` is `sqrt(3 / fan_in)` and `fan_in` is the number of input units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
----
### glorot_normal
```python
keras.initializers.glorot_normal(seed=None)
```
Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0, with `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` is the number of input units in the weight tensor and `fan_out` is the number of output units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Understanding the difficulty of training deep feedforward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
----
### glorot_uniform
```python
keras.initializers.glorot_uniform(seed=None)
```
Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit], where `limit` is `sqrt(6 / (fan_in + fan_out))`, `fan_in` is the number of input units in the weight tensor, and `fan_out` is the number of output units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Understanding the difficulty of training deep feedforward neural networks](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
----
### he_normal
```python
keras.initializers.he_normal(seed=None)
```
He normal initializer.
It draws samples from a truncated normal distribution centered on 0, with `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/abs/1502.01852)
----
### lecun_normal
```python
keras.initializers.lecun_normal(seed=None)
```
LeCun normal initializer.
It draws samples from a truncated normal distribution centered on 0, with `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
- [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
----
### he_uniform
```python
keras.initializers.he_uniform(seed=None)
```
He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit], where `limit` is `sqrt(6 / fan_in)` and `fan_in` is the number of input units in the weight tensor.
__Arguments__
- __seed__: A Python integer. Used to seed the random generator.
__Returns__
An initializer.
__References__
- [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](http://arxiv.org/abs/1502.01852)
An initializer may be passed as a string (must match one of the available initializers above), or as a callable:
```python
from keras import initializers
model.add(Dense(64, kernel_initializer=initializers.random_normal(stddev=0.01)))
# also works; will use the default parameters.
model.add(Dense(64, kernel_initializer='random_normal'))
```
## Using custom initializers
If passing a custom callable, it must take the arguments `shape` (the shape of the variable to initialize) and `dtype` (the dtype of the generated values):
```python
from keras import backend as K
def my_init(shape, dtype=None):
return K.random_normal(shape, dtype=dtype)
model.add(Dense(64, kernel_initializer=my_init))
```
| keras-docs-zh/sources/initializers.md/0 | {
"file_path": "keras-docs-zh/sources/initializers.md",
"repo_id": "keras-docs-zh",
"token_count": 5063
} | 94 |
# About Keras models
There are two main types of models available in Keras: [the Sequential model](/models/sequential) and [the Model class used with the functional API](/models/model).
These models have a number of methods and attributes in common:
- `model.layers` is a flattened list of the layers comprising the model.
- `model.inputs` is the list of input tensors of the model.
- `model.outputs` is the list of output tensors of the model.
- `model.summary()` prints a summary representation of your model. It is a shortcut for [utils.print_summary](/utils/#print_summary).
- `model.get_config()` returns a dictionary containing the configuration of the model. The model can be reinstantiated from its config via:
```python
config = model.get_config()
model = Model.from_config(config)
# or, for Sequential:
model = Sequential.from_config(config)
```
- `model.get_weights()` returns a list of all weight tensors in the model, as Numpy arrays.
- `model.set_weights(weights)` sets the values of the weights of the model from a list of Numpy arrays. The arrays in the list should have the same shape as those returned by `get_weights()`.
- `model.to_json()` returns a representation of the model as a JSON string. Note that this representation does not include the weights, only the architecture. You can reinstantiate the same model (with reinitialized weights) from the JSON string via:
```python
from keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
```
- `model.to_yaml()` returns a representation of the model as a YAML string. Note that this representation does not include the weights, only the architecture. You can reinstantiate the same model (with reinitialized weights) from the YAML string via:
```python
from keras.models import model_from_yaml
yaml_string = model.to_yaml()
model = model_from_yaml(yaml_string)
```
- `model.save_weights(filepath)` saves the weights of the model as a HDF5 file.
- `model.load_weights(filepath, by_name=False)` loads the weights of the model from a HDF5 file (created by `save_weights`). By default, the architecture is expected to be unchanged. To load weights into a different architecture (with some layers in common), set `by_name=True` to load only those layers whose names match; a short sketch follows the note below.
Note: please also see [How can I install HDF5 or h5py to save my models in Keras?](/getting-started/faq/#how-can-i-install-HDF5-or-h5py-to-save-my-models-in-Keras) in the FAQ for instructions on how to install `h5py`.
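A minimal sketch of the `by_name=True` workflow (both models and the file name are placeholders for illustration):
```python
from keras.models import Sequential
from keras.layers import Dense

# Original model, assumed to have been trained already.
model = Sequential([
    Dense(2, input_dim=3, name='dense_1'),
    Dense(3, name='dense_2'),
])
model.save_weights('weights.h5')

# New model with a partially shared architecture.
new_model = Sequential([
    Dense(2, input_dim=3, name='dense_1'),  # will be loaded
    Dense(10, name='new_dense'),            # will not be loaded
])

# Only layers with matching names ('dense_1' here) receive weights.
new_model.load_weights('weights.h5', by_name=True)
```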
## Model subclassing
In addition to these two types of models, you can create your own fully-customizable models by subclassing the `Model` class and implementing your own forward pass in the `call` method (the `Model` subclassing API was introduced in Keras 2.2.0).
Here is an example of a simple multi-layer perceptron written as a `Model` subclass:
```python
import keras
class SimpleMLP(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleMLP, self).__init__(name='mlp')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, inputs):
x = self.dense1(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
model = SimpleMLP()
model.compile(...)
model.fit(...)
```
Layers are defined in `__init__(self, ...)`, and the forward pass is specified in `call(self, inputs)`. In `call`, you may specify custom losses by calling `self.add_loss(loss_tensor)` (just like you would in a custom layer).
In subclassed models, the model's topology is defined as Python code (rather than as a static graph of layers). That means the model's topology cannot be inspected or serialized. As a result, the following methods and attributes are **not available for subclassed models**:
- `model.inputs` and `model.outputs`.
- `model.to_yaml()` and `model.to_json()`.
- `model.get_config()` and `model.save()`.
**Key point:** use the right API for the job. The `Model` subclassing API can provide greater flexibility for implementing complex models, but it comes at a cost (in addition to these missing features): it is more verbose, more complex, and offers more opportunities for user error. If possible, prefer the functional API, which is more user-friendly.
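For comparison, here is a minimal sketch of the same kind of MLP written with the functional API (the layer sizes are illustrative):
```python
import keras
from keras import layers

inputs = keras.Input(shape=(16,))
x = layers.Dense(32, activation='relu')(inputs)
outputs = layers.Dense(10, activation='softmax')(x)
model = keras.Model(inputs, outputs)

# Unlike the subclassed version, this model can be inspected and serialized:
config = model.get_config()
json_string = model.to_json()
```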
| keras-docs-zh/sources/models/about-keras-models.md/0 | {
"file_path": "keras-docs-zh/sources/models/about-keras-models.md",
"repo_id": "keras-docs-zh",
"token_count": 2677
} | 95 |
<jupyter_start><jupyter_text>MelGAN-based spectrogram inversion using feature matching**Author:** [Darshan Deshpande](https://twitter.com/getdarshan)**Date created:** 02/09/2021**Last modified:** 15/09/2021**Description:** Inversion of audio from mel-spectrograms using the MelGAN architecture and feature matching. IntroductionAutoregressive vocoders have been ubiquitous for a majority of the history of speech processing,but for most of their existence they have lacked parallelism.[MelGAN](https://arxiv.org/pdf/1910.06711v3.pdf) is anon-autoregressive, fully convolutional vocoder architecture used for purposes rangingfrom spectral inversion and speech enhancement to present-day state-of-the-artspeech synthesis when used as a decoderwith models like Tacotron2 or FastSpeech that convert text to mel spectrograms.In this tutorial, we will have a look at the MelGAN architecture and how it can achievefast spectral inversion, i.e. conversion of spectrograms to audio waves. The MelGANimplemented in this tutorial is similar to the original implementation with only thedifference of method of padding for convolutions where we will use 'same' instead ofreflect padding. Importing and Defining Hyperparameters<jupyter_code>!pip install -qqq tensorflow_addons
!pip install -qqq tensorflow-io
import tensorflow as tf
import tensorflow_io as tfio
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow_addons import layers as addon_layers
# Setting logger level to avoid input shape warnings
tf.get_logger().setLevel("ERROR")
# Defining hyperparameters
DESIRED_SAMPLES = 8192
LEARNING_RATE_GEN = 1e-5
LEARNING_RATE_DISC = 1e-6
BATCH_SIZE = 16
mse = keras.losses.MeanSquaredError()
mae = keras.losses.MeanAbsoluteError()<jupyter_output><empty_output><jupyter_text>Loading the DatasetThis example uses the [LJSpeech dataset](https://keithito.com/LJ-Speech-Dataset/).The LJSpeech dataset is primarily used for text-to-speech and consists of 13,100 discretespeech samples taken from 7 non-fiction books, having a total length of approximately 24hours. The MelGAN training is only concerned with the audio waves so we process only theWAV files and ignore the audio annotations.<jupyter_code>!wget https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
!tar -xf /content/LJSpeech-1.1.tar.bz2<jupyter_output><empty_output><jupyter_text>We create a `tf.data.Dataset` to load and process the audio files on the fly.The `preprocess()` function takes the file path as input and returns two instances of thewave, one for input and one as the ground truth for comparison. The input wave will bemapped to a spectrogram using the custom `MelSpec` layer as shown later in this example.<jupyter_code># Splitting the dataset into training and testing splits
wavs = tf.io.gfile.glob("LJSpeech-1.1/wavs/*.wav")
print(f"Number of audio files: {len(wavs)}")
# Mapper function for loading the audio. This function returns two instances of the wave
def preprocess(filename):
audio = tf.audio.decode_wav(tf.io.read_file(filename), 1, DESIRED_SAMPLES).audio
return audio, audio
# Create tf.data.Dataset objects and apply preprocessing
train_dataset = tf.data.Dataset.from_tensor_slices((wavs,))
train_dataset = train_dataset.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Defining custom layers for MelGANThe MelGAN architecture consists of 3 main modules:1. The residual block2. Dilated convolutional block3. Discriminator block Since the network takes a mel-spectrogram as input, we will create an additional customlayerwhich can convert the raw audio wave to a spectrogram on-the-fly. We use the raw audiotensor from `train_dataset` and map it to a mel-spectrogram using the `MelSpec` layerbelow.<jupyter_code># Custom keras layer for on-the-fly audio to spectrogram conversion
class MelSpec(layers.Layer):
def __init__(
self,
frame_length=1024,
frame_step=256,
fft_length=None,
sampling_rate=22050,
num_mel_channels=80,
freq_min=125,
freq_max=7600,
**kwargs,
):
super().__init__(**kwargs)
self.frame_length = frame_length
self.frame_step = frame_step
self.fft_length = fft_length
self.sampling_rate = sampling_rate
self.num_mel_channels = num_mel_channels
self.freq_min = freq_min
self.freq_max = freq_max
# Defining mel filter. This filter will be multiplied with the STFT output
self.mel_filterbank = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=self.num_mel_channels,
num_spectrogram_bins=self.frame_length // 2 + 1,
sample_rate=self.sampling_rate,
lower_edge_hertz=self.freq_min,
upper_edge_hertz=self.freq_max,
)
def call(self, audio, training=True):
# We will only perform the transformation during training.
if training:
# Taking the Short Time Fourier Transform. Ensure that the audio is padded.
# In the paper, the STFT output is padded using the 'REFLECT' strategy.
stft = tf.signal.stft(
tf.squeeze(audio, -1),
self.frame_length,
self.frame_step,
self.fft_length,
pad_end=True,
)
# Taking the magnitude of the STFT output
magnitude = tf.abs(stft)
# Multiplying the Mel-filterbank with the magnitude and scaling it using the db scale
mel = tf.matmul(tf.square(magnitude), self.mel_filterbank)
log_mel_spec = tfio.audio.dbscale(mel, top_db=80)
return log_mel_spec
else:
return audio
def get_config(self):
config = super().get_config()
config.update(
{
"frame_length": self.frame_length,
"frame_step": self.frame_step,
"fft_length": self.fft_length,
"sampling_rate": self.sampling_rate,
"num_mel_channels": self.num_mel_channels,
"freq_min": self.freq_min,
"freq_max": self.freq_max,
}
)
return config<jupyter_output><empty_output><jupyter_text>The residual convolutional block extensively uses dilations and has a total receptivefield of 27 timesteps per block. The dilations must grow as a power of the `kernel_size`to ensure reduction of hissing noise in the output. The network proposed by the paper isas follows:<jupyter_code># Creating the residual stack block
def residual_stack(input, filters):
"""Convolutional residual stack with weight normalization.
Args:
filters: int, determines filter size for the residual stack.
Returns:
Residual stack output.
"""
c1 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(c1)
c2 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu1)
add1 = layers.Add()([c2, input])
lrelu2 = layers.LeakyReLU()(add1)
c3 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=3, padding="same"), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(c3)
c4 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu3)
add2 = layers.Add()([add1, c4])
lrelu4 = layers.LeakyReLU()(add2)
c5 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=9, padding="same"), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(c5)
c6 = addon_layers.WeightNormalization(
layers.Conv1D(filters, 3, dilation_rate=1, padding="same"), data_init=False
)(lrelu5)
add3 = layers.Add()([c6, add2])
return add3<jupyter_output><empty_output><jupyter_text>Each convolutional block uses the dilations offered by the residual stackand upsamples the input data by the `upsampling_factor`.<jupyter_code># Dilated convolutional block consisting of the Residual stack
def conv_block(input, conv_dim, upsampling_factor):
"""Dilated Convolutional Block with weight normalization.
Args:
conv_dim: int, determines filter size for the block.
upsampling_factor: int, scale for upsampling.
Returns:
Dilated convolution block.
"""
conv_t = addon_layers.WeightNormalization(
layers.Conv1DTranspose(conv_dim, 16, upsampling_factor, padding="same"),
data_init=False,
)(input)
lrelu1 = layers.LeakyReLU()(conv_t)
res_stack = residual_stack(lrelu1, conv_dim)
lrelu2 = layers.LeakyReLU()(res_stack)
return lrelu2<jupyter_output><empty_output><jupyter_text>The discriminator block consists of convolutions and downsampling layers. This block isessential for the implementation of the feature matching technique.Each discriminator outputs a list of feature maps that will be compared during trainingto compute the feature matching loss.<jupyter_code>def discriminator_block(input):
conv1 = addon_layers.WeightNormalization(
layers.Conv1D(16, 15, 1, "same"), data_init=False
)(input)
lrelu1 = layers.LeakyReLU()(conv1)
conv2 = addon_layers.WeightNormalization(
layers.Conv1D(64, 41, 4, "same", groups=4), data_init=False
)(lrelu1)
lrelu2 = layers.LeakyReLU()(conv2)
conv3 = addon_layers.WeightNormalization(
layers.Conv1D(256, 41, 4, "same", groups=16), data_init=False
)(lrelu2)
lrelu3 = layers.LeakyReLU()(conv3)
conv4 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=64), data_init=False
)(lrelu3)
lrelu4 = layers.LeakyReLU()(conv4)
conv5 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 41, 4, "same", groups=256), data_init=False
)(lrelu4)
lrelu5 = layers.LeakyReLU()(conv5)
conv6 = addon_layers.WeightNormalization(
layers.Conv1D(1024, 5, 1, "same"), data_init=False
)(lrelu5)
lrelu6 = layers.LeakyReLU()(conv6)
conv7 = addon_layers.WeightNormalization(
layers.Conv1D(1, 3, 1, "same"), data_init=False
)(lrelu6)
return [lrelu1, lrelu2, lrelu3, lrelu4, lrelu5, lrelu6, conv7]<jupyter_output><empty_output><jupyter_text>Create the generator<jupyter_code>def create_generator(input_shape):
inp = keras.Input(input_shape)
x = MelSpec()(inp)
x = layers.Conv1D(512, 7, padding="same")(x)
x = layers.LeakyReLU()(x)
x = conv_block(x, 256, 8)
x = conv_block(x, 128, 8)
x = conv_block(x, 64, 2)
x = conv_block(x, 32, 2)
x = addon_layers.WeightNormalization(
layers.Conv1D(1, 7, padding="same", activation="tanh")
)(x)
return keras.Model(inp, x)
# We use a dynamic input shape for the generator since the model is fully convolutional
generator = create_generator((None, 1))
generator.summary()<jupyter_output><empty_output><jupyter_text>Create the discriminator<jupyter_code>def create_discriminator(input_shape):
inp = keras.Input(input_shape)
out_map1 = discriminator_block(inp)
pool1 = layers.AveragePooling1D()(inp)
out_map2 = discriminator_block(pool1)
pool2 = layers.AveragePooling1D()(pool1)
out_map3 = discriminator_block(pool2)
return keras.Model(inp, [out_map1, out_map2, out_map3])
# We use a dynamic input shape for the discriminator
# This is done because the input shape for the generator is unknown
discriminator = create_discriminator((None, 1))
discriminator.summary()<jupyter_output><empty_output><jupyter_text>Defining the loss functions**Generator Loss**The generator architecture uses a combination of two losses1. Mean Squared Error:This is the standard MSE generator loss calculated between ones and the outputs from thediscriminator with _N_ layers.2. Feature Matching Loss:This loss involves extracting the outputs of every layer from the discriminator for boththe generator and ground truth and compare each layer output _k_ using Mean Absolute Error.**Discriminator Loss**The discriminator uses the Mean Absolute Error and compares the real data predictionswith ones and generated predictions with zeros.<jupyter_code># Generator loss
def generator_loss(real_pred, fake_pred):
"""Loss function for the generator.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Loss for the generator.
"""
gen_loss = []
for i in range(len(fake_pred)):
gen_loss.append(mse(tf.ones_like(fake_pred[i][-1]), fake_pred[i][-1]))
return tf.reduce_mean(gen_loss)
def feature_matching_loss(real_pred, fake_pred):
"""Implements the feature matching loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Feature Matching Loss.
"""
fm_loss = []
for i in range(len(fake_pred)):
for j in range(len(fake_pred[i]) - 1):
fm_loss.append(mae(real_pred[i][j], fake_pred[i][j]))
return tf.reduce_mean(fm_loss)
def discriminator_loss(real_pred, fake_pred):
"""Implements the discriminator loss.
Args:
real_pred: Tensor, output of the ground truth wave passed through the discriminator.
fake_pred: Tensor, output of the generator prediction passed through the discriminator.
Returns:
Discriminator Loss.
"""
real_loss, fake_loss = [], []
for i in range(len(real_pred)):
real_loss.append(mse(tf.ones_like(real_pred[i][-1]), real_pred[i][-1]))
fake_loss.append(mse(tf.zeros_like(fake_pred[i][-1]), fake_pred[i][-1]))
# Calculating the final discriminator loss after scaling
disc_loss = tf.reduce_mean(real_loss) + tf.reduce_mean(fake_loss)
return disc_loss<jupyter_output><empty_output><jupyter_text>Defining the MelGAN model for training.This subclass overrides the `train_step()` method to implement the training logic.<jupyter_code>class MelGAN(keras.Model):
def __init__(self, generator, discriminator, **kwargs):
"""MelGAN trainer class
Args:
generator: keras.Model, Generator model
discriminator: keras.Model, Discriminator model
"""
super().__init__(**kwargs)
self.generator = generator
self.discriminator = discriminator
def compile(
self,
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
):
"""MelGAN compile method.
Args:
gen_optimizer: keras.optimizer, optimizer to be used for training
disc_optimizer: keras.optimizer, optimizer to be used for training
generator_loss: callable, loss function for generator
feature_matching_loss: callable, loss function for feature matching
discriminator_loss: callable, loss function for discriminator
"""
super().compile()
# Optimizers
self.gen_optimizer = gen_optimizer
self.disc_optimizer = disc_optimizer
# Losses
self.generator_loss = generator_loss
self.feature_matching_loss = feature_matching_loss
self.discriminator_loss = discriminator_loss
# Trackers
self.gen_loss_tracker = keras.metrics.Mean(name="gen_loss")
self.disc_loss_tracker = keras.metrics.Mean(name="disc_loss")
def train_step(self, batch):
x_batch_train, y_batch_train = batch
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
# Generating the audio wave
            gen_audio_wave = self.generator(x_batch_train, training=True)
            # Generating the features using the discriminator
            real_pred = self.discriminator(y_batch_train)
            fake_pred = self.discriminator(gen_audio_wave)
            # Calculating the generator losses
            gen_loss = self.generator_loss(real_pred, fake_pred)
            fm_loss = self.feature_matching_loss(real_pred, fake_pred)
            # Calculating final generator loss
            gen_fm_loss = gen_loss + 10 * fm_loss
            # Calculating the discriminator losses
            disc_loss = self.discriminator_loss(real_pred, fake_pred)
        # Calculating and applying the gradients for generator and discriminator
        grads_gen = gen_tape.gradient(gen_fm_loss, self.generator.trainable_weights)
        grads_disc = disc_tape.gradient(disc_loss, self.discriminator.trainable_weights)
        self.gen_optimizer.apply_gradients(zip(grads_gen, self.generator.trainable_weights))
        self.disc_optimizer.apply_gradients(
            zip(grads_disc, self.discriminator.trainable_weights)
        )
self.gen_loss_tracker.update_state(gen_fm_loss)
self.disc_loss_tracker.update_state(disc_loss)
return {
"gen_loss": self.gen_loss_tracker.result(),
"disc_loss": self.disc_loss_tracker.result(),
}<jupyter_output><empty_output><jupyter_text>TrainingThe paper suggests that the training with dynamic shapes takes around 400,000 steps (~500epochs). For this example, we will run it only for a single epoch (819 steps).Longer training time (greater than 300 epochs) will almost certainly provide better results.<jupyter_code>gen_optimizer = keras.optimizers.Adam(
LEARNING_RATE_GEN, beta_1=0.5, beta_2=0.9, clipnorm=1
)
disc_optimizer = keras.optimizers.Adam(
LEARNING_RATE_DISC, beta_1=0.5, beta_2=0.9, clipnorm=1
)
# Start training
generator = create_generator((None, 1))
discriminator = create_discriminator((None, 1))
mel_gan = MelGAN(generator, discriminator)
mel_gan.compile(
gen_optimizer,
disc_optimizer,
generator_loss,
feature_matching_loss,
discriminator_loss,
)
mel_gan.fit(
train_dataset.shuffle(200).batch(BATCH_SIZE).prefetch(tf.data.AUTOTUNE), epochs=1
)<jupyter_output><empty_output><jupyter_text>Testing the modelThe trained model can now be used for real time text-to-speech translation tasks.To test how fast the MelGAN inference can be, let us take a sample audio mel-spectrogramand convert it. Note that the actual model pipeline will not include the `MelSpec` layerand hence this layer will be disabled during inference. The inference input will be amel-spectrogram processed similar to the `MelSpec` layer configuration.For testing this, we will create a randomly uniformly distributed tensor to simulate thebehavior of the inference pipeline.<jupyter_code># Sampling a random tensor to mimic a batch of 128 spectrograms of shape [50, 80]
audio_sample = tf.random.uniform([128, 50, 80])<jupyter_output><empty_output><jupyter_text>Timing the inference speed of a single sample. Running this, you can see that the averageinference time per spectrogram ranges from 8 milliseconds to 10 milliseconds on a K80 GPU which ispretty fast.<jupyter_code>pred = generator.predict(audio_sample, batch_size=32, verbose=1)<jupyter_output><empty_output> | keras-io/examples/audio/ipynb/melgan_spectrogram_inversion.ipynb/0 | {
"file_path": "keras-io/examples/audio/ipynb/melgan_spectrogram_inversion.ipynb",
"repo_id": "keras-io",
"token_count": 7318
} | 96 |
# DCGAN to generate face images
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2019/04/29<br>
**Last modified:** 2023/12/21<br>
**Description:** A simple DCGAN trained using `fit()` by overriding `train_step` on CelebA images.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/generative/ipynb/dcgan_overriding_train_step.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/generative/dcgan_overriding_train_step.py)
---
## Setup
```python
import keras
import tensorflow as tf
from keras import layers
from keras import ops
import matplotlib.pyplot as plt
import os
import gdown
from zipfile import ZipFile
```
---
## Prepare CelebA data
We'll use face images from the CelebA dataset, resized to 64x64.
```python
os.makedirs("celeba_gan")
url = "https://drive.google.com/uc?id=1O7m1010EJjLE5QxLZiM9Fpjs7Oj6e684"
output = "celeba_gan/data.zip"
gdown.download(url, output, quiet=True)
with ZipFile("celeba_gan/data.zip", "r") as zipobj:
zipobj.extractall("celeba_gan")
```
Create a dataset from our folder, and rescale the images to the [0-1] range:
```python
dataset = keras.utils.image_dataset_from_directory(
"celeba_gan", label_mode=None, image_size=(64, 64), batch_size=32
)
dataset = dataset.map(lambda x: x / 255.0)
```
<div class="k-default-codeblock">
```
Found 202599 files.
```
</div>
Let's display a sample image:
```python
for x in dataset:
plt.axis("off")
plt.imshow((x.numpy() * 255).astype("int32")[0])
break
```

---
## Create the discriminator
It maps a 64x64 image to a binary classification score.
```python
discriminator = keras.Sequential(
[
keras.Input(shape=(64, 64, 3)),
layers.Conv2D(64, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Flatten(),
layers.Dropout(0.2),
layers.Dense(1, activation="sigmoid"),
],
name="discriminator",
)
discriminator.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "discriminator"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">3,136</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">131,200</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">262,272</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8192</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8192</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">8,193</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">404,801</span> (1.54 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">404,801</span> (1.54 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Create the generator
It mirrors the discriminator, replacing `Conv2D` layers with `Conv2DTranspose` layers.
```python
latent_dim = 128
generator = keras.Sequential(
[
keras.Input(shape=(latent_dim,)),
layers.Dense(8 * 8 * 128),
layers.Reshape((8, 8, 128)),
layers.Conv2DTranspose(128, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(256, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2DTranspose(512, kernel_size=4, strides=2, padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.Conv2D(3, kernel_size=5, padding="same", activation="sigmoid"),
],
name="generator",
)
generator.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "generator"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ dense_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8192</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,056,768</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ reshape (<span style="color: #0087ff; text-decoration-color: #0087ff">Reshape</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">262,272</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">16</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">524,544</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">32</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_transpose_2 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,097,664</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2DTranspose</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ leaky_re_lu_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">LeakyReLU</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">38,403</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">3,979,651</span> (15.18 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">3,979,651</span> (15.18 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Override `train_step`
```python
class GAN(keras.Model):
def __init__(self, discriminator, generator, latent_dim):
super().__init__()
self.discriminator = discriminator
self.generator = generator
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(1337)
def compile(self, d_optimizer, g_optimizer, loss_fn):
super().compile()
self.d_optimizer = d_optimizer
self.g_optimizer = g_optimizer
self.loss_fn = loss_fn
self.d_loss_metric = keras.metrics.Mean(name="d_loss")
self.g_loss_metric = keras.metrics.Mean(name="g_loss")
@property
def metrics(self):
return [self.d_loss_metric, self.g_loss_metric]
def train_step(self, real_images):
# Sample random points in the latent space
batch_size = ops.shape(real_images)[0]
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
# Decode them to fake images
generated_images = self.generator(random_latent_vectors)
# Combine them with real images
combined_images = ops.concatenate([generated_images, real_images], axis=0)
# Assemble labels discriminating real from fake images
labels = ops.concatenate(
[ops.ones((batch_size, 1)), ops.zeros((batch_size, 1))], axis=0
)
# Add random noise to the labels - important trick!
labels += 0.05 * tf.random.uniform(tf.shape(labels))
# Train the discriminator
with tf.GradientTape() as tape:
predictions = self.discriminator(combined_images)
d_loss = self.loss_fn(labels, predictions)
grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
self.d_optimizer.apply_gradients(
zip(grads, self.discriminator.trainable_weights)
)
# Sample random points in the latent space
random_latent_vectors = keras.random.normal(
shape=(batch_size, self.latent_dim), seed=self.seed_generator
)
# Assemble labels that say "all real images"
misleading_labels = ops.zeros((batch_size, 1))
# Train the generator (note that we should *not* update the weights
# of the discriminator)!
with tf.GradientTape() as tape:
predictions = self.discriminator(self.generator(random_latent_vectors))
g_loss = self.loss_fn(misleading_labels, predictions)
grads = tape.gradient(g_loss, self.generator.trainable_weights)
self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))
# Update metrics
self.d_loss_metric.update_state(d_loss)
self.g_loss_metric.update_state(g_loss)
return {
"d_loss": self.d_loss_metric.result(),
"g_loss": self.g_loss_metric.result(),
}
```
---
## Create a callback that periodically saves generated images
```python
class GANMonitor(keras.callbacks.Callback):
def __init__(self, num_img=3, latent_dim=128):
self.num_img = num_img
self.latent_dim = latent_dim
self.seed_generator = keras.random.SeedGenerator(42)
def on_epoch_end(self, epoch, logs=None):
random_latent_vectors = keras.random.normal(
shape=(self.num_img, self.latent_dim), seed=self.seed_generator
)
generated_images = self.model.generator(random_latent_vectors)
generated_images *= 255
        generated_images = generated_images.numpy()
for i in range(self.num_img):
img = keras.utils.array_to_img(generated_images[i])
img.save("generated_img_%03d_%d.png" % (epoch, i))
```
---
## Train the end-to-end model
```python
epochs = 1 # In practice, use ~100 epochs
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=latent_dim)
gan.compile(
d_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
g_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
loss_fn=keras.losses.BinaryCrossentropy(),
)
gan.fit(
dataset, epochs=epochs, callbacks=[GANMonitor(num_img=10, latent_dim=latent_dim)]
)
```
<div class="k-default-codeblock">
```
2/6332 [37m━━━━━━━━━━━━━━━━━━━━ 9:54 94ms/step - d_loss: 0.6792 - g_loss: 0.7880
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1704214667.959762 1319 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
6332/6332 ━━━━━━━━━━━━━━━━━━━━ 557s 84ms/step - d_loss: 0.5616 - g_loss: 1.4099
<keras.src.callbacks.history.History at 0x7f251d32bc40>
```
</div>
Some of the last generated images around epoch 30
(results keep improving after that):

| keras-io/examples/generative/md/dcgan_overriding_train_step.md/0 | {
"file_path": "keras-io/examples/generative/md/dcgan_overriding_train_step.md",
"repo_id": "keras-io",
"token_count": 8942
} | 97 |
"""
Title: Keras debugging tips
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/16
Last modified: 2023/11/16
Description: Four simple tips to help you debug your Keras code.
Accelerator: GPU
"""
"""
## Introduction
It's generally possible to do almost anything in Keras *without writing code* per se:
whether you're implementing a new type of GAN or the latest convnet architecture for
image segmentation, you can usually stick to calling built-in methods. Because all
built-in methods do extensive input validation checks, you will have little to no
debugging to do. A Functional API model made entirely of built-in layers will work on
first try -- if you can compile it, it will run.
However, sometimes, you will need to dive deeper and write your own code. Here are some
common examples:
- Creating a new `Layer` subclass.
- Creating a custom `Metric` subclass.
- Implementing a custom `train_step` on a `Model`.
This document provides a few simple tips to help you navigate debugging in these
situations.
"""
"""
## Tip 1: test each part before you test the whole
If you've created any object that has a chance of not working as expected, don't just
drop it in your end-to-end process and watch sparks fly. Rather, test your custom object
in isolation first. This may seem obvious -- but you'd be surprised how often people
don't start with this.
- If you write a custom layer, don't call `fit()` on your entire model just yet. Call
your layer on some test data first.
- If you write a custom metric, start by printing its output for some reference inputs.
Here's a simple example. Let's write a custom layer with a bug in it:
"""
import os
# The last example uses tf.GradientTape and thus requires TensorFlow.
# However, all tips here are applicable with all backends.
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
from keras import layers
from keras import ops
import numpy as np
import tensorflow as tf
class MyAntirectifier(layers.Layer):
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer="he_normal",
name="kernel",
trainable=True,
)
def call(self, inputs):
# Take the positive part of the input
pos = ops.relu(inputs)
# Take the negative part of the input
neg = ops.relu(-inputs)
# Concatenate the positive and negative parts
concatenated = ops.concatenate([pos, neg], axis=0)
# Project the concatenation down to the same dimensionality as the input
return ops.matmul(concatenated, self.kernel)
"""
Now, rather than using it in an end-to-end model directly, let's try to call the layer on
some test data:
```python
x = tf.random.normal(shape=(2, 5))
y = MyAntirectifier()(x)
```
We get the following error:
```
...
1 x = tf.random.normal(shape=(2, 5))
----> 2 y = MyAntirectifier()(x)
...
17 neg = tf.nn.relu(-inputs)
18 concatenated = tf.concat([pos, neg], axis=0)
---> 19 return tf.matmul(concatenated, self.kernel)
...
InvalidArgumentError: Matrix size-incompatible: In[0]: [4,5], In[1]: [10,5] [Op:MatMul]
```
Looks like our input tensor in the `matmul` op may have an incorrect shape.
Let's add a print statement to check the actual shapes:
"""
class MyAntirectifier(layers.Layer):
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer="he_normal",
name="kernel",
trainable=True,
)
def call(self, inputs):
pos = ops.relu(inputs)
neg = ops.relu(-inputs)
print("pos.shape:", pos.shape)
print("neg.shape:", neg.shape)
concatenated = ops.concatenate([pos, neg], axis=0)
print("concatenated.shape:", concatenated.shape)
print("kernel.shape:", self.kernel.shape)
return ops.matmul(concatenated, self.kernel)
"""
We get the following:
```
pos.shape: (2, 5)
neg.shape: (2, 5)
concatenated.shape: (4, 5)
kernel.shape: (10, 5)
```
Turns out we had the wrong axis for the `concat` op! We should be concatenating `neg` and
`pos` along the feature axis 1, not the batch axis 0. Here's the correct version:
"""
class MyAntirectifier(layers.Layer):
def build(self, input_shape):
output_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(output_dim * 2, output_dim),
initializer="he_normal",
name="kernel",
trainable=True,
)
def call(self, inputs):
pos = ops.relu(inputs)
neg = ops.relu(-inputs)
print("pos.shape:", pos.shape)
print("neg.shape:", neg.shape)
concatenated = ops.concatenate([pos, neg], axis=1)
print("concatenated.shape:", concatenated.shape)
print("kernel.shape:", self.kernel.shape)
return ops.matmul(concatenated, self.kernel)
"""
Now our code works fine:
"""
x = keras.random.normal(shape=(2, 5))
y = MyAntirectifier()(x)
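"""
The same advice applies to custom metrics: before wiring one into `compile()`, call it on a
few handcrafted reference inputs and compare the result against a value you computed by hand.
Here's a minimal sketch -- the `MyBinaryAccuracy` metric below is made up purely for this
demonstration:
"""


class MyBinaryAccuracy(keras.metrics.Metric):
    def __init__(self, name="my_binary_accuracy", **kwargs):
        super().__init__(name=name, **kwargs)
        self.correct = self.add_weight(name="correct", initializer="zeros")
        self.total = self.add_weight(name="total", initializer="zeros")

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = ops.cast(y_true, "float32")
        # A prediction counts as correct if it rounds to the target.
        matches = ops.cast(ops.equal(y_true, ops.round(y_pred)), "float32")
        self.correct.assign_add(ops.sum(matches))
        self.total.assign_add(ops.cast(ops.size(matches), "float32"))

    def result(self):
        return self.correct / self.total


# Three of the four rounded predictions match the targets, so we expect 0.75.
metric = MyBinaryAccuracy()
metric.update_state(
    np.array([[1.0], [0.0], [1.0], [0.0]]),
    np.array([[0.9], [0.4], [0.3], [0.2]]),
)
print("Expected 0.75, got:", float(metric.result()))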
"""
## Tip 2: use `model.summary()` and `plot_model()` to check layer output shapes
If you're working with complex network topologies, you're going to need a way
to visualize how your layers are connected and how they transform the data that passes
through them.
Here's an example. Consider this model with three inputs and two outputs (lifted from the
[Functional API guide](https://keras.io/guides/functional_api/#manipulate-complex-graph-topologies)):
"""
num_tags = 12 # Number of unique issue tags
num_words = 10000 # Size of vocabulary obtained when preprocessing text data
num_departments = 4 # Number of departments for predictions
title_input = keras.Input(
shape=(None,), name="title"
) # Variable-length sequence of ints
body_input = keras.Input(shape=(None,), name="body") # Variable-length sequence of ints
tags_input = keras.Input(
shape=(num_tags,), name="tags"
) # Binary vectors of size `num_tags`
# Embed each word in the title into a 64-dimensional vector
title_features = layers.Embedding(num_words, 64)(title_input)
# Embed each word in the text into a 64-dimensional vector
body_features = layers.Embedding(num_words, 64)(body_input)
# Reduce sequence of embedded words in the title into a single 128-dimensional vector
title_features = layers.LSTM(128)(title_features)
# Reduce sequence of embedded words in the body into a single 32-dimensional vector
body_features = layers.LSTM(32)(body_features)
# Merge all available features into a single large vector via concatenation
x = layers.concatenate([title_features, body_features, tags_input])
# Stick a logistic regression for priority prediction on top of the features
priority_pred = layers.Dense(1, name="priority")(x)
# Stick a department classifier on top of the features
department_pred = layers.Dense(num_departments, name="department")(x)
# Instantiate an end-to-end model predicting both priority and department
model = keras.Model(
inputs=[title_input, body_input, tags_input],
outputs=[priority_pred, department_pred],
)
"""
Calling `summary()` can help you check the output shape of each layer:
"""
model.summary()
"""
You can also visualize the entire network topology alongside output shapes using
`plot_model`:
"""
keras.utils.plot_model(model, show_shapes=True)
"""
With this plot, any connectivity-level error becomes immediately obvious.
"""
"""
## Tip 3: to debug what happens during `fit()`, use `run_eagerly=True`
The `fit()` method is fast: it runs a well-optimized, fully-compiled computation graph.
That's great for performance, but it also means that the code you're executing isn't the
Python code you've written. This can be problematic when debugging. As you may recall,
Python is slow -- so we use it as a staging language, not as an execution language.
Thankfully, there's an easy way to run your code in "debug mode", fully eagerly:
pass `run_eagerly=True` to `compile()`. Your call to `fit()` will now get executed line
by line, without any optimization. It's slower, but it makes it possible to print the
value of intermediate tensors, or to use a Python debugger. Great for debugging.
Here's a basic example: let's write a really simple model with a custom `train_step()` method.
Our model just implements gradient descent, but instead of first-order gradients,
it uses a combination of first-order and second-order gradients. Pretty simple so far.
Can you spot what we're doing wrong?
"""
class MyModel(keras.Model):
def train_step(self, data):
inputs, targets = data
trainable_vars = self.trainable_variables
with tf.GradientTape() as tape2:
with tf.GradientTape() as tape1:
y_pred = self(inputs, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compute_loss(y=targets, y_pred=y_pred)
# Compute first-order gradients
dl_dw = tape1.gradient(loss, trainable_vars)
# Compute second-order gradients
d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
# Combine first-order and second-order gradients
grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
# Update weights
self.optimizer.apply_gradients(zip(grads, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
for metric in self.metrics:
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(targets, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
"""
Let's train a simple model on MNIST with this custom `train_step()`.
We pick, somewhat at random, a batch size of 1024 and a learning rate of 0.1. The general
idea being to use larger batches and a larger learning rate than usual, since our
"improved" gradients should lead us to quicker convergence.
"""
# Construct an instance of MyModel
def get_model():
inputs = keras.Input(shape=(784,))
intermediate = layers.Dense(256, activation="relu")(inputs)
outputs = layers.Dense(10, activation="softmax")(intermediate)
model = MyModel(inputs, outputs)
return model
# Prepare data
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)) / 255
model = get_model()
model.compile(
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
loss="sparse_categorical_crossentropy",
)
model.fit(x_train, y_train, epochs=3, batch_size=1024, validation_split=0.1)
"""
Oh no, it doesn't converge! Something is not working as planned.
Time for some step-by-step printing of what's going on with our gradients.
We add various `print` statements in the `train_step` method, and we make sure to pass
`run_eagerly=True` to `compile()` to run our code step-by-step, eagerly.
"""
class MyModel(keras.Model):
def train_step(self, data):
print()
print("----Start of step: %d" % (self.step_counter,))
self.step_counter += 1
inputs, targets = data
trainable_vars = self.trainable_variables
with tf.GradientTape() as tape2:
with tf.GradientTape() as tape1:
y_pred = self(inputs, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compute_loss(y=targets, y_pred=y_pred)
# Compute first-order gradients
dl_dw = tape1.gradient(loss, trainable_vars)
# Compute second-order gradients
d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
print("Max of dl_dw[0]: %.4f" % tf.reduce_max(dl_dw[0]))
print("Min of dl_dw[0]: %.4f" % tf.reduce_min(dl_dw[0]))
print("Mean of dl_dw[0]: %.4f" % tf.reduce_mean(dl_dw[0]))
print("-")
print("Max of d2l_dw2[0]: %.4f" % tf.reduce_max(d2l_dw2[0]))
print("Min of d2l_dw2[0]: %.4f" % tf.reduce_min(d2l_dw2[0]))
print("Mean of d2l_dw2[0]: %.4f" % tf.reduce_mean(d2l_dw2[0]))
# Combine first-order and second-order gradients
grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
# Update weights
self.optimizer.apply_gradients(zip(grads, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
for metric in self.metrics:
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(targets, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
model = get_model()
model.compile(
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
run_eagerly=True,
)
model.step_counter = 0
# We pass epochs=1 and steps_per_epoch=10 to only run 10 steps of training.
model.fit(x_train, y_train, epochs=1, batch_size=1024, verbose=0, steps_per_epoch=10)
"""
What did we learn?
- The first order and second order gradients can have values that differ by orders of
magnitudes.
- Sometimes, they may not even have the same sign.
- Their values can vary greatly at each step.
This leads us to an obvious idea: let's normalize the gradients before combining them.
"""
class MyModel(keras.Model):
def train_step(self, data):
inputs, targets = data
trainable_vars = self.trainable_variables
with tf.GradientTape() as tape2:
with tf.GradientTape() as tape1:
y_pred = self(inputs, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compute_loss(y=targets, y_pred=y_pred)
# Compute first-order gradients
dl_dw = tape1.gradient(loss, trainable_vars)
# Compute second-order gradients
d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
dl_dw = [tf.math.l2_normalize(w) for w in dl_dw]
d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2]
# Combine first-order and second-order gradients
grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
# Update weights
self.optimizer.apply_gradients(zip(grads, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
for metric in self.metrics:
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(targets, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
model = get_model()
model.compile(
optimizer=keras.optimizers.SGD(learning_rate=1e-2),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
model.fit(x_train, y_train, epochs=5, batch_size=1024, validation_split=0.1)
"""
Now, training converges! It doesn't work well at all, but at least the model learns
something.
After spending a few minutes tuning parameters, we get to the following configuration
that works somewhat well (achieves 97% validation accuracy and seems reasonably robust to
overfitting):
- Use `0.2 * w1 + 0.8 * w2` for combining gradients.
- Use a learning rate that decays linearly over time.
I'm not going to say that the idea works -- this isn't at all how you're supposed to do
second-order optimization (pointers: see the Newton & Gauss-Newton methods, quasi-Newton
methods, and BFGS). But hopefully this demonstration gave you an idea of how you can
debug your way out of uncomfortable training situations.
Remember: use `run_eagerly=True` for debugging what happens in `fit()`. And when your code
is finally working as expected, make sure to remove this flag in order to get the best
runtime performance!
Here's our final training run:
"""
class MyModel(keras.Model):
def train_step(self, data):
inputs, targets = data
trainable_vars = self.trainable_variables
with tf.GradientTape() as tape2:
with tf.GradientTape() as tape1:
y_pred = self(inputs, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compute_loss(y=targets, y_pred=y_pred)
# Compute first-order gradients
dl_dw = tape1.gradient(loss, trainable_vars)
# Compute second-order gradients
d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)
dl_dw = [tf.math.l2_normalize(w) for w in dl_dw]
d2l_dw2 = [tf.math.l2_normalize(w) for w in d2l_dw2]
# Combine first-order and second-order gradients
grads = [0.2 * w1 + 0.8 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]
# Update weights
self.optimizer.apply_gradients(zip(grads, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
for metric in self.metrics:
if metric.name == "loss":
metric.update_state(loss)
else:
metric.update_state(targets, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
model = get_model()
lr = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.1, decay_steps=25, decay_rate=0.1
)
model.compile(
optimizer=keras.optimizers.SGD(lr),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
model.fit(x_train, y_train, epochs=50, batch_size=2048, validation_split=0.1)
| keras-io/examples/keras_recipes/debugging_tips.py/0 | {
"file_path": "keras-io/examples/keras_recipes/debugging_tips.py",
"repo_id": "keras-io",
"token_count": 6913
} | 98 |
<jupyter_start><jupyter_text>Evaluating and exporting scikit-learn metrics in a Keras callback**Author:** [lukewood](https://lukewood.xyz)**Date created:** 10/07/2021**Last modified:** 11/17/2023**Description:** This example shows how to use Keras callbacks to evaluate and export non-TensorFlow based metrics. Introduction[Keras callbacks](https://keras.io/api/callbacks/) allow for the execution of arbitrarycode at various stages of the Keras training process. While Keras offers first-classsupport for metric evaluation, [Keras metrics](https://keras.io/api/metrics/) may onlyrely on TensorFlow code internally.While there are TensorFlow implementations of many metrics online, some metrics areimplemented using [NumPy](https://numpy.org/) or another Python-based numerical computation library.By performing metric evaluation inside of a Keras callback, we can leverage any existingmetric, and ultimately export the result to TensorBoard. Jaccard score metricThis example makes use of a sklearn metric, `sklearn.metrics.jaccard_score()`, andwrites the result to TensorBoard using the `tf.summary` API.This template can be modified slightly to make it work with any existing sklearn metric.<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras as keras
from keras import layers
from sklearn.metrics import jaccard_score
import numpy as np
import os
class JaccardScoreCallback(keras.callbacks.Callback):
"""Computes the Jaccard score and logs the results to TensorBoard."""
def __init__(self, name, x_test, y_test, log_dir):
self.x_test = x_test
self.y_test = y_test
self.keras_metric = keras.metrics.Mean("jaccard_score")
self.epoch = 0
self.summary_writer = tf.summary.create_file_writer(os.path.join(log_dir, name))
    def on_epoch_end(self, epoch, logs=None):
self.epoch += 1
self.keras_metric.reset_state()
predictions = self.model.predict(self.x_test)
jaccard_value = jaccard_score(
np.argmax(predictions, axis=-1), self.y_test, average=None
)
self.keras_metric.update_state(jaccard_value)
self._write_metric(
self.keras_metric.name, self.keras_metric.result().numpy().astype(float)
)
def _write_metric(self, name, value):
with self.summary_writer.as_default():
tf.summary.scalar(
name,
value,
step=self.epoch,
)
self.summary_writer.flush()<jupyter_output><empty_output><jupyter_text>Sample usageLet's test our `JaccardScoreCallback` class with a Keras model.<jupyter_code># Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# The data, split between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
callbacks = [
JaccardScoreCallback(model.name, x_test, np.argmax(y_test, axis=-1), "logs")
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.1,
callbacks=callbacks,
)<jupyter_output><empty_output> | keras-io/examples/keras_recipes/ipynb/sklearn_metric_callbacks.ipynb/0 | {
"file_path": "keras-io/examples/keras_recipes/ipynb/sklearn_metric_callbacks.ipynb",
"repo_id": "keras-io",
"token_count": 1629
} | 99 |
<jupyter_start><jupyter_text>Multimodal entailment**Author:** [Sayak Paul](https://twitter.com/RisingSayak)**Date created:** 2021/08/08**Last modified:** 2021/08/15**Description:** Training a multimodal model for predicting entailment. IntroductionIn this example, we will build and train a model for predicting multimodal entailment. We will beusing the[multimodal entailment dataset](https://github.com/google-research-datasets/recognizing-multimodal-entailment)recently introduced by Google Research. What is multimodal entailment?On social media platforms, to audit and moderate contentwe may want to find answers to thefollowing questions in near real-time:* Does a given piece of information contradict the other?* Does a given piece of information imply the other?In NLP, this task is called analyzing _textual entailment_. However, that's onlywhen the information comes from text content.In practice, it's often the case the information available comes not justfrom text content, but from a multimodal combination of text, images, audio, video, etc._Multimodal entailment_ is simply the extension of textual entailment to a varietyof new input modalities. RequirementsThis example requires TensorFlow 2.5 or higher. In addition, TensorFlow Hub andTensorFlow Text are required for the BERT model([Devlin et al.](https://arxiv.org/abs/1810.04805)). These libraries can be installedusing the following command:<jupyter_code>!pip install -q tensorflow_text<jupyter_output><empty_output><jupyter_text>Imports<jupyter_code>from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text
from tensorflow import keras<jupyter_output><empty_output><jupyter_text>Define a label map<jupyter_code>label_map = {"Contradictory": 0, "Implies": 1, "NoEntailment": 2}<jupyter_output><empty_output><jupyter_text>Collect the datasetThe original dataset is available[here](https://github.com/google-research-datasets/recognizing-multimodal-entailment).It comes with URLs of images which are hosted on Twitter's photo storage system calledthe[Photo Blob Storage (PBS for short)](https://blog.twitter.com/engineering/en_us/a/2012/blobstore-twitter-s-in-house-photo-storage-system).We will be working with the downloaded images along with additional data that comes withthe original dataset. Thanks to[Nilabhra Roy Chowdhury](https://de.linkedin.com/in/nilabhraroychowdhury) who worked onpreparing the image data.<jupyter_code>image_base_path = keras.utils.get_file(
"tweet_images",
"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/releases/download/v1.0.0/tweet_images.tar.gz",
untar=True,
)<jupyter_output><empty_output><jupyter_text>Read the dataset and apply basic preprocessing<jupyter_code>df = pd.read_csv(
"https://github.com/sayakpaul/Multimodal-Entailment-Baseline/raw/main/csvs/tweets.csv"
)
df.sample(10)<jupyter_output><empty_output><jupyter_text>The columns we are interested in are the following:* `text_1`* `image_1`* `text_2`* `image_2`* `label`The entailment task is formulated as the following:***Given the pairs of (`text_1`, `image_1`) and (`text_2`, `image_2`) do they entail (ornot entail or contradict) each other?***We have the images already downloaded. `image_1` is downloaded as `id1` as its filenameand `image2` is downloaded as `id2` as its filename. In the next step, we will add twomore columns to `df` - filepaths of `image_1`s and `image_2`s.<jupyter_code>images_one_paths = []
images_two_paths = []
for idx in range(len(df)):
current_row = df.iloc[idx]
id_1 = current_row["id_1"]
id_2 = current_row["id_2"]
    extension_one = current_row["image_1"].split(".")[-1]
    extension_two = current_row["image_2"].split(".")[-1]
    image_one_path = os.path.join(image_base_path, str(id_1) + f".{extension_one}")
    image_two_path = os.path.join(image_base_path, str(id_2) + f".{extension_two}")
images_one_paths.append(image_one_path)
images_two_paths.append(image_two_path)
df["image_1_path"] = images_one_paths
df["image_2_path"] = images_two_paths
# Create another column containing the integer ids of
# the string labels.
df["label_idx"] = df["label"].apply(lambda x: label_map[x])<jupyter_output><empty_output><jupyter_text>Dataset visualization<jupyter_code>def visualize(idx):
current_row = df.iloc[idx]
image_1 = plt.imread(current_row["image_1_path"])
image_2 = plt.imread(current_row["image_2_path"])
text_1 = current_row["text_1"]
text_2 = current_row["text_2"]
label = current_row["label"]
plt.subplot(1, 2, 1)
plt.imshow(image_1)
plt.axis("off")
plt.title("Image One")
plt.subplot(1, 2, 2)
    plt.imshow(image_2)
plt.axis("off")
plt.title("Image Two")
plt.show()
print(f"Text one: {text_1}")
print(f"Text two: {text_2}")
print(f"Label: {label}")
random_idx = np.random.choice(len(df))
visualize(random_idx)
random_idx = np.random.choice(len(df))
visualize(random_idx)<jupyter_output><empty_output><jupyter_text>Train/test splitThe dataset suffers from[class imbalance problem](https://developers.google.com/machine-learning/glossaryclass-imbalanced-dataset).We can confirm that in the following cell.<jupyter_code>df["label"].value_counts()<jupyter_output><empty_output><jupyter_text>To account for that we will go for a stratified split.<jupyter_code># 10% for test
train_df, test_df = train_test_split(
df, test_size=0.1, stratify=df["label"].values, random_state=42
)
# 5% for validation
train_df, val_df = train_test_split(
train_df, test_size=0.05, stratify=train_df["label"].values, random_state=42
)
print(f"Total training examples: {len(train_df)}")
print(f"Total validation examples: {len(val_df)}")
print(f"Total test examples: {len(test_df)}")<jupyter_output><empty_output><jupyter_text>Data input pipelineTensorFlow Hub provides[variety of BERT family of models](https://www.tensorflow.org/text/tutorials/bert_glueloading_models_from_tensorflow_hub).Each of those models comes with acorresponding preprocessing layer. You can learn more about these models and theirpreprocessing layers from[this resource](https://www.tensorflow.org/text/tutorials/bert_glueloading_models_from_tensorflow_hub).To keep the runtime of this example relatively short, we will use a smaller variant ofthe original BERT model.<jupyter_code># Define TF Hub paths to the BERT encoder and its preprocessor
bert_model_path = (
"https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1"
)
bert_preprocess_path = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"<jupyter_output><empty_output><jupyter_text>Our text preprocessing code mostly comes from[this tutorial](https://www.tensorflow.org/text/tutorials/bert_glue).You are highly encouraged to check out the tutorial to learn more about the inputpreprocessing.<jupyter_code>def make_bert_preprocessing_model(sentence_features, seq_length=128):
"""Returns Model mapping string features to BERT inputs.
Args:
sentence_features: A list with the names of string-valued features.
seq_length: An integer that defines the sequence length of BERT inputs.
Returns:
A Keras Model that can be called on a list or dict of string Tensors
(with the order or names, resp., given by sentence_features) and
returns a dict of tensors for input to BERT.
"""
input_segments = [
tf.keras.layers.Input(shape=(), dtype=tf.string, name=ft)
for ft in sentence_features
]
# Tokenize the text to word pieces.
bert_preprocess = hub.load(bert_preprocess_path)
tokenizer = hub.KerasLayer(bert_preprocess.tokenize, name="tokenizer")
segments = [tokenizer(s) for s in input_segments]
# Optional: Trim segments in a smart way to fit seq_length.
# Simple cases (like this example) can skip this step and let
# the next step apply a default truncation to approximately equal lengths.
truncated_segments = segments
# Pack inputs. The details (start/end token ids, dict of output tensors)
# are model-dependent, so this gets loaded from the SavedModel.
packer = hub.KerasLayer(
bert_preprocess.bert_pack_inputs,
arguments=dict(seq_length=seq_length),
name="packer",
)
model_inputs = packer(truncated_segments)
return keras.Model(input_segments, model_inputs)
bert_preprocess_model = make_bert_preprocessing_model(["text_1", "text_2"])
keras.utils.plot_model(bert_preprocess_model, show_shapes=True, show_dtype=True)<jupyter_output><empty_output><jupyter_text>Run the preprocessor on a sample input<jupyter_code>idx = np.random.choice(len(train_df))
row = train_df.iloc[idx]
sample_text_1, sample_text_2 = row["text_1"], row["text_2"]
print(f"Text 1: {sample_text_1}")
print(f"Text 2: {sample_text_2}")
test_text = [np.array([sample_text_1]), np.array([sample_text_2])]
text_preprocessed = bert_preprocess_model(test_text)
print("Keys : ", list(text_preprocessed.keys()))
print("Shape Word Ids : ", text_preprocessed["input_word_ids"].shape)
print("Word Ids : ", text_preprocessed["input_word_ids"][0, :16])
print("Shape Mask : ", text_preprocessed["input_mask"].shape)
print("Input Mask : ", text_preprocessed["input_mask"][0, :16])
print("Shape Type Ids : ", text_preprocessed["input_type_ids"].shape)
print("Type Ids : ", text_preprocessed["input_type_ids"][0, :16])<jupyter_output><empty_output><jupyter_text>We will now create `tf.data.Dataset` objects from the dataframes.Note that the text inputs will be preprocessed as a part of the data input pipeline. Butthe preprocessing modules can also be a part of their corresponding BERT models. Thishelps reduce the training/serving skew and lets our models operate with raw text inputs.Follow [this tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert)to learn more about how to incorporate the preprocessing modules directly inside themodels.<jupyter_code>def dataframe_to_dataset(dataframe):
columns = ["image_1_path", "image_2_path", "text_1", "text_2", "label_idx"]
dataframe = dataframe[columns].copy()
labels = dataframe.pop("label_idx")
ds = tf.data.Dataset.from_tensor_slices((dict(dataframe), labels))
ds = ds.shuffle(buffer_size=len(dataframe))
return ds<jupyter_output><empty_output><jupyter_text>Preprocessing utilities<jupyter_code>resize = (128, 128)
bert_input_features = ["input_word_ids", "input_type_ids", "input_mask"]
def preprocess_image(image_path):
extension = tf.strings.split(image_path)[-1]
image = tf.io.read_file(image_path)
if extension == b"jpg":
image = tf.image.decode_jpeg(image, 3)
else:
image = tf.image.decode_png(image, 3)
image = tf.image.resize(image, resize)
return image
def preprocess_text(text_1, text_2):
text_1 = tf.convert_to_tensor([text_1])
text_2 = tf.convert_to_tensor([text_2])
output = bert_preprocess_model([text_1, text_2])
output = {feature: tf.squeeze(output[feature]) for feature in bert_input_features}
return output
def preprocess_text_and_image(sample):
image_1 = preprocess_image(sample["image_1_path"])
image_2 = preprocess_image(sample["image_2_path"])
text = preprocess_text(sample["text_1"], sample["text_2"])
return {"image_1": image_1, "image_2": image_2, "text": text}<jupyter_output><empty_output><jupyter_text>Create the final datasets<jupyter_code>batch_size = 32
auto = tf.data.AUTOTUNE
def prepare_dataset(dataframe, training=True):
ds = dataframe_to_dataset(dataframe)
if training:
ds = ds.shuffle(len(train_df))
ds = ds.map(lambda x, y: (preprocess_text_and_image(x), y)).cache()
ds = ds.batch(batch_size).prefetch(auto)
return ds
train_ds = prepare_dataset(train_df)
validation_ds = prepare_dataset(val_df, False)
test_ds = prepare_dataset(test_df, False)<jupyter_output><empty_output><jupyter_text>Model building utilitiesOur final model will accept two images along with their text counterparts. While theimages will be directly fed to the model the text inputs will first be preprocessed andthen will make it into the model. Below is a visual illustration of this approach:The model consists of the following elements:* A standalone encoder for the images. We will use a[ResNet50V2](https://arxiv.org/abs/1603.05027) pre-trained on the ImageNet-1k dataset forthis.* A standalone encoder for the images. A pre-trained BERT will be used for this.After extracting the individual embeddings, they will be projected in an identical space.Finally, their projections will be concatenated and be fed to the final classificationlayer.This is a multi-class classification problem involving the following classes:* NoEntailment* Implies* Contradictory`project_embeddings()`, `create_vision_encoder()`, and `create_text_encoder()` utilitiesare referred from [this example](https://keras.io/examples/nlp/nl_image_search/). Projection utilities<jupyter_code>def project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
):
projected_embeddings = keras.layers.Dense(units=projection_dims)(embeddings)
for _ in range(num_projection_layers):
x = tf.nn.gelu(projected_embeddings)
x = keras.layers.Dense(projection_dims)(x)
x = keras.layers.Dropout(dropout_rate)(x)
x = keras.layers.Add()([projected_embeddings, x])
projected_embeddings = keras.layers.LayerNormalization()(x)
return projected_embeddings<jupyter_output><empty_output><jupyter_text>Vision encoder utilities<jupyter_code>def create_vision_encoder(
num_projection_layers, projection_dims, dropout_rate, trainable=False
):
# Load the pre-trained ResNet50V2 model to be used as the base encoder.
resnet_v2 = keras.applications.ResNet50V2(
include_top=False, weights="imagenet", pooling="avg"
)
# Set the trainability of the base encoder.
for layer in resnet_v2.layers:
layer.trainable = trainable
# Receive the images as inputs.
image_1 = keras.Input(shape=(128, 128, 3), name="image_1")
image_2 = keras.Input(shape=(128, 128, 3), name="image_2")
# Preprocess the input image.
preprocessed_1 = keras.applications.resnet_v2.preprocess_input(image_1)
preprocessed_2 = keras.applications.resnet_v2.preprocess_input(image_2)
# Generate the embeddings for the images using the resnet_v2 model
# concatenate them.
embeddings_1 = resnet_v2(preprocessed_1)
embeddings_2 = resnet_v2(preprocessed_2)
embeddings = keras.layers.Concatenate()([embeddings_1, embeddings_2])
# Project the embeddings produced by the model.
outputs = project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
)
# Create the vision encoder model.
return keras.Model([image_1, image_2], outputs, name="vision_encoder")<jupyter_output><empty_output><jupyter_text>Text encoder utilities<jupyter_code>def create_text_encoder(
num_projection_layers, projection_dims, dropout_rate, trainable=False
):
# Load the pre-trained BERT model to be used as the base encoder.
bert = hub.KerasLayer(bert_model_path, name="bert",)
# Set the trainability of the base encoder.
bert.trainable = trainable
# Receive the text as inputs.
bert_input_features = ["input_type_ids", "input_mask", "input_word_ids"]
inputs = {
feature: keras.Input(shape=(128,), dtype=tf.int32, name=feature)
for feature in bert_input_features
}
# Generate embeddings for the preprocessed text using the BERT model.
embeddings = bert(inputs)["pooled_output"]
# Project the embeddings produced by the model.
outputs = project_embeddings(
embeddings, num_projection_layers, projection_dims, dropout_rate
)
# Create the text encoder model.
return keras.Model(inputs, outputs, name="text_encoder")<jupyter_output><empty_output><jupyter_text>Multimodal model utilities<jupyter_code>def create_multimodal_model(
num_projection_layers=1,
projection_dims=256,
dropout_rate=0.1,
vision_trainable=False,
text_trainable=False,
):
# Receive the images as inputs.
image_1 = keras.Input(shape=(128, 128, 3), name="image_1")
image_2 = keras.Input(shape=(128, 128, 3), name="image_2")
# Receive the text as inputs.
bert_input_features = ["input_type_ids", "input_mask", "input_word_ids"]
text_inputs = {
feature: keras.Input(shape=(128,), dtype=tf.int32, name=feature)
for feature in bert_input_features
}
# Create the encoders.
vision_encoder = create_vision_encoder(
num_projection_layers, projection_dims, dropout_rate, vision_trainable
)
text_encoder = create_text_encoder(
num_projection_layers, projection_dims, dropout_rate, text_trainable
)
# Fetch the embedding projections.
vision_projections = vision_encoder([image_1, image_2])
text_projections = text_encoder(text_inputs)
# Concatenate the projections and pass through the classification layer.
concatenated = keras.layers.Concatenate()([vision_projections, text_projections])
outputs = keras.layers.Dense(3, activation="softmax")(concatenated)
return keras.Model([image_1, image_2, text_inputs], outputs)
multimodal_model = create_multimodal_model()
keras.utils.plot_model(multimodal_model, show_shapes=True)<jupyter_output><empty_output><jupyter_text>You can inspect the structure of the individual encoders as well by setting the`expand_nested` argument of `plot_model()` to `True`. You are encouragedto play with the different hyperparameters involved in building this model andobserve how the final performance is affected. Compile and train the model<jupyter_code>multimodal_model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics="accuracy"
)
history = multimodal_model.fit(train_ds, validation_data=validation_ds, epochs=10)<jupyter_output><empty_output><jupyter_text>Evaluate the model<jupyter_code>_, acc = multimodal_model.evaluate(test_ds)
print(f"Accuracy on the test set: {round(acc * 100, 2)}%.")<jupyter_output><empty_output> | keras-io/examples/nlp/ipynb/multimodal_entailment.ipynb/0 | {
"file_path": "keras-io/examples/nlp/ipynb/multimodal_entailment.ipynb",
"repo_id": "keras-io",
"token_count": 6521
} | 100 |
# Named Entity Recognition using Transformers
**Author:** [Varun Singh](https://www.linkedin.com/in/varunsingh2/)<br>
**Date created:** Jun 23, 2021<br>
**Last modified:** Jun 24, 2021<br>
**Description:** NER using the Transformers and data from CoNLL 2003 shared task.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/nlp/ipynb/ner_transformers.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/nlp/ner_transformers.py)
---
## Introduction
Named Entity Recognition (NER) is the process of identifying named entities in text.
Example of named entities are: "Person", "Location", "Organization", "Dates" etc. NER is
essentially a token classification task where every token is classified into one or more
predetermined categories.
In this exercise, we will train a simple Transformer based model to perform NER. We will
be using the data from CoNLL 2003 shared task. For more information about the dataset,
please visit [the dataset website](https://www.clips.uantwerpen.be/conll2003/ner/).
However, since obtaining this data requires an additional step of getting a free license, we will be using
HuggingFace's datasets library which contains a processed version of this dataset.
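To make the task concrete, here is what token-level tags look like for the first sentence of
the training split we will load below (shown purely for illustration):

```python
tokens = ["EU", "rejects", "German", "call", "to", "boycott", "British", "lamb", "."]
tags = ["B-ORG", "O", "B-MISC", "O", "O", "O", "B-MISC", "O", "O"]
for token, tag in zip(tokens, tags):
    print(f"{token:10} -> {tag}")
```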
---
## Install the open source datasets library from HuggingFace
We also download the script used to evaluate NER models.
```python
!pip3 install datasets
!wget https://raw.githubusercontent.com/sighsmile/conlleval/master/conlleval.py
```
<div class="k-default-codeblock">
```
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.110.133, 185.199.111.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 7502 (7.3K) [text/plain]
Saving to: ‘conlleval.py’
```
</div>
<div class="k-default-codeblock">
```
conlleval.py 100%[===================>] 7.33K --.-KB/s in 0s
```
</div>
<div class="k-default-codeblock">
```
2023-11-10 16:58:25 (217 MB/s) - ‘conlleval.py’ saved [7502/7502]
```
</div>
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import os
import keras
import numpy as np
import tensorflow as tf
from keras import layers
from datasets import load_dataset
from collections import Counter
from conlleval import evaluate
```
We will be using the transformer implementation from this fantastic
[example](https://keras.io/examples/nlp/text_classification_with_transformer/).
Let's start by defining a `TransformerBlock` layer:
```python
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super().__init__()
self.att = keras.layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim
)
self.ffn = keras.Sequential(
[
keras.layers.Dense(ff_dim, activation="relu"),
keras.layers.Dense(embed_dim),
]
)
self.layernorm1 = keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = keras.layers.Dropout(rate)
self.dropout2 = keras.layers.Dropout(rate)
def call(self, inputs, training=False):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
```
Next, let's define a `TokenAndPositionEmbedding` layer:
```python
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = keras.layers.Embedding(
input_dim=vocab_size, output_dim=embed_dim
)
self.pos_emb = keras.layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, inputs):
maxlen = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
position_embeddings = self.pos_emb(positions)
token_embeddings = self.token_emb(inputs)
return token_embeddings + position_embeddings
```
---
## Build the NER model class as a `keras.Model` subclass
```python
class NERModel(keras.Model):
def __init__(
self, num_tags, vocab_size, maxlen=128, embed_dim=32, num_heads=2, ff_dim=32
):
super().__init__()
self.embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
self.transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)
self.dropout1 = layers.Dropout(0.1)
self.ff = layers.Dense(ff_dim, activation="relu")
self.dropout2 = layers.Dropout(0.1)
self.ff_final = layers.Dense(num_tags, activation="softmax")
def call(self, inputs, training=False):
x = self.embedding_layer(inputs)
x = self.transformer_block(x)
x = self.dropout1(x, training=training)
x = self.ff(x)
x = self.dropout2(x, training=training)
x = self.ff_final(x)
return x
```
---
## Load the CoNLL 2003 dataset from the datasets library and process it
```python
conll_data = load_dataset("conll2003")
```
We will export this data to a tab-separated file format which will be easy to read as a
`tf.data.Dataset` object.
```python
def export_to_file(export_file_path, data):
with open(export_file_path, "w") as f:
for record in data:
ner_tags = record["ner_tags"]
tokens = record["tokens"]
if len(tokens) > 0:
f.write(
str(len(tokens))
+ "\t"
+ "\t".join(tokens)
+ "\t"
+ "\t".join(map(str, ner_tags))
+ "\n"
)
os.mkdir("data")
export_to_file("./data/conll_train.txt", conll_data["train"])
export_to_file("./data/conll_val.txt", conll_data["validation"])
```
---
## Make the NER label lookup table
NER labels are usually provided in IOB, IOB2 or IOBES formats. Check out this link for
more information:
[Wikipedia](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging))
Note that we start our label numbering from 1 since 0 will be reserved for padding. We
have a total of 10 labels: 9 from the NER dataset and one for padding.
```python
def make_tag_lookup_table():
iob_labels = ["B", "I"]
ner_labels = ["PER", "ORG", "LOC", "MISC"]
all_labels = [(label1, label2) for label2 in ner_labels for label1 in iob_labels]
all_labels = ["-".join([a, b]) for a, b in all_labels]
all_labels = ["[PAD]", "O"] + all_labels
    return dict(zip(range(len(all_labels)), all_labels))
mapping = make_tag_lookup_table()
print(mapping)
```
<div class="k-default-codeblock">
```
{0: '[PAD]', 1: 'O', 2: 'B-PER', 3: 'I-PER', 4: 'B-ORG', 5: 'I-ORG', 6: 'B-LOC', 7: 'I-LOC', 8: 'B-MISC', 9: 'I-MISC'}
```
</div>
Get a list of all tokens in the training dataset. This will be used to create the
vocabulary.
```python
all_tokens = sum(conll_data["train"]["tokens"], [])
all_tokens_array = np.array(list(map(str.lower, all_tokens)))
counter = Counter(all_tokens_array)
print(len(counter))
num_tags = len(mapping)
vocab_size = 20000
# We only take (vocab_size - 2) most common words from the training data since
# the `StringLookup` class uses 2 additional tokens - one denoting an unknown
# token and another one denoting a masking token
vocabulary = [token for token, count in counter.most_common(vocab_size - 2)]
# The StringLookup class will convert tokens to token IDs
lookup_layer = keras.layers.StringLookup(vocabulary=vocabulary)
```
<div class="k-default-codeblock">
```
21009
```
</div>
Create 2 new `Dataset` objects from the training and validation data
```python
train_data = tf.data.TextLineDataset("./data/conll_train.txt")
val_data = tf.data.TextLineDataset("./data/conll_val.txt")
```
Print out one line to make sure it looks good. The first record in the line is the number of tokens.
After that we will have all the tokens followed by all the ner tags.
```python
print(list(train_data.take(1).as_numpy_iterator()))
```
<div class="k-default-codeblock">
```
[b'9\tEU\trejects\tGerman\tcall\tto\tboycott\tBritish\tlamb\t.\t3\t0\t7\t0\t0\t0\t7\t0\t0']
```
</div>
We will be using the following map function to transform the data in the dataset:
```python
def map_record_to_training_data(record):
record = tf.strings.split(record, sep="\t")
length = tf.strings.to_number(record[0], out_type=tf.int32)
tokens = record[1 : length + 1]
tags = record[length + 1 :]
tags = tf.strings.to_number(tags, out_type=tf.int64)
tags += 1
return tokens, tags
def lowercase_and_convert_to_ids(tokens):
tokens = tf.strings.lower(tokens)
return lookup_layer(tokens)
# We use `padded_batch` here because each record in the dataset has a
# different length.
batch_size = 32
train_dataset = (
train_data.map(map_record_to_training_data)
.map(lambda x, y: (lowercase_and_convert_to_ids(x), y))
.padded_batch(batch_size)
)
val_dataset = (
val_data.map(map_record_to_training_data)
.map(lambda x, y: (lowercase_and_convert_to_ids(x), y))
.padded_batch(batch_size)
)
ner_model = NERModel(num_tags, vocab_size, embed_dim=32, num_heads=4, ff_dim=64)
```
We will be using a custom loss function that will ignore the loss from padded tokens.
```python
class CustomNonPaddingTokenLoss(keras.losses.Loss):
def __init__(self, name="custom_ner_loss"):
super().__init__(name=name)
def call(self, y_true, y_pred):
loss_fn = keras.losses.SparseCategoricalCrossentropy(
from_logits=False, reduction=None
)
loss = loss_fn(y_true, y_pred)
mask = tf.cast((y_true > 0), dtype=tf.float32)
loss = loss * mask
return tf.reduce_sum(loss) / tf.reduce_sum(mask)
loss = CustomNonPaddingTokenLoss()
```
---
## Compile and fit the model
```python
ner_model.compile(optimizer="adam", loss=loss)
ner_model.fit(train_dataset, epochs=10)
def tokenize_and_convert_to_ids(text):
tokens = text.split()
return lowercase_and_convert_to_ids(tokens)
# Sample inference using the trained model
sample_input = tokenize_and_convert_to_ids(
"eu rejects german call to boycott british lamb"
)
sample_input = tf.reshape(sample_input, shape=[1, -1])
print(sample_input)
output = ner_model.predict(sample_input)
prediction = np.argmax(output, axis=-1)[0]
prediction = [mapping[i] for i in prediction]
# eu -> B-ORG, german -> B-MISC, british -> B-MISC
print(prediction)
```
<div class="k-default-codeblock">
```
Epoch 1/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 300s 671ms/step - loss: 0.9260
Epoch 2/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.2909
Epoch 3/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1589
Epoch 4/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.1176
Epoch 5/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0941
Epoch 6/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0747
Epoch 7/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0597
Epoch 8/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0534
Epoch 9/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0459
Epoch 10/10
439/439 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 0.0408
tf.Tensor([[ 988 10950 204 628 6 3938 215 5773]], shape=(1, 8), dtype=int64)
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 600ms/step
['B-ORG', 'O', 'B-MISC', 'O', 'O', 'O', 'B-MISC', 'O']
```
</div>
---
## Metrics calculation
Here is a function to calculate the metrics. The function calculates F1 score for the
overall NER dataset as well as individual scores for each NER tag.
```python
def calculate_metrics(dataset):
all_true_tag_ids, all_predicted_tag_ids = [], []
for x, y in dataset:
output = ner_model.predict(x, verbose=0)
predictions = np.argmax(output, axis=-1)
predictions = np.reshape(predictions, [-1])
true_tag_ids = np.reshape(y, [-1])
mask = (true_tag_ids > 0) & (predictions > 0)
true_tag_ids = true_tag_ids[mask]
predicted_tag_ids = predictions[mask]
all_true_tag_ids.append(true_tag_ids)
all_predicted_tag_ids.append(predicted_tag_ids)
all_true_tag_ids = np.concatenate(all_true_tag_ids)
all_predicted_tag_ids = np.concatenate(all_predicted_tag_ids)
predicted_tags = [mapping[tag] for tag in all_predicted_tag_ids]
real_tags = [mapping[tag] for tag in all_true_tag_ids]
evaluate(real_tags, predicted_tags)
calculate_metrics(val_dataset)
```
<div class="k-default-codeblock">
```
processed 51362 tokens with 5942 phrases; found: 5659 phrases; correct: 3941.
accuracy: 64.49%; (non-O)
accuracy: 93.23%; precision: 69.64%; recall: 66.32%; FB1: 67.94
LOC: precision: 82.77%; recall: 79.26%; FB1: 80.98 1759
MISC: precision: 74.94%; recall: 68.11%; FB1: 71.36 838
ORG: precision: 55.94%; recall: 65.32%; FB1: 60.27 1566
PER: precision: 65.57%; recall: 53.26%; FB1: 58.78 1496
```
</div>
---
## Conclusions
In this exercise, we created a simple transformer based named entity recognition model.
We trained it on the CoNLL 2003 shared task data and got an overall F1 score of around 70%.
State-of-the-art NER models obtained by fine-tuning pretrained models such as BERT or ELECTRA
can easily reach a much higher F1 score (between 90% and 95%) on this dataset, owing to the
knowledge of words acquired during pretraining and the use of subword tokenization.
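For reference, loading such a pretrained token-classification model with the 🤗 Transformers
library looks roughly like the sketch below (illustrative only -- the model name is a
placeholder and the tokenization/label-alignment step is not shown):

```python
from transformers import TFAutoModelForTokenClassification

# A pretrained encoder with a freshly initialized token-classification head sized
# for our tag set. Inputs must be tokenized with the matching subword tokenizer and
# the labels aligned to the resulting word pieces before calling `fit()`.
model = TFAutoModelForTokenClassification.from_pretrained(
    "bert-base-cased", num_labels=num_tags
)
```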
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/ner-with-transformers)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/ner_with_transformers).
| keras-io/examples/nlp/md/ner_transformers.md/0 | {
"file_path": "keras-io/examples/nlp/md/ner_transformers.md",
"repo_id": "keras-io",
"token_count": 5927
} | 101 |
"""
Title: Training a language model from scratch with 🤗 Transformers and TPUs
Authors: [Matthew Carrigan](https://twitter.com/carrigmat), [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2023/05/21
Last modified: 2023/05/21
Description: Train a masked language model on TPUs using 🤗 Transformers.
Accelerator: TPU
"""
"""
## Introduction
In this example, we cover how to train a masked language model using TensorFlow,
[🤗 Transformers](https://huggingface.co/transformers/index),
and TPUs.
TPU training is a useful skill to have: TPU pods are high-performance and extremely
scalable, making it easy to train models at any scale from a few tens of millions of
parameters up to truly enormous sizes: Google's PaLM model
(over 500 billion parameters!) was trained entirely on TPU pods.
We've previously written a
[**tutorial**](https://huggingface.co/docs/transformers/main/perf_train_tpu_tf)
and a
[**Colab example**](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)
showing small-scale TPU training with TensorFlow and introducing the core concepts you
need to understand to get your model working on TPU. However, our Colab example doesn't
contain all the steps needed to train a language model from scratch, such as
training the tokenizer. So, we wanted to provide a consolidated example that
walks you through every critical step involved.
As in our Colab example, we're taking advantage of TensorFlow's very clean TPU support
via XLA and `TPUStrategy`. We'll also be benefiting from the fact that the majority of
the TensorFlow models in 🤗 Transformers are fully
[XLA-compatible](https://huggingface.co/blog/tf-xla-generate).
So, surprisingly little work is needed to get them to run on TPU.
This example is designed to be **scalable** and much closer to a realistic training run
-- although we only use a BERT-sized model by default, the code could be expanded to a
much larger model and a much more powerful TPU pod slice by changing a few configuration
options.
The following diagram gives you a pictorial overview of the steps involved in training a
language model with 🤗 Transformers using TensorFlow and TPUs:

*(Contents of this example overlap with
[this blog post](https://huggingface.co/blog/tf_tpu)).*
"""
"""
## Data
We use the
[WikiText dataset (v1)](https://huggingface.co/datasets/wikitext).
You can head over to the
[dataset page on the Hugging Face Hub](https://huggingface.co/datasets/wikitext)
to explore the dataset.

Since the dataset is already available on the Hub in a compatible format, we can easily
load and interact with it using
[🤗 datasets](https://hf.co/docs/datasets).
However, training a language model from scratch also requires a separate
tokenizer training step. We skip that part in this example for brevity, but
here's a gist of what we can do to train a tokenizer from scratch:
- Load the `train` split of the WikiText using 🤗 datasets.
- Leverage
[🤗 tokenizers](https://huggingface.co/docs/tokenizers/index)
to train a
[**Unigram model**](https://huggingface.co/course/chapter6/7?fw=pt).
- Upload the trained tokenizer on the Hub.
You can find the tokenizer training
code
[**here**](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling-tpu#training-a-tokenizer)
and the tokenizer
[**here**](https://huggingface.co/tf-tpu/unigram-tokenizer-wikitext).
This script also allows you to run it with
[**any compatible dataset**](https://huggingface.co/datasets?task_ids=task_ids:language-modeling)
from the Hub.
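To give a flavor of what that script does, here is a minimal sketch of training a
Unigram tokenizer with 🤗 tokenizers (illustrative only; the dataset configuration
name and vocabulary size below are assumptions, not the exact settings used for the
hosted tokenizer):

```python
from datasets import load_dataset
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Load the raw text of the train split.
dataset = load_dataset("wikitext", "wikitext-103-raw-v1", split="train")

# A Unigram model with whitespace-aware pre-tokenization.
tokenizer = Tokenizer(models.Unigram())
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace()
trainer = trainers.UnigramTrainer(
    vocab_size=25000,
    special_tokens=["<s>", "</s>", "<pad>", "<unk>", "<mask>"],
    unk_token="<unk>",
)


def batch_iterator(batch_size=1000):
    # Stream the corpus in small batches instead of materializing it in memory.
    for i in range(0, len(dataset), batch_size):
        yield dataset[i : i + batch_size]["text"]


tokenizer.train_from_iterator(batch_iterator(), trainer=trainer)
# The trained tokenizer can then be saved locally or pushed to the Hub.
```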
"""
"""
## Tokenizing the data and creating TFRecords
Once the tokenizer is trained, we can use it on all the dataset splits
(`train`, `validation`, and `test` in this case) and create TFRecord shards out of them.
Having the data splits spread across multiple TFRecord shards helps with massively
parallel processing as opposed to having each split in a single TFRecord file.
We tokenize the samples individually. We then take a batch of samples, concatenate them
together, and split them into several chunks of a fixed size (128 in our case). We follow
this strategy rather than tokenizing a batch of samples with a fixed length to avoid
aggressively discarding text content (because of truncation).
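In pseudocode form, the concatenate-and-chunk step looks roughly like this (a minimal
sketch; the real preparation script linked below also handles attention masks and
serialization):

```python
chunk_size = 128


def group_texts(tokenized_batch):
    # Concatenate every sample in the batch into one long list of token ids.
    concatenated = sum(tokenized_batch["input_ids"], [])
    # Drop the trailing partial chunk so every chunk has exactly `chunk_size` tokens.
    total_length = (len(concatenated) // chunk_size) * chunk_size
    # Split the long sequence into fixed-size chunks.
    return [
        concatenated[i : i + chunk_size] for i in range(0, total_length, chunk_size)
    ]
```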
We then take these tokenized samples in batches and serialize those batches as multiple
TFRecord shards, where the total dataset length and individual shard size determine the
number of shards. Finally, these shards are pushed to a
[Google Cloud Storage (GCS) bucket](https://cloud.google.com/storage/docs/json_api/v1/buckets).
If you're using a TPU node for training, then the data needs to be streamed from a GCS
bucket since the node host memory is very small. But for TPU VMs, we can use datasets
locally or even attach persistent storage to those VMs. Since TPU nodes (which is what we
have in a Colab) are still quite heavily used, we based our example on using a GCS bucket
for data storage.
You can see all of this in code in
[this script](https://github.com/huggingface/transformers/blob/main/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py).
For convenience, we have also hosted the resultant TFRecord shards in
[this repository](https://huggingface.co/datasets/tf-tpu/wikitext-v1-tfrecords)
on the Hub.
Once the data is tokenized and serialized into TFRecord shards, we can proceed toward
training.
"""
"""
## Training
### Setup and imports
Let's start by installing 🤗 Transformers.
"""
"""shell
pip install transformers -q
"""
"""
Then, let's import the modules we need.
"""
import os
import re
import tensorflow as tf
import transformers
"""
### Initialize TPUs
"""
"""
Then let's connect to our TPU and determine the distribution strategy:
"""
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)
print(f"Available number of replicas: {strategy.num_replicas_in_sync}")
"""
We then load the tokenizer. For more details on the tokenizer, check out
[its repository](https://huggingface.co/tf-tpu/unigram-tokenizer-wikitext).
For the model, we use RoBERTa (the base variant), introduced in
[this paper](https://arxiv.org/abs/1907.11692).
"""
"""
### Initialize the tokenizer
"""
tokenizer = "tf-tpu/unigram-tokenizer-wikitext"
pretrained_model_config = "roberta-base"
tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer)
config = transformers.AutoConfig.from_pretrained(pretrained_model_config)
config.vocab_size = tokenizer.vocab_size
"""
### Prepare the datasets
"""
"""
We now load the TFRecord shards of the WikiText dataset (which the Hugging Face team
prepared beforehand for this example):
"""
train_dataset_path = "gs://tf-tpu-training-resources/train"
eval_dataset_path = "gs://tf-tpu-training-resources/validation"
training_records = tf.io.gfile.glob(os.path.join(train_dataset_path, "*.tfrecord"))
eval_records = tf.io.gfile.glob(os.path.join(eval_dataset_path, "*.tfrecord"))
"""
Now, we will write a utility to count the number of training samples we have. We need to
know this value in order to properly initialize our optimizer later:
"""
def count_samples(file_list):
num_samples = 0
for file in file_list:
filename = file.split("/")[-1]
sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
sample_count = int(sample_count)
num_samples += sample_count
return num_samples
num_train_samples = count_samples(training_records)
print(f"Number of total training samples: {num_train_samples}")
"""
Let's now prepare our datasets for training and evaluation. We start by writing our
utilities. First, we need to be able to decode the TFRecords:
"""
max_sequence_length = 512
def decode_fn(example):
features = {
"input_ids": tf.io.FixedLenFeature(
dtype=tf.int64, shape=(max_sequence_length,)
),
"attention_mask": tf.io.FixedLenFeature(
dtype=tf.int64, shape=(max_sequence_length,)
),
}
return tf.io.parse_single_example(example, features)
"""
Here, `max_sequence_length` needs to be the same as the one used when preparing the
TFRecord shards. Refer to
[this script](https://github.com/huggingface/transformers/blob/main/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py)
for more details.
Next up, we have our masking utility that is responsible for masking parts of the inputs
and preparing labels for the masked language model to learn from. We leverage the
[`DataCollatorForLanguageModeling`](https://huggingface.co/docs/transformers/v4.29.1/en/main_classes/data_collator#transformers.DataCollatorForLanguageModeling)
for this purpose.
"""
# We use a standard masking probability of 0.15. `mlm_probability` denotes
# probability with which we mask the input tokens in a sequence.
mlm_probability = 0.15
data_collator = transformers.DataCollatorForLanguageModeling(
tokenizer=tokenizer, mlm_probability=mlm_probability, mlm=True, return_tensors="tf"
)
def mask_with_collator(batch):
special_tokens_mask = (
~tf.cast(batch["attention_mask"], tf.bool)
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
batch["input_ids"],
vocab_size=len(tokenizer),
mask_token_id=tokenizer.mask_token_id,
special_tokens_mask=special_tokens_mask,
)
return batch
"""
And now is the time to write the final data preparation utility to put it all together in
a `tf.data.Dataset` object:
"""
auto = tf.data.AUTOTUNE
shuffle_buffer_size = 2**18
def prepare_dataset(
records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None
):
num_samples = count_samples(records)
dataset = tf.data.Dataset.from_tensor_slices(records)
if shuffle:
dataset = dataset.shuffle(len(dataset))
dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=auto)
# TF can't infer the total sample count because it doesn't read
# all the records yet, so we assert it here.
dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
dataset = dataset.map(decode_fn, num_parallel_calls=auto)
if shuffle:
assert shuffle_buffer_size is not None
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.map(mask_fn, num_parallel_calls=auto)
dataset = dataset.prefetch(auto)
return dataset
"""
Let's prepare our datasets with these utilities:
"""
per_replica_batch_size = 16 # Change as needed.
batch_size = per_replica_batch_size * strategy.num_replicas_in_sync
shuffle_buffer_size = 2**18 # Default corresponds to a 1GB buffer for seq_len 512
train_dataset = prepare_dataset(
training_records,
decode_fn=decode_fn,
mask_fn=mask_with_collator,
batch_size=batch_size,
shuffle=True,
shuffle_buffer_size=shuffle_buffer_size,
)
eval_dataset = prepare_dataset(
eval_records,
decode_fn=decode_fn,
mask_fn=mask_with_collator,
batch_size=batch_size,
shuffle=False,
)
"""
Let's now take a look at what a single batch of the dataset looks like.
"""
single_batch = next(iter(train_dataset))
print(single_batch.keys())
"""
* `input_ids` denotes the tokenized versions of the input samples containing the mask
tokens as well.
* `attention_mask` denotes the mask to be used when performing attention operations.
* `labels` denotes the actual values of masked tokens the model is supposed to learn from.
"""
for k in single_batch:
if k == "input_ids":
input_ids = single_batch[k]
print(f"Input shape: {input_ids.shape}")
if k == "labels":
labels = single_batch[k]
print(f"Label shape: {labels.shape}")
"""
Now, we can leverage our `tokenizer` to investigate the values of the tokens. Let's start
with `input_ids`:
"""
idx = 0
print("Taking the first sample:\n")
print(tokenizer.decode(input_ids[idx].numpy()))
"""
As expected, the decoded tokens contain the special tokens including the mask tokens as
well. Let's now investigate the mask tokens:
"""
# Taking the first 30 tokens of the first sequence.
print(labels[0].numpy()[:30])
"""
Here, `-100` means that the corresponding tokens in the `input_ids` are NOT masked, and
values other than `-100` denote the actual ids of the masked tokens.
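For example, we could decode just the tokens that were masked like this (a small
illustrative snippet using the `input_ids` and `labels` tensors from above):

```python
# Positions where a label is present, i.e. positions that were masked.
masked_positions = tf.where(labels[idx] != -100)[:, 0]
# The original token ids the model is asked to reconstruct at those positions.
masked_token_ids = tf.gather(labels[idx], masked_positions)
print(tokenizer.decode(masked_token_ids.numpy()))
```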
"""
"""
## Initialize the model and the optimizer
"""
"""
With the datasets prepared, we now initialize and compile our model and optimizer within
the `strategy.scope()`:
"""
# For this example, we keep this value at 10. But for a realistic run, start with 500.
num_epochs = 10
steps_per_epoch = num_train_samples // (
per_replica_batch_size * strategy.num_replicas_in_sync
)
total_train_steps = steps_per_epoch * num_epochs
learning_rate = 0.0001
weight_decay_rate = 1e-3
with strategy.scope():
model = transformers.TFAutoModelForMaskedLM.from_config(config)
model(
model.dummy_inputs
) # Pass some dummy inputs through the model to ensure all the weights are built
optimizer, schedule = transformers.create_optimizer(
num_train_steps=total_train_steps,
num_warmup_steps=total_train_steps // 20,
init_lr=learning_rate,
weight_decay_rate=weight_decay_rate,
)
model.compile(optimizer=optimizer, metrics=["accuracy"])
"""
A couple of things to note here:
* The
[`create_optimizer()`](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules#transformers.create_optimizer)
function creates an Adam optimizer with a learning rate schedule using a warmup phase
followed by a linear decay. Since we're using weight decay here, under the hood,
`create_optimizer()` instantiates
[the right variant of Adam](https://github.com/huggingface/transformers/blob/118e9810687dd713b6be07af79e80eeb1d916908/src/transformers/optimization_tf.py#L172)
to enable weight decay.
* While compiling the model, we're NOT using any `loss` argument. This is because
the TensorFlow models internally compute the loss when expected labels are provided.
Based on the model type and the labels being used, `transformers` will automatically
infer the loss to use.
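For instance (purely illustrative, not executed in this example), calling the model
directly on a batch that contains a `labels` entry returns the masked language
modeling loss in the output object:

```python
outputs = model(single_batch)
print(outputs.loss)  # Loss computed internally from the `labels` entry.
```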
"""
"""
### Start training!
"""
"""
Next, we set up a handy callback to push the intermediate training checkpoints to the
Hugging Face Hub. To be able to operationalize this callback, we need to log in to our
Hugging Face account (if you don't have one, you can create one
[here](https://huggingface.co/join) for free). Execute the code below to log in:
```python
from huggingface_hub import notebook_login
notebook_login()
```
"""
"""
Let's now define the
[`PushToHubCallback`](https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback):
"""
hub_model_id = output_dir = "masked-lm-tpu"
callbacks = []
callbacks.append(
transformers.PushToHubCallback(
output_dir=output_dir, hub_model_id=hub_model_id, tokenizer=tokenizer
)
)
"""
And now, we're ready to chug the TPUs:
"""
# In the interest of the runtime of this example,
# we limit the number of batches to just 2.
model.fit(
train_dataset.take(2),
validation_data=eval_dataset.take(2),
epochs=num_epochs,
callbacks=callbacks,
)
# After training we also serialize the final model.
model.save_pretrained(output_dir)
"""
Once your training is complete, you can easily perform inference like so:
"""
from transformers import pipeline
# Replace your `model_id` here.
# Here, we're using a model that the Hugging Face team trained for longer.
model_id = "tf-tpu/roberta-base-epochs-500-no-wd"
unmasker = pipeline("fill-mask", model=model_id, framework="tf")
print(unmasker("Goal of my life is to [MASK]."))
"""
And that's it!
If you enjoyed this example, we encourage you to check out the full codebase
[here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling-tpu)
and the accompanying blog post
[here](https://huggingface.co/blog/tf_tpu).
"""
| keras-io/examples/nlp/mlm_training_tpus.py/0 | {
"file_path": "keras-io/examples/nlp/mlm_training_tpus.py",
"repo_id": "keras-io",
"token_count": 5423
} | 102 |
"""
Title: Text classification with Switch Transformer
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2020/05/10
Last modified: 2021/02/15
Description: Implement a Switch Transformer for text classification.
Accelerator: GPU
"""
"""
## Introduction
This example demonstrates the implementation of the
[Switch Transformer](https://arxiv.org/abs/2101.03961) model for text
classification.
The Switch Transformer replaces the feedforward network (FFN) layer in the standard
Transformer with a Mixture of Experts (MoE) routing layer, where each expert operates
independently on the tokens in the sequence. This allows increasing the model size without
increasing the computation needed to process each example.
Note that, for training the Switch Transformer efficiently, data and model parallelism
need to be applied, so that expert modules can run simultaneously, each on its own accelerator.
While the implementation described in the paper uses the
[TensorFlow Mesh](https://github.com/tensorflow/mesh) framework for distributed training,
this example presents a simple, non-distributed implementation of the Switch Transformer
model for demonstration purposes.
"""
"""
## Setup
"""
import keras
from keras import ops
from keras import layers
"""
## Download and prepare dataset
"""
vocab_size = 20000 # Only consider the top 20k words
num_tokens_per_example = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.utils.pad_sequences(x_train, maxlen=num_tokens_per_example)
x_val = keras.utils.pad_sequences(x_val, maxlen=num_tokens_per_example)
"""
## Define hyperparameters
"""
embed_dim = 32 # Embedding size for each token.
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feedforward network.
num_experts = 10 # Number of experts used in the Switch Transformer.
batch_size = 50 # Batch size.
learning_rate = 0.001 # Learning rate.
dropout_rate = 0.25 # Dropout rate.
num_epochs = 3 # Number of epochs.
num_tokens_per_batch = (
batch_size * num_tokens_per_example
) # Total number of tokens per batch.
print(f"Number of tokens per batch: {num_tokens_per_batch}")
"""
## Implement token & position embedding layer
It consists of two separate embedding layers: one for tokens and one for token positions (indices).
"""
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super().__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = ops.shape(x)[-1]
positions = ops.arange(start=0, stop=maxlen, step=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
"""
## Implement the feedforward network
This is used as the Mixture of Experts in the Switch Transformer.
"""
def create_feedforward_network(ff_dim, embed_dim, name=None):
return keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(embed_dim)], name=name
)
"""
## Implement the load-balanced loss
This is an auxiliary loss to encourage a balanced load across experts.
"""
def load_balanced_loss(router_probs, expert_mask):
# router_probs [tokens_per_batch, num_experts] is the probability assigned for
# each expert per token. expert_mask [tokens_per_batch, num_experts] contains
    # the expert with the highest router probability in one-hot format.
num_experts = ops.shape(expert_mask)[-1]
# Get the fraction of tokens routed to each expert.
# density is a vector of length num experts that sums to 1.
density = ops.mean(expert_mask, axis=0)
# Get fraction of probability mass assigned to each expert from the router
# across all tokens. density_proxy is a vector of length num experts that sums to 1.
density_proxy = ops.mean(router_probs, axis=0)
# Want both vectors to have uniform allocation (1/num experts) across all
# num_expert elements. The two vectors will be pushed towards uniform allocation
# when the dot product is minimized.
loss = ops.mean(density_proxy * density) * ops.cast((num_experts**2), "float32")
return loss
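"""
As a quick sanity check (not part of the model itself), a perfectly balanced routing
assignment attains the minimum value of this loss, which is 1.0:

```python
# 8 tokens routed uniformly across 4 experts, with uniform router probabilities.
probs = ops.ones((8, 4)) / 4.0
mask = ops.one_hot(ops.convert_to_tensor([0, 1, 2, 3, 0, 1, 2, 3]), 4)
print(load_balanced_loss(probs, mask))  # ~1.0
```
"""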
"""
### Implement the router as a layer
"""
class Router(layers.Layer):
def __init__(self, num_experts, expert_capacity):
self.num_experts = num_experts
self.route = layers.Dense(units=num_experts)
self.expert_capacity = expert_capacity
super().__init__()
def call(self, inputs, training=False):
# inputs shape: [tokens_per_batch, embed_dim]
# router_logits shape: [tokens_per_batch, num_experts]
router_logits = self.route(inputs)
if training:
# Add noise for exploration across experts.
router_logits += keras.random.uniform(
shape=router_logits.shape, minval=0.9, maxval=1.1
)
# Probabilities for each token of what expert it should be sent to.
router_probs = keras.activations.softmax(router_logits, axis=-1)
        # Get the top-1 expert for each token. expert_gate is the top-1 probability
# from the router for each token. expert_index is what expert each token
# is going to be routed to.
expert_gate, expert_index = ops.top_k(router_probs, k=1)
# expert_mask shape: [tokens_per_batch, num_experts]
expert_mask = ops.one_hot(expert_index, self.num_experts)
# Compute load balancing loss.
aux_loss = load_balanced_loss(router_probs, expert_mask)
self.add_loss(aux_loss)
        # Experts have a fixed capacity; ensure we do not exceed it. Construct
        # each token's position within its assigned expert so that no more than
        # `expert_capacity` tokens can be routed to each expert.
position_in_expert = ops.cast(
ops.cumsum(expert_mask, axis=0) * expert_mask, "int32"
)
# Keep only tokens that fit within expert capacity.
expert_mask *= ops.cast(
ops.less(ops.cast(position_in_expert, "int32"), self.expert_capacity),
"float32",
)
expert_mask_flat = ops.sum(expert_mask, axis=-1)
# Mask out the experts that have overflowed the expert capacity.
expert_gate *= expert_mask_flat
# Combine expert outputs and scaling with router probability.
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
combined_tensor = ops.expand_dims(
expert_gate
* expert_mask_flat
* ops.squeeze(ops.one_hot(expert_index, self.num_experts), 1),
-1,
) * ops.squeeze(ops.one_hot(position_in_expert, self.expert_capacity), 1)
# Create binary dispatch_tensor [tokens_per_batch, num_experts, expert_capacity]
# that is 1 if the token gets routed to the corresponding expert.
dispatch_tensor = ops.cast(combined_tensor, "float32")
return dispatch_tensor, combined_tensor
"""
### Implement a Switch layer
"""
class Switch(layers.Layer):
def __init__(
self, num_experts, embed_dim, ff_dim, num_tokens_per_batch, capacity_factor=1
):
self.num_experts = num_experts
self.embed_dim = embed_dim
self.experts = [
create_feedforward_network(ff_dim, embed_dim) for _ in range(num_experts)
]
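        # Each expert can process at most `expert_capacity` tokens per batch.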
self.expert_capacity = num_tokens_per_batch // self.num_experts
self.router = Router(self.num_experts, self.expert_capacity)
super().__init__()
def call(self, inputs):
batch_size = ops.shape(inputs)[0]
num_tokens_per_example = ops.shape(inputs)[1]
# inputs shape: [num_tokens_per_batch, embed_dim]
inputs = ops.reshape(inputs, [num_tokens_per_batch, self.embed_dim])
        # dispatch_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
dispatch_tensor, combine_tensor = self.router(inputs)
# expert_inputs shape: [num_experts, expert_capacity, embed_dim]
expert_inputs = ops.einsum("ab,acd->cdb", inputs, dispatch_tensor)
expert_inputs = ops.reshape(
expert_inputs, [self.num_experts, self.expert_capacity, self.embed_dim]
)
# Dispatch to experts
expert_input_list = ops.unstack(expert_inputs, axis=0)
expert_output_list = [
self.experts[idx](expert_input)
for idx, expert_input in enumerate(expert_input_list)
]
# expert_outputs shape: [expert_capacity, num_experts, embed_dim]
expert_outputs = ops.stack(expert_output_list, axis=1)
# expert_outputs_combined shape: [tokens_per_batch, embed_dim]
expert_outputs_combined = ops.einsum(
"abc,xba->xc", expert_outputs, combine_tensor
)
# output shape: [batch_size, num_tokens_per_example, embed_dim]
outputs = ops.reshape(
expert_outputs_combined,
[batch_size, num_tokens_per_example, self.embed_dim],
)
return outputs
"""
## Implement a Transformer block layer
"""
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ffn, dropout_rate=0.1):
super().__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
# The ffn can be either a standard feedforward network or a switch
# layer with a Mixture of Experts.
self.ffn = ffn
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(dropout_rate)
self.dropout2 = layers.Dropout(dropout_rate)
def call(self, inputs, training=False):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
"""
## Implement the classifier
The `TransformerBlock` layer outputs one vector for each time step of our input sequence.
Here, we take the mean across all time steps and use a feedforward network on top
of it to classify text.
"""
def create_classifier():
switch = Switch(num_experts, embed_dim, ff_dim, num_tokens_per_batch)
transformer_block = TransformerBlock(embed_dim // num_heads, num_heads, switch)
inputs = layers.Input(shape=(num_tokens_per_example,))
embedding_layer = TokenAndPositionEmbedding(
num_tokens_per_example, vocab_size, embed_dim
)
x = embedding_layer(inputs)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(dropout_rate)(x)
x = layers.Dense(ff_dim, activation="relu")(x)
x = layers.Dropout(dropout_rate)(x)
outputs = layers.Dense(2, activation="softmax")(x)
classifier = keras.Model(inputs=inputs, outputs=outputs)
return classifier
"""
## Train and evaluate the model
"""
def run_experiment(classifier):
classifier.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
history = classifier.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_data=(x_val, y_val),
)
return history
classifier = create_classifier()
run_experiment(classifier)
"""
## Conclusion
Compared to the standard Transformer architecture, the Switch Transformer can have a much
larger number of parameters, leading to increased model
capacity, while maintaining a reasonable computational cost.
"""
| keras-io/examples/nlp/text_classification_with_switch_transformer.py/0 | {
"file_path": "keras-io/examples/nlp/text_classification_with_switch_transformer.py",
"repo_id": "keras-io",
"token_count": 4609
} | 103 |
# Collaborative Filtering for Movie Recommendations
**Author:** [Siddhartha Banerjee](https://twitter.com/sidd2006)<br>
**Date created:** 2020/05/24<br>
**Last modified:** 2020/05/24<br>
**Description:** Recommending movies using a model trained on Movielens dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/structured_data/ipynb/collaborative_filtering_movielens.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/structured_data/collaborative_filtering_movielens.py)
---
## Introduction
This example demonstrates
[Collaborative filtering](https://en.wikipedia.org/wiki/Collaborative_filtering)
using the [Movielens dataset](https://www.kaggle.com/c/movielens-100k)
to recommend movies to users.
The MovieLens ratings dataset lists the ratings given by a set of users to a set of movies.
Our goal is to be able to predict ratings for movies a user has not yet watched.
The movies with the highest predicted ratings can then be recommended to the user.
The steps in the model are as follows:
1. Map user ID to a "user vector" via an embedding matrix
2. Map movie ID to a "movie vector" via an embedding matrix
3. Compute the dot product between the user vector and movie vector, to obtain
a match score between the user and the movie (predicted rating).
4. Train the embeddings via gradient descent using all known user-movie pairs.
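As a rough illustration of steps 1-3 (just a sketch with random vectors, not the actual
Keras model defined later in this example, which also adds per-user and per-movie biases):

```python
import numpy as np

rng = np.random.default_rng(0)
user_vector = rng.normal(size=50)   # step 1: looked up from the user embedding matrix
movie_vector = rng.normal(size=50)  # step 2: looked up from the movie embedding matrix

match_score = user_vector @ movie_vector           # step 3: raw match score
predicted_rating = 1 / (1 + np.exp(-match_score))  # squashed to [0, 1], like the model below
```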
**References:**
- [Collaborative Filtering](https://dl.acm.org/doi/pdf/10.1145/371920.372071)
- [Neural Collaborative Filtering](https://dl.acm.org/doi/pdf/10.1145/3038912.3052569)
```python
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from zipfile import ZipFile
import keras
from keras import layers
from keras import ops
```
---
## First, load the data and apply preprocessing
```python
# Download the actual data from http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
# Use the ratings.csv file
movielens_data_file_url = (
"http://files.grouplens.org/datasets/movielens/ml-latest-small.zip"
)
movielens_zipped_file = keras.utils.get_file(
"ml-latest-small.zip", movielens_data_file_url, extract=False
)
keras_datasets_path = Path(movielens_zipped_file).parents[0]
movielens_dir = keras_datasets_path / "ml-latest-small"
# Only extract the data the first time the script is run.
if not movielens_dir.exists():
with ZipFile(movielens_zipped_file, "r") as zip:
# Extract files
print("Extracting all the files now...")
zip.extractall(path=keras_datasets_path)
print("Done!")
ratings_file = movielens_dir / "ratings.csv"
df = pd.read_csv(ratings_file)
```
<div class="k-default-codeblock">
```
Downloading data from http://files.grouplens.org/datasets/movielens/ml-latest-small.zip
978202/978202 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
Extracting all the files now...
Done!
```
</div>
First, we need to perform some preprocessing to encode users and movies as integer indices.
```python
user_ids = df["userId"].unique().tolist()
user2user_encoded = {x: i for i, x in enumerate(user_ids)}
userencoded2user = {i: x for i, x in enumerate(user_ids)}
movie_ids = df["movieId"].unique().tolist()
movie2movie_encoded = {x: i for i, x in enumerate(movie_ids)}
movie_encoded2movie = {i: x for i, x in enumerate(movie_ids)}
df["user"] = df["userId"].map(user2user_encoded)
df["movie"] = df["movieId"].map(movie2movie_encoded)
num_users = len(user2user_encoded)
num_movies = len(movie_encoded2movie)
df["rating"] = df["rating"].values.astype(np.float32)
# min and max ratings will be used to normalize the ratings later
min_rating = min(df["rating"])
max_rating = max(df["rating"])
print(
"Number of users: {}, Number of Movies: {}, Min rating: {}, Max rating: {}".format(
num_users, num_movies, min_rating, max_rating
)
)
```
<div class="k-default-codeblock">
```
Number of users: 610, Number of Movies: 9724, Min rating: 0.5, Max rating: 5.0
```
</div>
---
## Prepare training and validation data
```python
df = df.sample(frac=1, random_state=42)
x = df[["user", "movie"]].values
# Normalize the targets between 0 and 1. Makes it easy to train.
y = df["rating"].apply(lambda x: (x - min_rating) / (max_rating - min_rating)).values
# Assuming training on 90% of the data and validating on 10%.
train_indices = int(0.9 * df.shape[0])
x_train, x_val, y_train, y_val = (
x[:train_indices],
x[train_indices:],
y[:train_indices],
y[train_indices:],
)
```
---
## Create the model
We embed both users and movies into 50-dimensional vectors.
The model computes a match score between user and movie embeddings via a dot product,
and adds a per-movie and per-user bias. The match score is scaled to the `[0, 1]`
interval via a sigmoid (since our ratings are normalized to this range).
```python
EMBEDDING_SIZE = 50
class RecommenderNet(keras.Model):
def __init__(self, num_users, num_movies, embedding_size, **kwargs):
super().__init__(**kwargs)
self.num_users = num_users
self.num_movies = num_movies
self.embedding_size = embedding_size
self.user_embedding = layers.Embedding(
num_users,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.user_bias = layers.Embedding(num_users, 1)
self.movie_embedding = layers.Embedding(
num_movies,
embedding_size,
embeddings_initializer="he_normal",
embeddings_regularizer=keras.regularizers.l2(1e-6),
)
self.movie_bias = layers.Embedding(num_movies, 1)
def call(self, inputs):
user_vector = self.user_embedding(inputs[:, 0])
user_bias = self.user_bias(inputs[:, 0])
movie_vector = self.movie_embedding(inputs[:, 1])
movie_bias = self.movie_bias(inputs[:, 1])
dot_user_movie = ops.tensordot(user_vector, movie_vector, 2)
# Add all the components (including bias)
x = dot_user_movie + user_bias + movie_bias
# The sigmoid activation forces the rating to between 0 and 1
return ops.nn.sigmoid(x)
model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
model.compile(
loss=keras.losses.BinaryCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=0.001),
)
```
---
## Train the model based on the data split
```python
history = model.fit(
x=x_train,
y=y_train,
batch_size=64,
epochs=5,
verbose=1,
validation_data=(x_val, y_val),
)
```
<div class="k-default-codeblock">
```
Epoch 1/5
1418/1418 ━━━━━━━━━━━━━━━━━━━━ 2s 1ms/step - loss: 0.6591 - val_loss: 0.6201
Epoch 2/5
1418/1418 ━━━━━━━━━━━━━━━━━━━━ 1s 894us/step - loss: 0.6159 - val_loss: 0.6191
Epoch 3/5
1418/1418 ━━━━━━━━━━━━━━━━━━━━ 1s 977us/step - loss: 0.6093 - val_loss: 0.6138
Epoch 4/5
1418/1418 ━━━━━━━━━━━━━━━━━━━━ 1s 865us/step - loss: 0.6100 - val_loss: 0.6123
Epoch 5/5
1418/1418 ━━━━━━━━━━━━━━━━━━━━ 1s 854us/step - loss: 0.6072 - val_loss: 0.6121
```
</div>
---
## Plot training and validation loss
```python
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
```

---
## Show top 10 movie recommendations to a user
```python
movie_df = pd.read_csv(movielens_dir / "movies.csv")
# Let us get a user and see the top recommendations.
user_id = df.userId.sample(1).iloc[0]
movies_watched_by_user = df[df.userId == user_id]
movies_not_watched = movie_df[
~movie_df["movieId"].isin(movies_watched_by_user.movieId.values)
]["movieId"]
movies_not_watched = list(
set(movies_not_watched).intersection(set(movie2movie_encoded.keys()))
)
movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched]
user_encoder = user2user_encoded.get(user_id)
user_movie_array = np.hstack(
([[user_encoder]] * len(movies_not_watched), movies_not_watched)
)
ratings = model.predict(user_movie_array).flatten()
top_ratings_indices = ratings.argsort()[-10:][::-1]
recommended_movie_ids = [
movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices
]
print("Showing recommendations for user: {}".format(user_id))
print("====" * 9)
print("Movies with high ratings from user")
print("----" * 8)
top_movies_user = (
movies_watched_by_user.sort_values(by="rating", ascending=False)
.head(5)
.movieId.values
)
movie_df_rows = movie_df[movie_df["movieId"].isin(top_movies_user)]
for row in movie_df_rows.itertuples():
print(row.title, ":", row.genres)
print("----" * 8)
print("Top 10 movie recommendations")
print("----" * 8)
recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)]
for row in recommended_movies.itertuples():
print(row.title, ":", row.genres)
```
<div class="k-default-codeblock">
```
272/272 ━━━━━━━━━━━━━━━━━━━━ 0s 714us/step
Showing recommendations for user: 249
====================================
Movies with high ratings from user
--------------------------------
Fight Club (1999) : Action|Crime|Drama|Thriller
Serenity (2005) : Action|Adventure|Sci-Fi
Departed, The (2006) : Crime|Drama|Thriller
Prisoners (2013) : Drama|Mystery|Thriller
Arrival (2016) : Sci-Fi
--------------------------------
Top 10 movie recommendations
--------------------------------
In the Name of the Father (1993) : Drama
Monty Python and the Holy Grail (1975) : Adventure|Comedy|Fantasy
Princess Bride, The (1987) : Action|Adventure|Comedy|Fantasy|Romance
Lawrence of Arabia (1962) : Adventure|Drama|War
Apocalypse Now (1979) : Action|Drama|War
Full Metal Jacket (1987) : Drama|War
Amadeus (1984) : Drama
Glory (1989) : Drama|War
Chinatown (1974) : Crime|Film-Noir|Mystery|Thriller
City of God (Cidade de Deus) (2002) : Action|Adventure|Crime|Drama|Thriller
```
</div>
| keras-io/examples/structured_data/md/collaborative_filtering_movielens.md/0 | {
"file_path": "keras-io/examples/structured_data/md/collaborative_filtering_movielens.md",
"repo_id": "keras-io",
"token_count": 4009
} | 104 |
# Event classification for payment card fraud detection
**Author:** [achoum](https://github.com/achoum/)<br>
**Date created:** 2024/02/01<br>
**Last modified:** 2024/02/01<br>
**Description:** Detection of fraudulent payment card transactions using Temporian and a feed-forward neural network.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/timeseries/ipynb/event_classification_for_payment_card_fraud_detection.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/timeseries/event_classification_for_payment_card_fraud_detection.py)
This notebook depends on Keras 3, Temporian, and a few other libraries. You can
install them as follows:
```shell
pip install temporian keras pandas tf-nightly scikit-learn -U
```
```python
import keras # To train the Machine Learning model
import temporian as tp # To convert transactions into tabular data
import numpy as np
import os
import pandas as pd
import datetime
import math
import tensorflow as tf
from sklearn.metrics import RocCurveDisplay
```
---
## Introduction
Payment fraud detection is critical for banks, businesses, and consumers. In
Europe alone, fraudulent transactions were estimated at
[€1.89 billion in 2019](https://www.ecb.europa.eu/pub/pdf/cardfraud/ecb.cardfraudreport202110~cac4c418e8.en.pdf).
Worldwide, approximately
[3.6%](https://www.cybersource.com/content/dam/documents/campaign/fraud-report/global-fraud-report-2022.pdf)
of commerce revenue is lost to fraud. In this notebook, we train and evaluate a
model to detect fraudulent transactions using the synthetic dataset attached to
the book
[Reproducible Machine Learning for Credit Card Fraud Detection](https://fraud-detection-handbook.github.io/fraud-detection-handbook/Foreword.html)
by Le Borgne et al.
Fraudulent transactions often cannot be detected by looking at transactions in
isolation. Instead, fraudulent transactions are detected by looking at patterns
across multiple transactions from the same user, to the same merchant, or with
other types of relationships. To express these relationships in a way that is
understandable by a machine learning model, and to augment features with feature
engineering, we use the
[Temporian](https://temporian.readthedocs.io/en/latest) preprocessing library.
We preprocess a transaction dataset into a tabular dataset and use a
feed-forward neural network to learn the patterns of fraud and make predictions.
---
## Loading the dataset
The dataset contains payment transactions sampled between April 1, 2018 and
September 30, 2018. The transactions are stored in CSV files, one for each day.
**Note:** Downloading the dataset takes ~1 minute.
```python
start_date = datetime.date(2018, 4, 1)
end_date = datetime.date(2018, 9, 30)
# Load the dataset as a Pandas dataframe.
cache_path = "fraud_detection_cache.csv"
if not os.path.exists(cache_path):
print("Download dataset")
dataframes = []
num_files = (end_date - start_date).days
counter = 0
while start_date <= end_date:
if counter % (num_files // 10) == 0:
print(f"[{100 * (counter+1) // num_files}%]", end="", flush=True)
print(".", end="", flush=True)
url = f"https://github.com/Fraud-Detection-Handbook/simulated-data-raw/raw/6e67dbd0a3bfe0d7ec33abc4bce5f37cd4ff0d6a/data/{start_date}.pkl"
dataframes.append(pd.read_pickle(url))
start_date += datetime.timedelta(days=1)
counter += 1
print("done", flush=True)
transactions_dataframe = pd.concat(dataframes)
transactions_dataframe.to_csv(cache_path, index=False)
else:
print("Load dataset from cache")
transactions_dataframe = pd.read_csv(
cache_path, dtype={"CUSTOMER_ID": bytes, "TERMINAL_ID": bytes}
)
print(f"Found {len(transactions_dataframe)} transactions")
```
<div class="k-default-codeblock">
```
Download dataset
[0%]..................[10%]..................[20%]..................[30%]..................[40%]..................[50%]..................[59%]..................[69%]..................[79%]..................[89%]..................[99%]...done
Found 1754155 transactions
```
</div>
Each transaction is represented by a single row, with the following columns of
interest:
- **TX_DATETIME**: The date and time of the transaction.
- **CUSTOMER_ID**: The unique identifier of the customer.
- **TERMINAL_ID**: The identifier of the terminal where the transaction was
made.
- **TX_AMOUNT**: The amount of the transaction.
- **TX_FRAUD**: Whether the transaction is fraudulent (1) or not (0).
```python
transactions_dataframe = transactions_dataframe[
["TX_DATETIME", "CUSTOMER_ID", "TERMINAL_ID", "TX_AMOUNT", "TX_FRAUD"]
]
transactions_dataframe.head(4)
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
    .dataframe tbody tr th {
        vertical-align: top;
    }

    .dataframe thead th {
        text-align: right;
    }
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>TX_DATETIME</th>
<th>CUSTOMER_ID</th>
<th>TERMINAL_ID</th>
<th>TX_AMOUNT</th>
<th>TX_FRAUD</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>2018-04-01 00:00:31</td>
<td>596</td>
<td>3156</td>
<td>57.16</td>
<td>0</td>
</tr>
<tr>
<th>1</th>
<td>2018-04-01 00:02:10</td>
<td>4961</td>
<td>3412</td>
<td>81.51</td>
<td>0</td>
</tr>
<tr>
<th>2</th>
<td>2018-04-01 00:07:56</td>
<td>2</td>
<td>1365</td>
<td>146.00</td>
<td>0</td>
</tr>
<tr>
<th>3</th>
<td>2018-04-01 00:09:29</td>
<td>4128</td>
<td>8737</td>
<td>64.49</td>
<td>0</td>
</tr>
</tbody>
</table>
</div>
The dataset is highly imbalanced, with the majority of transactions being
legitimate.
```python
fraudulent_rate = transactions_dataframe["TX_FRAUD"].mean()
print("Rate of fraudulent transactions:", fraudulent_rate)
```
<div class="k-default-codeblock">
```
Rate of fraudulent transactions: 0.008369271814634397
```
</div>
The
[pandas dataframe](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html)
is converted into a
[Temporian EventSet](https://temporian.readthedocs.io/en/latest/reference/temporian/EventSet/),
which is better suited for the data exploration and feature preprocessing of the
next steps.
```python
transactions_evset = tp.from_pandas(transactions_dataframe, timestamps="TX_DATETIME")
transactions_evset
```
<div class="k-default-codeblock">
```
WARNING:root:Feature "CUSTOMER_ID" is an array of numpy.object_ and will be casted to numpy.string_ (Note: numpy.string_ is equivalent to numpy.bytes_).
WARNING:root:Feature "TERMINAL_ID" is an array of numpy.object_ and will be casted to numpy.string_ (Note: numpy.string_ is equivalent to numpy.bytes_).
```
</div>
<div>
<div style="display: table; margin-bottom:11px; padding:5px; font-size:small; line-height:120%; border:1px solid rgba(127, 127, 127, 0.2)">
<div style="display: table">
<span>
<span style="font-weight:bold">features</span>
<span style=""> [4]:</span>
</span>
<span style="color:#0077BB; font-weight:bold">CUSTOMER_ID</span>
<span style="color:#009988"> (str_)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">TERMINAL_ID</span>
<span style="color:#009988"> (str_)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">TX_AMOUNT</span>
<span style="color:#009988"> (float64)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">TX_FRAUD</span>
<span style="color:#009988"> (int64)</span>
</div>
<div style="display: table">
<span>
<span style="font-weight:bold">indexes</span>
<span style=""> [0]:</span>
</span>
<span style="font-style:italic">none</span>
</div>
<div style="display: table">
<span style="font-weight:bold">events: </span>
<span style="">1754155</span>
</div>
<div style="display: table">
<span style="font-weight:bold">index values: </span>
<span style="">1</span>
</div>
<div style="display: table">
<span style="font-weight:bold">memory usage: </span>
<span style="">28.1 MB</span>
</div>
</div>
<div style="display: table">
<span style="font-weight:bold">index</span>
<span style=""> (</span>
<span style="">) with 1754155 events</span>
</div>
<table style="margin-left:20px; border:1px solid rgba(127, 127, 127, 0.2)">
<tr>
<th>
<b>
<span style="">timestamp</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">CUSTOMER_ID</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">TERMINAL_ID</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">TX_AMOUNT</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">TX_FRAUD</span>
</b>
</th>
</tr>
<tr>
<td>
<span style="">2018-04-01 00:00:31+00:00</span>
</td>
<td>
<span style="">596</span>
</td>
<td>
<span style="">3156</span>
</td>
<td>
<span style="">57.16</span>
</td>
<td>
<span style="">0</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-01 00:02:10+00:00</span>
</td>
<td>
<span style="">4961</span>
</td>
<td>
<span style="">3412</span>
</td>
<td>
<span style="">81.51</span>
</td>
<td>
<span style="">0</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-01 00:07:56+00:00</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">1365</span>
</td>
<td>
<span style="">146</span>
</td>
<td>
<span style="">0</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-01 00:09:29+00:00</span>
</td>
<td>
<span style="">4128</span>
</td>
<td>
<span style="">8737</span>
</td>
<td>
<span style="">64.49</span>
</td>
<td>
<span style="">0</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-01 00:10:34+00:00</span>
</td>
<td>
<span style="">927</span>
</td>
<td>
<span style="">9906</span>
</td>
<td>
<span style="">50.99</span>
</td>
<td>
<span style="">0</span>
</td>
</tr>
<tr>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
</tr>
</table>
</div>
It is possible to plot the entire dataset, but the resulting plot will be
difficult to read. Instead, we can group the transactions per client.
```python
transactions_evset.add_index("CUSTOMER_ID").plot(indexes="3774")
```

Note the few fraudulent transactions for this client.
---
## Preparing the training data
Fraudulent transactions cannot be detected in isolation. Instead, we need to
connect related transactions. For each transaction, we compute the sum and count
of transactions for the same terminal in the last `n` days. Because we don't
know the correct value for `n`, we use multiple values for `n` and compute a
set of features for each of them.
```python
# Group the transactions per terminal
transactions_per_terminal = transactions_evset.add_index("TERMINAL_ID")
# Moving statistics per terminal
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
transactions_per_terminal["TX_AMOUNT"]
.moving_sum(tp.duration.days(n))
.rename(f"sum_transactions_{n}_days")
)
tmp_features.append(
transactions_per_terminal.moving_count(tp.duration.days(n)).rename(
f"count_transactions_{n}_days"
)
)
feature_set_1 = tp.glue(*tmp_features)
feature_set_1
```
<div>
<div style="display: table; margin-bottom:11px; padding:5px; font-size:small; line-height:120%; border:1px solid rgba(127, 127, 127, 0.2)">
<div style="display: table">
<span>
<span style="font-weight:bold">features</span>
<span style=""> [6]:</span>
</span>
<span style="color:#0077BB; font-weight:bold">sum_transactions_7_days</span>
<span style="color:#009988"> (float64)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">count_transactions_7_days</span>
<span style="color:#009988"> (int32)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">sum_transactions_14_days</span>
<span style="color:#009988"> (float64)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">count_transactions_14_days</span>
<span style="color:#009988"> (int32)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">sum_transactions_28_days</span>
<span style="color:#009988"> (float64)</span>
<span style="">, </span>
<span style="color:#0077BB; font-weight:bold">count_transactions_28_days</span>
<span style="color:#009988"> (int32)</span>
</div>
<div style="display: table">
<span>
<span style="font-weight:bold">indexes</span>
<span style=""> [1]:</span>
</span>
<span style="color:#EE7733; font-weight:bold">TERMINAL_ID</span>
<span style="color:#009988"> (str_)</span>
</div>
<div style="display: table">
<span style="font-weight:bold">events: </span>
<span style="">1754155</span>
</div>
<div style="display: table">
<span style="font-weight:bold">index values: </span>
<span style="">10000</span>
</div>
<div style="display: table">
<span style="font-weight:bold">memory usage: </span>
<span style="">85.8 MB</span>
</div>
</div>
<div style="display: table">
<span style="font-weight:bold">index</span>
<span style=""> (</span>
<span style="color:#EE7733; font-weight:bold">TERMINAL_ID: </span>
<span style="color:#EE3377">0</span>
<span style="">) with 178 events</span>
</div>
<table style="margin-left:20px; border:1px solid rgba(127, 127, 127, 0.2)">
<tr>
<th>
<b>
<span style="">timestamp</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_28_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_28_days</span>
</b>
</th>
</tr>
<tr>
<td>
<span style="">2018-04-02 01:00:01+00:00</span>
</td>
<td>
<span style="">16.07</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">16.07</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">16.07</span>
</td>
<td>
<span style="">1</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-02 09:49:55+00:00</span>
</td>
<td>
<span style="">83.9</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">83.9</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">83.9</span>
</td>
<td>
<span style="">2</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-03 12:14:41+00:00</span>
</td>
<td>
<span style="">110.7</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">110.7</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">110.7</span>
</td>
<td>
<span style="">3</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-05 16:47:41+00:00</span>
</td>
<td>
<span style="">151.2</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">151.2</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">151.2</span>
</td>
<td>
<span style="">4</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-07 06:05:21+00:00</span>
</td>
<td>
<span style="">199.6</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">199.6</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">199.6</span>
</td>
<td>
<span style="">5</span>
</td>
</tr>
<tr>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
</tr>
</table>
<div style="display: table">
<span style="font-weight:bold">index</span>
<span style=""> (</span>
<span style="color:#EE7733; font-weight:bold">TERMINAL_ID: </span>
<span style="color:#EE3377">1</span>
<span style="">) with 139 events</span>
</div>
<table style="margin-left:20px; border:1px solid rgba(127, 127, 127, 0.2)">
<tr>
<th>
<b>
<span style="">timestamp</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_28_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_28_days</span>
</b>
</th>
</tr>
<tr>
<td>
<span style="">2018-04-01 16:24:39+00:00</span>
</td>
<td>
<span style="">70.36</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">70.36</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">70.36</span>
</td>
<td>
<span style="">1</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-02 11:25:03+00:00</span>
</td>
<td>
<span style="">87.79</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">87.79</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">87.79</span>
</td>
<td>
<span style="">2</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-04 08:31:48+00:00</span>
</td>
<td>
<span style="">211.6</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">211.6</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">211.6</span>
</td>
<td>
<span style="">3</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-04 14:15:28+00:00</span>
</td>
<td>
<span style="">315</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">315</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">315</span>
</td>
<td>
<span style="">4</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-04 20:54:17+00:00</span>
</td>
<td>
<span style="">446.5</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">446.5</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">446.5</span>
</td>
<td>
<span style="">5</span>
</td>
</tr>
<tr>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
</tr>
</table>
<div style="display: table">
<span style="font-weight:bold">index</span>
<span style=""> (</span>
<span style="color:#EE7733; font-weight:bold">TERMINAL_ID: </span>
<span style="color:#EE3377">10</span>
<span style="">) with 151 events</span>
</div>
<table style="margin-left:20px; border:1px solid rgba(127, 127, 127, 0.2)">
<tr>
<th>
<b>
<span style="">timestamp</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_28_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_28_days</span>
</b>
</th>
</tr>
<tr>
<td>
<span style="">2018-04-01 14:11:55+00:00</span>
</td>
<td>
<span style="">2.9</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">2.9</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">2.9</span>
</td>
<td>
<span style="">1</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-02 11:01:07+00:00</span>
</td>
<td>
<span style="">17.04</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">17.04</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">17.04</span>
</td>
<td>
<span style="">2</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-03 13:46:58+00:00</span>
</td>
<td>
<span style="">118.2</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">118.2</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">118.2</span>
</td>
<td>
<span style="">3</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-04 03:27:11+00:00</span>
</td>
<td>
<span style="">161.7</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">161.7</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">161.7</span>
</td>
<td>
<span style="">4</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-05 17:58:10+00:00</span>
</td>
<td>
<span style="">171.3</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">171.3</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">171.3</span>
</td>
<td>
<span style="">5</span>
</td>
</tr>
<tr>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
</tr>
</table>
<div style="display: table">
<span style="font-weight:bold">index</span>
<span style=""> (</span>
<span style="color:#EE7733; font-weight:bold">TERMINAL_ID: </span>
<span style="color:#EE3377">100</span>
<span style="">) with 188 events</span>
</div>
<table style="margin-left:20px; border:1px solid rgba(127, 127, 127, 0.2)">
<tr>
<th>
<b>
<span style="">timestamp</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_7_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_14_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">sum_transactions_28_days</span>
</b>
</th>
<th>
<b>
<span style="color:#0077BB; font-weight:bold; ">count_transactions_28_days</span>
</b>
</th>
</tr>
<tr>
<td>
<span style="">2018-04-02 10:37:42+00:00</span>
</td>
<td>
<span style="">6.31</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">6.31</span>
</td>
<td>
<span style="">1</span>
</td>
<td>
<span style="">6.31</span>
</td>
<td>
<span style="">1</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-04 19:14:23+00:00</span>
</td>
<td>
<span style="">12.26</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">12.26</span>
</td>
<td>
<span style="">2</span>
</td>
<td>
<span style="">12.26</span>
</td>
<td>
<span style="">2</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-07 04:01:22+00:00</span>
</td>
<td>
<span style="">65.12</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">65.12</span>
</td>
<td>
<span style="">3</span>
</td>
<td>
<span style="">65.12</span>
</td>
<td>
<span style="">3</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-07 12:18:27+00:00</span>
</td>
<td>
<span style="">112.4</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">112.4</span>
</td>
<td>
<span style="">4</span>
</td>
<td>
<span style="">112.4</span>
</td>
<td>
<span style="">4</span>
</td>
</tr>
<tr>
<td>
<span style="">2018-04-07 21:11:03+00:00</span>
</td>
<td>
<span style="">170.4</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">170.4</span>
</td>
<td>
<span style="">5</span>
</td>
<td>
<span style="">170.4</span>
</td>
<td>
<span style="">5</span>
</td>
</tr>
<tr>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
<td>
<span style="">…</span>
</td>
</tr>
</table>
<span style="">… (9996 more indexes not shown)</span>
</div>
Let's look at the features of terminal "3774".
```python
feature_set_1.plot(indexes="3774")
```

A transaction's fraudulent status is not known at the time of the transaction
(otherwise, there would be no problem). However, the bank knows whether a
transaction is fraudulent one week after it is made. We create a set of
features that indicate the number and ratio of fraudulent transactions in the
last N days.
```python
# Lag the transactions by one week.
lagged_transactions = transactions_per_terminal.lag(tp.duration.weeks(1))
# Moving statistics per customer
tmp_features = []
for n in [7, 14, 28]:
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.moving_sum(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"count_fraud_transactions_{n}_days")
)
tmp_features.append(
lagged_transactions["TX_FRAUD"]
.cast(tp.float32)
.simple_moving_average(tp.duration.days(n), sampling=transactions_per_terminal)
.rename(f"rate_fraud_transactions_{n}_days")
)
feature_set_2 = tp.glue(*tmp_features)
```
Transaction date and time can be correlated with fraud. While each transaction
has a timestamp, a machine learning model might struggle to consume them
directly. Instead, we extract various informative calendar features from the
timestamps, such as hour, day of the week (e.g., Monday, Tuesday), and day of
the month (1-31).
```python
feature_set_3 = tp.glue(
transactions_per_terminal.calendar_hour(),
transactions_per_terminal.calendar_day_of_week(),
)
```
Finally, we group together all the features and the label.
```python
all_data = tp.glue(
transactions_per_terminal, feature_set_1, feature_set_2, feature_set_3
).drop_index()
print("All the available features:")
all_data.schema.feature_names()
```
<div class="k-default-codeblock">
```
All the available features:
['CUSTOMER_ID',
'TX_AMOUNT',
'TX_FRAUD',
'sum_transactions_7_days',
'count_transactions_7_days',
'sum_transactions_14_days',
'count_transactions_14_days',
'sum_transactions_28_days',
'count_transactions_28_days',
'count_fraud_transactions_7_days',
'rate_fraud_transactions_7_days',
'count_fraud_transactions_14_days',
'rate_fraud_transactions_14_days',
'count_fraud_transactions_28_days',
'rate_fraud_transactions_28_days',
'calendar_hour',
'calendar_day_of_week',
'TERMINAL_ID']
```
</div>
We extract the name of the input features.
```python
input_feature_names = [k for k in all_data.schema.feature_names() if k.islower()]
print("The model's input features:")
input_feature_names
```
<div class="k-default-codeblock">
```
The model's input features:
['sum_transactions_7_days',
'count_transactions_7_days',
'sum_transactions_14_days',
'count_transactions_14_days',
'sum_transactions_28_days',
'count_transactions_28_days',
'count_fraud_transactions_7_days',
'rate_fraud_transactions_7_days',
'count_fraud_transactions_14_days',
'rate_fraud_transactions_14_days',
'count_fraud_transactions_28_days',
'rate_fraud_transactions_28_days',
'calendar_hour',
'calendar_day_of_week']
```
</div>
For neural networks to work correctly, numerical inputs must be normalized. A
common approach is z-normalization, which subtracts the mean from each value and
divides by the standard deviation, both estimated on the training data. In
forecasting, such z-normalization is not recommended as it would lead
to future leakage. Specifically, to classify a transaction at time t, we cannot
rely on data after time t since, at serving time when making a prediction at
time t, no subsequent data is available yet. In short, at time t, we are limited
to using data that precedes or is concurrent with time t.
The solution is therefore to apply z-normalization **over time**, which means
that we normalize each transaction using the mean and standard deviation
computed from the past data **for that transaction**.
Future leakage is pernicious. Luckily, Temporian is here to help: the only
operator that can cause future leakage is `EventSet.leak()`. If you are not
using `EventSet.leak()`, your preprocessing is **guaranteed** not to create
future leakage.
**Note:** For advanced pipelines, you can also check programmatically that a
feature does not depend on an `EventSet.leak()` operation.
```python
# Cast all values (e.g. ints) to floats.
values = all_data[input_feature_names].cast(tp.float32)
# Apply z-normalization over time.
normalized_features = (
values - values.simple_moving_average(math.inf)
) / values.moving_standard_deviation(math.inf)
# Restore the original name of the features.
normalized_features = normalized_features.rename(values.schema.feature_names())
print(normalized_features)
```
<div class="k-default-codeblock">
```
indexes: []
features: [('sum_transactions_7_days', float32), ('count_transactions_7_days', float32), ('sum_transactions_14_days', float32), ('count_transactions_14_days', float32), ('sum_transactions_28_days', float32), ('count_transactions_28_days', float32), ('count_fraud_transactions_7_days', float32), ('rate_fraud_transactions_7_days', float32), ('count_fraud_transactions_14_days', float32), ('rate_fraud_transactions_14_days', float32), ('count_fraud_transactions_28_days', float32), ('rate_fraud_transactions_28_days', float32), ('calendar_hour', float32), ('calendar_day_of_week', float32)]
events:
(1754155 events):
timestamps: ['2018-04-01T00:00:31' '2018-04-01T00:02:10' '2018-04-01T00:07:56' ...
'2018-09-30T23:58:21' '2018-09-30T23:59:52' '2018-09-30T23:59:57']
'sum_transactions_7_days': [ 0. 1. 1.3636 ... -0.064 -0.2059 0.8428]
'count_transactions_7_days': [ nan nan nan ... 1.0128 0.6892 1.66 ]
'sum_transactions_14_days': [ 0. 1. 1.3636 ... -0.7811 0.156 1.379 ]
'count_transactions_14_days': [ nan nan nan ... 0.2969 0.2969 2.0532]
'sum_transactions_28_days': [ 0. 1. 1.3636 ... -0.7154 -0.2989 1.9396]
'count_transactions_28_days': [ nan nan nan ... 0.1172 -0.1958 1.8908]
'count_fraud_transactions_7_days': [ nan nan nan ... -0.1043 -0.1043 -0.1043]
'rate_fraud_transactions_7_days': [ nan nan nan ... -0.1137 -0.1137 -0.1137]
'count_fraud_transactions_14_days': [ nan nan nan ... -0.1133 -0.1133 0.9303]
'rate_fraud_transactions_14_days': [ nan nan nan ... -0.1216 -0.1216 0.5275]
...
memory usage: 112.3 MB
```
</div>
<div class="k-default-codeblock">
```
/home/gbm/my_venv/lib/python3.11/site-packages/temporian/implementation/numpy/operators/binary/arithmetic.py:100: RuntimeWarning: invalid value encountered in divide
return evset_1_feature / evset_2_feature
```
</div>
The first transactions will be normalized using poor estimates of the mean and
standard deviation since there are only a few transactions before them. To
mitigate this issue, we remove the first week of data from the training dataset.
Notice that the first values contain NaN. In Temporian, NaN represents missing
values, and all operators handle them accordingly. For instance, when
calculating a moving average, NaN values are not included in the calculation
and do not generate a NaN result.
However, neural networks cannot natively handle NaN values. So, we replace them
with zeros.
```python
normalized_features = normalized_features.fillna(0.0)
```
Finally, we group together the features and the labels.
```python
normalized_all_data = tp.glue(normalized_features, all_data["TX_FRAUD"])
```
---
## Split dataset into a train, validation and test set
To evaluate the quality of our machine learning model, we need training,
validation and test sets. Since the system is dynamic (new fraud patterns are
being created all the time), it is important for the training set to come before
the validation set, and the validation set to come before the testing set:
- **Training:** April 8, 2018 to July 31, 2018
- **Validation:** August 1, 2018 to August 31, 2018
- **Testing:** September 1, 2018 to September 30, 2018
For the example to run faster, we will effectively reduce the size of the
training set to:
- **Training:** July 1, 2018 to July 31, 2018
```python
# begin_train = datetime.datetime(2018, 4, 8).timestamp() # Full training dataset
begin_train = datetime.datetime(2018, 7, 1).timestamp() # Reduced training dataset
begin_valid = datetime.datetime(2018, 8, 1).timestamp()
begin_test = datetime.datetime(2018, 9, 1).timestamp()
is_train = (normalized_all_data.timestamps() >= begin_train) & (
normalized_all_data.timestamps() < begin_valid
)
is_valid = (normalized_all_data.timestamps() >= begin_valid) & (
normalized_all_data.timestamps() < begin_test
)
is_test = normalized_all_data.timestamps() >= begin_test
```
`is_train`, `is_valid` and `is_test` are boolean features over time that indicate
the limits of the three folds. Let's plot them.
```python
tp.plot(
[
is_train.rename("is_train"),
is_valid.rename("is_valid"),
is_test.rename("is_test"),
]
)
```

We filter the input features and label in each fold.
```python
train_ds_evset = normalized_all_data.filter(is_train)
valid_ds_evset = normalized_all_data.filter(is_valid)
test_ds_evset = normalized_all_data.filter(is_test)
print(f"Training examples: {train_ds_evset.num_events()}")
print(f"Validation examples: {valid_ds_evset.num_events()}")
print(f"Testing examples: {test_ds_evset.num_events()}")
```
<div class="k-default-codeblock">
```
Training examples: 296924
Validation examples: 296579
Testing examples: 288064
```
</div>
It is important to split the dataset **after** the features have been computed
because some of the features for the training dataset are computed from
transactions during the training window.
---
## Create TensorFlow datasets
We convert the datasets from EventSets to TensorFlow Datasets as Keras consumes
them natively.
```python
non_batched_train_ds = tp.to_tensorflow_dataset(train_ds_evset)
non_batched_valid_ds = tp.to_tensorflow_dataset(valid_ds_evset)
non_batched_test_ds = tp.to_tensorflow_dataset(test_ds_evset)
```
The following processing steps are applied using TensorFlow datasets:
1. The features and labels are separated using `extract_features_and_label` in
the format that Keras expects.
1. The dataset is batched, which means that the examples are grouped into
mini-batches.
1. The training examples are shuffled to improve the quality of mini-batch
training.
As we noted before, the dataset is imbalanced in the direction of legitimate
transactions. While we want to evaluate our model on this original distribution,
neural networks often train poorly on strongly imbalanced datasets. Therefore,
we resample the training dataset to a ratio of 80% legitimate / 20% fraudulent
using `rejection_resample`.
```python
def extract_features_and_label(example):
features = {k: example[k] for k in input_feature_names}
labels = tf.cast(example["TX_FRAUD"], tf.int32)
return features, labels
# Target ratio of fraudulent transactions in the training dataset.
target_rate = 0.2
# Number of examples in a mini-batch.
batch_size = 32
train_ds = (
non_batched_train_ds.shuffle(10000)
.rejection_resample(
class_func=lambda x: tf.cast(x["TX_FRAUD"], tf.int32),
target_dist=[1 - target_rate, target_rate],
initial_dist=[1 - fraudulent_rate, fraudulent_rate],
)
.map(lambda _, x: x) # Remove the label copy added by "rejection_resample".
.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
# The test and validation datasets do not need resampling or shuffling.
valid_ds = (
non_batched_valid_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
test_ds = (
non_batched_test_ds.batch(batch_size)
.map(extract_features_and_label)
.prefetch(tf.data.AUTOTUNE)
)
```
<div class="k-default-codeblock">
```
WARNING:tensorflow:From /home/gbm/my_venv/lib/python3.11/site-packages/tensorflow/python/data/ops/dataset_ops.py:4956: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.
Instructions for updating:
Use tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. Below is an example of how to ensure tf.print executes in graph mode:
```
</div>
<div class="k-default-codeblock">
```
WARNING:tensorflow:From /home/gbm/my_venv/lib/python3.11/site-packages/tensorflow/python/data/ops/dataset_ops.py:4956: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.
Instructions for updating:
Use tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. Below is an example of how to ensure tf.print executes in graph mode:
```
</div>
We print the first four examples of the training dataset. This is a simple way
to identify some of the errors that could have been made above.
```python
for features, labels in train_ds.take(1):
print("features")
for feature_name, feature_value in features.items():
print(f"\t{feature_name}: {feature_value[:4]}")
print(f"labels: {labels[:4]}")
```
<div class="k-default-codeblock">
```
features
sum_transactions_7_days: [-0.9417254 -1.1157728 -0.5594417 0.7264878]
count_transactions_7_days: [-0.23363686 -0.8702531 -0.23328805 0.7198456 ]
sum_transactions_14_days: [-0.9084115 2.8127224 0.7297886 0.0666021]
count_transactions_14_days: [-0.54289246 2.4122045 0.1963075 0.3798441 ]
sum_transactions_28_days: [-0.44202712 2.3494742 0.20992276 0.97425723]
count_transactions_28_days: [0.02585898 1.8197156 0.12127225 0.9692807 ]
count_fraud_transactions_7_days: [ 8.007475 -0.09783722 1.9282814 -0.09780706]
rate_fraud_transactions_7_days: [14.308702 -0.10952345 1.6929103 -0.10949575]
count_fraud_transactions_14_days: [12.411182 -0.1045466 1.0330476 -0.1045142]
rate_fraud_transactions_14_days: [15.742149 -0.11567765 1.0170861 -0.11565071]
count_fraud_transactions_28_days: [ 7.420907 -0.11298086 0.572011 -0.11293571]
rate_fraud_transactions_28_days: [10.065552 -0.12640427 0.5862939 -0.12637936]
calendar_hour: [-0.68766755 0.6972711 -1.6792761 0.49967623]
calendar_day_of_week: [1.492013 1.4789637 1.4978485 1.4818214]
labels: [1 0 0 0]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
```
</div>
---
## Train the model
The original dataset is transactional, but the processed data is tabular and
only contains normalized numerical values. Therefore, we train a feed-forward
neural network.
```python
inputs = [keras.Input(shape=(1,), name=name) for name in input_feature_names]
x = keras.layers.concatenate(inputs)
x = keras.layers.Dense(32, activation="sigmoid")(x)
x = keras.layers.Dense(16, activation="sigmoid")(x)
x = keras.layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=x)
```
Our goal is to differentiate between the fraudulent and legitimate transactions,
so we use a binary classification objective. Because the dataset is imbalanced,
accuracy is not an informative metric. Instead, we evaluate the model using the
[area under the curve](https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve)
(AUC).
```python
model.compile(
optimizer=keras.optimizers.Adam(0.01),
loss=keras.losses.BinaryCrossentropy(),
metrics=[keras.metrics.Accuracy(), keras.metrics.AUC()],
)
model.fit(train_ds, validation_data=valid_ds)
```
<div class="k-default-codeblock">
```
5/Unknown 1s 15ms/step - accuracy: 0.0000e+00 - auc: 0.4480 - loss: 0.7678
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
Proportion of examples rejected by sampler is high: [0.991630733][0.991630733 0.00836927164][0 1]
433/Unknown 23s 51ms/step - accuracy: 0.0000e+00 - auc: 0.8060 - loss: 0.3632
/usr/lib/python3.11/contextlib.py:155: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset.
self.gen.throw(typ, value, traceback)
433/433 ━━━━━━━━━━━━━━━━━━━━ 30s 67ms/step - accuracy: 0.0000e+00 - auc: 0.8060 - loss: 0.3631 - val_accuracy: 0.0000e+00 - val_auc: 0.8252 - val_loss: 0.2133
<keras.src.callbacks.history.History at 0x7f8f74f0d750>
```
</div>
We evaluate the model on the test dataset.
```python
model.evaluate(test_ds)
```
<div class="k-default-codeblock">
```
9002/9002 ━━━━━━━━━━━━━━━━━━━━ 7s 811us/step - accuracy: 0.0000e+00 - auc: 0.8357 - loss: 0.2161
[0.2171599417924881, 0.0, 0.8266682028770447]
```
</div>
With an AUC of ~83%, our simple fraud detector is showing encouraging
results.
Plotting the ROC curve is a good way to understand and select the operating
point of the model, i.e. the threshold applied to the model output to
differentiate between fraudulent and legitimate transactions.
Compute the test predictions:
```python
predictions = model.predict(test_ds)
predictions = np.nan_to_num(predictions, nan=0)
```
<div class="k-default-codeblock">
```
9002/9002 ━━━━━━━━━━━━━━━━━━━━ 10s 1ms/step
```
</div>
Extract the labels from the test set:
```python
labels = np.concatenate([label for _, label in test_ds])
```
Finally, we plot the ROC curve.
```python
_ = RocCurveDisplay.from_predictions(labels, predictions)
```

The Keras model is ready to be used on transactions with an unknown fraud
status, a.k.a. serving. We save the model on disk for future use.
**Note:** The model does not include the data preparation and preprocessing steps
done in Pandas and Temporian. They have to be applied manually to the data fed
into the model. While not demonstrated here, Temporian preprocessing can also be
saved to disk with
[tp.save](https://temporian.readthedocs.io/en/latest/reference/temporian/serialization/save/).
```python
model.save("fraud_detection_model.keras")
```
The model can be later reloaded with:
```python
loaded_model = keras.saving.load_model("fraud_detection_model.keras")
# Generate predictions with the loaded model on 5 test examples.
loaded_model.predict(test_ds.rebatch(5).take(1))
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 71ms/step
/usr/lib/python3.11/contextlib.py:155: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset.
self.gen.throw(typ, value, traceback)
array([[0.08197185],
[0.16517264],
[0.13180313],
[0.10209075],
[0.14283912]], dtype=float32)
```
</div>
---
## Conclusion
We trained a feed-forward neural network to identify fraudulent transactions. To
feed them into the model, the transactions were preprocessed and transformed
into a tabular dataset using
[Temporian](https://temporian.readthedocs.io/en/latest/). Now, a question to the
reader: What could be done to further improve the model's performance?
Here are some ideas:
- Train the model on the entire dataset instead of a single month of data.
- Train the model for more epochs and use early stopping to ensure that the
model is fully trained without overfitting (see the sketch after this list).
- Make the feed-forward network more powerful by increasing the number of layers
while ensuring that the model is regularized.
- Compute additional preprocessing features. For example, in addition to
aggregating transactions by terminal, aggregate transactions by client.
- Use the Keras Tuner to perform hyperparameter tuning on the model. Note that
the parameters of the preprocessing (e.g., the number of days of
aggregations) are also hyperparameters that can be tuned.
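As an illustration of the early stopping idea, here is a minimal sketch. It
assumes the `model`, `train_ds` and `valid_ds` objects defined above; the
monitored metric name follows the `auc` metric reported during training, and the
epoch count, steps per epoch, and patience are arbitrary illustration values.
```python
early_stopping = keras.callbacks.EarlyStopping(
    monitor="val_auc",  # Validation AUC, as reported during training above.
    mode="max",
    patience=5,
    restore_best_weights=True,
)
model.fit(
    # As the warning above suggests, the dataset must repeat to provide
    # enough batches for multiple epochs.
    train_ds.repeat(),
    steps_per_epoch=500,  # Arbitrary illustration value.
    validation_data=valid_ds,
    epochs=100,
    callbacks=[early_stopping],
)
```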
| keras-io/examples/timeseries/md/event_classification_for_payment_card_fraud_detection.md/0 | {
"file_path": "keras-io/examples/timeseries/md/event_classification_for_payment_card_fraud_detection.md",
"repo_id": "keras-io",
"token_count": 23711
} | 105 |
"""
Title: Highly accurate boundaries segmentation using BASNet
Author: [Hamid Ali](https://github.com/hamidriasat)
Date created: 2023/05/30
Last modified: 2023/07/13
Description: Boundaries aware segmentation model trained on the DUTS dataset.
Accelerator: GPU
"""
"""
## Introduction
Deep semantic segmentation algorithms have improved a lot recently, but they still fail to
correctly predict pixels around object boundaries. In this example we implement
**Boundary-Aware Segmentation Network (BASNet)**, which uses a two-stage predict-and-refine
architecture together with a hybrid loss to predict highly accurate boundaries and fine
structures for image segmentation.
### References:
- [Boundary-Aware Segmentation Network for Mobile and Web Applications](https://arxiv.org/abs/2101.04704)
- [BASNet Keras Implementation](https://github.com/hamidriasat/BASNet/tree/basnet_keras)
- [Learning to Detect Salient Objects with Image-level Supervision](https://openaccess.thecvf.com/content_cvpr_2017/html/Wang_Learning_to_Detect_CVPR_2017_paper.html)
"""
"""
## Download the Data
We will use the [DUTS-TE](http://saliencydetection.net/duts/) dataset for training. It has 5,019
images, but we will use 140 for training and validation to save notebook running time. DUTS is a
relatively large salient object segmentation dataset, which contains diversified textures and
structures common to real-world images in both foreground and background.
"""
"""shell
wget http://saliencydetection.net/duts/download/DUTS-TE.zip
unzip -q DUTS-TE.zip
"""
import os
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import keras_cv
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, backend
"""
## Define Hyperparameters
"""
IMAGE_SIZE = 288
BATCH_SIZE = 4
OUT_CLASSES = 1
TRAIN_SPLIT_RATIO = 0.90
DATA_DIR = "./DUTS-TE/"
"""
## Create TensorFlow Dataset
We will use `load_paths()` to load and split 140 paths into train and validation set, and
`load_dataset()` to convert paths into `tf.data.Dataset` object.
"""
def load_paths(path, split_ratio):
images = sorted(glob(os.path.join(path, "DUTS-TE-Image/*")))[:140]
masks = sorted(glob(os.path.join(path, "DUTS-TE-Mask/*")))[:140]
len_ = int(len(images) * split_ratio)
return (images[:len_], masks[:len_]), (images[len_:], masks[len_:])
def read_image(path, size, mode):
x = keras.utils.load_img(path, target_size=size, color_mode=mode)
x = keras.utils.img_to_array(x)
x = (x / 255.0).astype(np.float32)
return x
def preprocess(x_batch, y_batch, img_size, out_classes):
def f(_x, _y):
_x, _y = _x.decode(), _y.decode()
_x = read_image(_x, (img_size, img_size), mode="rgb") # image
_y = read_image(_y, (img_size, img_size), mode="grayscale") # mask
return _x, _y
images, masks = tf.numpy_function(f, [x_batch, y_batch], [tf.float32, tf.float32])
images.set_shape([img_size, img_size, 3])
masks.set_shape([img_size, img_size, out_classes])
return images, masks
def load_dataset(image_paths, mask_paths, img_size, out_classes, batch, shuffle=True):
dataset = tf.data.Dataset.from_tensor_slices((image_paths, mask_paths))
if shuffle:
dataset = dataset.cache().shuffle(buffer_size=1000)
dataset = dataset.map(
lambda x, y: preprocess(x, y, img_size, out_classes),
num_parallel_calls=tf.data.AUTOTUNE,
)
dataset = dataset.batch(batch)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
train_paths, val_paths = load_paths(DATA_DIR, TRAIN_SPLIT_RATIO)
train_dataset = load_dataset(
train_paths[0], train_paths[1], IMAGE_SIZE, OUT_CLASSES, BATCH_SIZE, shuffle=True
)
val_dataset = load_dataset(
val_paths[0], val_paths[1], IMAGE_SIZE, OUT_CLASSES, BATCH_SIZE, shuffle=False
)
print(f"Train Dataset: {train_dataset}")
print(f"Validation Dataset: {val_dataset}")
"""
## Visualize Data
"""
def display(display_list):
title = ["Input Image", "True Mask", "Predicted Mask"]
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(keras.utils.array_to_img(display_list[i]), cmap="gray")
plt.axis("off")
plt.show()
for image, mask in val_dataset.take(1):
display([image[0], mask[0]])
"""
## Analyze Mask
Let's print the unique values of the mask displayed above. You can see that, despite belonging to
one class, its intensity varies from low (0) to high (255). This variation in intensity makes it
hard for the network to generate a good segmentation map for **salient or camouflaged object
segmentation**. Thanks to its Residual Refinement Module (RRM), BASNet is good at generating
highly accurate boundaries and fine structures.
"""
print(f"Unique values count: {len(np.unique((mask[0] * 255)))}")
print("Unique values:")
print(np.unique((mask[0] * 255)).astype(int))
"""
## Building the BASNet Model
BASNet comprises a predict-refine architecture and a hybrid loss. The predict-refine
architecture consists of a densely supervised encoder-decoder network and a residual refinement
module, which are respectively used to predict and refine a segmentation probability map.

"""
def basic_block(x_input, filters, stride=1, down_sample=None, activation=None):
"""Creates a residual(identity) block with two 3*3 convolutions."""
residual = x_input
x = layers.Conv2D(filters, (3, 3), strides=stride, padding="same", use_bias=False)(
x_input
)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2D(filters, (3, 3), strides=(1, 1), padding="same", use_bias=False)(
x
)
x = layers.BatchNormalization()(x)
if down_sample is not None:
residual = down_sample
x = layers.Add()([x, residual])
if activation is not None:
x = layers.Activation(activation)(x)
return x
def convolution_block(x_input, filters, dilation=1):
"""Apply convolution + batch normalization + relu layer."""
x = layers.Conv2D(filters, (3, 3), padding="same", dilation_rate=dilation)(x_input)
x = layers.BatchNormalization()(x)
return layers.Activation("relu")(x)
def segmentation_head(x_input, out_classes, final_size):
"""Map each decoder stage output to model output classes."""
x = layers.Conv2D(out_classes, kernel_size=(3, 3), padding="same")(x_input)
if final_size is not None:
x = layers.Resizing(final_size[0], final_size[1])(x)
return x
def get_resnet_block(_resnet, block_num):
"""Extract and return ResNet-34 block."""
resnet_layers = [3, 4, 6, 3] # ResNet-34 layer sizes at different block.
return keras.models.Model(
inputs=_resnet.get_layer(f"v2_stack_{block_num}_block1_1_conv").input,
outputs=_resnet.get_layer(
f"v2_stack_{block_num}_block{resnet_layers[block_num]}_add"
).output,
name=f"resnet34_block{block_num + 1}",
)
"""
## Prediction Module
The prediction module is a heavy encoder-decoder structure like U-Net. The encoder includes an
input convolutional layer and six stages. The first four are adopted from ResNet-34 and the rest
are basic res-blocks. Since the first convolution and pooling layer of ResNet-34 are skipped, we
will use `get_resnet_block()` to extract the first four blocks. Both the bridge and the decoder
use three convolutional layers with side outputs. The module produces seven segmentation
probability maps during training, with the last one considered the final output.
"""
def basnet_predict(input_shape, out_classes):
"""BASNet Prediction Module, it outputs coarse label map."""
filters = 64
num_stages = 6
x_input = layers.Input(input_shape)
# -------------Encoder--------------
x = layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x_input)
resnet = keras_cv.models.ResNet34Backbone(
include_rescaling=False,
)
encoder_blocks = []
for i in range(num_stages):
if i < 4: # First four stages are adopted from ResNet-34 blocks.
x = get_resnet_block(resnet, i)(x)
encoder_blocks.append(x)
x = layers.Activation("relu")(x)
else: # Last 2 stages consist of three basic resnet blocks.
x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
x = basic_block(x, filters=filters * 8, activation="relu")
x = basic_block(x, filters=filters * 8, activation="relu")
x = basic_block(x, filters=filters * 8, activation="relu")
encoder_blocks.append(x)
# -------------Bridge-------------
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
x = convolution_block(x, filters=filters * 8, dilation=2)
encoder_blocks.append(x)
# -------------Decoder-------------
decoder_blocks = []
for i in reversed(range(num_stages)):
if i != (num_stages - 1): # Except first, scale other decoder stages.
shape = keras.backend.int_shape(x)
x = layers.Resizing(shape[1] * 2, shape[2] * 2)(x)
x = layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
x = convolution_block(x, filters=filters * 8)
decoder_blocks.append(x)
decoder_blocks.reverse() # Change order from last to first decoder stage.
decoder_blocks.append(encoder_blocks[-1]) # Copy bridge to decoder.
# -------------Side Outputs--------------
decoder_blocks = [
segmentation_head(decoder_block, out_classes, input_shape[:2])
for decoder_block in decoder_blocks
]
return keras.models.Model(inputs=[x_input], outputs=decoder_blocks)
"""
## Residual Refinement Module
The Residual Refinement Module (RRM), designed as a residual block, aims to refine the coarse
(blurry and noisy boundaries) segmentation maps generated by the prediction module. Similar to the
prediction module, it is also an encoder-decoder structure, but lightweight, with 4 stages, each
containing one `convolution_block()`. At the end, it adds the coarse and residual outputs to
generate the refined output.
"""
def basnet_rrm(base_model, out_classes):
"""BASNet Residual Refinement Module(RRM) module, output fine label map."""
num_stages = 4
filters = 64
x_input = base_model.output[0]
# -------------Encoder--------------
x = layers.Conv2D(filters, kernel_size=(3, 3), padding="same")(x_input)
encoder_blocks = []
for _ in range(num_stages):
x = convolution_block(x, filters=filters)
encoder_blocks.append(x)
x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))(x)
# -------------Bridge--------------
x = convolution_block(x, filters=filters)
# -------------Decoder--------------
for i in reversed(range(num_stages)):
shape = keras.backend.int_shape(x)
x = layers.Resizing(shape[1] * 2, shape[2] * 2)(x)
x = layers.concatenate([encoder_blocks[i], x], axis=-1)
x = convolution_block(x, filters=filters)
x = segmentation_head(x, out_classes, None) # Segmentation head.
# ------------- refined = coarse + residual
x = layers.Add()([x_input, x]) # Add prediction + refinement output
return keras.models.Model(inputs=[base_model.input], outputs=[x])
"""
## Combine Predict and Refinement Module
"""
def basnet(input_shape, out_classes):
"""BASNet, it's a combination of two modules
Prediction Module and Residual Refinement Module(RRM)."""
# Prediction model.
predict_model = basnet_predict(input_shape, out_classes)
# Refinement model.
refine_model = basnet_rrm(predict_model, out_classes)
output = [refine_model.output] # Combine outputs.
output.extend(predict_model.output)
output = [layers.Activation("sigmoid")(_) for _ in output] # Activations.
return keras.models.Model(inputs=[predict_model.input], outputs=output)
"""
## Hybrid Loss
Another important feature of BASNet is its hybrid loss function, which is a combination of
binary cross entropy, structural similarity and intersection-over-union losses, which guide
the network to learn three-level (i.e., pixel, patch and map level) hierarchy representations.
"""
class BasnetLoss(keras.losses.Loss):
"""BASNet hybrid loss."""
def __init__(self, **kwargs):
super().__init__(name="basnet_loss", **kwargs)
self.smooth = 1.0e-9
# Binary Cross Entropy loss.
self.cross_entropy_loss = keras.losses.BinaryCrossentropy()
# Structural Similarity Index value.
self.ssim_value = tf.image.ssim
# Jaccard / IoU loss.
self.iou_value = self.calculate_iou
def calculate_iou(
self,
y_true,
y_pred,
):
"""Calculate intersection over union (IoU) between images."""
intersection = backend.sum(backend.abs(y_true * y_pred), axis=[1, 2, 3])
union = backend.sum(y_true, [1, 2, 3]) + backend.sum(y_pred, [1, 2, 3])
union = union - intersection
return backend.mean(
(intersection + self.smooth) / (union + self.smooth), axis=0
)
def call(self, y_true, y_pred):
cross_entropy_loss = self.cross_entropy_loss(y_true, y_pred)
ssim_value = self.ssim_value(y_true, y_pred, max_val=1)
ssim_loss = backend.mean(1 - ssim_value + self.smooth, axis=0)
iou_value = self.iou_value(y_true, y_pred)
iou_loss = 1 - iou_value
# Add all three losses.
return cross_entropy_loss + ssim_loss + iou_loss
basnet_model = basnet(
input_shape=[IMAGE_SIZE, IMAGE_SIZE, 3], out_classes=OUT_CLASSES
) # Create model.
basnet_model.summary() # Show model summary.
optimizer = keras.optimizers.Adam(learning_rate=1e-4, epsilon=1e-8)
# Compile model.
basnet_model.compile(
loss=BasnetLoss(),
optimizer=optimizer,
metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
)
"""
### Train the Model
"""
basnet_model.fit(train_dataset, validation_data=val_dataset, epochs=1)
"""
### Visualize Predictions
In the paper, BASNet was trained on the DUTS-TR dataset, which has 10,553 images. The model was
trained for 400k iterations with a batch size of eight and without a validation dataset. After
training, the model was evaluated on the DUTS-TE dataset and achieved a mean absolute error of `0.042`.
Since BASNet is a deep model and cannot be trained in the short amount of time required for a
Keras example notebook, we will load pretrained weights from [here](https://github.com/hamidriasat/BASNet/tree/basnet_keras)
to show model predictions. Due to compute limitations, this model was trained for 120k
iterations, but it still demonstrates its capabilities. For further details about the
training parameters, please check the given link.
"""
"""shell
gdown 1OWKouuAQ7XpXZbWA3mmxDPrFGW71Axrg
"""
def normalize_output(prediction):
max_value = np.max(prediction)
min_value = np.min(prediction)
return (prediction - min_value) / (max_value - min_value)
# Load weights.
basnet_model.load_weights("./basnet_weights.h5")
"""
### Make Predictions
"""
for image, mask in val_dataset.take(1):
pred_mask = basnet_model.predict(image)
display([image[0], mask[0], normalize_output(pred_mask[0][0])])
| keras-io/examples/vision/basnet_segmentation.py/0 | {
"file_path": "keras-io/examples/vision/basnet_segmentation.py",
"repo_id": "keras-io",
"token_count": 5735
} | 106 |
"""
Title: Using the Forward-Forward Algorithm for Image Classification
Author: [Suvaditya Mukherjee](https://twitter.com/halcyonrayes)
Date created: 2023/01/08
Last modified: 2023/01/08
Description: Training a Dense-layer model using the Forward-Forward algorithm.
Accelerator: GPU
"""
"""
## Introduction
The following example explores how to use the Forward-Forward algorithm to perform
training instead of the traditionally-used method of backpropagation, as proposed by
Hinton in
[The Forward-Forward Algorithm: Some Preliminary Investigations](https://www.cs.toronto.edu/~hinton/FFA13.pdf)
(2022).
The concept was inspired by the understanding behind
[Boltzmann Machines](http://www.cs.toronto.edu/~fritz/absps/dbm.pdf). Backpropagation
involves calculating the difference between actual and predicted output via a cost
function to adjust network weights. On the other hand, the FF Algorithm suggests the
analogy of neurons which get "excited" based on looking at a certain recognized
combination of an image and its correct corresponding label.
This method takes certain inspiration from the biological learning process that occurs in
the cortex. A significant advantage that this method brings is the fact that
backpropagation through the network does not need to be performed anymore, and that
weight updates are local to the layer itself.
As this is still an experimental method, it does not yield state-of-the-art results.
But with proper tuning, it is expected to come close.
Through this example, we will examine a process that allows us to implement the
Forward-Forward algorithm within the layers themselves, instead of the traditional method
of relying on the global loss functions and optimizers.
The tutorial is structured as follows:
- Perform necessary imports
- Load the [MNIST dataset](http://yann.lecun.com/exdb/mnist/)
- Visualize Random samples from the MNIST dataset
- Define a `FFDense` Layer to override `call` and implement a custom `forward_forward`
method which performs weight updates.
- Define a `FFNetwork` Layer to override `train_step`, `predict` and implement 2 custom
functions for per-sample prediction and overlaying labels
- Convert MNIST from `NumPy` arrays to `tf.data.Dataset`
- Fit the network
- Visualize results
- Perform inference on test samples
As this example requires the customization of certain core functions with
`keras.layers.Layer` and `keras.models.Model`, refer to the following resources for
a primer on how to do so:
- [Customizing what happens in `model.fit()`](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit)
- [Making new Layers and Models via subclassing](https://www.tensorflow.org/guide/keras/custom_layers_and_models)
"""
"""
## Setup imports
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import random
from tensorflow.compiler.tf2xla.python import xla
"""
## Load the dataset and visualize the data
We use the `keras.datasets.mnist.load_data()` utility to directly pull the MNIST dataset
in the form of `NumPy` arrays. We then arrange it in the form of the train and test
splits.
Following loading the dataset, we select 4 random samples from within the training set
and visualize them using `matplotlib.pyplot`.
"""
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
print("4 Random Training samples and labels")
idx1, idx2, idx3, idx4 = random.sample(range(0, x_train.shape[0]), 4)
img1 = (x_train[idx1], y_train[idx1])
img2 = (x_train[idx2], y_train[idx2])
img3 = (x_train[idx3], y_train[idx3])
img4 = (x_train[idx4], y_train[idx4])
imgs = [img1, img2, img3, img4]
plt.figure(figsize=(10, 10))
for idx, item in enumerate(imgs):
image, label = item[0], item[1]
plt.subplot(2, 2, idx + 1)
plt.imshow(image, cmap="gray")
plt.title(f"Label : {label}")
plt.show()
"""
## Define `FFDense` custom layer
In this custom layer, we have a base `keras.layers.Dense` object which acts as the
base `Dense` layer within. Since weight updates will happen within the layer itself, we
add a `keras.optimizers.Optimizer` object that is accepted from the user. Here, we
use `Adam` as our optimizer with a rather high learning rate of `0.03`.
Following the algorithm's specifics, we must set a `threshold` parameter that will be
used to make the positive-negative decision in each prediction. This is set to 1.5 in
this example.
As the epochs are localized to the layer itself, we also set a `num_epochs` parameter
(defaults to 50).
We override the `call` method in order to perform a normalization over the complete
input space followed by running it through the base `Dense` layer as would happen in a
normal `Dense` layer call.
We implement the Forward-Forward algorithm which accepts 2 kinds of input tensors, each
representing the positive and negative samples respectively. We write a custom training
loop here with the use of `tf.GradientTape()`, within which we calculate a loss per
sample by taking the distance of the prediction from the threshold to understand the
error and taking its mean to get a `mean_loss` metric.
With the help of `tf.GradientTape()` we calculate the gradient updates for the trainable
base `Dense` layer and apply them using the layer's local optimizer.
Finally, we return the `call` result as the `Dense` results of the positive and negative
samples while also returning the last `mean_loss` metric and all the loss values over a
certain all-epoch run.
"""
class FFDense(keras.layers.Layer):
"""
A custom ForwardForward-enabled Dense layer. It has an implementation of the
Forward-Forward network internally for use.
This layer must be used in conjunction with the `FFNetwork` model.
"""
def __init__(
self,
units,
optimizer,
loss_metric,
num_epochs=50,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
**kwargs,
):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=units,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
)
self.relu = keras.layers.ReLU()
self.optimizer = optimizer
self.loss_metric = loss_metric
self.threshold = 1.5
self.num_epochs = num_epochs
# We perform a normalization step before we run the input through the Dense
# layer.
def call(self, x):
x_norm = tf.norm(x, ord=2, axis=1, keepdims=True)
x_norm = x_norm + 1e-4
x_dir = x / x_norm
res = self.dense(x_dir)
return self.relu(res)
# The Forward-Forward algorithm is below. We first perform the Dense-layer
# operation and then get a Mean Square value for all positive and negative
# samples respectively.
# The custom loss function finds the distance between the Mean-squared
# result and the threshold value we set (a hyperparameter) that will define
# whether the prediction is positive or negative in nature. Once the loss is
# calculated, we get a mean across the entire batch combined and perform a
# gradient calculation and optimization step. This does not technically
# qualify as backpropagation since there is no gradient being
# sent to any previous layer and is completely local in nature.
def forward_forward(self, x_pos, x_neg):
for i in range(self.num_epochs):
with tf.GradientTape() as tape:
g_pos = tf.math.reduce_mean(tf.math.pow(self.call(x_pos), 2), 1)
g_neg = tf.math.reduce_mean(tf.math.pow(self.call(x_neg), 2), 1)
loss = tf.math.log(
1
+ tf.math.exp(
tf.concat([-g_pos + self.threshold, g_neg - self.threshold], 0)
)
)
mean_loss = tf.cast(tf.math.reduce_mean(loss), tf.float32)
self.loss_metric.update_state([mean_loss])
gradients = tape.gradient(mean_loss, self.dense.trainable_weights)
self.optimizer.apply_gradients(zip(gradients, self.dense.trainable_weights))
return (
tf.stop_gradient(self.call(x_pos)),
tf.stop_gradient(self.call(x_neg)),
self.loss_metric.result(),
)
"""
## Define the `FFNetwork` Custom Model
With our custom layer defined, we also need to override the `train_step` method and
define a custom `keras.models.Model` that works with our `FFDense` layer.
For this algorithm, we must 'embed' the labels onto the original image. To do so, we
exploit the structure of MNIST images where the top-left 10 pixels are always zeros. We
use that as a label space in order to visually one-hot-encode the labels within the image
itself. This action is performed by the `overlay_y_on_x` function.
We break down the prediction into a per-sample prediction function which is then
called over the entire test set by the overridden `predict()` function. The prediction is
performed here with the help of measuring the `excitation` of the neurons per layer for
each image. This is then summed over all layers to calculate a network-wide 'goodness
score'. The label with the highest 'goodness score' is then chosen as the sample
prediction.
The `train_step` function is overridden to act as the main controlling loop for running
training on each layer as per the number of epochs per layer.
"""
class FFNetwork(keras.Model):
"""
A `keras.Model` that supports a `FFDense` network creation. This model
can work for any kind of classification task. It has an internal
implementation with some details specific to the MNIST dataset which can be
changed as per the use-case.
"""
# Since each layer runs gradient-calculation and optimization locally, each
# layer has its own optimizer that we pass. As a standard choice, we pass
# the `Adam` optimizer with a default learning rate of 0.03 as that was
# found to be the best rate after experimentation.
# Loss is tracked using `loss_var` and `loss_count` variables.
# Use legacy optimizer for Layer Optimizer to fix issue
# https://github.com/keras-team/keras-io/issues/1241
def __init__(
self,
dims,
layer_optimizer=keras.optimizers.legacy.Adam(learning_rate=0.03),
**kwargs,
):
super().__init__(**kwargs)
self.layer_optimizer = layer_optimizer
self.loss_var = tf.Variable(0.0, trainable=False, dtype=tf.float32)
self.loss_count = tf.Variable(0.0, trainable=False, dtype=tf.float32)
self.layer_list = [keras.Input(shape=(dims[0],))]
for d in range(len(dims) - 1):
self.layer_list += [
FFDense(
dims[d + 1],
optimizer=self.layer_optimizer,
loss_metric=keras.metrics.Mean(),
)
]
# This function makes a dynamic change to the image wherein the labels are
# put on top of the original image (for this example, as MNIST has 10
# unique labels, we take the top-left corner's first 10 pixels). This
# function returns the original data tensor with the first 10 pixels being
# a pixel-based one-hot representation of the labels.
@tf.function(reduce_retracing=True)
def overlay_y_on_x(self, data):
X_sample, y_sample = data
max_sample = tf.reduce_max(X_sample, axis=0, keepdims=True)
max_sample = tf.cast(max_sample, dtype=tf.float64)
X_zeros = tf.zeros([10], dtype=tf.float64)
X_update = xla.dynamic_update_slice(X_zeros, max_sample, [y_sample])
X_sample = xla.dynamic_update_slice(X_sample, X_update, [0])
return X_sample, y_sample
# A custom `predict_one_sample` performs predictions by passing the images
# through the network, measures the results produced by each layer (i.e.
# how high/low the output values are with respect to the set threshold for
# each label) and then simply finding the label with the highest values.
# In such a case, the images are tested for their 'goodness' with all
# labels.
@tf.function(reduce_retracing=True)
def predict_one_sample(self, x):
goodness_per_label = []
x = tf.reshape(x, [tf.shape(x)[0] * tf.shape(x)[1]])
for label in range(10):
h, label = self.overlay_y_on_x(data=(x, label))
h = tf.reshape(h, [-1, tf.shape(h)[0]])
goodness = []
for layer_idx in range(1, len(self.layer_list)):
layer = self.layer_list[layer_idx]
h = layer(h)
goodness += [tf.math.reduce_mean(tf.math.pow(h, 2), 1)]
goodness_per_label += [
tf.expand_dims(tf.reduce_sum(goodness, keepdims=True), 1)
]
goodness_per_label = tf.concat(goodness_per_label, 1)
return tf.cast(tf.argmax(goodness_per_label, 1), tf.float64)
def predict(self, data):
x = data
preds = list()
preds = tf.map_fn(fn=self.predict_one_sample, elems=x)
return np.asarray(preds, dtype=int)
# This custom `train_step` function overrides the internal `train_step`
# implementation. We take all the input image tensors, flatten them and
# subsequently produce positive and negative samples on the images.
# A positive sample is an image that has the right label encoded on it with
# the `overlay_y_on_x` function. A negative sample is an image that has an
# erroneous label present on it.
# With the samples ready, we pass them through each `FFLayer` and perform
# the Forward-Forward computation on it. The returned loss is the final
# loss value over all the layers.
@tf.function(jit_compile=True)
def train_step(self, data):
x, y = data
# Flatten op
x = tf.reshape(x, [-1, tf.shape(x)[1] * tf.shape(x)[2]])
x_pos, y = tf.map_fn(fn=self.overlay_y_on_x, elems=(x, y))
random_y = tf.random.shuffle(y)
x_neg, y = tf.map_fn(fn=self.overlay_y_on_x, elems=(x, random_y))
h_pos, h_neg = x_pos, x_neg
for idx, layer in enumerate(self.layers):
if isinstance(layer, FFDense):
print(f"Training layer {idx+1} now : ")
h_pos, h_neg, loss = layer.forward_forward(h_pos, h_neg)
self.loss_var.assign_add(loss)
self.loss_count.assign_add(1.0)
else:
print(f"Passing layer {idx+1} now : ")
x = layer(x)
mean_res = tf.math.divide(self.loss_var, self.loss_count)
return {"FinalLoss": mean_res}
"""
## Convert MNIST `NumPy` arrays to `tf.data.Dataset`
We now perform some preliminary processing on the `NumPy` arrays and then convert them
into the `tf.data.Dataset` format which allows for optimized loading.
"""
x_train = x_train.astype(float) / 255
x_test = x_test.astype(float) / 255
y_train = y_train.astype(int)
y_test = y_test.astype(int)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.batch(60000)
test_dataset = test_dataset.batch(10000)
"""
## Fit the network and visualize results
Having performed all previous set-up, we are now going to run `model.fit()` and run 250
model epochs, which will perform 50*250 epochs on each layer. We get to see the plotted loss
curve as each layer is trained.
"""
model = FFNetwork(dims=[784, 500, 500])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=0.03),
loss="mse",
jit_compile=True,
metrics=[keras.metrics.Mean()],
)
epochs = 250
history = model.fit(train_dataset, epochs=epochs)
"""
## Perform inference and testing
Having trained the model to a large extent, we now see how it performs on the
test set. We calculate the Accuracy Score to understand the results closely.
"""
preds = model.predict(tf.convert_to_tensor(x_test))
preds = preds.reshape((preds.shape[0], preds.shape[1]))
results = accuracy_score(preds, y_test)
print(f"Test Accuracy score : {results*100}%")
plt.plot(range(len(history.history["FinalLoss"])), history.history["FinalLoss"])
plt.title("Loss over training")
plt.show()
"""
## Conclusion
This example has hereby demonstrated how the Forward-Forward algorithm works using
the TensorFlow and Keras packages. While the investigation results presented by Prof. Hinton
in their paper are currently still limited to smaller models and datasets like MNIST and
Fashion-MNIST, subsequent results on larger models like LLMs are expected in future
papers.
Through the paper, Prof. Hinton has reported results of 1.36% test accuracy error with a
2000-units, 4 hidden-layer, fully-connected network run over 60 epochs (while mentioning
that backpropagation takes only 20 epochs to achieve similar performance). Another run of
doubling the learning rate and training for 40 epochs yields a slightly worse error rate
of 1.46%.
The current example does not yield state-of-the-art results. But with proper tuning of
the Learning Rate, model architecture (number of units in `Dense` layers, kernel
activations, initializations, regularization etc.), the results can be improved
to match the claims of the paper.
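For instance, a configuration closer to the one reported in the paper (a hypothetical
illustration only; training it would take considerably longer than this example) could be
created by changing the layer dimensions passed to `FFNetwork`:
```python
model = FFNetwork(dims=[784, 2000, 2000, 2000, 2000])
```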
"""
| keras-io/examples/vision/forwardforward.py/0 | {
"file_path": "keras-io/examples/vision/forwardforward.py",
"repo_id": "keras-io",
"token_count": 6333
} | 107 |
<jupyter_start><jupyter_text>3D image classification from CT scans**Author:** [Hasib Zunair](https://twitter.com/hasibzunair)**Date created:** 2020/09/23**Last modified:** 2024/01/11**Description:** Train a 3D convolutional neural network to predict presence of pneumonia. IntroductionThis example will show the steps needed to build a 3D convolutional neural network (CNN)to predict the presence of viral pneumonia in computer tomography (CT) scans. 2D CNNs arecommonly used to process RGB images (3 channels). A 3D CNN is simply the 3Dequivalent: it takes as input a 3D volume or a sequence of 2D frames (e.g. slices in a CT scan),3D CNNs are a powerful model for learning representations for volumetric data. References- [A survey on Deep Learning Advances on Different 3D DataRepresentations](https://arxiv.org/abs/1808.01462)- [VoxNet: A 3D Convolutional Neural Network for Real-Time Object Recognition](https://www.ri.cmu.edu/pub_files/2015/9/voxnet_maturana_scherer_iros15.pdf)- [FusionNet: 3D Object Classification Using MultipleData Representations](https://arxiv.org/abs/1607.05695)- [Uniformizing Techniques to Process CT scans with 3D CNNs for Tuberculosis Prediction](https://arxiv.org/abs/2007.13224) Setup<jupyter_code>import os
import zipfile
import numpy as np
import tensorflow as tf # for data preprocessing
import keras
from keras import layers<jupyter_output><empty_output><jupyter_text>Downloading the MosMedData: Chest CT Scans with COVID-19 Related FindingsIn this example, we use a subset of the[MosMedData: Chest CT Scans with COVID-19 Related Findings](https://www.medrxiv.org/content/10.1101/2020.05.20.20100362v1).This dataset consists of lung CT scans with COVID-19 related findings, as well as without such findings.We will be using the associated radiological findings of the CT scans as labels to builda classifier to predict presence of viral pneumonia.Hence, the task is a binary classification problem.<jupyter_code># Download url of normal CT scans.
url = "https://github.com/hasibzunair/3D-image-classification-tutorial/releases/download/v0.2/CT-0.zip"
filename = os.path.join(os.getcwd(), "CT-0.zip")
keras.utils.get_file(filename, url)
# Download url of abnormal CT scans.
url = "https://github.com/hasibzunair/3D-image-classification-tutorial/releases/download/v0.2/CT-23.zip"
filename = os.path.join(os.getcwd(), "CT-23.zip")
keras.utils.get_file(filename, url)
# Make a directory to store the data.
os.makedirs("MosMedData")
# Unzip data in the newly created directory.
with zipfile.ZipFile("CT-0.zip", "r") as z_fp:
z_fp.extractall("./MosMedData/")
with zipfile.ZipFile("CT-23.zip", "r") as z_fp:
z_fp.extractall("./MosMedData/")<jupyter_output><empty_output><jupyter_text>Loading data and preprocessingThe files are provided in Nifti format with the extension .nii. To read thescans, we use the `nibabel` package.You can install the package via `pip install nibabel`. CT scans store raw voxelintensity in Hounsfield units (HU). They range from -1024 to above 2000 in this dataset.Above 400 are bones with different radiointensity, so this is used as a higher bound. A thresholdbetween -1000 and 400 is commonly used to normalize CT scans.To process the data, we do the following:* We first rotate the volumes by 90 degrees, so the orientation is fixed* We scale the HU values to be between 0 and 1.* We resize width, height and depth.Here we define several helper functions to process the data. These functionswill be used when building training and validation datasets.<jupyter_code>import nibabel as nib
from scipy import ndimage
def read_nifti_file(filepath):
"""Read and load volume"""
# Read file
scan = nib.load(filepath)
# Get raw data
scan = scan.get_fdata()
return scan
def normalize(volume):
"""Normalize the volume"""
min = -1000
max = 400
volume[volume < min] = min
volume[volume > max] = max
volume = (volume - min) / (max - min)
volume = volume.astype("float32")
return volume
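# Quick check of the Hounsfield windowing above (illustrative values): voxels at or
# below -1000 HU map to 0.0 and voxels at or above 400 HU map to 1.0, e.g.
# normalize(np.array([-2000.0, -1000.0, 400.0, 2000.0])) -> [0.0, 0.0, 1.0, 1.0].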
def resize_volume(img):
"""Resize across z-axis"""
# Set the desired depth
desired_depth = 64
desired_width = 128
desired_height = 128
# Get current depth
current_depth = img.shape[-1]
current_width = img.shape[0]
current_height = img.shape[1]
# Compute depth factor
depth = current_depth / desired_depth
width = current_width / desired_width
height = current_height / desired_height
depth_factor = 1 / depth
width_factor = 1 / width
height_factor = 1 / height
# Rotate
img = ndimage.rotate(img, 90, reshape=False)
# Resize across z-axis
img = ndimage.zoom(img, (width_factor, height_factor, depth_factor), order=1)
return img
def process_scan(path):
"""Read and resize volume"""
# Read scan
volume = read_nifti_file(path)
# Normalize
volume = normalize(volume)
# Resize width, height and depth
volume = resize_volume(volume)
    return volume<jupyter_output><empty_output><jupyter_text>Let's read the paths of the CT scans from the class directories.<jupyter_code># Folder "CT-0" consists of CT scans having normal lung tissue,
# no CT-signs of viral pneumonia.
normal_scan_paths = [
os.path.join(os.getcwd(), "MosMedData/CT-0", x)
for x in os.listdir("MosMedData/CT-0")
]
# Folder "CT-23" consist of CT scans having several ground-glass opacifications,
# involvement of lung parenchyma.
abnormal_scan_paths = [
os.path.join(os.getcwd(), "MosMedData/CT-23", x)
for x in os.listdir("MosMedData/CT-23")
]
print("CT scans with normal lung tissue: " + str(len(normal_scan_paths)))
print("CT scans with abnormal lung tissue: " + str(len(abnormal_scan_paths)))<jupyter_output><empty_output><jupyter_text>Build train and validation datasetsRead the scans from the class directories and assign labels. Downsample the scans to haveshape of 128x128x64. Rescale the raw HU values to the range 0 to 1.Lastly, split the dataset into train and validation subsets.<jupyter_code># Read and process the scans.
# Each scan is resized across height, width, and depth and rescaled.
abnormal_scans = np.array([process_scan(path) for path in abnormal_scan_paths])
normal_scans = np.array([process_scan(path) for path in normal_scan_paths])
# For the CT scans having presence of viral pneumonia
# assign 1, for the normal ones assign 0.
abnormal_labels = np.array([1 for _ in range(len(abnormal_scans))])
normal_labels = np.array([0 for _ in range(len(normal_scans))])
# Split data in the ratio 70-30 for training and validation.
x_train = np.concatenate((abnormal_scans[:70], normal_scans[:70]), axis=0)
y_train = np.concatenate((abnormal_labels[:70], normal_labels[:70]), axis=0)
x_val = np.concatenate((abnormal_scans[70:], normal_scans[70:]), axis=0)
y_val = np.concatenate((abnormal_labels[70:], normal_labels[70:]), axis=0)
print(
"Number of samples in train and validation are %d and %d."
% (x_train.shape[0], x_val.shape[0])
)<jupyter_output><empty_output><jupyter_text>Data augmentationThe CT scans are also augmented by rotating at random angles during training. Since the data is stored in rank-3 tensors of shape `(samples, height, width, depth)`, we add a dimension of size 1 at axis 4 to be able to perform 3D convolutions on the data. The new shape is thus `(samples, height, width, depth, 1)`. There are different kinds of preprocessing and augmentation techniques out there; this example shows a few simple ones to get started.<jupyter_code>import random
from scipy import ndimage
def rotate(volume):
"""Rotate the volume by a few degrees"""
def scipy_rotate(volume):
# define some rotation angles
angles = [-20, -10, -5, 5, 10, 20]
# pick angles at random
angle = random.choice(angles)
# rotate volume
volume = ndimage.rotate(volume, angle, reshape=False)
volume[volume < 0] = 0
volume[volume > 1] = 1
return volume
augmented_volume = tf.numpy_function(scipy_rotate, [volume], tf.float32)
return augmented_volume
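# Note: `tf.numpy_function` above lets the SciPy-based rotation run eagerly inside
# the tf.data pipeline; the output dtype has to be declared explicitly (tf.float32).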
def train_preprocessing(volume, label):
"""Process training data by rotating and adding a channel."""
# Rotate volume
volume = rotate(volume)
volume = tf.expand_dims(volume, axis=3)
return volume, label
def validation_preprocessing(volume, label):
"""Process validation data by only adding a channel."""
volume = tf.expand_dims(volume, axis=3)
    return volume, label<jupyter_output><empty_output><jupyter_text>While defining the train and validation data loaders, the training data is passed through an augmentation function which randomly rotates the volume at different angles. Note that both training and validation data are already rescaled to have values between 0 and 1.<jupyter_code># Define data loaders.
train_loader = tf.data.Dataset.from_tensor_slices((x_train, y_train))
validation_loader = tf.data.Dataset.from_tensor_slices((x_val, y_val))
batch_size = 2
# Augment the data on the fly during training.
train_dataset = (
train_loader.shuffle(len(x_train))
.map(train_preprocessing)
.batch(batch_size)
.prefetch(2)
)
# Only rescale.
validation_dataset = (
validation_loader.shuffle(len(x_val))
.map(validation_preprocessing)
.batch(batch_size)
.prefetch(2)
)<jupyter_output><empty_output><jupyter_text>Visualize an augmented CT scan.<jupyter_code>import matplotlib.pyplot as plt
data = train_dataset.take(1)
images, labels = list(data)[0]
images = images.numpy()
image = images[0]
print("Dimension of the CT scan is:", image.shape)
plt.imshow(np.squeeze(image[:, :, 30]), cmap="gray")<jupyter_output><empty_output><jupyter_text>Since a CT scan has many slices, let's visualize a montage of the slices.<jupyter_code>def plot_slices(num_rows, num_columns, width, height, data):
"""Plot a montage of 20 CT slices"""
data = np.rot90(np.array(data))
data = np.transpose(data)
data = np.reshape(data, (num_rows, num_columns, width, height))
rows_data, columns_data = data.shape[0], data.shape[1]
heights = [slc[0].shape[0] for slc in data]
widths = [slc.shape[1] for slc in data[0]]
fig_width = 12.0
fig_height = fig_width * sum(heights) / sum(widths)
f, axarr = plt.subplots(
rows_data,
columns_data,
figsize=(fig_width, fig_height),
gridspec_kw={"height_ratios": heights},
)
for i in range(rows_data):
for j in range(columns_data):
axarr[i, j].imshow(data[i][j], cmap="gray")
axarr[i, j].axis("off")
plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
plt.show()
# Visualize montage of slices.
# 4 rows and 10 columns for 40 slices of the CT scan.
plot_slices(4, 10, 128, 128, image[:, :, :40])<jupyter_output><empty_output><jupyter_text>Define a 3D convolutional neural networkTo make the model easier to understand, we structure it into blocks.The architecture of the 3D CNN used in this exampleis based on [this paper](https://arxiv.org/abs/2007.13224).<jupyter_code>def get_model(width=128, height=128, depth=64):
"""Build a 3D convolutional neural network model."""
inputs = keras.Input((width, height, depth, 1))
x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
x = layers.MaxPool3D(pool_size=2)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x)
x = layers.MaxPool3D(pool_size=2)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv3D(filters=128, kernel_size=3, activation="relu")(x)
x = layers.MaxPool3D(pool_size=2)(x)
x = layers.BatchNormalization()(x)
x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x)
x = layers.MaxPool3D(pool_size=2)(x)
x = layers.BatchNormalization()(x)
x = layers.GlobalAveragePooling3D()(x)
x = layers.Dense(units=512, activation="relu")(x)
x = layers.Dropout(0.3)(x)
outputs = layers.Dense(units=1, activation="sigmoid")(x)
# Define the model.
model = keras.Model(inputs, outputs, name="3dcnn")
return model
# Build model.
model = get_model(width=128, height=128, depth=64)
model.summary()<jupyter_output><empty_output><jupyter_text>Train model<jupyter_code># Compile model.
initial_learning_rate = 0.0001
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
model.compile(
loss="binary_crossentropy",
optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
metrics=["acc"],
run_eagerly=True,
)
# Define callbacks.
checkpoint_cb = keras.callbacks.ModelCheckpoint(
"3d_image_classification.keras", save_best_only=True
)
early_stopping_cb = keras.callbacks.EarlyStopping(monitor="val_acc", patience=15)
# Train the model, doing validation at the end of each epoch
epochs = 100
model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=epochs,
shuffle=True,
verbose=2,
callbacks=[checkpoint_cb, early_stopping_cb],
)<jupyter_output><empty_output><jupyter_text>It is important to note that the number of samples is very small (only 200) and we don'tspecify a random seed. As such, you can expect significant variance in the results. The full datasetwhich consists of over 1000 CT scans can be found [here](https://www.medrxiv.org/content/10.1101/2020.05.20.20100362v1). Using the fulldataset, an accuracy of 83% was achieved. A variability of 6-7% in the classificationperformance is observed in both cases. Visualizing model performanceHere the model accuracy and loss for the training and the validation sets are plotted.Since the validation set is class-balanced, accuracy provides an unbiased representationof the model's performance.<jupyter_code>fig, ax = plt.subplots(1, 2, figsize=(20, 3))
ax = ax.ravel()
for i, metric in enumerate(["acc", "loss"]):
ax[i].plot(model.history.history[metric])
ax[i].plot(model.history.history["val_" + metric])
ax[i].set_title("Model {}".format(metric))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(metric)
ax[i].legend(["train", "val"])<jupyter_output><empty_output><jupyter_text>Make predictions on a single CT scan<jupyter_code># Load best weights.
model.load_weights("3d_image_classification.keras")
prediction = model.predict(np.expand_dims(x_val[0], axis=0))[0]
scores = [1 - prediction[0], prediction[0]]
class_names = ["normal", "abnormal"]
for score, name in zip(scores, class_names):
print(
"This model is %.2f percent confident that CT scan is %s"
% ((100 * score), name)
)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/3D_image_classification.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/3D_image_classification.ipynb",
"repo_id": "keras-io",
"token_count": 5143
} | 108 |
<jupyter_start><jupyter_text>Monocular depth estimation**Author:** [Victor Basu](https://www.linkedin.com/in/victor-basu-520958147)**Date created:** 2021/08/30**Last modified:** 2021/08/30**Description:** Implement a depth estimation model with a convnet. Introduction_Depth estimation_ is a crucial step towards inferring scene geometry from 2D images.The goal in _monocular depth estimation_ is to predict the depth value of each pixel orinferring depth information, given only a single RGB image as input.This example will show an approach to build a depth estimation model with a convnetand simple loss functions. Setup<jupyter_code>import os
import sys
import tensorflow as tf
from tensorflow.keras import layers
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
tf.random.set_seed(123)<jupyter_output><empty_output><jupyter_text>Downloading the datasetWe will be using the dataset **DIODE: A Dense Indoor and Outdoor Depth Dataset** for this tutorial. However, we use the validation set to generate training and evaluation subsets for our model. The reason we use the validation set rather than the training set of the original dataset is because the training set consists of 81GB of data, which is challenging to download compared to the validation set which is only 2.6GB. Other datasets that you could use are **[NYU-v2](https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html)** and **[KITTI](http://www.cvlibs.net/datasets/kitti/)**.<jupyter_code>annotation_folder = "/dataset/"
if not os.path.exists(os.path.abspath(".") + annotation_folder):
annotation_zip = tf.keras.utils.get_file(
"val.tar.gz",
cache_subdir=os.path.abspath("."),
origin="http://diode-dataset.s3.amazonaws.com/val.tar.gz",
extract=True,
)<jupyter_output><empty_output><jupyter_text>Preparing the datasetWe only use the indoor images to train our depth estimation model.<jupyter_code>path = "val/indoors"
filelist = []
for root, dirs, files in os.walk(path):
for file in files:
filelist.append(os.path.join(root, file))
filelist.sort()
data = {
"image": [x for x in filelist if x.endswith(".png")],
"depth": [x for x in filelist if x.endswith("_depth.npy")],
"mask": [x for x in filelist if x.endswith("_depth_mask.npy")],
}
df = pd.DataFrame(data)
df = df.sample(frac=1, random_state=42)<jupyter_output><empty_output><jupyter_text>Preparing hyperparameters<jupyter_code>HEIGHT = 256
WIDTH = 256
LR = 0.0002
EPOCHS = 30
BATCH_SIZE = 32<jupyter_output><empty_output><jupyter_text>Building a data pipeline1. The pipeline takes a dataframe containing the paths for the RGB images, as well as the depth and depth mask files.2. It reads and resizes the RGB images.3. It reads the depth and depth mask files, processes them to generate the depth map image and resizes it.4. It returns the RGB images and the depth map images for a batch.<jupyter_code>class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, data, batch_size=6, dim=(768, 1024), n_channels=3, shuffle=True):
"""
Initialization
"""
self.data = data
self.indices = self.data.index.tolist()
self.dim = dim
self.n_channels = n_channels
self.batch_size = batch_size
self.shuffle = shuffle
self.min_depth = 0.1
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.data) / self.batch_size))
def __getitem__(self, index):
if (index + 1) * self.batch_size > len(self.indices):
self.batch_size = len(self.indices) - index * self.batch_size
# Generate one batch of data
# Generate indices of the batch
index = self.indices[index * self.batch_size : (index + 1) * self.batch_size]
# Find list of IDs
batch = [self.indices[k] for k in index]
x, y = self.data_generation(batch)
return x, y
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.index = np.arange(len(self.indices))
if self.shuffle == True:
np.random.shuffle(self.index)
def load(self, image_path, depth_map, mask):
"""Load input and target image."""
image_ = cv2.imread(image_path)
image_ = cv2.cvtColor(image_, cv2.COLOR_BGR2RGB)
image_ = cv2.resize(image_, self.dim)
image_ = tf.image.convert_image_dtype(image_, tf.float32)
depth_map = np.load(depth_map).squeeze()
mask = np.load(mask)
mask = mask > 0
max_depth = min(300, np.percentile(depth_map, 99))
depth_map = np.clip(depth_map, self.min_depth, max_depth)
depth_map = np.log(depth_map, where=mask)
depth_map = np.ma.masked_where(~mask, depth_map)
depth_map = np.clip(depth_map, 0.1, np.log(max_depth))
depth_map = cv2.resize(depth_map, self.dim)
depth_map = np.expand_dims(depth_map, axis=2)
depth_map = tf.image.convert_image_dtype(depth_map, tf.float32)
return image_, depth_map
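    # Note on `load` above: depth values are clipped to [self.min_depth, max_depth],
    # log-transformed where the mask is valid, clipped again to [0.1, log(max_depth)]
    # and resized, so the training targets are log-scaled depth maps of shape
    # (*self.dim, 1) rather than raw depth values.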
def data_generation(self, batch):
x = np.empty((self.batch_size, *self.dim, self.n_channels))
y = np.empty((self.batch_size, *self.dim, 1))
for i, batch_id in enumerate(batch):
x[i,], y[i,] = self.load(
self.data["image"][batch_id],
self.data["depth"][batch_id],
self.data["mask"][batch_id],
)
return x, y<jupyter_output><empty_output><jupyter_text>Visualizing samples<jupyter_code>def visualize_depth_map(samples, test=False, model=None):
input, target = samples
cmap = plt.cm.jet
cmap.set_bad(color="black")
if test:
pred = model.predict(input)
fig, ax = plt.subplots(6, 3, figsize=(50, 50))
for i in range(6):
ax[i, 0].imshow((input[i].squeeze()))
ax[i, 1].imshow((target[i].squeeze()), cmap=cmap)
ax[i, 2].imshow((pred[i].squeeze()), cmap=cmap)
else:
fig, ax = plt.subplots(6, 2, figsize=(50, 50))
for i in range(6):
ax[i, 0].imshow((input[i].squeeze()))
ax[i, 1].imshow((target[i].squeeze()), cmap=cmap)
visualize_samples = next(
iter(DataGenerator(data=df, batch_size=6, dim=(HEIGHT, WIDTH)))
)
visualize_depth_map(visualize_samples)<jupyter_output><empty_output><jupyter_text>3D point cloud visualization<jupyter_code>depth_vis = np.flipud(visualize_samples[1][1].squeeze()) # target
img_vis = np.flipud(visualize_samples[0][1].squeeze()) # input
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection="3d")
STEP = 3
for x in range(0, img_vis.shape[0], STEP):
for y in range(0, img_vis.shape[1], STEP):
ax.scatter(
[depth_vis[x, y]] * 3,
[y] * 3,
[x] * 3,
c=tuple(img_vis[x, y, :3] / 255),
s=3,
)
ax.view_init(45, 135)<jupyter_output><empty_output><jupyter_text>Building the model1. The basic model is from U-Net.2. Additive skip-connections are implemented in the downscaling block.<jupyter_code>class DownscaleBlock(layers.Layer):
def __init__(
self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
):
super().__init__(**kwargs)
self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
self.reluA = layers.LeakyReLU(alpha=0.2)
self.reluB = layers.LeakyReLU(alpha=0.2)
self.bn2a = tf.keras.layers.BatchNormalization()
self.bn2b = tf.keras.layers.BatchNormalization()
self.pool = layers.MaxPool2D((2, 2), (2, 2))
def call(self, input_tensor):
d = self.convA(input_tensor)
x = self.bn2a(d)
x = self.reluA(x)
x = self.convB(x)
x = self.bn2b(x)
x = self.reluB(x)
x += d
p = self.pool(x)
return x, p
class UpscaleBlock(layers.Layer):
def __init__(
self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
):
super().__init__(**kwargs)
self.us = layers.UpSampling2D((2, 2))
self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
self.reluA = layers.LeakyReLU(alpha=0.2)
self.reluB = layers.LeakyReLU(alpha=0.2)
self.bn2a = tf.keras.layers.BatchNormalization()
self.bn2b = tf.keras.layers.BatchNormalization()
self.conc = layers.Concatenate()
def call(self, x, skip):
x = self.us(x)
concat = self.conc([x, skip])
x = self.convA(concat)
x = self.bn2a(x)
x = self.reluA(x)
x = self.convB(x)
x = self.bn2b(x)
x = self.reluB(x)
return x
class BottleNeckBlock(layers.Layer):
def __init__(
self, filters, kernel_size=(3, 3), padding="same", strides=1, **kwargs
):
super().__init__(**kwargs)
self.convA = layers.Conv2D(filters, kernel_size, strides, padding)
self.convB = layers.Conv2D(filters, kernel_size, strides, padding)
self.reluA = layers.LeakyReLU(alpha=0.2)
self.reluB = layers.LeakyReLU(alpha=0.2)
def call(self, x):
x = self.convA(x)
x = self.reluA(x)
x = self.convB(x)
x = self.reluB(x)
        return x<jupyter_output><empty_output><jupyter_text>Defining the lossWe will optimize 3 losses in our model.1. Structural similarity index (SSIM).2. L1-loss, or Point-wise depth in our case.3. Depth smoothness loss.Out of the three loss functions, SSIM contributes the most to improving model performance.<jupyter_code>class DepthEstimationModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.ssim_loss_weight = 0.85
self.l1_loss_weight = 0.1
self.edge_loss_weight = 0.9
self.loss_metric = tf.keras.metrics.Mean(name="loss")
f = [16, 32, 64, 128, 256]
self.downscale_blocks = [
DownscaleBlock(f[0]),
DownscaleBlock(f[1]),
DownscaleBlock(f[2]),
DownscaleBlock(f[3]),
]
self.bottle_neck_block = BottleNeckBlock(f[4])
self.upscale_blocks = [
UpscaleBlock(f[3]),
UpscaleBlock(f[2]),
UpscaleBlock(f[1]),
UpscaleBlock(f[0]),
]
self.conv_layer = layers.Conv2D(1, (1, 1), padding="same", activation="tanh")
def calculate_loss(self, target, pred):
# Edges
dy_true, dx_true = tf.image.image_gradients(target)
dy_pred, dx_pred = tf.image.image_gradients(pred)
weights_x = tf.exp(tf.reduce_mean(tf.abs(dx_true)))
weights_y = tf.exp(tf.reduce_mean(tf.abs(dy_true)))
# Depth smoothness
smoothness_x = dx_pred * weights_x
smoothness_y = dy_pred * weights_y
depth_smoothness_loss = tf.reduce_mean(abs(smoothness_x)) + tf.reduce_mean(
abs(smoothness_y)
)
# Structural similarity (SSIM) index
ssim_loss = tf.reduce_mean(
1
- tf.image.ssim(
target, pred, max_val=WIDTH, filter_size=7, k1=0.01 ** 2, k2=0.03 ** 2
)
)
# Point-wise depth
l1_loss = tf.reduce_mean(tf.abs(target - pred))
loss = (
(self.ssim_loss_weight * ssim_loss)
+ (self.l1_loss_weight * l1_loss)
+ (self.edge_loss_weight * depth_smoothness_loss)
)
return loss
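    # Summary of the objective computed above, using the weights set in __init__:
    #   loss = 0.85 * (1 - SSIM(target, pred))
    #        + 0.1 * mean(|target - pred|)
    #        + 0.9 * depth_smoothness_loss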
@property
def metrics(self):
return [self.loss_metric]
def train_step(self, batch_data):
input, target = batch_data
with tf.GradientTape() as tape:
pred = self(input, training=True)
loss = self.calculate_loss(target, pred)
gradients = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
self.loss_metric.update_state(loss)
return {
"loss": self.loss_metric.result(),
}
def test_step(self, batch_data):
input, target = batch_data
pred = self(input, training=False)
loss = self.calculate_loss(target, pred)
self.loss_metric.update_state(loss)
return {
"loss": self.loss_metric.result(),
}
def call(self, x):
c1, p1 = self.downscale_blocks[0](x)
c2, p2 = self.downscale_blocks[1](p1)
c3, p3 = self.downscale_blocks[2](p2)
c4, p4 = self.downscale_blocks[3](p3)
bn = self.bottle_neck_block(p4)
u1 = self.upscale_blocks[0](bn, c4)
u2 = self.upscale_blocks[1](u1, c3)
u3 = self.upscale_blocks[2](u2, c2)
u4 = self.upscale_blocks[3](u3, c1)
return self.conv_layer(u4)<jupyter_output><empty_output><jupyter_text>Model training<jupyter_code>optimizer = tf.keras.optimizers.Adam(
learning_rate=LR,
amsgrad=False,
)
model = DepthEstimationModel()
# Compile the model
model.compile(optimizer)
train_loader = DataGenerator(
data=df[:260].reset_index(drop="true"), batch_size=BATCH_SIZE, dim=(HEIGHT, WIDTH)
)
validation_loader = DataGenerator(
data=df[260:].reset_index(drop="true"), batch_size=BATCH_SIZE, dim=(HEIGHT, WIDTH)
)
model.fit(
train_loader,
epochs=EPOCHS,
validation_data=validation_loader,
)<jupyter_output><empty_output><jupyter_text>Visualizing model outputWe visualize the model output over the validation set.The first image is the RGB image, the second image is the ground truth depth map imageand the third one is the predicted depth map image.<jupyter_code>test_loader = next(
iter(
DataGenerator(
data=df[265:].reset_index(drop="true"), batch_size=6, dim=(HEIGHT, WIDTH)
)
)
)
visualize_depth_map(test_loader, test=True, model=model)
test_loader = next(
iter(
DataGenerator(
data=df[300:].reset_index(drop="true"), batch_size=6, dim=(HEIGHT, WIDTH)
)
)
)
visualize_depth_map(test_loader, test=True, model=model)<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/depth_estimation.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/depth_estimation.ipynb",
"repo_id": "keras-io",
"token_count": 6394
} | 109 |
<jupyter_start><jupyter_text>Involutional neural networks**Author:** [Aritra Roy Gosthipaty](https://twitter.com/ariG23498)**Date created:** 2021/07/25**Last modified:** 2021/07/25**Description:** Deep dive into location-specific and channel-agnostic "involution" kernels. IntroductionConvolution has been the basis of most modern neuralnetworks for computer vision. A convolution kernel isspatial-agnostic and channel-specific. Because of this, it isn't ableto adapt to different visual patterns with respect todifferent spatial locations. Along with location-related problems, thereceptive field of convolution creates challenges with regard to capturinglong-range spatial interactions.To address the above issues, Li et. al. rethink the propertiesof convolution in[Involution: Inverting the Inherence of Convolution for VisualRecognition](https://arxiv.org/abs/2103.06255).The authors propose the "involution kernel", that is location-specific andchannel-agnostic. Due to the location-specific nature of the operation,the authors say that self-attention falls under the design paradigm ofinvolution.This example describes the involution kernel, compares two imageclassification models, one with convolution and the other withinvolution, and also tries drawing a parallel with the self-attentionlayer. Setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
# Set seed for reproducibility.
tf.random.set_seed(42)<jupyter_output><empty_output><jupyter_text>ConvolutionConvolution remains the mainstay of deep neural networks for computer vision. To understand Involution, it is necessary to talk about the convolution operation. Consider an input tensor **X** with dimensions **H**, **W** and **C_in**. We take a collection of **C_out** convolution kernels each of shape **K**, **K**, **C_in**. With the multiply-add operation between the input tensor and the kernels we obtain an output tensor **Y** with dimensions **H**, **W**, **C_out**. In the diagram above `C_out=3`. This makes the output tensor of shape H, W and 3. One can notice that the convolution kernel does not depend on the spatial position of the input tensor, which makes it **location-agnostic**. On the other hand, each channel in the output tensor is based on a specific convolution filter, which makes it **channel-specific**. InvolutionThe idea is to have an operation that is both **location-specific** and **channel-agnostic**. Trying to implement these specific properties poses a challenge. With a fixed number of involution kernels (for each spatial position) we will **not** be able to process variable-resolution input tensors. To solve this problem, the authors have considered *generating* each kernel conditioned on specific spatial positions. With this method, we should be able to process variable-resolution input tensors with ease. The diagram below provides an intuition on this kernel generation method.<jupyter_code>class Involution(keras.layers.Layer):
def __init__(
self, channel, group_number, kernel_size, stride, reduction_ratio, name
):
super().__init__(name=name)
# Initialize the parameters.
self.channel = channel
self.group_number = group_number
self.kernel_size = kernel_size
self.stride = stride
self.reduction_ratio = reduction_ratio
def build(self, input_shape):
# Get the shape of the input.
(_, height, width, num_channels) = input_shape
# Scale the height and width with respect to the strides.
height = height // self.stride
width = width // self.stride
# Define a layer that average pools the input tensor
# if stride is more than 1.
self.stride_layer = (
keras.layers.AveragePooling2D(
pool_size=self.stride, strides=self.stride, padding="same"
)
if self.stride > 1
else tf.identity
)
# Define the kernel generation layer.
self.kernel_gen = keras.Sequential(
[
keras.layers.Conv2D(
filters=self.channel // self.reduction_ratio, kernel_size=1
),
keras.layers.BatchNormalization(),
keras.layers.ReLU(),
keras.layers.Conv2D(
filters=self.kernel_size * self.kernel_size * self.group_number,
kernel_size=1,
),
]
)
# Define reshape layers
self.kernel_reshape = keras.layers.Reshape(
target_shape=(
height,
width,
self.kernel_size * self.kernel_size,
1,
self.group_number,
)
)
self.input_patches_reshape = keras.layers.Reshape(
target_shape=(
height,
width,
self.kernel_size * self.kernel_size,
num_channels // self.group_number,
self.group_number,
)
)
self.output_reshape = keras.layers.Reshape(
target_shape=(height, width, num_channels)
)
def call(self, x):
# Generate the kernel with respect to the input tensor.
# B, H, W, K*K*G
kernel_input = self.stride_layer(x)
kernel = self.kernel_gen(kernel_input)
        # Reshape the kernel.
# B, H, W, K*K, 1, G
kernel = self.kernel_reshape(kernel)
# Extract input patches.
# B, H, W, K*K*C
input_patches = tf.image.extract_patches(
images=x,
sizes=[1, self.kernel_size, self.kernel_size, 1],
strides=[1, self.stride, self.stride, 1],
rates=[1, 1, 1, 1],
padding="SAME",
)
# Reshape the input patches to align with later operations.
# B, H, W, K*K, C//G, G
input_patches = self.input_patches_reshape(input_patches)
# Compute the multiply-add operation of kernels and patches.
# B, H, W, K*K, C//G, G
output = tf.multiply(kernel, input_patches)
# B, H, W, C//G, G
output = tf.reduce_sum(output, axis=3)
# Reshape the output kernel.
# B, H, W, C
output = self.output_reshape(output)
# Return the output tensor and the kernel.
return output, kernel<jupyter_output><empty_output><jupyter_text>Testing the Involution layer<jupyter_code># Define the input tensor.
input_tensor = tf.random.normal((32, 256, 256, 3))
# Compute involution with stride 1.
output_tensor, _ = Involution(
channel=3, group_number=1, kernel_size=5, stride=1, reduction_ratio=1, name="inv_1"
)(input_tensor)
print(f"with stride 1 ouput shape: {output_tensor.shape}")
# Compute involution with stride 2.
output_tensor, _ = Involution(
channel=3, group_number=1, kernel_size=5, stride=2, reduction_ratio=1, name="inv_2"
)(input_tensor)
print(f"with stride 2 ouput shape: {output_tensor.shape}")
# Compute involution with stride 1, channel 16 and reduction ratio 2.
output_tensor, _ = Involution(
channel=16, group_number=1, kernel_size=5, stride=1, reduction_ratio=2, name="inv_3"
)(input_tensor)
print(
"with channel 16 and reduction ratio 2 ouput shape: {}".format(output_tensor.shape)
)<jupyter_output><empty_output><jupyter_text>Image ClassificationIn this section, we will build an image-classifier model. There willbe two models one with convolutions and the other with involutions.The image-classification model is heavily inspired by this[Convolutional Neural Network (CNN)](https://www.tensorflow.org/tutorials/images/cnn)tutorial from Google. Get the CIFAR10 Dataset<jupyter_code># Load the CIFAR10 dataset.
print("loading the CIFAR10 dataset...")
(
(train_images, train_labels),
(
test_images,
test_labels,
),
) = keras.datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1.
(train_images, test_images) = (train_images / 255.0, test_images / 255.0)
# Shuffle and batch the dataset.
train_ds = (
tf.data.Dataset.from_tensor_slices((train_images, train_labels))
.shuffle(256)
.batch(256)
)
test_ds = tf.data.Dataset.from_tensor_slices((test_images, test_labels)).batch(256)<jupyter_output><empty_output><jupyter_text>Visualise the data<jupyter_code>class_names = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
plt.figure(figsize=(10, 10))
for i in range(25):
plt.subplot(5, 5, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i])
plt.xlabel(class_names[train_labels[i][0]])
plt.show()<jupyter_output><empty_output><jupyter_text>Convolutional Neural Network<jupyter_code># Build the conv model.
print("building the convolution model...")
conv_model = keras.Sequential(
[
keras.layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3), padding="same"),
keras.layers.ReLU(name="relu1"),
keras.layers.MaxPooling2D((2, 2)),
keras.layers.Conv2D(64, (3, 3), padding="same"),
keras.layers.ReLU(name="relu2"),
keras.layers.MaxPooling2D((2, 2)),
keras.layers.Conv2D(64, (3, 3), padding="same"),
keras.layers.ReLU(name="relu3"),
keras.layers.Flatten(),
keras.layers.Dense(64, activation="relu"),
keras.layers.Dense(10),
]
)
# Compile the model with the necessary loss function and optimizer.
print("compiling the convolution model...")
conv_model.compile(
optimizer="adam",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
# Train the model.
print("conv model training...")
conv_hist = conv_model.fit(train_ds, epochs=20, validation_data=test_ds)<jupyter_output><empty_output><jupyter_text>Involutional Neural Network<jupyter_code># Build the involution model.
print("building the involution model...")
inputs = keras.Input(shape=(32, 32, 3))
x, _ = Involution(
channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_1"
)(inputs)
x = keras.layers.ReLU()(x)
x = keras.layers.MaxPooling2D((2, 2))(x)
x, _ = Involution(
channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_2"
)(x)
x = keras.layers.ReLU()(x)
x = keras.layers.MaxPooling2D((2, 2))(x)
x, _ = Involution(
channel=3, group_number=1, kernel_size=3, stride=1, reduction_ratio=2, name="inv_3"
)(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(64, activation="relu")(x)
outputs = keras.layers.Dense(10)(x)
inv_model = keras.Model(inputs=[inputs], outputs=[outputs], name="inv_model")
# Compile the model with the necessary loss function and optimizer.
print("compiling the involution model...")
inv_model.compile(
optimizer="adam",
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=["accuracy"],
)
# train the model
print("inv model training...")
inv_hist = inv_model.fit(train_ds, epochs=20, validation_data=test_ds)<jupyter_output><empty_output><jupyter_text>ComparisonsIn this section, we will be looking at both models and comparing a few pointers. ParametersOne can see that with a similar architecture the number of parameters in a CNN is much larger than that of an INN (Involutional Neural Network).<jupyter_code>conv_model.summary()
inv_model.summary()<jupyter_output><empty_output><jupyter_text>Loss and Accuracy PlotsHere, the loss and the accuracy plots demonstrate that INNs are slowlearners (with lower parameters).<jupyter_code>plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.title("Convolution Loss")
plt.plot(conv_hist.history["loss"], label="loss")
plt.plot(conv_hist.history["val_loss"], label="val_loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.title("Involution Loss")
plt.plot(inv_hist.history["loss"], label="loss")
plt.plot(inv_hist.history["val_loss"], label="val_loss")
plt.legend()
plt.show()
plt.figure(figsize=(20, 5))
plt.subplot(1, 2, 1)
plt.title("Convolution Accuracy")
plt.plot(conv_hist.history["accuracy"], label="accuracy")
plt.plot(conv_hist.history["val_accuracy"], label="val_accuracy")
plt.legend()
plt.subplot(1, 2, 2)
plt.title("Involution Accuracy")
plt.plot(inv_hist.history["accuracy"], label="accuracy")
plt.plot(inv_hist.history["val_accuracy"], label="val_accuracy")
plt.legend()
plt.show()<jupyter_output><empty_output><jupyter_text>Visualizing Involution KernelsTo visualize the kernels, we take the sum of **K×K** values from eachinvolution kernel. **All the representatives at different spatiallocations frame the corresponding heat map.**The authors mention:"Our proposed involution is reminiscent of self-attention andessentially could become a generalized version of it."With the visualization of the kernel we can indeed obtain an attentionmap of the image. The learned involution kernels provides attention toindividual spatial positions of the input tensor. The**location-specific** property makes involution a generic space of modelsin which self-attention belongs.<jupyter_code>layer_names = ["inv_1", "inv_2", "inv_3"]
outputs = [inv_model.get_layer(name).output[1] for name in layer_names]
vis_model = keras.Model(inv_model.input, outputs)
fig, axes = plt.subplots(nrows=10, ncols=4, figsize=(10, 30))
for ax, test_image in zip(axes, test_images[:10]):
(inv1_kernel, inv2_kernel, inv3_kernel) = vis_model.predict(test_image[None, ...])
inv1_kernel = tf.reduce_sum(inv1_kernel, axis=[-1, -2, -3])
inv2_kernel = tf.reduce_sum(inv2_kernel, axis=[-1, -2, -3])
inv3_kernel = tf.reduce_sum(inv3_kernel, axis=[-1, -2, -3])
ax[0].imshow(keras.utils.array_to_img(test_image))
ax[0].set_title("Input Image")
ax[1].imshow(keras.utils.array_to_img(inv1_kernel[0, ..., None]))
ax[1].set_title("Involution Kernel 1")
ax[2].imshow(keras.utils.array_to_img(inv2_kernel[0, ..., None]))
ax[2].set_title("Involution Kernel 2")
ax[3].imshow(keras.utils.array_to_img(inv3_kernel[0, ..., None]))
ax[3].set_title("Involution Kernel 3")<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/involution.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/involution.ipynb",
"repo_id": "keras-io",
"token_count": 5514
} | 110 |
<jupyter_start><jupyter_text>Object detection with Vision Transformers**Author:** [Karan V. Dave](https://www.linkedin.com/in/karan-dave-811413164/)**Date created:** 2022/03/27**Last modified:** 2023/11/20**Description:** A simple Keras implementation of object detection using Vision Transformers. IntroductionThe article[Vision Transformer (ViT)](https://arxiv.org/abs/2010.11929)architecture by Alexey Dosovitskiy et al.demonstrates that a pure transformer applied directly to sequences of imagepatches can perform well on object detection tasks.In this Keras example, we implement an object detection ViTand we train it on the[Caltech 101 dataset](http://www.vision.caltech.edu/datasets/)to detect an airplane in the given image. Imports and setup<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "jax" # @param ["tensorflow", "jax", "torch"]
import numpy as np
import keras
from keras import layers
from keras import ops
import matplotlib.pyplot as plt
import numpy as np
import cv2
import os
import scipy.io
import shutil<jupyter_output><empty_output><jupyter_text>Prepare datasetWe use the [Caltech 101 Dataset](https://data.caltech.edu/records/mzrjq-6wc02).<jupyter_code># Path to images and annotations
path_images = "./101_ObjectCategories/airplanes/"
path_annot = "./Annotations/Airplanes_Side_2/"
path_to_downloaded_file = keras.utils.get_file(
fname="caltech_101_zipped",
origin="https://data.caltech.edu/records/mzrjq-6wc02/files/caltech-101.zip",
extract=True,
archive_format="zip", # downloaded file format
cache_dir="/", # cache and extract in current directory
)
download_base_dir = os.path.dirname(path_to_downloaded_file)
# Extracting tar files found inside main zip file
shutil.unpack_archive(
os.path.join(download_base_dir, "caltech-101", "101_ObjectCategories.tar.gz"), "."
)
shutil.unpack_archive(
os.path.join(download_base_dir, "caltech-101", "Annotations.tar"), "."
)
# list of paths to images and annotations
image_paths = [
f for f in os.listdir(path_images) if os.path.isfile(os.path.join(path_images, f))
]
annot_paths = [
f for f in os.listdir(path_annot) if os.path.isfile(os.path.join(path_annot, f))
]
image_paths.sort()
annot_paths.sort()
image_size = 224 # resize input images to this size
images, targets = [], []
# loop over the annotations and images, preprocess them and store in lists
for i in range(0, len(annot_paths)):
# Access bounding box coordinates
annot = scipy.io.loadmat(path_annot + annot_paths[i])["box_coord"][0]
top_left_x, top_left_y = annot[2], annot[0]
bottom_right_x, bottom_right_y = annot[3], annot[1]
image = keras.utils.load_img(
path_images + image_paths[i],
)
(w, h) = image.size[:2]
# resize images
image = image.resize((image_size, image_size))
# convert image to array and append to list
images.append(keras.utils.img_to_array(image))
# apply relative scaling to bounding boxes as per given image and append to list
targets.append(
(
float(top_left_x) / w,
float(top_left_y) / h,
float(bottom_right_x) / w,
float(bottom_right_y) / h,
)
)
# Convert the list to numpy array, split to train and test dataset
(x_train), (y_train) = (
np.asarray(images[: int(len(images) * 0.8)]),
np.asarray(targets[: int(len(targets) * 0.8)]),
)
(x_test), (y_test) = (
np.asarray(images[int(len(images) * 0.8) :]),
np.asarray(targets[int(len(targets) * 0.8) :]),
)<jupyter_output><empty_output><jupyter_text>Implement multilayer-perceptron (MLP)We use the code from the Keras example[Image classification with Vision Transformer](https://keras.io/examples/vision/image_classification_with_vision_transformer/)as a reference.<jupyter_code>def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=keras.activations.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x<jupyter_output><empty_output><jupyter_text>Implement the patch creation layer<jupyter_code>class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
input_shape = ops.shape(images)
batch_size = input_shape[0]
height = input_shape[1]
width = input_shape[2]
channels = input_shape[3]
num_patches_h = height // self.patch_size
num_patches_w = width // self.patch_size
patches = keras.ops.image.extract_patches(images, size=self.patch_size)
patches = ops.reshape(
patches,
(
batch_size,
num_patches_h * num_patches_w,
self.patch_size * self.patch_size * channels,
),
)
return patches
def get_config(self):
config = super().get_config()
config.update({"patch_size": self.patch_size})
return config<jupyter_output><empty_output><jupyter_text>Display patches for an input image<jupyter_code>patch_size = 32 # Size of the patches to be extracted from the input images
plt.figure(figsize=(4, 4))
plt.imshow(x_train[0].astype("uint8"))
plt.axis("off")
patches = Patches(patch_size)(np.expand_dims(x_train[0], axis=0))
print(f"Image size: {image_size} X {image_size}")
print(f"Patch size: {patch_size} X {patch_size}")
print(f"{patches.shape[1]} patches per image \n{patches.shape[-1]} elements per patch")
n = int(np.sqrt(patches.shape[1]))
plt.figure(figsize=(4, 4))
for i, patch in enumerate(patches[0]):
ax = plt.subplot(n, n, i + 1)
patch_img = ops.reshape(patch, (patch_size, patch_size, 3))
plt.imshow(ops.convert_to_numpy(patch_img).astype("uint8"))
plt.axis("off")<jupyter_output><empty_output><jupyter_text>Implement the patch encoding layerThe `PatchEncoder` layer linearly transforms a patch by projecting it into avector of size `projection_dim`. It also adds a learnable positionembedding to the projected vector.<jupyter_code>class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
# Override function to avoid error while saving model
def get_config(self):
config = super().get_config().copy()
config.update(
{
"input_shape": input_shape,
"patch_size": patch_size,
"num_patches": num_patches,
"projection_dim": projection_dim,
"num_heads": num_heads,
"transformer_units": transformer_units,
"transformer_layers": transformer_layers,
"mlp_head_units": mlp_head_units,
}
)
return config
def call(self, patch):
positions = ops.expand_dims(
ops.arange(start=0, stop=self.num_patches, step=1), axis=0
)
projected_patches = self.projection(patch)
encoded = projected_patches + self.position_embedding(positions)
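        # Shape note (illustrative, using the values defined later in this example):
        # with image_size = 224 and patch_size = 32 there are (224 // 32) ** 2 = 49
        # patches of 32 * 32 * 3 = 3072 values each, so this layer maps
        # (batch, 49, 3072) -> (batch, 49, 64) when projection_dim = 64.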
return encoded<jupyter_output><empty_output><jupyter_text>Build the ViT modelThe ViT model has multiple Transformer blocks.The `MultiHeadAttention` layer is used for self-attention,applied to the sequence of image patches. The encoded patches (skip connection)and self-attention layer outputs are normalized and fed into amultilayer perceptron (MLP).The model outputs four dimensions representingthe bounding box coordinates of an object.<jupyter_code>def create_vit_object_detector(
input_shape,
patch_size,
num_patches,
projection_dim,
num_heads,
transformer_units,
transformer_layers,
mlp_head_units,
):
inputs = keras.Input(shape=input_shape)
# Create patches
patches = Patches(patch_size)(inputs)
# Encode patches
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.3)(representation)
# Add MLP.
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.3)
bounding_box = layers.Dense(4)(
features
) # Final four neurons that output bounding box
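    # The four outputs are (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    # expressed as fractions of the image width/height, matching the relative
    # scaling applied to the bounding-box targets during preprocessing.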
# return Keras model.
return keras.Model(inputs=inputs, outputs=bounding_box)<jupyter_output><empty_output><jupyter_text>Run the experiment<jupyter_code>def run_experiment(model, learning_rate, weight_decay, batch_size, num_epochs):
optimizer = keras.optimizers.AdamW(
learning_rate=learning_rate, weight_decay=weight_decay
)
# Compile model.
model.compile(optimizer=optimizer, loss=keras.losses.MeanSquaredError())
checkpoint_filepath = "vit_object_detector.weights.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
checkpoint_filepath,
monitor="val_loss",
save_best_only=True,
save_weights_only=True,
)
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[
checkpoint_callback,
keras.callbacks.EarlyStopping(monitor="val_loss", patience=10),
],
)
return history
input_shape = (image_size, image_size, 3) # input image shape
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 32
num_epochs = 100
num_patches = (image_size // patch_size) ** 2
projection_dim = 64
num_heads = 4
# Size of the transformer layers
transformer_units = [
projection_dim * 2,
projection_dim,
]
transformer_layers = 4
mlp_head_units = [2048, 1024, 512, 64, 32] # Size of the dense layers
history = []
num_patches = (image_size // patch_size) ** 2
vit_object_detector = create_vit_object_detector(
input_shape,
patch_size,
num_patches,
projection_dim,
num_heads,
transformer_units,
transformer_layers,
mlp_head_units,
)
# Train model
history = run_experiment(
vit_object_detector, learning_rate, weight_decay, batch_size, num_epochs
)
def plot_history(item):
plt.plot(history.history[item], label=item)
plt.plot(history.history["val_" + item], label="val_" + item)
plt.xlabel("Epochs")
plt.ylabel(item)
plt.title("Train and Validation {} Over Epochs".format(item), fontsize=14)
plt.legend()
plt.grid()
plt.show()
plot_history("loss")<jupyter_output><empty_output><jupyter_text>Evaluate the model<jupyter_code>import matplotlib.patches as patches
# Saves the model in current path
vit_object_detector.save("vit_object_detector.keras")
# To calculate IoU (intersection over union, given two bounding boxes)
def bounding_box_intersection_over_union(box_predicted, box_truth):
# get (x, y) coordinates of intersection of bounding boxes
top_x_intersect = max(box_predicted[0], box_truth[0])
top_y_intersect = max(box_predicted[1], box_truth[1])
bottom_x_intersect = min(box_predicted[2], box_truth[2])
bottom_y_intersect = min(box_predicted[3], box_truth[3])
# calculate area of the intersection bb (bounding box)
intersection_area = max(0, bottom_x_intersect - top_x_intersect + 1) * max(
0, bottom_y_intersect - top_y_intersect + 1
)
# calculate area of the prediction bb and ground-truth bb
box_predicted_area = (box_predicted[2] - box_predicted[0] + 1) * (
box_predicted[3] - box_predicted[1] + 1
)
box_truth_area = (box_truth[2] - box_truth[0] + 1) * (
box_truth[3] - box_truth[1] + 1
)
# calculate intersection over union by taking intersection
# area and dividing it by the sum of predicted bb and ground truth
    # bb areas minus the intersection area
    # return IoU
return intersection_area / float(
box_predicted_area + box_truth_area - intersection_area
)
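# Worked example (made-up boxes): for box_predicted = [0, 0, 10, 10] and
# box_truth = [5, 5, 15, 15], the intersection covers 6 * 6 = 36 pixels and each
# box covers 11 * 11 = 121 pixels (the +1 terms count boundary pixels), so
# IoU = 36 / (121 + 121 - 36) ≈ 0.17.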
i, mean_iou = 0, 0
# Compare results for 10 images in the test set
for input_image in x_test[:10]:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))
im = input_image
# Display the image
ax1.imshow(im.astype("uint8"))
ax2.imshow(im.astype("uint8"))
input_image = cv2.resize(
input_image, (image_size, image_size), interpolation=cv2.INTER_AREA
)
input_image = np.expand_dims(input_image, axis=0)
preds = vit_object_detector.predict(input_image)[0]
(h, w) = (im).shape[0:2]
top_left_x, top_left_y = int(preds[0] * w), int(preds[1] * h)
bottom_right_x, bottom_right_y = int(preds[2] * w), int(preds[3] * h)
box_predicted = [top_left_x, top_left_y, bottom_right_x, bottom_right_y]
# Create the bounding box
rect = patches.Rectangle(
(top_left_x, top_left_y),
bottom_right_x - top_left_x,
bottom_right_y - top_left_y,
facecolor="none",
edgecolor="red",
linewidth=1,
)
# Add the bounding box to the image
ax1.add_patch(rect)
ax1.set_xlabel(
"Predicted: "
+ str(top_left_x)
+ ", "
+ str(top_left_y)
+ ", "
+ str(bottom_right_x)
+ ", "
+ str(bottom_right_y)
)
top_left_x, top_left_y = int(y_test[i][0] * w), int(y_test[i][1] * h)
bottom_right_x, bottom_right_y = int(y_test[i][2] * w), int(y_test[i][3] * h)
box_truth = top_left_x, top_left_y, bottom_right_x, bottom_right_y
mean_iou += bounding_box_intersection_over_union(box_predicted, box_truth)
# Create the bounding box
rect = patches.Rectangle(
(top_left_x, top_left_y),
bottom_right_x - top_left_x,
bottom_right_y - top_left_y,
facecolor="none",
edgecolor="red",
linewidth=1,
)
# Add the bounding box to the image
ax2.add_patch(rect)
ax2.set_xlabel(
"Target: "
+ str(top_left_x)
+ ", "
+ str(top_left_y)
+ ", "
+ str(bottom_right_x)
+ ", "
+ str(bottom_right_y)
+ "\n"
+ "IoU"
+ str(bounding_box_intersection_over_union(box_predicted, box_truth))
)
i = i + 1
print("mean_iou: " + str(mean_iou / len(x_test[:10])))
plt.show()<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/object_detection_using_vision_transformer.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/object_detection_using_vision_transformer.ipynb",
"repo_id": "keras-io",
"token_count": 6331
} | 111 |
<jupyter_start><jupyter_text>Image similarity estimation using a Siamese Network with a triplet loss**Authors:** [Hazem Essam](https://twitter.com/hazemessamm) and [Santiago L. Valdarrama](https://twitter.com/svpino)**Date created:** 2021/03/25**Last modified:** 2021/03/25**Description:** Training a Siamese Network to compare the similarity of images using a triplet loss function. IntroductionA [Siamese Network](https://en.wikipedia.org/wiki/Siamese_neural_network) is a type of network architecture thatcontains two or more identical subnetworks used to generate feature vectors for each input and compare them.Siamese Networks can be applied to different use cases, like detecting duplicates, finding anomalies, and face recognition.This example uses a Siamese Network with three identical subnetworks. We will provide three images to the model, wheretwo of them will be similar (_anchor_ and _positive_ samples), and the third will be unrelated (a _negative_ example.)Our goal is for the model to learn to estimate the similarity between images.For the network to learn, we use a triplet loss function. You can find an introduction to triplet loss in the[FaceNet paper](https://arxiv.org/abs/1503.03832) by Schroff et al,. 2015. In this example, we define the tripletloss function as follows:`L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)`This example uses the [Totally Looks Like dataset](https://sites.google.com/view/totally-looks-like-dataset)by [Rosenfeld et al., 2018](https://arxiv.org/abs/1803.01485v3). Setup<jupyter_code>import matplotlib.pyplot as plt
import numpy as np
import os
import random
import tensorflow as tf
from pathlib import Path
from keras import applications
from keras import layers
from keras import losses
from keras import ops
from keras import optimizers
from keras import metrics
from keras import Model
from keras.applications import resnet
target_shape = (200, 200)<jupyter_output><empty_output><jupyter_text>Load the datasetWe are going to load the *Totally Looks Like* dataset and unzip it inside the `~/.keras` directoryin the local environment.The dataset consists of two separate files:* `left.zip` contains the images that we will use as the anchor.* `right.zip` contains the images that we will use as the positive sample (an image that looks like the anchor).<jupyter_code>cache_dir = Path(Path.home()) / ".keras"
anchor_images_path = cache_dir / "left"
positive_images_path = cache_dir / "right"
!gdown --id 1jvkbTr_giSP3Ru8OwGNCg6B4PvVbcO34
!gdown --id 1EzBZUb_mh_Dp_FKD0P4XiYYSd0QBH5zW
!unzip -oq left.zip -d $cache_dir
!unzip -oq right.zip -d $cache_dir<jupyter_output><empty_output><jupyter_text>Preparing the dataWe are going to use a `tf.data` pipeline to load the data and generate the triplets that weneed to train the Siamese network.We'll set up the pipeline using a zipped list with anchor, positive, and negative filenames asthe source. The pipeline will load and preprocess the corresponding images.<jupyter_code>def preprocess_image(filename):
"""
Load the specified file as a JPEG image, preprocess it and
resize it to the target shape.
"""
image_string = tf.io.read_file(filename)
image = tf.image.decode_jpeg(image_string, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize(image, target_shape)
return image
def preprocess_triplets(anchor, positive, negative):
"""
Given the filenames corresponding to the three images, load and
preprocess them.
"""
return (
preprocess_image(anchor),
preprocess_image(positive),
preprocess_image(negative),
)<jupyter_output><empty_output><jupyter_text>Let's setup our data pipeline using a zipped list with an anchor, positive,and negative image filename as the source. The output of the pipelinecontains the same triplet with every image loaded and preprocessed.<jupyter_code># We need to make sure both the anchor and positive images are loaded in
# sorted order so we can match them together.
anchor_images = sorted(
[str(anchor_images_path / f) for f in os.listdir(anchor_images_path)]
)
positive_images = sorted(
[str(positive_images_path / f) for f in os.listdir(positive_images_path)]
)
image_count = len(anchor_images)
anchor_dataset = tf.data.Dataset.from_tensor_slices(anchor_images)
positive_dataset = tf.data.Dataset.from_tensor_slices(positive_images)
# To generate the list of negative images, let's randomize the list of
# available images and concatenate them together.
rng = np.random.RandomState(seed=42)
rng.shuffle(anchor_images)
rng.shuffle(positive_images)
negative_images = anchor_images + positive_images
np.random.RandomState(seed=32).shuffle(negative_images)
negative_dataset = tf.data.Dataset.from_tensor_slices(negative_images)
negative_dataset = negative_dataset.shuffle(buffer_size=4096)
dataset = tf.data.Dataset.zip((anchor_dataset, positive_dataset, negative_dataset))
dataset = dataset.shuffle(buffer_size=1024)
dataset = dataset.map(preprocess_triplets)
# Let's now split our dataset in train and validation.
train_dataset = dataset.take(round(image_count * 0.8))
val_dataset = dataset.skip(round(image_count * 0.8))
train_dataset = train_dataset.batch(32, drop_remainder=False)
train_dataset = train_dataset.prefetch(tf.data.AUTOTUNE)
val_dataset = val_dataset.batch(32, drop_remainder=False)
val_dataset = val_dataset.prefetch(tf.data.AUTOTUNE)<jupyter_output><empty_output><jupyter_text>Let's take a look at a few examples of triplets. Notice how the first two imageslook alike while the third one is always different.<jupyter_code>def visualize(anchor, positive, negative):
"""Visualize a few triplets from the supplied batches."""
def show(ax, image):
ax.imshow(image)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig = plt.figure(figsize=(9, 9))
axs = fig.subplots(3, 3)
for i in range(3):
show(axs[i, 0], anchor[i])
show(axs[i, 1], positive[i])
show(axs[i, 2], negative[i])
visualize(*list(train_dataset.take(1).as_numpy_iterator())[0])<jupyter_output><empty_output><jupyter_text>Setting up the embedding generator modelOur Siamese Network will generate embeddings for each of the images of thetriplet. To do this, we will use a ResNet50 model pretrained on ImageNet andconnect a few `Dense` layers to it so we can learn to separate theseembeddings.We will freeze the weights of all the layers of the model up until the layer `conv5_block1_out`.This is important to avoid affecting the weights that the model has already learned.We are going to leave the bottom few layers trainable, so that we can fine-tune their weightsduring training.<jupyter_code>base_cnn = resnet.ResNet50(
weights="imagenet", input_shape=target_shape + (3,), include_top=False
)
flatten = layers.Flatten()(base_cnn.output)
dense1 = layers.Dense(512, activation="relu")(flatten)
dense1 = layers.BatchNormalization()(dense1)
dense2 = layers.Dense(256, activation="relu")(dense1)
dense2 = layers.BatchNormalization()(dense2)
output = layers.Dense(256)(dense2)
embedding = Model(base_cnn.input, output, name="Embedding")
trainable = False
for layer in base_cnn.layers:
if layer.name == "conv5_block1_out":
trainable = True
layer.trainable = trainable<jupyter_output><empty_output><jupyter_text>Setting up the Siamese Network modelThe Siamese network will receive each of the triplet images as an input,generate the embeddings, and output the distance between the anchor and thepositive embedding, as well as the distance between the anchor and the negativeembedding.To compute the distance, we can use a custom layer `DistanceLayer` thatreturns both values as a tuple.<jupyter_code>class DistanceLayer(layers.Layer):
"""
This layer is responsible for computing the distance between the anchor
embedding and the positive embedding, and the anchor embedding and the
negative embedding.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def call(self, anchor, positive, negative):
ap_distance = ops.sum(tf.square(anchor - positive), -1)
an_distance = ops.sum(tf.square(anchor - negative), -1)
return (ap_distance, an_distance)
anchor_input = layers.Input(name="anchor", shape=target_shape + (3,))
positive_input = layers.Input(name="positive", shape=target_shape + (3,))
negative_input = layers.Input(name="negative", shape=target_shape + (3,))
distances = DistanceLayer()(
embedding(resnet.preprocess_input(anchor_input)),
embedding(resnet.preprocess_input(positive_input)),
embedding(resnet.preprocess_input(negative_input)),
)
siamese_network = Model(
inputs=[anchor_input, positive_input, negative_input], outputs=distances
)<jupyter_output><empty_output><jupyter_text>Putting everything togetherWe now need to implement a model with custom training loop so we can computethe triplet loss using the three embeddings produced by the Siamese network.Let's create a `Mean` metric instance to track the loss of the training process.<jupyter_code>class SiameseModel(Model):
"""The Siamese Network model with a custom training and testing loops.
Computes the triplet loss using the three embeddings produced by the
Siamese Network.
The triplet loss is defined as:
L(A, P, N) = max(‖f(A) - f(P)‖² - ‖f(A) - f(N)‖² + margin, 0)
"""
def __init__(self, siamese_network, margin=0.5):
super().__init__()
self.siamese_network = siamese_network
self.margin = margin
self.loss_tracker = metrics.Mean(name="loss")
def call(self, inputs):
return self.siamese_network(inputs)
def train_step(self, data):
# GradientTape is a context manager that records every operation that
# you do inside. We are using it here to compute the loss so we can get
# the gradients and apply them using the optimizer specified in
# `compile()`.
with tf.GradientTape() as tape:
loss = self._compute_loss(data)
# Storing the gradients of the loss function with respect to the
# weights/parameters.
gradients = tape.gradient(loss, self.siamese_network.trainable_weights)
# Applying the gradients on the model using the specified optimizer
self.optimizer.apply_gradients(
zip(gradients, self.siamese_network.trainable_weights)
)
# Let's update and return the training loss metric.
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def test_step(self, data):
loss = self._compute_loss(data)
# Let's update and return the loss metric.
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def _compute_loss(self, data):
# The output of the network is a tuple containing the distances
# between the anchor and the positive example, and the anchor and
# the negative example.
ap_distance, an_distance = self.siamese_network(data)
# Computing the Triplet Loss by subtracting both distances and
# making sure we don't get a negative value.
loss = ap_distance - an_distance
loss = tf.maximum(loss + self.margin, 0.0)
return loss
@property
def metrics(self):
# We need to list our metrics here so the `reset_states()` can be
# called automatically.
return [self.loss_tracker]<jupyter_output><empty_output><jupyter_text>TrainingWe are now ready to train our model.<jupyter_code>siamese_model = SiameseModel(siamese_network)
siamese_model.compile(optimizer=optimizers.Adam(0.0001))
siamese_model.fit(train_dataset, epochs=10, validation_data=val_dataset)<jupyter_output><empty_output><jupyter_text>Inspecting what the network has learnedAt this point, we can check how the network learned to separate the embeddingsdepending on whether they belong to similar images.We can use [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) to measure thesimilarity between embeddings.Let's pick a sample from the dataset to check the similarity between theembeddings generated for each image.<jupyter_code>sample = next(iter(train_dataset))
visualize(*sample)
anchor, positive, negative = sample
anchor_embedding, positive_embedding, negative_embedding = (
embedding(resnet.preprocess_input(anchor)),
embedding(resnet.preprocess_input(positive)),
embedding(resnet.preprocess_input(negative)),
)<jupyter_output><empty_output><jupyter_text>Finally, we can compute the cosine similarity between the anchor and positiveimages and compare it with the similarity between the anchor and the negativeimages.We should expect the similarity between the anchor and positive images to belarger than the similarity between the anchor and the negative images.<jupyter_code>cosine_similarity = metrics.CosineSimilarity()
positive_similarity = cosine_similarity(anchor_embedding, positive_embedding)
print("Positive similarity:", positive_similarity.numpy())
negative_similarity = cosine_similarity(anchor_embedding, negative_embedding)
print("Negative similarity", negative_similarity.numpy())<jupyter_output><empty_output> | keras-io/examples/vision/ipynb/siamese_network.ipynb/0 | {
"file_path": "keras-io/examples/vision/ipynb/siamese_network.ipynb",
"repo_id": "keras-io",
"token_count": 4445
} | 112 |
"""
Title: Knowledge Distillation
Author: [Kenneth Borup](https://twitter.com/Kennethborup)
Date created: 2020/09/01
Last modified: 2020/09/01
Description: Implementation of classical Knowledge Distillation.
Accelerator: GPU
Converted to Keras 3 by: [Md Awsafur Rahman](https://awsaf49.github.io)
"""
"""
## Introduction to Knowledge Distillation
Knowledge Distillation is a procedure for model
compression, in which a small (student) model is trained to match a large pre-trained
(teacher) model. Knowledge is transferred from the teacher model to the student
by minimizing a loss function, aimed at matching softened teacher logits as well as
ground-truth labels.
The logits are softened by applying a "temperature" scaling function in the softmax,
effectively smoothing out the probability distribution and revealing
inter-class relationships learned by the teacher.
**Reference:**
- [Hinton et al. (2015)](https://arxiv.org/abs/1503.02531)
"""
"""
## Setup
"""
import os
import keras
from keras import layers
from keras import ops
import numpy as np
"""
## Construct `Distiller()` class
The custom `Distiller()` class overrides the `Model` methods `compile`, `compute_loss`,
and `call`. In order to use the distiller, we need:
- A trained teacher model
- A student model to train
- A student loss function on the difference between student predictions and ground-truth
- A distillation loss function, along with a `temperature`, on the difference between the
soft student predictions and the soft teacher labels
- An `alpha` factor to weight the student and distillation loss
- An optimizer for the student and (optional) metrics to evaluate performance
In the `compute_loss` method, we perform a forward pass of both the teacher and student,
calculate the loss with weighting of the `student_loss` and `distillation_loss` by `alpha`
and `1 - alpha`, respectively. Note: only the student weights are updated.
"""
class Distiller(keras.Model):
def __init__(self, student, teacher):
super().__init__()
self.teacher = teacher
self.student = student
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
alpha=0.1,
temperature=3,
):
"""Configure the distiller.
Args:
optimizer: Keras optimizer for the student weights
metrics: Keras metrics for evaluation
student_loss_fn: Loss function of difference between student
predictions and ground-truth
distillation_loss_fn: Loss function of difference between soft
student predictions and soft teacher predictions
alpha: weight to student_loss_fn and 1-alpha to distillation_loss_fn
temperature: Temperature for softening probability distributions.
Larger temperature gives softer distributions.
"""
super().compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.alpha = alpha
self.temperature = temperature
def compute_loss(
self, x=None, y=None, y_pred=None, sample_weight=None, allow_empty=False
):
teacher_pred = self.teacher(x, training=False)
student_loss = self.student_loss_fn(y, y_pred)
distillation_loss = self.distillation_loss_fn(
ops.softmax(teacher_pred / self.temperature, axis=1),
ops.softmax(y_pred / self.temperature, axis=1),
) * (self.temperature**2)
loss = self.alpha * student_loss + (1 - self.alpha) * distillation_loss
return loss
def call(self, x):
return self.student(x)
"""
## Create student and teacher models
Initially, we create a teacher model and a smaller student model. Both models are
convolutional neural networks and created using `Sequential()`,
but could be any Keras model.
"""
# Create the teacher
teacher = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(256, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(512, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="teacher",
)
# Create the student
student = keras.Sequential(
[
keras.Input(shape=(28, 28, 1)),
layers.Conv2D(16, (3, 3), strides=(2, 2), padding="same"),
layers.LeakyReLU(negative_slope=0.2),
layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding="same"),
layers.Conv2D(32, (3, 3), strides=(2, 2), padding="same"),
layers.Flatten(),
layers.Dense(10),
],
name="student",
)
# Clone student for later comparison
student_scratch = keras.models.clone_model(student)
"""
## Prepare the dataset
The dataset used for training the teacher and distilling the teacher is
[MNIST](https://keras.io/api/datasets/mnist/), and the procedure would be equivalent for
any other
dataset, e.g. [CIFAR-10](https://keras.io/api/datasets/cifar10/), with a suitable choice
of models. Both the student and teacher are trained on the training set and evaluated on
the test set.
"""
# Prepare the train and test dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Normalize data
x_train = x_train.astype("float32") / 255.0
x_train = np.reshape(x_train, (-1, 28, 28, 1))
x_test = x_test.astype("float32") / 255.0
x_test = np.reshape(x_test, (-1, 28, 28, 1))
"""
## Train the teacher
In knowledge distillation we assume that the teacher is trained and fixed. Thus, we start
by training the teacher model on the training set in the usual way.
"""
# Train teacher as usual
teacher.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate teacher on data.
teacher.fit(x_train, y_train, epochs=5)
teacher.evaluate(x_test, y_test)
"""
## Distill teacher to student
We have already trained the teacher model, and we only need to initialize a
`Distiller(student, teacher)` instance, `compile()` it with the desired losses,
hyperparameters and optimizer, and distill the teacher to the student.
"""
# Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=keras.optimizers.Adam(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
student_loss_fn=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
distillation_loss_fn=keras.losses.KLDivergence(),
alpha=0.1,
temperature=10,
)
# Distill teacher to student
distiller.fit(x_train, y_train, epochs=3)
# Evaluate student on test dataset
distiller.evaluate(x_test, y_test)
"""
## Train student from scratch for comparison
We can also train an equivalent student model from scratch without the teacher, in order
to evaluate the performance gain obtained by knowledge distillation.
"""
# Train student as usual
student_scratch.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
# Train and evaluate student trained from scratch.
student_scratch.fit(x_train, y_train, epochs=3)
student_scratch.evaluate(x_test, y_test)
"""
If the teacher is trained for 5 full epochs and the student is distilled on this teacher
for 3 full epochs, you should in this example experience a performance boost compared to
training the same student model from scratch, and even compared to the teacher itself.
You should expect the teacher to have accuracy around 97.6%, the student trained from
scratch should be around 97.6%, and the distilled student should be around 98.1%. Remove
or try out different seeds to use different weight initializations.
"""
| keras-io/examples/vision/knowledge_distillation.py/0 | {
"file_path": "keras-io/examples/vision/knowledge_distillation.py",
"repo_id": "keras-io",
"token_count": 2743
} | 113 |
# CutMix data augmentation for image classification
**Author:** [Sayan Nath](https://twitter.com/sayannath2350)<br>
**Date created:** 2021/06/08<br>
**Last modified:** 2023/11/14<br>
**Description:** Data augmentation with CutMix for image classification on CIFAR-10.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/cutmix.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/cutmix.py)
---
## Introduction
_CutMix_ is a data augmentation technique that addresses the issue of information loss
and inefficiency present in regional dropout strategies.
Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,
you replace the removed regions with a patch from another image,
while the ground truth labels are mixed proportionally to the number of pixels of combined images.
CutMix was proposed in
[CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features](https://arxiv.org/abs/1905.04899)
(Yun et al., 2019)
It's implemented via the following formulas:
<img src="https://i.imgur.com/cGvd13V.png" width="200"/>
where `M` is the binary mask which indicates the cutout and the fill-in
regions from the two randomly drawn images and `λ` (in `[0, 1]`) is drawn from a
[`Beta(α, α)` distribution](https://en.wikipedia.org/wiki/Beta_distribution)
The coordinates of bounding boxes are:
<img src="https://i.imgur.com/eNisep4.png" width="150"/>
which indicates the cutout and fill-in regions in case of the images.
The bounding box sampling is represented by:
<img src="https://i.imgur.com/Snph9aj.png" width="200"/>
where `rx, ry` are randomly drawn from a uniform distribution with upper bound.
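
Before the `tf.data` implementation below, here is a minimal NumPy sketch (an illustration
only, not the code used in this example) of how a single CutMix box could be sampled for a
32×32 image following the formulas above; the variable names are made up:

```python
import numpy as np

# Illustrative only: sample lambda and a cut box for a 32x32 image.
rng = np.random.default_rng(0)
lam = rng.beta(0.25, 0.25)  # lambda ~ Beta(alpha, alpha)
cut_ratio = np.sqrt(1.0 - lam)  # rw / W = rh / H = sqrt(1 - lambda)
cut_w = int(32 * cut_ratio)
cut_h = int(32 * cut_ratio)
rx, ry = rng.integers(0, 32), rng.integers(0, 32)  # box center
x1, y1 = np.clip(rx - cut_w // 2, 0, 32), np.clip(ry - cut_h // 2, 0, 32)
x2, y2 = np.clip(rx + cut_w // 2, 0, 32), np.clip(ry + cut_h // 2, 0, 32)
print("lambda:", lam, "box:", (x1, y1, x2, y2))
```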
---
## Setup
```python
import numpy as np
import keras
import matplotlib.pyplot as plt
from keras import layers
# TF imports related to tf.data preprocessing
from tensorflow import clip_by_value
from tensorflow import data as tf_data
from tensorflow import image as tf_image
from tensorflow import random as tf_random
keras.utils.set_random_seed(42)
```
---
## Load the CIFAR-10 dataset
In this example, we will use the
[CIFAR-10 image classification dataset](https://www.cs.toronto.edu/~kriz/cifar.html).
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes=10)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
class_names = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
```
<div class="k-default-codeblock">
```
(50000, 32, 32, 3)
(50000, 10)
(10000, 32, 32, 3)
(10000, 10)
```
</div>
---
## Define hyperparameters
```python
AUTO = tf_data.AUTOTUNE
BATCH_SIZE = 32
IMG_SIZE = 32
```
---
## Define the image preprocessing function
```python
def preprocess_image(image, label):
image = tf_image.resize(image, (IMG_SIZE, IMG_SIZE))
image = tf_image.convert_image_dtype(image, "float32") / 255.0
label = keras.ops.cast(label, dtype="float32")
return image, label
```
---
## Convert the data into TensorFlow `Dataset` objects
```python
train_ds_one = (
tf_data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1024)
.map(preprocess_image, num_parallel_calls=AUTO)
)
train_ds_two = (
tf_data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1024)
.map(preprocess_image, num_parallel_calls=AUTO)
)
train_ds_simple = tf_data.Dataset.from_tensor_slices((x_train, y_train))
test_ds = tf_data.Dataset.from_tensor_slices((x_test, y_test))
train_ds_simple = (
train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Combine two shuffled datasets from the same training data.
train_ds = tf_data.Dataset.zip((train_ds_one, train_ds_two))
test_ds = (
test_ds.map(preprocess_image, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
```
---
## Define the CutMix data augmentation function
The CutMix function takes two `image` and `label` pairs to perform the augmentation.
It samples `λ(l)` from the [Beta distribution](https://en.wikipedia.org/wiki/Beta_distribution)
and returns a bounding box from the `get_box` function. We then crop a patch from the second image (`image2`)
and pad it back to the full image size at the same location.
```python
def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2):
gamma_1_sample = tf_random.gamma(shape=[size], alpha=concentration_1)
gamma_2_sample = tf_random.gamma(shape=[size], alpha=concentration_0)
return gamma_1_sample / (gamma_1_sample + gamma_2_sample)
def get_box(lambda_value):
cut_rat = keras.ops.sqrt(1.0 - lambda_value)
cut_w = IMG_SIZE * cut_rat # rw
cut_w = keras.ops.cast(cut_w, "int32")
cut_h = IMG_SIZE * cut_rat # rh
cut_h = keras.ops.cast(cut_h, "int32")
cut_x = keras.random.uniform((1,), minval=0, maxval=IMG_SIZE) # rx
cut_x = keras.ops.cast(cut_x, "int32")
cut_y = keras.random.uniform((1,), minval=0, maxval=IMG_SIZE) # ry
cut_y = keras.ops.cast(cut_y, "int32")
boundaryx1 = clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE)
boundaryy1 = clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE)
bbx2 = clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE)
bby2 = clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE)
target_h = bby2 - boundaryy1
if target_h == 0:
target_h += 1
target_w = bbx2 - boundaryx1
if target_w == 0:
target_w += 1
return boundaryx1, boundaryy1, target_h, target_w
def cutmix(train_ds_one, train_ds_two):
(image1, label1), (image2, label2) = train_ds_one, train_ds_two
alpha = [0.25]
beta = [0.25]
# Get a sample from the Beta distribution
lambda_value = sample_beta_distribution(1, alpha, beta)
# Define Lambda
lambda_value = lambda_value[0][0]
# Get the bounding box offsets, heights and widths
boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value)
# Get a patch from the second image (`image2`)
crop2 = tf_image.crop_to_bounding_box(
image2, boundaryy1, boundaryx1, target_h, target_w
)
# Pad the `image2` patch (`crop2`) with the same offset
image2 = tf_image.pad_to_bounding_box(
crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
)
# Get a patch from the first image (`image1`)
crop1 = tf_image.crop_to_bounding_box(
image1, boundaryy1, boundaryx1, target_h, target_w
)
# Pad the `image1` patch (`crop1`) with the same offset
img1 = tf_image.pad_to_bounding_box(
crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
)
# Modify the first image by subtracting the patch from `image1`
# (before applying the `image2` patch)
image1 = image1 - img1
# Add the modified `image1` and `image2` together to get the CutMix image
image = image1 + image2
    # Adjust Lambda in accordance with the pixel ratio
lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE)
lambda_value = keras.ops.cast(lambda_value, "float32")
# Combine the labels of both images
label = lambda_value * label1 + (1 - lambda_value) * label2
return image, label
```
**Note**: we are combining two images to create a single one.
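
As a tiny numeric illustration (not part of the pipeline above), if `lambda_value = 0.75`,
mixing a one-hot "cat" label with a one-hot "dog" label produces a soft label:

```python
# Illustrative only: mixing two one-hot labels with lambda = 0.75.
lam = 0.75
label_cat = np.array([0.0, 1.0, 0.0])
label_dog = np.array([0.0, 0.0, 1.0])
print(lam * label_cat + (1 - lam) * label_dog)  # [0.   0.75 0.25]
```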
---
## Visualize the new dataset after applying the CutMix augmentation
```python
# Create the new dataset using our `cutmix` utility
train_ds_cmu = (
train_ds.shuffle(1024)
.map(cutmix, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Let's preview 9 samples from the dataset
image_batch, label_batch = next(iter(train_ds_cmu))
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.title(class_names[np.argmax(label_batch[i])])
plt.imshow(image_batch[i])
plt.axis("off")
```

---
## Define a ResNet-20 model
```python
def resnet_layer(
inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation="relu",
batch_normalization=True,
conv_first=True,
):
conv = layers.Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.L2(1e-4),
)
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = layers.BatchNormalization()(x)
if activation is not None:
x = layers.Activation(activation)(x)
else:
if batch_normalization:
x = layers.BatchNormalization()(x)
if activation is not None:
x = layers.Activation(activation)(x)
x = conv(x)
return x
def resnet_v20(input_shape, depth, num_classes=10):
if (depth - 2) % 6 != 0:
raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])")
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = layers.Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(
inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False,
)
x = layers.add([x, y])
x = layers.Activation("relu")(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = layers.AveragePooling2D(pool_size=8)(x)
y = layers.Flatten()(x)
outputs = layers.Dense(
num_classes, activation="softmax", kernel_initializer="he_normal"
)(y)
# Instantiate model.
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def training_model():
return resnet_v20((32, 32, 3), 20)
initial_model = training_model()
initial_model.save_weights("initial_weights.weights.h5")
```
---
## Train the model with the dataset augmented by CutMix
```python
model = training_model()
model.load_weights("initial_weights.weights.h5")
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds_cmu, validation_data=test_ds, epochs=15)
test_loss, test_accuracy = model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
```
<div class="k-default-codeblock">
```
Epoch 1/15
10/1563 [37m━━━━━━━━━━━━━━━━━━━━ 19s 13ms/step - accuracy: 0.0795 - loss: 5.3035
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1699988196.560261 362411 device_compiler.h:187] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 64s 27ms/step - accuracy: 0.3148 - loss: 2.1918 - val_accuracy: 0.4067 - val_loss: 1.8339
Epoch 2/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 27s 17ms/step - accuracy: 0.4295 - loss: 1.9021 - val_accuracy: 0.5516 - val_loss: 1.4744
Epoch 3/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 28s 18ms/step - accuracy: 0.4883 - loss: 1.8076 - val_accuracy: 0.5305 - val_loss: 1.5067
Epoch 4/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 27s 17ms/step - accuracy: 0.5243 - loss: 1.7342 - val_accuracy: 0.6303 - val_loss: 1.2822
Epoch 5/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 27s 17ms/step - accuracy: 0.5574 - loss: 1.6614 - val_accuracy: 0.5370 - val_loss: 1.5912
Epoch 6/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 27s 17ms/step - accuracy: 0.5832 - loss: 1.6167 - val_accuracy: 0.6254 - val_loss: 1.3116
Epoch 7/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 26s 17ms/step - accuracy: 0.6045 - loss: 1.5738 - val_accuracy: 0.6101 - val_loss: 1.3408
Epoch 8/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 28s 18ms/step - accuracy: 0.6170 - loss: 1.5493 - val_accuracy: 0.6209 - val_loss: 1.2923
Epoch 9/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 29s 18ms/step - accuracy: 0.6292 - loss: 1.5299 - val_accuracy: 0.6290 - val_loss: 1.2813
Epoch 10/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 28s 18ms/step - accuracy: 0.6394 - loss: 1.5110 - val_accuracy: 0.7234 - val_loss: 1.0608
Epoch 11/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 26s 17ms/step - accuracy: 0.6467 - loss: 1.4915 - val_accuracy: 0.7498 - val_loss: 0.9854
Epoch 12/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 28s 18ms/step - accuracy: 0.6559 - loss: 1.4785 - val_accuracy: 0.6481 - val_loss: 1.2410
Epoch 13/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 26s 17ms/step - accuracy: 0.6596 - loss: 1.4656 - val_accuracy: 0.7551 - val_loss: 0.9784
Epoch 14/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 27s 17ms/step - accuracy: 0.6577 - loss: 1.4637 - val_accuracy: 0.6822 - val_loss: 1.1703
Epoch 15/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 26s 17ms/step - accuracy: 0.6702 - loss: 1.4445 - val_accuracy: 0.7108 - val_loss: 1.0805
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7140 - loss: 1.0766
Test accuracy: 71.08%
```
</div>
---
## Train the model using the original non-augmented dataset
```python
model = training_model()
model.load_weights("initial_weights.weights.h5")
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds_simple, validation_data=test_ds, epochs=15)
test_loss, test_accuracy = model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
```
<div class="k-default-codeblock">
```
Epoch 1/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 41s 15ms/step - accuracy: 0.3943 - loss: 1.8736 - val_accuracy: 0.5359 - val_loss: 1.4376
Epoch 2/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.6160 - loss: 1.2407 - val_accuracy: 0.5887 - val_loss: 1.4254
Epoch 3/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.6927 - loss: 1.0448 - val_accuracy: 0.6102 - val_loss: 1.4850
Epoch 4/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.7411 - loss: 0.9222 - val_accuracy: 0.6262 - val_loss: 1.3898
Epoch 5/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 13s 8ms/step - accuracy: 0.7711 - loss: 0.8439 - val_accuracy: 0.6283 - val_loss: 1.3425
Epoch 6/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 8ms/step - accuracy: 0.7983 - loss: 0.7886 - val_accuracy: 0.2460 - val_loss: 5.6869
Epoch 7/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.8168 - loss: 0.7490 - val_accuracy: 0.1954 - val_loss: 21.7670
Epoch 8/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.8113 - loss: 0.7779 - val_accuracy: 0.1027 - val_loss: 36.3144
Epoch 9/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 11s 7ms/step - accuracy: 0.6592 - loss: 1.4179 - val_accuracy: 0.1025 - val_loss: 40.0770
Epoch 10/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 8ms/step - accuracy: 0.5611 - loss: 1.9856 - val_accuracy: 0.1699 - val_loss: 40.6308
Epoch 11/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 13s 8ms/step - accuracy: 0.6076 - loss: 1.7795 - val_accuracy: 0.1003 - val_loss: 63.4775
Epoch 12/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.6175 - loss: 1.8077 - val_accuracy: 0.1099 - val_loss: 21.9148
Epoch 13/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.6468 - loss: 1.6702 - val_accuracy: 0.1576 - val_loss: 72.7290
Epoch 14/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 12s 7ms/step - accuracy: 0.6437 - loss: 1.7858 - val_accuracy: 0.1000 - val_loss: 64.9249
Epoch 15/15
1563/1563 ━━━━━━━━━━━━━━━━━━━━ 13s 8ms/step - accuracy: 0.6587 - loss: 1.7587 - val_accuracy: 0.1000 - val_loss: 138.8463
313/313 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.0988 - loss: 139.3117
Test accuracy: 10.00%
```
</div>
---
## Notes
In this example, we trained our model for 15 epochs.
In our experiment, the model with CutMix achieves a better accuracy on the CIFAR-10 dataset
(77.34% in our experiment) compared to the model that doesn't use the augmentation (66.90%).
You may notice it takes less time to train the model with the CutMix augmentation.
You can experiment further with the CutMix technique by following the
[original paper](https://arxiv.org/abs/1905.04899).
| keras-io/examples/vision/md/cutmix.md/0 | {
"file_path": "keras-io/examples/vision/md/cutmix.md",
"repo_id": "keras-io",
"token_count": 7363
} | 114 |
# Image Classification using Global Context Vision Transformer
**Author:** Md Awsafur Rahman<br>
**Date created:** 2023/10/30<br>
**Last modified:** 2023/10/30<br>
**Description:** Implementation and fine-tuning of Global Context Vision Transformer for image classification.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/image_classification_using_global_context_vision_transformer.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/image_classification_using_global_context_vision_transformer.py)
# Setup
```python
!pip install --upgrade keras_cv tensorflow
!pip install --upgrade keras
```
```python
import keras
from keras_cv.layers import DropPath
from keras import ops
from keras import layers
import tensorflow as tf # only for dataloader
import tensorflow_datasets as tfds # for flower dataset
from skimage.data import chelsea
import matplotlib.pyplot as plt
import numpy as np
```
---
## Introduction
In this notebook, we will utilize multi-backend Keras 3.0 to implement the
[**GCViT: Global Context Vision Transformer**](https://arxiv.org/abs/2206.09959) paper,
presented at ICML 2023 by A. Hatamizadeh et al. Then, we will fine-tune the model on the
Flower dataset for the image classification task, leveraging the official ImageNet pre-trained
weights. A highlight of this notebook is its compatibility with multiple backends:
TensorFlow, PyTorch, and JAX, showcasing the true potential of multi-backend Keras.
---
## Motivation
> **Note:** In this section we'll learn about the backstory of GCViT and try to
understand why it is proposed.
* During recent years, **Transformers** have achieved dominance in **Natural Language
Processing (NLP)** tasks, with the **self-attention** mechanism allowing them to
capture both long and short-range information.
* Following this trend, the **Vision Transformer (ViT)** proposed to utilize image patches as
tokens in a gigantic architecture similar to the encoder of the original Transformer.
* Despite the historic dominance of **Convolutional Neural Network (CNN)** in computer
vision, **ViT-based** models have shown **SOTA or competitive performance** in various
computer vision tasks.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/vit_gif.gif"
width=600>
* However, the **quadratic [`O(n^2)`] computational complexity** of self-attention and the **lack
of multi-scale information** make it difficult for **ViT** to be considered a
general-purpose architecture for Computer Vision tasks like **segmentation and object
detection**, which require **dense prediction at the pixel level**.
* Swin Transformer has attempted to address the issues of **ViT** by proposing
**multi-resolution/hierarchical** architectures in which the self-attention is computed
in **local windows** and cross-window connections such as **window shifting** are used
for modeling the interactions across different regions. But the **limited receptive field
of local windows** can not capture long-range information, and cross-window-connection
schemes such as **window-shifting only cover a small neighborhood** in the vicinity of
each window. Also, it lacks the **inductive bias** that encourages a certain degree of translation
invariance, which is still preferable for general-purpose visual modeling, particularly for the
dense prediction tasks of object detection and semantic segmentation.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/swin_vs_vit.JPG"
width=400> <img
src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/shifted_window.JPG"
width=400>
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/swin_arch.JPG"
width=800>
* To address the above limitations, the **Global Context (GC) ViT** network is proposed.
---
## Architecture
Let's have a quick **overview** of our key components,
1. `Stem/PatchEmbed:` A stem/patchify layer processes images at the network’s beginning.
For this network, it creates **patches/tokens** and converts them into **embeddings**.
2. `Level:` It is the repetitive building block that extracts features using different
blocks.
3. `Global Token Gen./FeatureExtraction:` It generates **global tokens/patches** with
**Depthwise-CNN**, **SqueezeAndExcitation (Squeeze-Excitation)**, **CNN** and
**MaxPooling**. So basically
it's a Feature Extractor.
4. `Block:` It is the repetitive module that applies attention to the features and
projects them to a certain dimension.
1. `Local-MSA:` Local Multi head Self Attention.
2. `Global-MSA:` Global Multi head Self Attention.
3. `MLP:` Linear layer that projects a vector to another dimension.
5. `Downsample/ReduceSize:` It is very similar to **Global Token Gen.** module except it
uses **CNN** instead of **MaxPooling** to downsample with additional **Layer
Normalization** modules.
6. `Head:` It is the module responsible for the classification task.
1. `Pooling:` It converts `N x 2D` features to `N x 1D` features.
2. `Classifier:` It processes `N x 1D` features to make a decision about class.
I've annotated the architecture figure to make it easier to digest,
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/arch_annot.png">
### Unit Blocks
> **Note:** These blocks are used to build other modules throughout the paper. Most of the
blocks are either borrowed from other work or are modified versions of older work.
1. `SqueezeAndExcitation`: **Squeeze-Excitation (SE)** aka **Bottleneck** module acts as a
kind of **channel
attention**. It consists of **AvgPooling**, **Dense/FullyConnected (FC)/Linear**,
**GELU** and **Sigmoid** modules.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/se_annot.png"
width=400>
2. `Fused-MBConv:` This is similar to the one used in **EfficientNetV2**. It uses
**Depthwise-Conv**, **GELU**, **SqueezeAndExcitation** and **Conv** to extract features with
a residual
connection. Note that no new module is declared for this one; we simply apply the
corresponding modules directly.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/fmb_annot.png"
width=350>
3. `ReduceSize`: It is a **CNN**-based **downsample** module which uses the above-mentioned
`Fused-MBConv` module to extract features, a **Strided Conv** to simultaneously reduce the
spatial dimension and increase the channelwise dimension of the features, and finally a
**LayerNormalization** module to normalize the features. In the paper/figure this module is
referred to as the **downsample** module. It is worth mentioning that **SwinTransformer**
uses a `PatchMerging` module instead of `ReduceSize` to reduce the spatial dimension and
increase the channelwise dimension, which relies on a **fully-connected/dense/linear** module.
According to the **GCViT** paper, one of the purposes of using `ReduceSize` is to add
inductive bias through the **CNN** module.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/down_annot.png"
width=300>
4. `MLP:` This is our very own **Multi Layer Perceptron** module. This is a
feed-forward/fully-connected/linear module which simply projects the input to an arbitrary
dimension.
```python
class SqueezeAndExcitation(layers.Layer):
"""Squeeze and excitation block.
Args:
output_dim: output features dimension, if `None` use same dim as input.
expansion: expansion ratio.
"""
def __init__(self, output_dim=None, expansion=0.25, **kwargs):
super().__init__(**kwargs)
self.expansion = expansion
self.output_dim = output_dim
def build(self, input_shape):
inp = input_shape[-1]
self.output_dim = self.output_dim or inp
self.avg_pool = layers.GlobalAvgPool2D(keepdims=True, name="avg_pool")
self.fc = [
layers.Dense(int(inp * self.expansion), use_bias=False, name="fc_0"),
layers.Activation("gelu", name="fc_1"),
layers.Dense(self.output_dim, use_bias=False, name="fc_2"),
layers.Activation("sigmoid", name="fc_3"),
]
super().build(input_shape)
def call(self, inputs, **kwargs):
x = self.avg_pool(inputs)
for layer in self.fc:
x = layer(x)
return x * inputs
class ReduceSize(layers.Layer):
"""Down-sampling block.
Args:
keepdims: if False spatial dim is reduced and channel dim is increased
"""
def __init__(self, keepdims=False, **kwargs):
super().__init__(**kwargs)
self.keepdims = keepdims
def build(self, input_shape):
embed_dim = input_shape[-1]
dim_out = embed_dim if self.keepdims else 2 * embed_dim
self.pad1 = layers.ZeroPadding2D(1, name="pad1")
self.pad2 = layers.ZeroPadding2D(1, name="pad2")
self.conv = [
layers.DepthwiseConv2D(
kernel_size=3, strides=1, padding="valid", use_bias=False, name="conv_0"
),
layers.Activation("gelu", name="conv_1"),
SqueezeAndExcitation(name="conv_2"),
layers.Conv2D(
embed_dim,
kernel_size=1,
strides=1,
padding="valid",
use_bias=False,
name="conv_3",
),
]
self.reduction = layers.Conv2D(
dim_out,
kernel_size=3,
strides=2,
padding="valid",
use_bias=False,
name="reduction",
)
self.norm1 = layers.LayerNormalization(
-1, 1e-05, name="norm1"
) # eps like PyTorch
self.norm2 = layers.LayerNormalization(-1, 1e-05, name="norm2")
def call(self, inputs, **kwargs):
x = self.norm1(inputs)
xr = self.pad1(x)
for layer in self.conv:
xr = layer(xr)
x = x + xr
x = self.pad2(x)
x = self.reduction(x)
x = self.norm2(x)
return x
class MLP(layers.Layer):
"""Multi-Layer Perceptron (MLP) block.
Args:
hidden_features: hidden features dimension.
out_features: output features dimension.
activation: activation function.
dropout: dropout rate.
"""
def __init__(
self,
hidden_features=None,
out_features=None,
activation="gelu",
dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_features = hidden_features
self.out_features = out_features
self.activation = activation
self.dropout = dropout
def build(self, input_shape):
self.in_features = input_shape[-1]
self.hidden_features = self.hidden_features or self.in_features
self.out_features = self.out_features or self.in_features
self.fc1 = layers.Dense(self.hidden_features, name="fc1")
self.act = layers.Activation(self.activation, name="act")
self.fc2 = layers.Dense(self.out_features, name="fc2")
self.drop1 = layers.Dropout(self.dropout, name="drop1")
self.drop2 = layers.Dropout(self.dropout, name="drop2")
def call(self, inputs, **kwargs):
x = self.fc1(inputs)
x = self.act(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.drop2(x)
return x
```
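
As a quick sanity check (an illustrative sketch, not part of the original example), the unit
blocks above can be applied to a dummy tensor to inspect their output shapes; the expected
shapes are noted in the comments:

```python
# Illustrative only: shape check on random features using the layers defined above.
dummy = ops.ones((1, 56, 56, 64))
print(SqueezeAndExcitation()(dummy).shape)  # (1, 56, 56, 64): channel re-weighting only
print(ReduceSize()(dummy).shape)  # (1, 28, 28, 128): spatial dims halved, channels doubled
print(MLP(hidden_features=256)(dummy).shape)  # (1, 56, 56, 64): projected back to input dim
```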
### Stem
> **Notes**: In the code, this module is referred to as **PatchEmbed** but in the paper, it
is referred to as **Stem**.
In the model, we have first used the `patch_embed` module. Let's try to understand this
module. As we can see from the `call` method,
1. This module first **pads** the input.
2. Then uses **convolutions** to extract patches with embeddings.
3. Finally, uses the `ReduceSize` module (with `keepdims=True`) to further extract features with
**convolution** and apply one more `2x` down-sampling, without increasing the channel dimension.
4. One important point to notice, unlike **ViT** or **SwinTransformer**, **GCViT**
creates **overlapping patches**. We can notice that from the code,
`Conv2D(self.embed_dim, kernel_size=3, strides=2, name='proj')`. If we wanted
**non-overlapping** patches then we would've used the same `kernel_size` and `stride`.
5. This module reduces the spatial dimension of input by `4x`.
> Summary: image → padding → convolution →
(feature_extract + downsample)
```python
class PatchEmbed(layers.Layer):
"""Patch embedding block.
Args:
embed_dim: feature size dimension.
"""
def __init__(self, embed_dim, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
def build(self, input_shape):
self.pad = layers.ZeroPadding2D(1, name="pad")
self.proj = layers.Conv2D(self.embed_dim, 3, 2, name="proj")
self.conv_down = ReduceSize(keepdims=True, name="conv_down")
def call(self, inputs, **kwargs):
x = self.pad(inputs)
x = self.proj(x)
x = self.conv_down(x)
return x
```
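
As a small check (an illustrative sketch, not part of the original example), applying
`PatchEmbed` to a dummy batch shows the `4x` spatial reduction mentioned above:

```python
# Illustrative only: PatchEmbed reduces spatial dims by 4x and projects to embed_dim.
dummy_images = ops.ones((1, 224, 224, 3))
patches = PatchEmbed(embed_dim=64)(dummy_images)
print(patches.shape)  # (1, 56, 56, 64)
```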
### Global Token Gen.
> **Notes:** It is one of the two **CNN** modules that is used to impose inductive bias.
As we can see from the above cell, in the `level` we have first used `to_q_global/Global
Token Gen./FeatureExtraction`. Let's try to understand how it works,
* This module is a series of `FeatureExtraction` modules; according to the paper we need to
repeat this module `K` times, where `K = log2(H/h)`, with `H = feature_map_height`
(analogously `W = feature_map_width`) and `h` the target window height.
* `FeatureExtraction:` This layer is very similar to the `ReduceSize` module except that it uses
a **MaxPooling** module to reduce the dimension, it doesn't increase the feature dimension
(channelwise), and it doesn't use **LayerNormalization**. This module is used in the
`Global Token Gen.` module repeatedly to generate **global tokens** for
**global-context-attention**.
* One important point to notice from the figure is that **global tokens** are shared
across the whole image, which means we use only **one global window** for **all local
tokens** in an image. This makes the computation very efficient.
* For an input feature map with shape `(B, H, W, C)`, we'll get an output shape of `(B, h, w, C)`.
If we copy these global tokens for a total of `M` local windows in an image, where
`M = (H x W)/(h x w) = num_window`, then the output shape is `(B * M, h, w, C)`.
> Summary: This module is used to `resize` the image to fit window.
<img
src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/global_token_annot.png"
width=800>
```python
class FeatureExtraction(layers.Layer):
"""Feature extraction block.
Args:
keepdims: bool argument for maintaining the resolution.
"""
def __init__(self, keepdims=False, **kwargs):
super().__init__(**kwargs)
self.keepdims = keepdims
def build(self, input_shape):
embed_dim = input_shape[-1]
self.pad1 = layers.ZeroPadding2D(1, name="pad1")
self.pad2 = layers.ZeroPadding2D(1, name="pad2")
self.conv = [
layers.DepthwiseConv2D(3, 1, use_bias=False, name="conv_0"),
layers.Activation("gelu", name="conv_1"),
SqueezeAndExcitation(name="conv_2"),
layers.Conv2D(embed_dim, 1, 1, use_bias=False, name="conv_3"),
]
if not self.keepdims:
self.pool = layers.MaxPool2D(3, 2, name="pool")
super().build(input_shape)
def call(self, inputs, **kwargs):
x = inputs
xr = self.pad1(x)
for layer in self.conv:
xr = layer(xr)
x = x + xr
if not self.keepdims:
x = self.pool(self.pad2(x))
return x
class GlobalQueryGenerator(layers.Layer):
"""Global query generator.
Args:
keepdims: to keep the dimension of FeatureExtraction layer.
For instance, repeating log(56/7) = 3 blocks, with input
window dimension 56 and output window dimension 7 at down-sampling
ratio 2. Please check Fig.5 of GC ViT paper for details.
"""
def __init__(self, keepdims=False, **kwargs):
super().__init__(**kwargs)
self.keepdims = keepdims
def build(self, input_shape):
self.to_q_global = [
FeatureExtraction(keepdims, name=f"to_q_global_{i}")
for i, keepdims in enumerate(self.keepdims)
]
super().build(input_shape)
def call(self, inputs, **kwargs):
x = inputs
for layer in self.to_q_global:
x = layer(x)
return x
```
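
For instance (an illustrative sketch, not part of the original example), going from a
`56 x 56` feature map to a `7 x 7` global token requires `log2(56/7) = 3` down-sampling
`FeatureExtraction` blocks:

```python
# Illustrative only: 56 -> 28 -> 14 -> 7 with three non-keepdims FeatureExtraction blocks.
features = ops.ones((1, 56, 56, 64))
q_global = GlobalQueryGenerator(keepdims=[False, False, False])(features)
print(q_global.shape)  # (1, 7, 7, 64)
```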
### Attention
> **Notes:** This is the core contribution of the paper.
As we can see from the `call` method,
1. `WindowAttention` module applies both **local** and **global** window attention
depending on `global_query` parameter.
2. First it converts input features into `query, key, value` for local attention and
`key, value` for global attention. For global attention, it takes global query from
`Global Token Gen.`. One thing to notice from the code is that we divide the **features
or embed_dim** among all the **heads of Transformer** to reduce the computation.
`qkv = tf.reshape(qkv, [B_, N, self.qkv_size, self.num_heads, C // self.num_heads])`
3. Before sending query, key and value for attention, **global token** goes through an
important process. Same global tokens or one global window gets copied for all the local
windows to increase efficiency.
`q_global = tf.repeat(q_global, repeats=B_//B, axis=0)`, here `B_//B` means `num_windows`
in a image.
4. Then simply applies `local-window-self-attention` or `global-window-attention`
depending on `global_query` parameter. One thing to notice from the code is that we are
adding **relative-positional-embedding** with the **attention mask** instead of the
**patch embedding**.
`attn = attn + relative_position_bias[tf.newaxis,]`
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/lvg_msa.PNG"
width=800>
5. Now, let's think for a bit and try to understand what is happening here. Let's focus
on the figure below. We can see from the left, that in the **local-attention** the
**query is local** and it's **limited to the local window** (red square border) hence we
don't have access to long-range information. But on the right that due to **global
query** we're now **not limited to local-windows** (blue square border) and we have
access to long-range information.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/lvg_arch.PNG"
width=800>
6. In **ViT** we compare (attention) image-tokens with image-tokens, in
**SwinTransformer** we compare window-tokens with window-tokens but in **GCViT** we
compare image-tokens with window-tokens. But now you may ask, how can we compare (attend)
image-tokens with window-tokens when image-tokens have larger dimensions than
window-tokens? (From the figure above, image-tokens have shape `(1, 8, 8, 3)` and
window-tokens have shape `(1, 4, 4, 3)`.) Yes, you are right, we can't directly compare
them, hence we resize the image-tokens to fit the window-tokens with the `Global Token
Gen./FeatureExtraction` **CNN** module. The following table should give you a clear
comparison,
| Model | Query Tokens | Key-Value Tokens | Attention Type | Attention Coverage |
|------------------|-----------------|-------------------|---------------------------|--------------------|
| ViT | image | image | self-attention | global |
| SwinTransformer | window | window | self-attention | local |
| **GCViT** | **resized-image** | **window** | **image-window attention** | **global** |
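
To make point 3 above concrete, here is a tiny sketch (an illustration only, with made-up
shapes) of how one global query per image is repeated for all of its local windows before
attention:

```python
# Illustrative only: one global query per image is copied to every window of that image.
B, num_windows, N, C = 2, 4, 49, 64  # batch, windows per image, tokens per window, channels
q_global = ops.ones((B, N, C))  # one global query per image
B_ = B * num_windows  # total number of windows in the batch
q_global = ops.repeat(q_global, repeats=B_ // B, axis=0)
print(q_global.shape)  # (8, 49, 64): same global query reused for each window of an image
```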
```python
class WindowAttention(layers.Layer):
"""Local window attention.
This implementation was proposed by
[Liu et al., 2021](https://arxiv.org/abs/2103.14030) in SwinTransformer.
Args:
window_size: window size.
num_heads: number of attention head.
global_query: if the input contains global_query
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
attention_dropout: attention dropout rate.
projection_dropout: output dropout rate.
"""
def __init__(
self,
window_size,
num_heads,
global_query,
qkv_bias=True,
qk_scale=None,
attention_dropout=0.0,
projection_dropout=0.0,
**kwargs,
):
super().__init__(**kwargs)
window_size = (window_size, window_size)
self.window_size = window_size
self.num_heads = num_heads
self.global_query = global_query
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.attention_dropout = attention_dropout
self.projection_dropout = projection_dropout
def build(self, input_shape):
embed_dim = input_shape[0][-1]
head_dim = embed_dim // self.num_heads
self.scale = self.qk_scale or head_dim**-0.5
self.qkv_size = 3 - int(self.global_query)
self.qkv = layers.Dense(
embed_dim * self.qkv_size, use_bias=self.qkv_bias, name="qkv"
)
self.relative_position_bias_table = self.add_weight(
name="relative_position_bias_table",
shape=[
(2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1),
self.num_heads,
],
initializer=keras.initializers.TruncatedNormal(stddev=0.02),
trainable=True,
dtype=self.dtype,
)
self.attn_drop = layers.Dropout(self.attention_dropout, name="attn_drop")
self.proj = layers.Dense(embed_dim, name="proj")
self.proj_drop = layers.Dropout(self.projection_dropout, name="proj_drop")
self.softmax = layers.Activation("softmax", name="softmax")
super().build(input_shape)
def get_relative_position_index(self):
coords_h = ops.arange(self.window_size[0])
coords_w = ops.arange(self.window_size[1])
coords = ops.stack(ops.meshgrid(coords_h, coords_w, indexing="ij"), axis=0)
coords_flatten = ops.reshape(coords, [2, -1])
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = ops.transpose(relative_coords, axes=[1, 2, 0])
relative_coords_xx = relative_coords[:, :, 0] + self.window_size[0] - 1
relative_coords_yy = relative_coords[:, :, 1] + self.window_size[1] - 1
relative_coords_xx = relative_coords_xx * (2 * self.window_size[1] - 1)
relative_position_index = relative_coords_xx + relative_coords_yy
return relative_position_index
def call(self, inputs, **kwargs):
if self.global_query:
inputs, q_global = inputs
B = ops.shape(q_global)[0] # B, N, C
else:
inputs = inputs[0]
B_, N, C = ops.shape(inputs) # B*num_window, num_tokens, channels
qkv = self.qkv(inputs)
qkv = ops.reshape(
qkv, [B_, N, self.qkv_size, self.num_heads, C // self.num_heads]
)
qkv = ops.transpose(qkv, [2, 0, 3, 1, 4])
if self.global_query:
k, v = ops.split(
qkv, indices_or_sections=2, axis=0
            )  # for unknown shape, num=None would throw an error
q_global = ops.repeat(
q_global, repeats=B_ // B, axis=0
) # num_windows = B_//B => q_global same for all windows in a img
q = ops.reshape(q_global, [B_, N, self.num_heads, C // self.num_heads])
q = ops.transpose(q, axes=[0, 2, 1, 3])
else:
q, k, v = ops.split(qkv, indices_or_sections=3, axis=0)
q = ops.squeeze(q, axis=0)
k = ops.squeeze(k, axis=0)
v = ops.squeeze(v, axis=0)
q = q * self.scale
attn = q @ ops.transpose(k, axes=[0, 1, 3, 2])
relative_position_bias = ops.take(
self.relative_position_bias_table,
ops.reshape(self.get_relative_position_index(), [-1]),
)
relative_position_bias = ops.reshape(
relative_position_bias,
[
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1,
],
)
relative_position_bias = ops.transpose(relative_position_bias, axes=[2, 0, 1])
attn = attn + relative_position_bias[None,]
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = ops.transpose((attn @ v), axes=[0, 2, 1, 3])
x = ops.reshape(x, [B_, N, C])
x = self.proj_drop(self.proj(x))
return x
```
### Block
> **Notes:** This module doesn't have any Convolutional module.
In the `level`, the second module that we have used is `block`. Let's try to understand how it
works. As we can see from the `call` method,
1. `Block` module takes either only feature_maps for local attention or additional global
query for global attention.
2. Before sending feature maps for attention, this module converts **batch feature maps**
to **batch windows** as we'll be applying **Window Attention**.
3. Then we send the **batch windows** for attention.
4. After attention has been applied we revert **batch windows** to **batch feature maps**.
5. Before sending the attention-applied features to the output, this module applies
**Stochastic Depth** regularization in the residual connection. Also, before applying
**Stochastic Depth**, it rescales the input with trainable parameters. Note that this
**Stochastic Depth** block hasn't been shown in the figure of the paper.
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/block2.JPG"
width=400>
### Window
In the `block` module, we have created **windows** before and after applying attention.
Let's try to understand how we're creating windows,
* Following module converts feature maps `(B, H, W, C)` to stacked windows
`(B x H/h x W/w, h, w, C)` → `(num_windows_batch, window_size, window_size, channel)`
* This module uses `reshape` & `transpose` to create these windows out of the image instead
of iterating over them, as shown in the short sketch below.
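
Here is a small standalone sketch (an illustration only) of the same reshape/transpose trick
on a tiny dummy feature map:

```python
# Illustrative only: partition a (1, 8, 8, 3) feature map into four 4x4 windows.
window_size = 4
x = ops.reshape(ops.arange(1 * 8 * 8 * 3, dtype="float32"), (1, 8, 8, 3))
x = ops.reshape(x, (-1, 8 // window_size, window_size, 8 // window_size, window_size, 3))
x = ops.transpose(x, (0, 1, 3, 2, 4, 5))
windows = ops.reshape(x, (-1, window_size, window_size, 3))
print(windows.shape)  # (4, 4, 4, 3): four windows stacked along the batch axis
```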
```python
class Block(layers.Layer):
"""GCViT block.
Args:
window_size: window size.
num_heads: number of attention head.
global_query: apply global window attention
mlp_ratio: MLP ratio.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
drop: dropout rate.
attention_dropout: attention dropout rate.
path_drop: drop path rate.
activation: activation function.
layer_scale: layer scaling coefficient.
"""
def __init__(
self,
window_size,
num_heads,
global_query,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
dropout=0.0,
attention_dropout=0.0,
path_drop=0.0,
activation="gelu",
layer_scale=None,
**kwargs,
):
super().__init__(**kwargs)
self.window_size = window_size
self.num_heads = num_heads
self.global_query = global_query
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.dropout = dropout
self.attention_dropout = attention_dropout
self.path_drop = path_drop
self.activation = activation
self.layer_scale = layer_scale
def build(self, input_shape):
B, H, W, C = input_shape[0]
self.norm1 = layers.LayerNormalization(-1, 1e-05, name="norm1")
self.attn = WindowAttention(
window_size=self.window_size,
num_heads=self.num_heads,
global_query=self.global_query,
qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
attention_dropout=self.attention_dropout,
projection_dropout=self.dropout,
name="attn",
)
self.drop_path1 = DropPath(self.path_drop)
self.drop_path2 = DropPath(self.path_drop)
self.norm2 = layers.LayerNormalization(-1, 1e-05, name="norm2")
self.mlp = MLP(
hidden_features=int(C * self.mlp_ratio),
dropout=self.dropout,
activation=self.activation,
name="mlp",
)
if self.layer_scale is not None:
self.gamma1 = self.add_weight(
name="gamma1",
shape=[C],
initializer=keras.initializers.Constant(self.layer_scale),
trainable=True,
dtype=self.dtype,
)
self.gamma2 = self.add_weight(
name="gamma2",
shape=[C],
initializer=keras.initializers.Constant(self.layer_scale),
trainable=True,
dtype=self.dtype,
)
else:
self.gamma1 = 1.0
self.gamma2 = 1.0
self.num_windows = int(H // self.window_size) * int(W // self.window_size)
super().build(input_shape)
def call(self, inputs, **kwargs):
if self.global_query:
inputs, q_global = inputs
else:
inputs = inputs[0]
B, H, W, C = ops.shape(inputs)
x = self.norm1(inputs)
# create windows and concat them in batch axis
x = self.window_partition(x, self.window_size) # (B_, win_h, win_w, C)
# flatten patch
x = ops.reshape(x, [-1, self.window_size * self.window_size, C])
# attention
if self.global_query:
x = self.attn([x, q_global])
else:
x = self.attn([x])
# reverse window partition
x = self.window_reverse(x, self.window_size, H, W, C)
# FFN
x = inputs + self.drop_path1(x * self.gamma1)
x = x + self.drop_path2(self.gamma2 * self.mlp(self.norm2(x)))
return x
def window_partition(self, x, window_size):
"""
Args:
x: (B, H, W, C)
window_size: window size
Returns:
local window features (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = ops.shape(x)
x = ops.reshape(
x,
[
-1,
H // window_size,
window_size,
W // window_size,
window_size,
C,
],
)
x = ops.transpose(x, axes=[0, 1, 3, 2, 4, 5])
windows = ops.reshape(x, [-1, window_size, window_size, C])
return windows
def window_reverse(self, windows, window_size, H, W, C):
"""
Args:
windows: local window features (num_windows*B, window_size, window_size, C)
window_size: Window size
H: Height of image
W: Width of image
C: Channel of image
Returns:
x: (B, H, W, C)
"""
x = ops.reshape(
windows,
[
-1,
H // window_size,
W // window_size,
window_size,
window_size,
C,
],
)
x = ops.transpose(x, axes=[0, 1, 3, 2, 4, 5])
x = ops.reshape(x, [-1, H, W, C])
return x
```
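To make the `reshape`/`transpose` bookkeeping concrete, here is a small standalone shape
demo that mirrors `window_partition` and `window_reverse` above. The tensor sizes are
arbitrary assumptions chosen purely for illustration (`np` and `ops` are the imports used
throughout this example).
```python
# Shape demo for window partition / reverse (illustrative sizes only).
demo_x = ops.array(np.random.uniform(size=(2, 8, 8, 3)))  # (B, H, W, C)
demo_window = 4

# Partition: (B, H, W, C) -> (B * num_windows, window_size, window_size, C)
demo_windows = ops.reshape(
    demo_x, [-1, 8 // demo_window, demo_window, 8 // demo_window, demo_window, 3]
)
demo_windows = ops.transpose(demo_windows, axes=[0, 1, 3, 2, 4, 5])
demo_windows = ops.reshape(demo_windows, [-1, demo_window, demo_window, 3])
print(demo_windows.shape)  # (2 * 2 * 2, 4, 4, 3) -> (8, 4, 4, 3)

# Reverse: (B * num_windows, window_size, window_size, C) -> (B, H, W, C)
demo_reversed = ops.reshape(
    demo_windows, [-1, 8 // demo_window, 8 // demo_window, demo_window, demo_window, 3]
)
demo_reversed = ops.transpose(demo_reversed, axes=[0, 1, 3, 2, 4, 5])
demo_reversed = ops.reshape(demo_reversed, [-1, 8, 8, 3])
print(np.allclose(ops.convert_to_numpy(demo_x), ops.convert_to_numpy(demo_reversed)))  # True
```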
### Level
> **Note:** This module has both Transformer and CNN modules.
In the model, the second module that we have used is `level`. Let's try to understand
this module. As we can see from the `call` method,
1. First, it creates a **global_token** with a series of `FeatureExtraction` modules. As
we'll see later, `FeatureExtraction` is nothing but a simple **CNN**-based module.
2. Then it uses a series of `Block` modules to apply **local or global window attention**
depending on the depth level.
3. Finally, it uses `ReduceSize` to reduce the dimension of the **contextualized features**
(a quick shape check follows the code block below).
> Summary: feature_map → global_token → local/global window
attention → downsample
<img src="https://raw.githubusercontent.com/awsaf49/gcvit-tf/main/image/level.png"
width=400>
```python
class Level(layers.Layer):
"""GCViT level.
Args:
depth: number of layers in each stage.
num_heads: number of heads in each stage.
window_size: window size in each stage.
keepdims: dims to keep in FeatureExtraction.
downsample: bool argument for down-sampling.
mlp_ratio: MLP ratio.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
        dropout: dropout rate.
attention_dropout: attention dropout rate.
path_drop: drop path rate.
layer_scale: layer scaling coefficient.
"""
def __init__(
self,
depth,
num_heads,
window_size,
keepdims,
downsample=True,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
dropout=0.0,
attention_dropout=0.0,
path_drop=0.0,
layer_scale=None,
**kwargs,
):
super().__init__(**kwargs)
self.depth = depth
self.num_heads = num_heads
self.window_size = window_size
self.keepdims = keepdims
self.downsample = downsample
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.dropout = dropout
self.attention_dropout = attention_dropout
self.path_drop = path_drop
self.layer_scale = layer_scale
def build(self, input_shape):
path_drop = (
[self.path_drop] * self.depth
if not isinstance(self.path_drop, list)
else self.path_drop
)
self.blocks = [
Block(
window_size=self.window_size,
num_heads=self.num_heads,
global_query=bool(i % 2),
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
qk_scale=self.qk_scale,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
path_drop=path_drop[i],
layer_scale=self.layer_scale,
name=f"blocks_{i}",
)
for i in range(self.depth)
]
self.down = ReduceSize(keepdims=False, name="downsample")
self.q_global_gen = GlobalQueryGenerator(self.keepdims, name="q_global_gen")
super().build(input_shape)
def call(self, inputs, **kwargs):
x = inputs
q_global = self.q_global_gen(x) # shape: (B, win_size, win_size, C)
for i, blk in enumerate(self.blocks):
if i % 2:
x = blk([x, q_global]) # shape: (B, H, W, C)
else:
x = blk([x]) # shape: (B, H, W, C)
if self.downsample:
x = self.down(x) # shape: (B, H//2, W//2, 2*C)
return x
```
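As a quick sanity check (a sketch only; the values mirror the first level of the
**GCViT-XXTiny** configuration that we'll build below), a single `Level` halves the
spatial dimensions and doubles the channels:
```python
# Quick shape check for one Level (values mirror the first XXTiny level below).
demo_level = Level(depth=2, num_heads=2, window_size=7, keepdims=(0, 0, 0))
demo_features = ops.array(np.random.uniform(size=(1, 56, 56, 64)))
print(demo_level(demo_features).shape)  # (1, 28, 28, 128): H, W halved, channels doubled
```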
### Model
Let's directly jump to the model. As we can see from the `call` method,
1. It creates patch embeddings from an image. This layer doesn't flatten these
embeddings, which means the output of this module will be
`(batch, height/window_size, width/window_size, embed_dim)` instead of
`(batch, height x width/window_size^2, embed_dim)`.
2. Then it applies a `Dropout` module, which randomly sets input units to 0.
3. It passes these embeddings to a series of `Level` modules, which we are calling `level`,
where,
    1. A global token is generated
    1. Both local & global attention are applied
    1. Finally, downsampling is applied.
4. So, the output shape after `n` **levels** is `(batch, height/window_size x 2^{n-1},
width/window_size x 2^{n-1}, embed_dim x 2^{n-1})`. In the last level, the
paper doesn't **downsample** or increase the **channels**.
5. The output of the above layer is normalized using a `LayerNormalization` module.
6. In the head, 2D features are converted to 1D features with a `Pooling` module. The output
shape after this module is `(batch, embed_dim x 2^{n-1})`.
7. Finally, the pooled features are sent to a `Dense/Linear` module for classification.
> Summary: image → (patches + embedding) → dropout
→ (attention + feature extraction) → normalization →
pooling → classify
```python
class GCViT(keras.Model):
"""GCViT model.
Args:
window_size: window size in each stage.
embed_dim: feature size dimension.
depths: number of layers in each stage.
num_heads: number of heads in each stage.
drop_rate: dropout rate.
mlp_ratio: MLP ratio.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
attention_dropout: attention dropout rate.
path_drop: drop path rate.
layer_scale: layer scaling coefficient.
num_classes: number of classes.
head_activation: activation function for head.
"""
def __init__(
self,
window_size,
embed_dim,
depths,
num_heads,
drop_rate=0.0,
mlp_ratio=3.0,
qkv_bias=True,
qk_scale=None,
attention_dropout=0.0,
path_drop=0.1,
layer_scale=None,
num_classes=1000,
head_activation="softmax",
**kwargs,
):
super().__init__(**kwargs)
self.window_size = window_size
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.drop_rate = drop_rate
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.qk_scale = qk_scale
self.attention_dropout = attention_dropout
self.path_drop = path_drop
self.layer_scale = layer_scale
self.num_classes = num_classes
self.head_activation = head_activation
self.patch_embed = PatchEmbed(embed_dim=embed_dim, name="patch_embed")
self.pos_drop = layers.Dropout(drop_rate, name="pos_drop")
path_drops = np.linspace(0.0, path_drop, sum(depths))
keepdims = [(0, 0, 0), (0, 0), (1,), (1,)]
self.levels = []
for i in range(len(depths)):
path_drop = path_drops[sum(depths[:i]) : sum(depths[: i + 1])].tolist()
level = Level(
depth=depths[i],
num_heads=num_heads[i],
window_size=window_size[i],
keepdims=keepdims[i],
downsample=(i < len(depths) - 1),
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
dropout=drop_rate,
attention_dropout=attention_dropout,
path_drop=path_drop,
layer_scale=layer_scale,
name=f"levels_{i}",
)
self.levels.append(level)
self.norm = layers.LayerNormalization(axis=-1, epsilon=1e-05, name="norm")
self.pool = layers.GlobalAvgPool2D(name="pool")
self.head = layers.Dense(num_classes, name="head", activation=head_activation)
def build(self, input_shape):
super().build(input_shape)
self.built = True
def call(self, inputs, **kwargs):
x = self.patch_embed(inputs) # shape: (B, H, W, C)
x = self.pos_drop(x)
for level in self.levels:
x = level(x) # shape: (B, H_, W_, C_)
x = self.norm(x)
x = self.pool(x) # shape: (B, C__)
x = self.head(x)
return x
def build_graph(self, input_shape=(224, 224, 3)):
"""
ref: https://www.kaggle.com/code/ipythonx/tf-hybrid-efficientnet-swin-transformer-gradcam
"""
x = keras.Input(shape=input_shape)
return keras.Model(inputs=[x], outputs=self.call(x), name=self.name)
def summary(self, input_shape=(224, 224, 3)):
return self.build_graph(input_shape).summary()
```
---
## Build Model
* Let's build a complete model with all the modules that we've explained above. We'll
build the **GCViT-XXTiny** model with the configuration mentioned in the paper.
* Also, we'll load the ported official **pre-trained** weights and try out some
predictions.
```python
# Model Configs
config = {
"window_size": (7, 7, 14, 7),
"embed_dim": 64,
"depths": (2, 2, 6, 2),
"num_heads": (2, 4, 8, 16),
"mlp_ratio": 3.0,
"path_drop": 0.2,
}
ckpt_link = (
"https://github.com/awsaf49/gcvit-tf/releases/download/v1.1.6/gcvitxxtiny.keras"
)
# Build Model
model = GCViT(**config)
inp = ops.array(np.random.uniform(size=(1, 224, 224, 3)))
out = model(inp)
# Load Weights
ckpt_path = keras.utils.get_file(ckpt_link.split("/")[-1], ckpt_link)
model.load_weights(ckpt_path)
# Summary
model.summary((224, 224, 3))
```
<div class="k-default-codeblock">
```
Downloading data from https://github.com/awsaf49/gcvit-tf/releases/download/v1.1.6/gcvitxxtiny.keras
48767519/48767519 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "gc_vi_t"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━┩
│ input_layer (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">224</span>, <span style="color: #00af00; text-decoration-color: #00af00">224</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ patch_embed (<span style="color: #0087ff; text-decoration-color: #0087ff">PatchEmbed</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">56</span>, <span style="color: #00af00; text-decoration-color: #00af00">56</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">45,632</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ pos_drop (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">56</span>, <span style="color: #00af00; text-decoration-color: #00af00">56</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ levels_0 (<span style="color: #0087ff; text-decoration-color: #0087ff">Level</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">28</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">180,964</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ levels_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Level</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">14</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">688,456</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ levels_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Level</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5,170,608</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ levels_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Level</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">5,395,744</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ norm (<span style="color: #0087ff; text-decoration-color: #0087ff">LayerNormalization</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">7</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,024</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ pool (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">512</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├────────────────────────────────────┼───────────────────────────────┼─────────────┤
│ head (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1000</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">513,000</span> │
└────────────────────────────────────┴───────────────────────────────┴─────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">11,995,428</span> (45.76 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">11,995,428</span> (45.76 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
---
## Sanity check for Pre-Trained Weights
```python
img = keras.applications.imagenet_utils.preprocess_input(
chelsea(), mode="torch"
) # Chelsea the cat
img = ops.image.resize(img, (224, 224))[None,] # resize & create batch
pred = model(img)
pred_dec = keras.applications.imagenet_utils.decode_predictions(pred)[0]
print("\n# Image:")
plt.figure(figsize=(6, 6))
plt.imshow(chelsea())
plt.show()
print()
print("# Prediction (Top 5):")
for i in range(5):
print("{:<12} : {:0.2f}".format(pred_dec[i][1], pred_dec[i][2]))
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json
35363/35363 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>
<div class="k-default-codeblock">
```
# Image:
```
</div>

<div class="k-default-codeblock">
```
# Prediction (Top 5):
Egyptian_cat : 0.72
tiger_cat : 0.04
tabby : 0.03
crossword_puzzle : 0.01
panpipe : 0.00
```
</div>
# Fine-tune **GCViT** Model
In the following cells, we will fine-tune the **GCViT** model on the Flower Dataset, which
consists of `104` classes.
### Configs
```python
# Model
IMAGE_SIZE = (224, 224)
# Hyper Params
BATCH_SIZE = 32
EPOCHS = 5
# Dataset
CLASSES = [
"dandelion",
"daisy",
"tulips",
"sunflowers",
"roses",
] # don't change the order
# Other constants
MEAN = 255 * np.array([0.485, 0.456, 0.406], dtype="float32") # imagenet mean
STD = 255 * np.array([0.229, 0.224, 0.225], dtype="float32") # imagenet std
AUTO = tf.data.AUTOTUNE
```
---
## Data Loader
```python
def make_dataset(dataset: tf.data.Dataset, train: bool, image_size: tuple = IMAGE_SIZE):
def preprocess(image, label):
# for training, do augmentation
if train:
if tf.random.uniform(shape=[]) > 0.5:
image = tf.image.flip_left_right(image)
image = tf.image.resize(image, size=image_size, method="bicubic")
image = (image - MEAN) / STD # normalization
return image, label
if train:
dataset = dataset.shuffle(BATCH_SIZE * 10)
return dataset.map(preprocess, AUTO).batch(BATCH_SIZE).prefetch(AUTO)
```
### Flower Dataset
```python
train_dataset, val_dataset = tfds.load(
"tf_flowers",
split=["train[:90%]", "train[90%:]"],
as_supervised=True,
try_gcs=False, # gcs_path is necessary for tpu,
)
train_dataset = make_dataset(train_dataset, True)
val_dataset = make_dataset(val_dataset, False)
```
<div class="k-default-codeblock">
```
Downloading and preparing dataset 218.21 MiB (download: 218.21 MiB, generated: 221.83 MiB, total: 440.05 MiB) to /root/tensorflow_datasets/tf_flowers/3.0.1...
Dl Completed...: 0%| | 0/5 [00:00<?, ? file/s]
Dataset tf_flowers downloaded and prepared to /root/tensorflow_datasets/tf_flowers/3.0.1. Subsequent calls will reuse this data.
```
</div>
### Re-Build Model for Flower Dataset
```python
# Re-Build Model
model = GCViT(**config, num_classes=104)
inp = ops.array(np.random.uniform(size=(1, 224, 224, 3)))
out = model(inp)
# Load Weights
ckpt_path = keras.utils.get_file(ckpt_link.split("/")[-1], ckpt_link)
model.load_weights(ckpt_path, skip_mismatch=True)
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
```
<div class="k-default-codeblock">
```
/usr/local/lib/python3.10/dist-packages/keras/src/saving/saving_lib.py:269: UserWarning: A total of 1 objects could not be loaded. Example error message for object <Dense name=head, built=True>:
```
</div>
<div class="k-default-codeblock">
```
Layer 'head' expected 2 variables, but received 0 variables during loading. Expected: ['kernel', 'bias']
```
</div>
<div class="k-default-codeblock">
```
List of objects that could not be loaded:
[<Dense name=head, built=True>]
warnings.warn(msg)
```
</div>
### Training
```python
history = model.fit(
train_dataset, validation_data=val_dataset, epochs=EPOCHS, verbose=1
)
```
<div class="k-default-codeblock">
```
Epoch 1/5
104/104 ━━━━━━━━━━━━━━━━━━━━ 153s 581ms/step - accuracy: 0.5140 - loss: 1.4615 - val_accuracy: 0.8828 - val_loss: 0.3485
Epoch 2/5
104/104 ━━━━━━━━━━━━━━━━━━━━ 7s 69ms/step - accuracy: 0.8775 - loss: 0.3437 - val_accuracy: 0.8828 - val_loss: 0.3508
Epoch 3/5
104/104 ━━━━━━━━━━━━━━━━━━━━ 7s 68ms/step - accuracy: 0.8937 - loss: 0.2918 - val_accuracy: 0.9019 - val_loss: 0.2953
Epoch 4/5
104/104 ━━━━━━━━━━━━━━━━━━━━ 7s 68ms/step - accuracy: 0.9232 - loss: 0.2397 - val_accuracy: 0.9183 - val_loss: 0.2212
Epoch 5/5
104/104 ━━━━━━━━━━━━━━━━━━━━ 7s 68ms/step - accuracy: 0.9456 - loss: 0.1645 - val_accuracy: 0.9210 - val_loss: 0.2897
```
</div>
---
## Reference
* [gcvit-tf - A Python library for GCViT with TF2.0](https://github.com/awsaf49/gcvit-tf)
* [gcvit - Official codebase for GCViT](https://github.com/NVlabs/GCVit)
# Semi-supervised image classification using contrastive pretraining with SimCLR
**Author:** [András Béres](https://www.linkedin.com/in/andras-beres-789190210)<br>
**Date created:** 2021/04/24<br>
**Last modified:** 2021/04/24<br>
**Description:** Contrastive pretraining with SimCLR for semi-supervised image classification on the STL-10 dataset.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/examples/vision/ipynb/semisupervised_simclr.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/examples/vision/semisupervised_simclr.py)
---
## Introduction
### Semi-supervised learning
Semi-supervised learning is a machine learning paradigm that deals with
**partially labeled datasets**. When applying deep learning in the real world,
one usually has to gather a large dataset to make it work well. However, while
the cost of labeling scales linearly with the dataset size (labeling each
example takes a constant time), model performance only scales
[sublinearly](https://arxiv.org/abs/2001.08361) with it. This means that
labeling more and more samples becomes less and less cost-efficient, while
gathering unlabeled data is generally cheap, as it is usually readily available
in large quantities.
Semi-supervised learning offers to solve this problem by only requiring a
partially labeled dataset, and by being label-efficient by utilizing the
unlabeled examples for learning as well.
In this example, we will pretrain an encoder with contrastive learning on the
[STL-10](https://ai.stanford.edu/~acoates/stl10/) semi-supervised dataset using
no labels at all, and then fine-tune it using only its labeled subset.
### Contrastive learning
On the highest level, the main idea behind contrastive learning is to **learn
representations that are invariant to image augmentations** in a self-supervised
manner. One problem with this objective is that it has a trivial degenerate
solution: the case where the representations are constant, and do not depend at all on the
input images.
Contrastive learning avoids this trap by modifying the objective in the
following way: it pulls representations of augmented versions/views of the same
image closer to each other (contracting positives), while simultaneously pushing
different images away from each other (contrasting negatives) in representation
space.
One such contrastive approach is [SimCLR](https://arxiv.org/abs/2002.05709),
which essentially identifies the core components needed to optimize this
objective, and can achieve high performance by scaling this simple approach.
Another approach is [SimSiam](https://arxiv.org/abs/2011.10566)
([Keras example](https://keras.io/examples/vision/simsiam/)),
whose main difference from
SimCLR is that the former does not use any negatives in its loss. Therefore, it does not
explicitly prevent the trivial solution, and, instead, avoids it implicitly by
architecture design (asymmetric encoding paths using a predictor network and
batch normalization (BatchNorm) are applied in the final layers).
For further reading about SimCLR, check out
[the official Google AI blog post](https://ai.googleblog.com/2020/04/advancing-self-supervised-and-semi.html),
and for an overview of self-supervised learning across both vision and language
check out
[this blog post](https://ai.facebook.com/blog/self-supervised-learning-the-dark-matter-of-intelligence/).
---
## Setup
```python
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
# Make sure we are able to handle large datasets
import resource
low, high = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (high, high))
import math
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import keras
from keras import layers
```
---
## Hyperparameter setup
```python
# Dataset hyperparameters
unlabeled_dataset_size = 100000
labeled_dataset_size = 5000
image_channels = 3
# Algorithm hyperparameters
num_epochs = 20
batch_size = 525 # Corresponds to 200 steps per epoch
width = 128
temperature = 0.1
# Stronger augmentations for contrastive, weaker ones for supervised training
contrastive_augmentation = {"min_area": 0.25, "brightness": 0.6, "jitter": 0.2}
classification_augmentation = {
"min_area": 0.75,
"brightness": 0.3,
"jitter": 0.1,
}
```
---
## Dataset
During training we will simultaneously load a large batch of unlabeled images along with a
smaller batch of labeled images.
```python
def prepare_dataset():
# Labeled and unlabeled samples are loaded synchronously
# with batch sizes selected accordingly
steps_per_epoch = (unlabeled_dataset_size + labeled_dataset_size) // batch_size
unlabeled_batch_size = unlabeled_dataset_size // steps_per_epoch
labeled_batch_size = labeled_dataset_size // steps_per_epoch
print(
f"batch size is {unlabeled_batch_size} (unlabeled) + {labeled_batch_size} (labeled)"
)
# Turning off shuffle to lower resource usage
unlabeled_train_dataset = (
tfds.load("stl10", split="unlabelled", as_supervised=True, shuffle_files=False)
.shuffle(buffer_size=10 * unlabeled_batch_size)
.batch(unlabeled_batch_size)
)
labeled_train_dataset = (
tfds.load("stl10", split="train", as_supervised=True, shuffle_files=False)
.shuffle(buffer_size=10 * labeled_batch_size)
.batch(labeled_batch_size)
)
test_dataset = (
tfds.load("stl10", split="test", as_supervised=True)
.batch(batch_size)
.prefetch(buffer_size=tf.data.AUTOTUNE)
)
# Labeled and unlabeled datasets are zipped together
train_dataset = tf.data.Dataset.zip(
(unlabeled_train_dataset, labeled_train_dataset)
).prefetch(buffer_size=tf.data.AUTOTUNE)
return train_dataset, labeled_train_dataset, test_dataset
# Load STL10 dataset
train_dataset, labeled_train_dataset, test_dataset = prepare_dataset()
```
<div class="k-default-codeblock">
```
batch size is 500 (unlabeled) + 25 (labeled)
```
</div>
---
## Image augmentations
The two most important image augmentations for contrastive learning are the
following:
- Cropping: forces the model to encode different parts of the same image
similarly, we implement it with the
[RandomTranslation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_translation/)
and
[RandomZoom](https://keras.io/api/layers/preprocessing_layers/image_augmentation/random_zoom/)
layers
- Color jitter: prevents a trivial color histogram-based solution to the task by
distorting color histograms. A principled way to implement that is by affine
transformations in color space.
In this example we use random horizontal flips as well. Stronger augmentations
are applied for contrastive learning, along with weaker ones for supervised
classification to avoid overfitting on the few labeled examples.
We implement random color jitter as a custom preprocessing layer. Using
preprocessing layers for data augmentation has the following two advantages:
- The data augmentation will run on GPU in batches, so the training will not be
bottlenecked by the data pipeline in environments with constrained CPU
resources (such as a Colab Notebook, or a personal machine)
- Deployment is easier as the data preprocessing pipeline is encapsulated in the
model, and does not have to be reimplemented when deploying it
```python
# Distorts the color distributions of images
class RandomColorAffine(layers.Layer):
def __init__(self, brightness=0, jitter=0, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.jitter = jitter
def get_config(self):
config = super().get_config()
config.update({"brightness": self.brightness, "jitter": self.jitter})
return config
def call(self, images, training=True):
if training:
batch_size = tf.shape(images)[0]
# Same for all colors
brightness_scales = 1 + tf.random.uniform(
(batch_size, 1, 1, 1),
minval=-self.brightness,
maxval=self.brightness,
)
# Different for all colors
jitter_matrices = tf.random.uniform(
(batch_size, 1, 3, 3), minval=-self.jitter, maxval=self.jitter
)
color_transforms = (
tf.eye(3, batch_shape=[batch_size, 1]) * brightness_scales
+ jitter_matrices
)
images = tf.clip_by_value(tf.matmul(images, color_transforms), 0, 1)
return images
# Image augmentation module
def get_augmenter(min_area, brightness, jitter):
zoom_factor = 1.0 - math.sqrt(min_area)
return keras.Sequential(
[
layers.Rescaling(1 / 255),
layers.RandomFlip("horizontal"),
layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
RandomColorAffine(brightness, jitter),
]
)
def visualize_augmentations(num_images):
# Sample a batch from a dataset
images = next(iter(train_dataset))[0][0][:num_images]
# Apply augmentations
augmented_images = zip(
images,
get_augmenter(**classification_augmentation)(images),
get_augmenter(**contrastive_augmentation)(images),
get_augmenter(**contrastive_augmentation)(images),
)
row_titles = [
"Original:",
"Weakly augmented:",
"Strongly augmented:",
"Strongly augmented:",
]
plt.figure(figsize=(num_images * 2.2, 4 * 2.2), dpi=100)
for column, image_row in enumerate(augmented_images):
for row, image in enumerate(image_row):
plt.subplot(4, num_images, row * num_images + column + 1)
plt.imshow(image)
if column == 0:
plt.title(row_titles[row], loc="left")
plt.axis("off")
plt.tight_layout()
visualize_augmentations(num_images=8)
```

---
## Encoder architecture
```python
# Define the encoder architecture
def get_encoder():
return keras.Sequential(
[
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Conv2D(width, kernel_size=3, strides=2, activation="relu"),
layers.Flatten(),
layers.Dense(width, activation="relu"),
],
name="encoder",
)
```
---
## Supervised baseline model
A baseline supervised model is trained using random initialization.
```python
# Baseline supervised training with random initialization
baseline_model = keras.Sequential(
[
get_augmenter(**classification_augmentation),
get_encoder(),
layers.Dense(10),
],
name="baseline_model",
)
baseline_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
baseline_history = baseline_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(baseline_history.history["val_acc"]) * 100
)
)
```
<div class="k-default-codeblock">
```
Epoch 1/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 9s 25ms/step - acc: 0.2031 - loss: 2.1576 - val_acc: 0.3234 - val_loss: 1.7719
Epoch 2/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.3476 - loss: 1.7792 - val_acc: 0.4042 - val_loss: 1.5626
Epoch 3/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.4060 - loss: 1.6054 - val_acc: 0.4319 - val_loss: 1.4832
Epoch 4/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 18ms/step - acc: 0.4347 - loss: 1.5052 - val_acc: 0.4570 - val_loss: 1.4428
Epoch 5/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 18ms/step - acc: 0.4600 - loss: 1.4546 - val_acc: 0.4765 - val_loss: 1.3977
Epoch 6/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.4754 - loss: 1.4015 - val_acc: 0.4740 - val_loss: 1.4082
Epoch 7/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.4901 - loss: 1.3589 - val_acc: 0.4761 - val_loss: 1.4061
Epoch 8/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5110 - loss: 1.2793 - val_acc: 0.5247 - val_loss: 1.3026
Epoch 9/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5298 - loss: 1.2765 - val_acc: 0.5138 - val_loss: 1.3286
Epoch 10/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5514 - loss: 1.2078 - val_acc: 0.5543 - val_loss: 1.2227
Epoch 11/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5520 - loss: 1.1851 - val_acc: 0.5446 - val_loss: 1.2709
Epoch 12/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5851 - loss: 1.1368 - val_acc: 0.5725 - val_loss: 1.1944
Epoch 13/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 18ms/step - acc: 0.5738 - loss: 1.1411 - val_acc: 0.5685 - val_loss: 1.1974
Epoch 14/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 21ms/step - acc: 0.6078 - loss: 1.0308 - val_acc: 0.5899 - val_loss: 1.1769
Epoch 15/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 18ms/step - acc: 0.6284 - loss: 1.0386 - val_acc: 0.5863 - val_loss: 1.1742
Epoch 16/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 18ms/step - acc: 0.6450 - loss: 0.9773 - val_acc: 0.5849 - val_loss: 1.1993
Epoch 17/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6547 - loss: 0.9555 - val_acc: 0.5683 - val_loss: 1.2424
Epoch 18/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6593 - loss: 0.9084 - val_acc: 0.5990 - val_loss: 1.1458
Epoch 19/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6672 - loss: 0.9267 - val_acc: 0.5685 - val_loss: 1.2758
Epoch 20/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6824 - loss: 0.8863 - val_acc: 0.5969 - val_loss: 1.2035
Maximal validation accuracy: 59.90%
```
</div>
---
## Self-supervised model for contrastive pretraining
We pretrain an encoder on unlabeled images with a contrastive loss.
A nonlinear projection head is attached to the top of the encoder, as it
improves the quality of representations of the encoder.
We use the InfoNCE/NT-Xent/N-pairs loss, which can be interpreted in the
following way:
1. We treat each image in the batch as if it had its own class.
2. Then, we have two examples (a pair of augmented views) for each "class".
3. Each view's representation is compared to every possible pair's one (for both
augmented versions).
4. We use the temperature-scaled cosine similarity of compared representations as
logits.
5. Finally, we use categorical cross-entropy as the "classification" loss.
The following two metrics are used for monitoring the pretraining performance:
- [Contrastive accuracy (SimCLR Table 5)](https://arxiv.org/abs/2002.05709):
Self-supervised metric, the ratio of cases in which the representation of an
image is more similar to its differently augmented version's one, than to the
representation of any other image in the current batch. Self-supervised
metrics can be used for hyperparameter tuning even in the case when there are
no labeled examples.
- [Linear probing accuracy](https://arxiv.org/abs/1603.08511): Linear probing is
a popular metric to evaluate self-supervised classifiers. It is computed as
the accuracy of a logistic regression classifier trained on top of the
encoder's features. In our case, this is done by training a single dense layer
on top of the frozen encoder. Note that, contrary to the traditional approach where
the classifier is trained after the pretraining phase, in this example we
train it during pretraining. This might slightly decrease its accuracy, but
that way we can monitor its value during training, which helps with
experimentation and debugging.
Another widely used supervised metric is the
[KNN accuracy](https://arxiv.org/abs/1805.01978), which is the accuracy of a KNN
classifier trained on top of the encoder's features, which is not implemented in
this example.
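For completeness, here is a minimal sketch of such a KNN probe. It is only an
illustration and not part of this example: it assumes that scikit-learn is available,
and it reuses the `pretraining_model`, `labeled_train_dataset` and `test_dataset`
defined in this example.
```python
# Sketch of a KNN probe on frozen encoder features (not used in this example).
import numpy as np
from sklearn.neighbors import KNeighborsClassifier


def knn_probe_accuracy(model, train_data, test_data, k=20):
    def extract_features(dataset):
        features, labels = [], []
        for images, batch_labels in dataset:
            # Same preprocessing as in `test_step`: weak augmenter in inference mode
            preprocessed = model.classification_augmenter(images, training=False)
            features.append(model.encoder(preprocessed, training=False).numpy())
            labels.append(batch_labels.numpy())
        return np.concatenate(features), np.concatenate(labels)

    train_features, train_labels = extract_features(train_data)
    test_features, test_labels = extract_features(test_data)
    knn = KNeighborsClassifier(n_neighbors=k).fit(train_features, train_labels)
    return knn.score(test_features, test_labels)


# Example usage, after pretraining has finished:
# print(knn_probe_accuracy(pretraining_model, labeled_train_dataset, test_dataset))
```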
```python
# Define the contrastive model with model-subclassing
class ContrastiveModel(keras.Model):
def __init__(self):
super().__init__()
self.temperature = temperature
self.contrastive_augmenter = get_augmenter(**contrastive_augmentation)
self.classification_augmenter = get_augmenter(**classification_augmentation)
self.encoder = get_encoder()
# Non-linear MLP as projection head
self.projection_head = keras.Sequential(
[
keras.Input(shape=(width,)),
layers.Dense(width, activation="relu"),
layers.Dense(width),
],
name="projection_head",
)
# Single dense layer for linear probing
self.linear_probe = keras.Sequential(
[layers.Input(shape=(width,)), layers.Dense(10)],
name="linear_probe",
)
self.encoder.summary()
self.projection_head.summary()
self.linear_probe.summary()
def compile(self, contrastive_optimizer, probe_optimizer, **kwargs):
super().compile(**kwargs)
self.contrastive_optimizer = contrastive_optimizer
self.probe_optimizer = probe_optimizer
# self.contrastive_loss will be defined as a method
self.probe_loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
self.contrastive_loss_tracker = keras.metrics.Mean(name="c_loss")
self.contrastive_accuracy = keras.metrics.SparseCategoricalAccuracy(
name="c_acc"
)
self.probe_loss_tracker = keras.metrics.Mean(name="p_loss")
self.probe_accuracy = keras.metrics.SparseCategoricalAccuracy(name="p_acc")
@property
def metrics(self):
return [
self.contrastive_loss_tracker,
self.contrastive_accuracy,
self.probe_loss_tracker,
self.probe_accuracy,
]
def contrastive_loss(self, projections_1, projections_2):
# InfoNCE loss (information noise-contrastive estimation)
# NT-Xent loss (normalized temperature-scaled cross entropy)
# Cosine similarity: the dot product of the l2-normalized feature vectors
projections_1 = tf.math.l2_normalize(projections_1, axis=1)
projections_2 = tf.math.l2_normalize(projections_2, axis=1)
similarities = (
tf.matmul(projections_1, projections_2, transpose_b=True) / self.temperature
)
# The similarity between the representations of two augmented views of the
# same image should be higher than their similarity with other views
batch_size = tf.shape(projections_1)[0]
contrastive_labels = tf.range(batch_size)
self.contrastive_accuracy.update_state(contrastive_labels, similarities)
self.contrastive_accuracy.update_state(
contrastive_labels, tf.transpose(similarities)
)
# The temperature-scaled similarities are used as logits for cross-entropy
# a symmetrized version of the loss is used here
loss_1_2 = keras.losses.sparse_categorical_crossentropy(
contrastive_labels, similarities, from_logits=True
)
loss_2_1 = keras.losses.sparse_categorical_crossentropy(
contrastive_labels, tf.transpose(similarities), from_logits=True
)
return (loss_1_2 + loss_2_1) / 2
def train_step(self, data):
(unlabeled_images, _), (labeled_images, labels) = data
# Both labeled and unlabeled images are used, without labels
images = tf.concat((unlabeled_images, labeled_images), axis=0)
# Each image is augmented twice, differently
augmented_images_1 = self.contrastive_augmenter(images, training=True)
augmented_images_2 = self.contrastive_augmenter(images, training=True)
with tf.GradientTape() as tape:
features_1 = self.encoder(augmented_images_1, training=True)
features_2 = self.encoder(augmented_images_2, training=True)
# The representations are passed through a projection mlp
projections_1 = self.projection_head(features_1, training=True)
projections_2 = self.projection_head(features_2, training=True)
contrastive_loss = self.contrastive_loss(projections_1, projections_2)
gradients = tape.gradient(
contrastive_loss,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
self.contrastive_optimizer.apply_gradients(
zip(
gradients,
self.encoder.trainable_weights + self.projection_head.trainable_weights,
)
)
self.contrastive_loss_tracker.update_state(contrastive_loss)
        # Labels are only used in evaluation for an on-the-fly logistic regression
preprocessed_images = self.classification_augmenter(
labeled_images, training=True
)
with tf.GradientTape() as tape:
# the encoder is used in inference mode here to avoid regularization
            # and updating the batch normalization parameters if they are used
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=True)
probe_loss = self.probe_loss(labels, class_logits)
gradients = tape.gradient(probe_loss, self.linear_probe.trainable_weights)
self.probe_optimizer.apply_gradients(
zip(gradients, self.linear_probe.trainable_weights)
)
self.probe_loss_tracker.update_state(probe_loss)
self.probe_accuracy.update_state(labels, class_logits)
return {m.name: m.result() for m in self.metrics}
def test_step(self, data):
labeled_images, labels = data
# For testing the components are used with a training=False flag
preprocessed_images = self.classification_augmenter(
labeled_images, training=False
)
features = self.encoder(preprocessed_images, training=False)
class_logits = self.linear_probe(features, training=False)
probe_loss = self.probe_loss(labels, class_logits)
self.probe_loss_tracker.update_state(probe_loss)
self.probe_accuracy.update_state(labels, class_logits)
# Only the probe metrics are logged at test time
return {m.name: m.result() for m in self.metrics[2:]}
# Contrastive pretraining
pretraining_model = ContrastiveModel()
pretraining_model.compile(
contrastive_optimizer=keras.optimizers.Adam(),
probe_optimizer=keras.optimizers.Adam(),
)
pretraining_history = pretraining_model.fit(
train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(pretraining_history.history["val_p_acc"]) * 100
)
)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "encoder"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ conv2d_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_6 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ flatten_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Flatten</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ ? │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ │ │ (unbuilt) │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "projection_head"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ dense_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,512</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">16,512</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">33,024</span> (129.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">33,024</span> (129.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "linear_probe"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ dense_5 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,290</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">1,290</span> (5.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">1,290</span> (5.04 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
<div class="k-default-codeblock">
```
Epoch 1/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 34s 134ms/step - c_acc: 0.0880 - c_loss: 5.2606 - p_acc: 0.1326 - p_loss: 2.2726 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.2579 - val_p_loss: 2.0671
Epoch 2/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 139ms/step - c_acc: 0.2808 - c_loss: 3.6233 - p_acc: 0.2956 - p_loss: 2.0228 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.3440 - val_p_loss: 1.9242
Epoch 3/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 136ms/step - c_acc: 0.4097 - c_loss: 2.9369 - p_acc: 0.3671 - p_loss: 1.8674 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.3876 - val_p_loss: 1.7757
Epoch 4/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 30s 142ms/step - c_acc: 0.4893 - c_loss: 2.5707 - p_acc: 0.3957 - p_loss: 1.7490 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.3960 - val_p_loss: 1.7002
Epoch 5/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 136ms/step - c_acc: 0.5458 - c_loss: 2.3342 - p_acc: 0.4274 - p_loss: 1.6608 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.4374 - val_p_loss: 1.6145
Epoch 6/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 140ms/step - c_acc: 0.5949 - c_loss: 2.1179 - p_acc: 0.4410 - p_loss: 1.5812 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.4444 - val_p_loss: 1.5439
Epoch 7/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 135ms/step - c_acc: 0.6273 - c_loss: 1.9861 - p_acc: 0.4633 - p_loss: 1.5076 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.4695 - val_p_loss: 1.5056
Epoch 8/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 139ms/step - c_acc: 0.6566 - c_loss: 1.8668 - p_acc: 0.4817 - p_loss: 1.4601 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.4790 - val_p_loss: 1.4566
Epoch 9/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 135ms/step - c_acc: 0.6726 - c_loss: 1.7938 - p_acc: 0.4885 - p_loss: 1.4136 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.4933 - val_p_loss: 1.4163
Epoch 10/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 139ms/step - c_acc: 0.6931 - c_loss: 1.7210 - p_acc: 0.4954 - p_loss: 1.3663 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5140 - val_p_loss: 1.3677
Epoch 11/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 137ms/step - c_acc: 0.7055 - c_loss: 1.6619 - p_acc: 0.5210 - p_loss: 1.3376 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5155 - val_p_loss: 1.3573
Epoch 12/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 30s 145ms/step - c_acc: 0.7215 - c_loss: 1.6112 - p_acc: 0.5264 - p_loss: 1.2920 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5232 - val_p_loss: 1.3337
Epoch 13/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 31s 146ms/step - c_acc: 0.7279 - c_loss: 1.5749 - p_acc: 0.5388 - p_loss: 1.2570 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5217 - val_p_loss: 1.3155
Epoch 14/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 140ms/step - c_acc: 0.7435 - c_loss: 1.5196 - p_acc: 0.5505 - p_loss: 1.2507 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5460 - val_p_loss: 1.2640
Epoch 15/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 40s 135ms/step - c_acc: 0.7477 - c_loss: 1.4979 - p_acc: 0.5653 - p_loss: 1.2188 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5594 - val_p_loss: 1.2351
Epoch 16/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 139ms/step - c_acc: 0.7598 - c_loss: 1.4463 - p_acc: 0.5590 - p_loss: 1.1917 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5551 - val_p_loss: 1.2411
Epoch 17/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 135ms/step - c_acc: 0.7633 - c_loss: 1.4271 - p_acc: 0.5775 - p_loss: 1.1731 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5502 - val_p_loss: 1.2428
Epoch 18/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 140ms/step - c_acc: 0.7666 - c_loss: 1.4246 - p_acc: 0.5752 - p_loss: 1.1805 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5633 - val_p_loss: 1.2167
Epoch 19/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 28s 135ms/step - c_acc: 0.7708 - c_loss: 1.3928 - p_acc: 0.5814 - p_loss: 1.1677 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5665 - val_p_loss: 1.2191
Epoch 20/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 29s 140ms/step - c_acc: 0.7806 - c_loss: 1.3733 - p_acc: 0.5836 - p_loss: 1.1442 - val_c_acc: 0.0000e+00 - val_c_loss: 0.0000e+00 - val_p_acc: 0.5640 - val_p_loss: 1.2172
Maximal validation accuracy: 56.65%
```
</div>
---
## Supervised finetuning of the pretrained encoder
We then finetune the encoder on the labeled examples, by attaching
a single randomly initialized fully connected classification layer on top of it.
```python
# Supervised finetuning of the pretrained encoder
finetuning_model = keras.Sequential(
[
get_augmenter(**classification_augmentation),
pretraining_model.encoder,
layers.Dense(10),
],
name="finetuning_model",
)
finetuning_model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
finetuning_history = finetuning_model.fit(
labeled_train_dataset, epochs=num_epochs, validation_data=test_dataset
)
print(
"Maximal validation accuracy: {:.2f}%".format(
max(finetuning_history.history["val_acc"]) * 100
)
)
```
<div class="k-default-codeblock">
```
Epoch 1/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 5s 18ms/step - acc: 0.2104 - loss: 2.0930 - val_acc: 0.4017 - val_loss: 1.5433
Epoch 2/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.4037 - loss: 1.5791 - val_acc: 0.4544 - val_loss: 1.4250
Epoch 3/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.4639 - loss: 1.4161 - val_acc: 0.5266 - val_loss: 1.2958
Epoch 4/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5438 - loss: 1.2686 - val_acc: 0.5655 - val_loss: 1.1711
Epoch 5/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.5678 - loss: 1.1746 - val_acc: 0.5775 - val_loss: 1.1670
Epoch 6/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6096 - loss: 1.1071 - val_acc: 0.6034 - val_loss: 1.1400
Epoch 7/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6242 - loss: 1.0413 - val_acc: 0.6235 - val_loss: 1.0756
Epoch 8/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6284 - loss: 1.0264 - val_acc: 0.6030 - val_loss: 1.1048
Epoch 9/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6491 - loss: 0.9706 - val_acc: 0.5770 - val_loss: 1.2818
Epoch 10/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.6754 - loss: 0.9104 - val_acc: 0.6119 - val_loss: 1.1087
Epoch 11/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 20ms/step - acc: 0.6620 - loss: 0.8855 - val_acc: 0.6323 - val_loss: 1.0526
Epoch 12/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 19ms/step - acc: 0.7060 - loss: 0.8179 - val_acc: 0.6406 - val_loss: 1.0565
Epoch 13/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 3s 17ms/step - acc: 0.7252 - loss: 0.7796 - val_acc: 0.6135 - val_loss: 1.1273
Epoch 14/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7176 - loss: 0.7935 - val_acc: 0.6292 - val_loss: 1.1028
Epoch 15/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7322 - loss: 0.7471 - val_acc: 0.6266 - val_loss: 1.1313
Epoch 16/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7400 - loss: 0.7218 - val_acc: 0.6332 - val_loss: 1.1064
Epoch 17/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7490 - loss: 0.6968 - val_acc: 0.6532 - val_loss: 1.0112
Epoch 18/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7491 - loss: 0.6879 - val_acc: 0.6403 - val_loss: 1.1083
Epoch 19/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 4s 17ms/step - acc: 0.7802 - loss: 0.6504 - val_acc: 0.6479 - val_loss: 1.0548
Epoch 20/20
200/200 ━━━━━━━━━━━━━━━━━━━━ 3s 17ms/step - acc: 0.7800 - loss: 0.6234 - val_acc: 0.6409 - val_loss: 1.0998
Maximal validation accuracy: 65.32%
```
</div>
---
## Comparison against the baseline
```python
# The classification accuracies of the baseline and the pretraining + finetuning process:
def plot_training_curves(pretraining_history, finetuning_history, baseline_history):
for metric_key, metric_name in zip(["acc", "loss"], ["accuracy", "loss"]):
plt.figure(figsize=(8, 5), dpi=100)
plt.plot(
baseline_history.history[f"val_{metric_key}"],
label="supervised baseline",
)
plt.plot(
pretraining_history.history[f"val_p_{metric_key}"],
label="self-supervised pretraining",
)
plt.plot(
finetuning_history.history[f"val_{metric_key}"],
label="supervised finetuning",
)
plt.legend()
plt.title(f"Classification {metric_name} during training")
plt.xlabel("epochs")
plt.ylabel(f"validation {metric_name}")
plot_training_curves(pretraining_history, finetuning_history, baseline_history)
```


By comparing the training curves, we can see that when using contrastive
pretraining, a higher validation accuracy can be reached, paired with a lower
validation loss, which means that the pretrained network was able to generalize
better when seeing only a small number of labeled examples.
---
## Improving further
### Architecture
The experiment in the original paper demonstrated that increasing the width and depth of the
models improves performance at a higher rate than for supervised learning. Also,
using a [ResNet-50](https://keras.io/api/applications/resnet/#resnet50-function)
encoder is quite standard in the literature. However, keep in mind that more
powerful models will not only increase training time but will also require more
memory and will limit the maximal batch size you can use.
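As a rough illustration, swapping in such an encoder could look as follows. This is only
a minimal sketch: the input shape and output width below are assumptions, not the
configuration used in this example.
```python
def get_resnet50_encoder(input_shape=(96, 96, 3), width=128):
    # Randomly initialized ResNet-50 backbone with global average pooling.
    resnet = keras.applications.ResNet50(
        include_top=False, weights=None, input_shape=input_shape, pooling="avg"
    )
    # Project the pooled features to the desired representation width.
    return keras.Sequential([resnet, layers.Dense(width)], name="resnet50_encoder")
```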
It has [been](https://arxiv.org/abs/1905.09272)
[reported](https://arxiv.org/abs/1911.05722) that the usage of BatchNorm layers
could sometimes degrade performance, as it introduces an intra-batch dependency
between samples, which is why I did not use them in this example. In my
experiments, however, using BatchNorm, especially in the projection head,
improves performance.
### Hyperparameters
The hyperparameters used in this example have been tuned manually for this task and
architecture. Therefore, without changing them, only marginal gains can be expected
from further hyperparameter tuning.
However for a different task or model architecture these would need tuning, so
here are my notes on the most important ones:
- **Batch size**: since the objective can be interpreted as a classification
over a batch of images (loosely speaking), the batch size is actually a more
important hyperparameter than usual. The higher, the better.
- **Temperature**: the temperature defines the "softness" of the softmax
distribution that is used in the cross-entropy loss, and is an important
hyperparameter. Lower values generally lead to a higher contrastive accuracy.
A recent trick (in [ALIGN](https://arxiv.org/abs/2102.05918)) is to learn
the temperature's value as well (which can be done by defining it as a
`tf.Variable` and applying gradients to it). Even though this provides a good baseline
value, in my experiments the learned temperature was somewhat lower
than optimal, as it is optimized with respect to the contrastive loss, which is not a
perfect proxy for representation quality.
- **Image augmentation strength**: during pretraining stronger augmentations
increase the difficulty of the task, however after a point too strong
augmentations will degrade performance. During finetuning stronger
augmentations reduce overfitting while in my experience too strong
augmentations decrease the performance gains from pretraining. The whole data
augmentation pipeline can be seen as an important hyperparameter of the
algorithm, implementations of other custom image augmentation layers in Keras
can be found in
[this repository](https://github.com/beresandras/image-augmentation-layers-keras).
- **Learning rate schedule**: a constant schedule is used here, but it is
quite common in the literature to use a
[cosine decay schedule](https://www.tensorflow.org/api_docs/python/tf/keras/experimental/CosineDecay),
which can further improve performance (a minimal sketch is shown right after this list).
- **Optimizer**: Adam is used in this example, as it provides good performance
with default parameters. SGD with momentum requires more tuning, however it
could slightly increase performance.
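Below is a minimal sketch of a cosine decay learning rate schedule. The steps-per-epoch
value is an assumption based on the progress bars above, not a setting used in this
example.
```python
steps_per_epoch = 200  # assumed from the training logs above
lr_schedule = keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=0.001, decay_steps=num_epochs * steps_per_epoch
)
optimizer = keras.optimizers.Adam(lr_schedule)
```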
---
## Related works
Other instance-level (image-level) contrastive learning methods:
- [MoCo](https://arxiv.org/abs/1911.05722)
([v2](https://arxiv.org/abs/2003.04297),
[v3](https://arxiv.org/abs/2104.02057)): uses a momentum-encoder as well,
whose weights are an exponential moving average of the target encoder
- [SwAV](https://arxiv.org/abs/2006.09882): uses clustering instead of pairwise
comparison
- [BarlowTwins](https://arxiv.org/abs/2103.03230): uses a cross
correlation-based objective instead of pairwise comparison
Keras implementations of **MoCo** and **BarlowTwins** can be found in
[this repository](https://github.com/beresandras/contrastive-classification-keras),
which includes a Colab notebook.
There is also a new line of works that optimize a similar objective, but
without the use of any negatives:
- [BYOL](https://arxiv.org/abs/2006.07733): momentum-encoder + no negatives
- [SimSiam](https://arxiv.org/abs/2011.10566)
([Keras example](https://keras.io/examples/vision/simsiam/)):
no momentum-encoder + no negatives
In my experience, these methods are more brittle (they can collapse to a constant
representation; I could not get them to work using this encoder architecture).
Even though they are generally more dependent on the
[model](https://generallyintelligent.ai/understanding-self-supervised-contrastive-learning.html)
[architecture](https://arxiv.org/abs/2010.10241), they can improve
performance at smaller batch sizes.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/semi-supervised-classification-simclr)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/semi-supervised-classification).
| keras-io/examples/vision/md/semisupervised_simclr.md/0 | {
"file_path": "keras-io/examples/vision/md/semisupervised_simclr.md",
"repo_id": "keras-io",
"token_count": 18974
} | 116 |
"""
Title: Image classification with Perceiver
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/04/30
Last modified: 2023/12/30
Description: Implementing the Perceiver model for image classification.
Accelerator: GPU
"""
"""
## Introduction
This example implements the
[Perceiver: General Perception with Iterative Attention](https://arxiv.org/abs/2103.03206)
model by Andrew Jaegle et al. for image classification,
and demonstrates it on the CIFAR-100 dataset.
The Perceiver model leverages an asymmetric attention mechanism to iteratively
distill inputs into a tight latent bottleneck,
allowing it to scale to handle very large inputs.
In other words: let's assume that your input data array (e.g. image) has `M` elements (i.e. patches), where `M` is large.
In a standard Transformer model, a self-attention operation is performed for the `M` elements.
The complexity of this operation is `O(M^2)`.
However, the Perceiver model creates a latent array of size `N` elements, where `N << M`,
and performs two operations iteratively:
1. Cross-attention Transformer between the latent array and the data array - The complexity of this operation is `O(M.N)`.
2. Self-attention Transformer on the latent array - The complexity of this operation is `O(N^2)`.
This example requires Keras 3.0 or higher.
"""
"""
## Setup
"""
import keras
from keras import layers, activations, ops
"""
## Prepare the data
"""
num_classes = 100
input_shape = (32, 32, 3)
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## Configure the hyperparameters
"""
learning_rate = 0.001
weight_decay = 0.0001
batch_size = 64
num_epochs = 2 # It is recommended to run 50 epochs to observe improvements in accuracy
dropout_rate = 0.2
image_size = 64 # We'll resize input images to this size.
patch_size = 2 # Size of the patches to be extracted from the input images.
num_patches = (image_size // patch_size) ** 2 # Size of the data array.
latent_dim = 256 # Size of the latent array.
projection_dim = 256 # Embedding size of each element in the data and latent arrays.
num_heads = 8 # Number of Transformer heads.
ffn_units = [
projection_dim,
projection_dim,
] # Size of the Transformer Feedforward network.
num_transformer_blocks = 4
num_iterations = 2 # Repetitions of the cross-attention and Transformer modules.
classifier_units = [
projection_dim,
num_classes,
] # Size of the Feedforward network of the final classifier.
print(f"Image size: {image_size} X {image_size} = {image_size ** 2}")
print(f"Patch size: {patch_size} X {patch_size} = {patch_size ** 2} ")
print(f"Patches per image: {num_patches}")
print(f"Elements per patch (3 channels): {(patch_size ** 2) * 3}")
print(f"Latent array shape: {latent_dim} X {projection_dim}")
print(f"Data array shape: {num_patches} X {projection_dim}")
"""
Note that, in order to use each pixel as an individual input in the data array,
set `patch_size` to 1.
"""
"""
## Use data augmentation
"""
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.Resizing(image_size, image_size),
layers.RandomFlip("horizontal"),
layers.RandomZoom(height_factor=0.2, width_factor=0.2),
],
name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
"""
## Implement Feedforward network (FFN)
"""
def create_ffn(hidden_units, dropout_rate):
ffn_layers = []
for units in hidden_units[:-1]:
ffn_layers.append(layers.Dense(units, activation=activations.gelu))
ffn_layers.append(layers.Dense(units=hidden_units[-1]))
ffn_layers.append(layers.Dropout(dropout_rate))
ffn = keras.Sequential(ffn_layers)
return ffn
"""
## Implement patch creation as a layer
"""
class Patches(layers.Layer):
def __init__(self, patch_size):
super().__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = ops.shape(images)[0]
patches = ops.image.extract_patches(
image=images,
size=(self.patch_size, self.patch_size),
strides=(self.patch_size, self.patch_size),
dilation_rate=1,
padding="valid",
)
patch_dims = patches.shape[-1]
patches = ops.reshape(patches, [batch_size, -1, patch_dims])
return patches
"""
## Implement the patch encoding layer
The `PatchEncoder` layer will linearly transform a patch by projecting it into
a vector of size `projection_dim`. In addition, it adds a learnable position embedding
to the projected vector.
Note that the original Perceiver paper uses Fourier feature positional encodings.
"""
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
super().__init__()
self.num_patches = num_patches
self.projection = layers.Dense(units=projection_dim)
self.position_embedding = layers.Embedding(
input_dim=num_patches, output_dim=projection_dim
)
def call(self, patches):
positions = ops.arange(start=0, stop=self.num_patches, step=1)
encoded = self.projection(patches) + self.position_embedding(positions)
return encoded
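"""
For reference, the original Perceiver uses Fourier feature positional encodings rather
than the learned position embedding above. The function below is only an illustrative
sketch of that idea (it is not used in this example, and the integer frequency sampling
is a simplifying assumption rather than the paper's exact scheme).
"""


def fourier_position_features(num_positions, num_bands=4):
    # Normalize positions to [-1, 1], shape: (num_positions, 1).
    positions = ops.expand_dims(ops.linspace(-1.0, 1.0, num_positions), -1)
    # Integer frequencies 1..num_bands, shape: (1, num_bands).
    frequencies = ops.expand_dims(ops.arange(1, num_bands + 1, dtype="float32"), 0)
    angles = 3.141592653589793 * positions * frequencies
    # Concatenate sine and cosine features, shape: (num_positions, 2 * num_bands).
    return ops.concatenate([ops.sin(angles), ops.cos(angles)], axis=-1)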
"""
## Build the Perceiver model
The Perceiver consists of two modules: a cross-attention
module and a standard Transformer with self-attention.
"""
"""
### Cross-attention module
The cross-attention expects a `(latent_dim, projection_dim)` latent array,
and the `(data_dim, projection_dim)` data array as inputs,
to produce a `(latent_dim, projection_dim)` latent array as an output.
To apply cross-attention, the `query` vectors are generated from the latent array,
while the `key` and `value` vectors are generated from the encoded image.
Note that the data array in this example is the image,
where the `data_dim` is set to the `num_patches`.
"""
def create_cross_attention_module(
latent_dim, data_dim, projection_dim, ffn_units, dropout_rate
):
inputs = {
# Receive the latent array as an input of shape [1, latent_dim, projection_dim].
"latent_array": layers.Input(
shape=(latent_dim, projection_dim), name="latent_array"
),
# Receive the data_array (encoded image) as an input of shape [batch_size, data_dim, projection_dim].
"data_array": layers.Input(shape=(data_dim, projection_dim), name="data_array"),
}
# Apply layer norm to the inputs
latent_array = layers.LayerNormalization(epsilon=1e-6)(inputs["latent_array"])
data_array = layers.LayerNormalization(epsilon=1e-6)(inputs["data_array"])
# Create query tensor: [1, latent_dim, projection_dim].
query = layers.Dense(units=projection_dim)(latent_array)
# Create key tensor: [batch_size, data_dim, projection_dim].
key = layers.Dense(units=projection_dim)(data_array)
# Create value tensor: [batch_size, data_dim, projection_dim].
value = layers.Dense(units=projection_dim)(data_array)
# Generate cross-attention outputs: [batch_size, latent_dim, projection_dim].
attention_output = layers.Attention(use_scale=True, dropout=0.1)(
[query, key, value], return_attention_scores=False
)
# Skip connection 1.
attention_output = layers.Add()([attention_output, latent_array])
# Apply layer norm.
attention_output = layers.LayerNormalization(epsilon=1e-6)(attention_output)
# Apply Feedforward network.
ffn = create_ffn(hidden_units=ffn_units, dropout_rate=dropout_rate)
outputs = ffn(attention_output)
# Skip connection 2.
outputs = layers.Add()([outputs, attention_output])
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
### Transformer module
The Transformer expects the output latent vector from the cross-attention module
as an input, applies multi-head self-attention to its `latent_dim` elements,
followed by feedforward network, to produce another `(latent_dim, projection_dim)` latent array.
"""
def create_transformer_module(
latent_dim,
projection_dim,
num_heads,
num_transformer_blocks,
ffn_units,
dropout_rate,
):
# input_shape: [1, latent_dim, projection_dim]
inputs = layers.Input(shape=(latent_dim, projection_dim))
x0 = inputs
# Create multiple layers of the Transformer block.
for _ in range(num_transformer_blocks):
# Apply layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(x0)
# Create a multi-head self-attention layer.
attention_output = layers.MultiHeadAttention(
num_heads=num_heads, key_dim=projection_dim, dropout=0.1
)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, x0])
# Apply layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# Apply Feedforward network.
ffn = create_ffn(hidden_units=ffn_units, dropout_rate=dropout_rate)
x3 = ffn(x3)
# Skip connection 2.
x0 = layers.Add()([x3, x2])
# Create the Keras model.
model = keras.Model(inputs=inputs, outputs=x0)
return model
"""
### Perceiver model
The Perceiver model repeats the cross-attention and Transformer modules
`num_iterations` times—with shared weights and skip connections—to allow
the latent array to iteratively extract information from the input image as it is needed.
"""
class Perceiver(keras.Model):
def __init__(
self,
patch_size,
data_dim,
latent_dim,
projection_dim,
num_heads,
num_transformer_blocks,
ffn_units,
dropout_rate,
num_iterations,
classifier_units,
):
super().__init__()
self.latent_dim = latent_dim
self.data_dim = data_dim
self.patch_size = patch_size
self.projection_dim = projection_dim
self.num_heads = num_heads
self.num_transformer_blocks = num_transformer_blocks
self.ffn_units = ffn_units
self.dropout_rate = dropout_rate
self.num_iterations = num_iterations
self.classifier_units = classifier_units
def build(self, input_shape):
# Create latent array.
self.latent_array = self.add_weight(
shape=(self.latent_dim, self.projection_dim),
initializer="random_normal",
trainable=True,
)
# Create patching module.
self.patcher = Patches(self.patch_size)
# Create patch encoder.
self.patch_encoder = PatchEncoder(self.data_dim, self.projection_dim)
# Create cross-attention module.
self.cross_attention = create_cross_attention_module(
self.latent_dim,
self.data_dim,
self.projection_dim,
self.ffn_units,
self.dropout_rate,
)
# Create Transformer module.
self.transformer = create_transformer_module(
self.latent_dim,
self.projection_dim,
self.num_heads,
self.num_transformer_blocks,
self.ffn_units,
self.dropout_rate,
)
# Create global average pooling layer.
self.global_average_pooling = layers.GlobalAveragePooling1D()
# Create a classification head.
self.classification_head = create_ffn(
hidden_units=self.classifier_units, dropout_rate=self.dropout_rate
)
super().build(input_shape)
def call(self, inputs):
# Augment data.
augmented = data_augmentation(inputs)
# Create patches.
patches = self.patcher(augmented)
# Encode patches.
encoded_patches = self.patch_encoder(patches)
# Prepare cross-attention inputs.
cross_attention_inputs = {
"latent_array": ops.expand_dims(self.latent_array, 0),
"data_array": encoded_patches,
}
# Apply the cross-attention and the Transformer modules iteratively.
for _ in range(self.num_iterations):
# Apply cross-attention from the latent array to the data array.
latent_array = self.cross_attention(cross_attention_inputs)
# Apply self-attention Transformer to the latent array.
latent_array = self.transformer(latent_array)
# Set the latent array of the next iteration.
cross_attention_inputs["latent_array"] = latent_array
# Apply global average pooling to generate a [batch_size, projection_dim] representation tensor.
representation = self.global_average_pooling(latent_array)
# Generate logits.
logits = self.classification_head(representation)
return logits
"""
## Compile, train, and evaluate the model
"""
def run_experiment(model):
# Create an Adam optimizer instead of the LAMB optimizer with weight decay (LAMB isn't supported yet).
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
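# Note: the `weight_decay` value defined above is not applied here; as an alternative,
# `keras.optimizers.AdamW(learning_rate, weight_decay=weight_decay)` could be used.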
# Compile the model.
model.compile(
optimizer=optimizer,
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
keras.metrics.SparseTopKCategoricalAccuracy(5, name="top5-acc"),
],
)
# Create a learning rate scheduler callback.
reduce_lr = keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.2, patience=3
)
# Create an early stopping callback.
early_stopping = keras.callbacks.EarlyStopping(
monitor="val_loss", patience=15, restore_best_weights=True
)
# Fit the model.
history = model.fit(
x=x_train,
y=y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=0.1,
callbacks=[early_stopping, reduce_lr],
)
_, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")
# Return history to plot learning curves.
return history
"""
Note that training the Perceiver model with the current settings on a V100 GPU takes
around 200 seconds.
"""
perceiver_classifier = Perceiver(
patch_size,
num_patches,
latent_dim,
projection_dim,
num_heads,
num_transformer_blocks,
ffn_units,
dropout_rate,
num_iterations,
classifier_units,
)
history = run_experiment(perceiver_classifier)
"""
After 40 epochs, the Perceiver model achieves around 53% accuracy and 81% top-5 accuracy on the test data.
As mentioned in the ablations of the [Perceiver](https://arxiv.org/abs/2103.03206) paper,
you can obtain better results by increasing the latent array size,
increasing the (projection) dimensions of the latent array and data array elements,
increasing the number of blocks in the Transformer module, and increasing the number of iterations of applying
the cross-attention and the latent Transformer modules. You may also try to increase the size of the input images
and use different patch sizes.
The Perceiver benefits from increasing the model size. However, larger models need bigger accelerators
to fit in memory and train efficiently. This is why the Perceiver paper used 32 TPU cores to run the experiments.
"""
| keras-io/examples/vision/perceiver_image_classification.py/0 | {
"file_path": "keras-io/examples/vision/perceiver_image_classification.py",
"repo_id": "keras-io",
"token_count": 5869
} | 117 |
"""
Title: Supervised Contrastive Learning
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2020/11/30
Last modified: 2020/11/30
Description: Using supervised contrastive learning for image classification.
Accelerator: GPU
"""
"""
## Introduction
[Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362)
(Prannay Khosla et al.) is a training methodology that outperforms
supervised training with crossentropy on classification tasks.
Essentially, training an image classification model with Supervised Contrastive
Learning is performed in two phases:
1. Training an encoder to learn to produce vector representations of input images such
that representations of images in the same class will be more similar compared to
representations of images in different classes.
2. Training a classifier on top of the frozen encoder.
Note that this example requires [TensorFlow Addons](https://www.tensorflow.org/addons),
which you can install using the following command:
```python
pip install tensorflow-addons
```
## Setup
"""
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
"""
## Prepare the data
"""
num_classes = 10
input_shape = (32, 32, 3)
# Load the train and test data splits
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Display shapes of train and test datasets
print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")
"""
## Using image data augmentation
"""
data_augmentation = keras.Sequential(
[
layers.Normalization(),
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.02),
]
)
# Setting the state of the normalization layer.
data_augmentation.layers[0].adapt(x_train)
"""
## Build the encoder model
The encoder model takes the image as input and turns it into a 2048-dimensional
feature vector.
"""
def create_encoder():
resnet = keras.applications.ResNet50V2(
include_top=False, weights=None, input_shape=input_shape, pooling="avg"
)
inputs = keras.Input(shape=input_shape)
augmented = data_augmentation(inputs)
outputs = resnet(augmented)
model = keras.Model(inputs=inputs, outputs=outputs, name="cifar10-encoder")
return model
encoder = create_encoder()
encoder.summary()
learning_rate = 0.001
batch_size = 265
hidden_units = 512
projection_units = 128
num_epochs = 50
dropout_rate = 0.5
temperature = 0.05
"""
## Build the classification model
The classification model adds a fully-connected layer on top of the encoder,
plus a softmax layer with the target classes.
"""
def create_classifier(encoder, trainable=True):
for layer in encoder.layers:
layer.trainable = trainable
inputs = keras.Input(shape=input_shape)
features = encoder(inputs)
features = layers.Dropout(dropout_rate)(features)
features = layers.Dense(hidden_units, activation="relu")(features)
features = layers.Dropout(dropout_rate)(features)
outputs = layers.Dense(num_classes, activation="softmax")(features)
model = keras.Model(inputs=inputs, outputs=outputs, name="cifar10-classifier")
model.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
return model
"""
## Experiment 1: Train the baseline classification model
In this experiment, a baseline classifier is trained as usual, i.e., the
encoder and the classifier parts are trained together as a single model
to minimize the crossentropy loss.
"""
encoder = create_encoder()
classifier = create_classifier(encoder)
classifier.summary()
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
"""
## Experiment 2: Use supervised contrastive learning
In this experiment, the model is trained in two phases. In the first phase,
the encoder is pretrained to optimize the supervised contrastive loss,
described in [Prannay Khosla et al.](https://arxiv.org/abs/2004.11362).
In the second phase, the classifier is trained using the trained encoder with
its weights frozen; only the weights of the fully-connected layers with the
softmax are optimized.
### 1. Supervised contrastive learning loss function
"""
class SupervisedContrastiveLoss(keras.losses.Loss):
def __init__(self, temperature=1, name=None):
super().__init__(name=name)
self.temperature = temperature
def __call__(self, labels, feature_vectors, sample_weight=None):
# Normalize feature vectors
feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)
# Compute logits
logits = tf.divide(
tf.matmul(
feature_vectors_normalized, tf.transpose(feature_vectors_normalized)
),
self.temperature,
)
return tfa.losses.npairs_loss(tf.squeeze(labels), logits)
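"""
For reference, `tfa.losses.npairs_loss` roughly corresponds to a softmax cross-entropy
whose targets indicate which pairs of samples in the batch share a label. The function
below is only an illustrative sketch of that idea (an approximation for intuition; the
loss above keeps using the TensorFlow Addons implementation).
"""


def npairs_loss_sketch(labels, logits):
    labels = tf.reshape(labels, [-1, 1])
    # 1.0 where two samples share the same class label, 0.0 otherwise.
    targets = tf.cast(tf.equal(labels, tf.transpose(labels)), tf.float32)
    # Normalize each row into a probability distribution over the positives.
    targets = targets / tf.reduce_sum(targets, axis=1, keepdims=True)
    # Cross-entropy between the pairwise-similarity logits and the targets.
    losses = tf.nn.softmax_cross_entropy_with_logits(labels=targets, logits=logits)
    return tf.reduce_mean(losses)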
def add_projection_head(encoder):
inputs = keras.Input(shape=input_shape)
features = encoder(inputs)
outputs = layers.Dense(projection_units, activation="relu")(features)
model = keras.Model(
inputs=inputs, outputs=outputs, name="cifar-encoder_with_projection-head"
)
return model
"""
### 2. Pretrain the encoder
"""
encoder = create_encoder()
encoder_with_projection_head = add_projection_head(encoder)
encoder_with_projection_head.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss=SupervisedContrastiveLoss(temperature),
)
encoder_with_projection_head.summary()
history = encoder_with_projection_head.fit(
x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs
)
"""
### 3. Train the classifier with the frozen encoder
"""
classifier = create_classifier(encoder, trainable=False)
history = classifier.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=num_epochs)
accuracy = classifier.evaluate(x_test, y_test)[1]
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
"""
We get to an improved test accuracy.
"""
"""
## Conclusion
As shown in the experiments, using the supervised contrastive learning technique
outperformed the conventional technique in terms of the test accuracy. Note that
the same training budget (i.e., number of epochs) was given to each technique.
Supervised contrastive learning pays off when the encoder involves a complex
architecture, like ResNet, and multi-class problems with many labels.
In addition, large batch sizes and multi-layer projection heads
improve its effectiveness. See the [Supervised Contrastive Learning](https://arxiv.org/abs/2004.11362)
paper for more details.
You can use the trained model hosted on [Hugging Face Hub](https://huggingface.co/keras-io/supervised-contrastive-learning-cifar10)
and try the demo on [Hugging Face Spaces](https://huggingface.co/spaces/keras-io/supervised-contrastive-learning).
"""
| keras-io/examples/vision/supervised-contrastive-learning.py/0 | {
"file_path": "keras-io/examples/vision/supervised-contrastive-learning.py",
"repo_id": "keras-io",
"token_count": 2381
} | 118 |
<jupyter_start><jupyter_text>Segment Anything in KerasCV!**Author:** Tirth Patel, Ian Stenbit**Date created:** 2023/12/04**Last modified:** 2023/12/19**Description:** Segment anything using text, box, and points prompts in KerasCV. OverviewThe Segment Anything Model (SAM) produces high quality object masks from input promptssuch as points or boxes, and it can be used to generate masks for all objects in animage. It has been trained on a[dataset](https://segment-anything.com/dataset/index.html) of 11 million images and 1.1billion masks, and has strong zero-shot performance on a variety of segmentation tasks.In this guide, we will show how to use KerasCV's implementation of the[Segment Anything Model](https://github.com/facebookresearch/segment-anything)and show how powerful TensorFlow's and JAX's performance boost is.First, let's get all our dependencies and images for our demo.<jupyter_code>!pip install -Uq keras-cv
!pip install -Uq keras
!wget -q https://raw.githubusercontent.com/facebookresearch/segment-anything/main/notebooks/images/truck.jpg<jupyter_output><empty_output><jupyter_text>Choose your backendWith Keras 3, you can choose to use your favorite backend!<jupyter_code>import os
os.environ["KERAS_BACKEND"] = "jax"
import timeit
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras import ops
import keras_cv<jupyter_output><empty_output><jupyter_text>Helper functionsLet's define some helper functions for visualizing the images, prompts, and the segmentation results.<jupyter_code>def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30 / 255, 144 / 255, 255 / 255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image)
def show_points(coords, labels, ax, marker_size=375):
pos_points = coords[labels == 1]
neg_points = coords[labels == 0]
ax.scatter(
pos_points[:, 0],
pos_points[:, 1],
color="green",
marker="*",
s=marker_size,
edgecolor="white",
linewidth=1.25,
)
ax.scatter(
neg_points[:, 0],
neg_points[:, 1],
color="red",
marker="*",
s=marker_size,
edgecolor="white",
linewidth=1.25,
)
def show_box(box, ax):
box = box.reshape(-1)
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(
plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2)
)
def inference_resizing(image, pad=True):
# Compute Preprocess Shape
image = ops.cast(image, dtype="float32")
old_h, old_w = image.shape[0], image.shape[1]
scale = 1024 * 1.0 / max(old_h, old_w)
new_h = old_h * scale
new_w = old_w * scale
preprocess_shape = int(new_h + 0.5), int(new_w + 0.5)
# Resize the image
image = ops.image.resize(image[None, ...], preprocess_shape)[0]
# Pad the shorter side
if pad:
pixel_mean = ops.array([123.675, 116.28, 103.53])
pixel_std = ops.array([58.395, 57.12, 57.375])
image = (image - pixel_mean) / pixel_std
h, w = image.shape[0], image.shape[1]
pad_h = 1024 - h
pad_w = 1024 - w
image = ops.pad(image, [(0, pad_h), (0, pad_w), (0, 0)])
# KerasCV now rescales the images and normalizes them.
# Just unnormalize such that when KerasCV normalizes them
# again, the padded values map to 0.
image = image * pixel_std + pixel_mean
return image<jupyter_output><empty_output><jupyter_text>Get the pretrained SAM modelWe can initialize a trained SAM model using KerasCV's `from_preset` factory method. Here,we use the huge ViT backbone trained on the SA-1B dataset (`sam_huge_sa1b`) forhigh-quality segmentation masks. You can also use one of the `sam_large_sa1b` or`sam_base_sa1b` for better performance (at the cost of decreasing quality of segmentationmasks).<jupyter_code>model = keras_cv.models.SegmentAnythingModel.from_preset("sam_huge_sa1b")<jupyter_output><empty_output><jupyter_text>Understanding PromptsSegment Anything allows prompting an image using points, boxes, and masks:1. Point prompts are the most basic of all: the model tries to guess the object given apoint on an image. The point can either be a foreground point (i.e. the desiredsegmentation mask contains the point in it) or a backround point (i.e. the point liesoutside the desired mask).2. Another way to prompt the model is using boxes. Given a bounding box, the model triesto segment the object contained in it.3. Finally, the model can also be prompted using a mask itself. This is useful, forinstance, to refine the borders of a previously predicted or known segmentation mask.What makes the model incredibly powerful is the ability to combine the prompts above.Point, box, and mask prompts can be combined in several different ways to achieve thebest result.Let's see the semantics of passing these prompts to the Segment Anything model inKerasCV. Input to the SAM model is a dictionary with keys:1. `"images"`: A batch of images to segment. Must be of shape `(B, 1024, 1024, 3)`.2. `"points"`: A batch of point prompts. Each point is an `(x, y)` coordinate originatingfrom the top-left corner of the image. In other works, each point is of the form `(r, c)`where `r` and `c` are the row and column of the pixel in the image. Must be of shape `(B,N, 2)`.3. `"labels"`: A batch of labels for the given points. `1` represents foreground pointsand `0` represents background points. Must be of shape `(B, N)`.4. `"boxes"`: A batch of boxes. Note that the model only accepts one box per batch.Hence, the expected shape is `(B, 1, 2, 2)`. Each box is a collection of 2 points: thetop left corner and the bottom right corner of the box. The points here follow the samesemantics as the point prompts. Here the `1` in the second dimension represents thepresence of box prompts. If the box prompts are missing, a placeholder input of shape`(B, 0, 2, 2)` must be passed.5. `"masks"`: A batch of masks. Just like box prompts, only one mask prompt per image isallowed. The shape of the input mask must be `(B, 1, 256, 256, 1)` if they are presentand `(B, 0, 256, 256, 1)` for missing mask prompt.Placeholder prompts are only required when calling the model directly (i.e.`model(...)`). When calling the `predict` method, missing prompts can be omitted from theinput dictionary. Point promptsFirst, let's segment an image using point prompts. We load the image and resize it toshape `(1024, 1024)`, the image size the pretrained SAM model expects.<jupyter_code># Load our image
image = np.array(keras.utils.load_img("truck.jpg"))
image = inference_resizing(image)
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
plt.axis("on")
plt.show()<jupyter_output><empty_output><jupyter_text>Next, we will define the point on the object we want to segment. Let's try to segment thetruck's window pane at coordinates `(284, 213)`.<jupyter_code># Define the input point prompt
input_point = np.array([[284, 213.5]])
input_label = np.array([1])
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_points(input_point, input_label, plt.gca())
plt.axis("on")
plt.show()<jupyter_output><empty_output><jupyter_text>Now let's call the `predict` method of our model to get the segmentation masks.**Note**: We don't call the model directly (`model(...)`) since placeholder prompts arerequired to do so. Missing prompts are handled automatically by the predict method so wecall it instead. Also, when no box prompts are present, the points and labels need to bepadded with a zero point prompt and `-1` label prompt respectively. The cell belowdemonstrates how this works.<jupyter_code>outputs = model.predict(
{
"images": image[np.newaxis, ...],
"points": np.concatenate(
[input_point[np.newaxis, ...], np.zeros((1, 1, 2))], axis=1
),
"labels": np.concatenate(
[input_label[np.newaxis, ...], np.full((1, 1), fill_value=-1)], axis=1
),
}
)<jupyter_output><empty_output><jupyter_text>`SegmentAnythingModel.predict` returns two outputs. First are logits (segmentation masks)of shape `(1, 4, 256, 256)` and the other are the IoU confidence scores (of shape `(1,4)`) for each mask predicted. The pretrained SAM model predicts four masks: the first isthe best mask the model could come up with for the given prompts, and the other 3 are thealternative masks which can be used in case the best prediction doesn't contain thedesired object. The user can choose whichever mask they prefer.Let's visualize the masks returned by the model!<jupyter_code># Resize the mask to our image shape i.e. (1024, 1024)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
# Convert the logits to a numpy array
# and convert the logits to a boolean mask
mask = ops.convert_to_numpy(mask) > 0.0
iou_score = ops.convert_to_numpy(outputs["iou_pred"][0][0])
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_points(input_point, input_label, plt.gca())
plt.title(f"IoU Score: {iou_score:.3f}", fontsize=18)
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>As expected, the model returns a segmentation mask for the truck's window pane. But, ourpoint prompt can also mean a range of other things. For example, another possible maskthat contains our point is just the right side of the window pane or the whole truck. Let's also visualize the other masks the model has predicted.<jupyter_code>fig, ax = plt.subplots(1, 3, figsize=(20, 60))
masks, scores = outputs["masks"][0][1:], outputs["iou_pred"][0][1:]
for i, (mask, score) in enumerate(zip(masks, scores)):
mask = inference_resizing(mask[..., None], pad=False)[..., 0]
mask, score = map(ops.convert_to_numpy, (mask, score))
mask = 1 * (mask > 0.0)
ax[i].imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, ax[i])
show_points(input_point, input_label, ax[i])
ax[i].set_title(f"Mask {i+1}, Score: {score:.3f}", fontsize=12)
ax[i].axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Nice! SAM was able to capture the ambiguity of our point prompt and also returned otherpossible segmentation masks. Box PromptsNow, let's see how we can prompt the model using boxes. The box is specified using twopoints, the top-left corner and the bottom-right corner of the bounding box in xyxyformat. Let's prompt the model using a bounding box around the left front tyre of thetruck.<jupyter_code># Let's specify the box
input_box = np.array([[240, 340], [400, 500]])
outputs = model.predict(
{"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}
)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_box(input_box, plt.gca())
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Boom! The model perfectly segments out the left front tyre in our bounding box. Combining promptsTo get the true potential of the model out, let's combine box and point prompts and seewhat the model does.<jupyter_code># Let's specify the box
input_box = np.array([[240, 340], [400, 500]])
# Let's specify the point and mark it background
input_point = np.array([[325, 425]])
input_label = np.array([0])
outputs = model.predict(
{
"images": image[np.newaxis, ...],
"points": input_point[np.newaxis, ...],
"labels": input_label[np.newaxis, ...],
"boxes": input_box[np.newaxis, np.newaxis, ...],
}
)
mask = inference_resizing(outputs["masks"][0][0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
plt.figure(figsize=(10, 10))
plt.imshow(ops.convert_to_numpy(image) / 255.0)
show_mask(mask, plt.gca())
show_box(input_box, plt.gca())
show_points(input_point, input_label, plt.gca())
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Voila! The model understood that the object we wanted to exclude from our mask was therim of the tyre. Text promptsFinally, let's see how text prompts can be used along with KerasCV's`SegmentAnythingModel`.For this demo, we will use the[offical Grounding DINO model](https://github.com/IDEA-Research/GroundingDINO).Grounding DINO is a model thattakes as input a `(image, text)` pair and generates a bounding box around the object inthe `image` described by the `text`. You can refer to the[paper](https://arxiv.org/abs/2303.05499) for more details on the implementation of themodel.For this part of the demo, we will need to install the `groundingdino` package fromsource:```pip install -U git+https://github.com/IDEA-Research/GroundingDINO.git```Then, we can install the pretrained model's weights and config:<jupyter_code>!wget -q https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
!wget -q https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/v0.1.0-alpha2/groundingdino/config/GroundingDINO_SwinT_OGC.py
from groundingdino.util.inference import Model as GroundingDINO
CONFIG_PATH = "GroundingDINO_SwinT_OGC.py"
WEIGHTS_PATH = "groundingdino_swint_ogc.pth"
grounding_dino = GroundingDINO(CONFIG_PATH, WEIGHTS_PATH)<jupyter_output><empty_output><jupyter_text>Let's load an image of a dog for this part!<jupyter_code>filepath = keras.utils.get_file(
origin="https://storage.googleapis.com/keras-cv/test-images/mountain-dog.jpeg"
)
image = np.array(keras.utils.load_img(filepath))
image = ops.convert_to_numpy(inference_resizing(image))
plt.figure(figsize=(10, 10))
plt.imshow(image / 255.0)
plt.axis("on")
plt.show()<jupyter_output><empty_output><jupyter_text>We first predict the bounding box of the object we want to segment using the GroundingDINO model. Then, we prompt the SAM model using the bounding box to get the segmentationmask.Let's try to segment out the harness of the dog. Change the image and text below tosegment whatever you want using text from your image!<jupyter_code># Let's predict the bounding box for the harness of the dog
boxes = grounding_dino.predict_with_caption(image.astype(np.uint8), "harness")
boxes = np.array(boxes[0].xyxy)
outputs = model.predict(
{
"images": np.repeat(image[np.newaxis, ...], boxes.shape[0], axis=0),
"boxes": boxes.reshape(-1, 1, 2, 2),
},
batch_size=1,
)<jupyter_output><empty_output><jupyter_text>And that's it! We got a segmentation mask for our text prompt using the combination ofGounding DINO + SAM! This is a very powerful technique to combine different models toexpand the applications!Let's visualize the results.<jupyter_code>plt.figure(figsize=(10, 10))
plt.imshow(image / 255.0)
for mask in outputs["masks"]:
mask = inference_resizing(mask[0][..., None], pad=False)[..., 0]
mask = ops.convert_to_numpy(mask) > 0.0
show_mask(mask, plt.gca())
show_box(boxes, plt.gca())
plt.axis("off")
plt.show()<jupyter_output><empty_output><jupyter_text>Optimizing SAMYou can use `mixed_float16` or `bfloat16` dtype policies to gain huge speedups and memory optimizations at relatively low precision loss.<jupyter_code># Load our image
image = np.array(keras.utils.load_img("truck.jpg"))
image = inference_resizing(image)
# Specify the prompt
input_box = np.array([[240, 340], [400, 500]])
# Let's first see how fast the model is with float32 dtype
time_taken = timeit.repeat(
'model.predict({"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}, verbose=False)',
repeat=3,
number=3,
globals=globals(),
)
print(f"Time taken with float32 dtype: {min(time_taken) / 3:.10f}s")
# Set the dtype policy in Keras
keras.mixed_precision.set_global_policy("mixed_float16")
model = keras_cv.models.SegmentAnythingModel.from_preset("sam_huge_sa1b")
time_taken = timeit.repeat(
'model.predict({"images": image[np.newaxis, ...], "boxes": input_box[np.newaxis, np.newaxis, ...]}, verbose=False)',
repeat=3,
number=3,
globals=globals(),
)
print(f"Time taken with float16 dtype: {min(time_taken) / 3:.10f}s")<jupyter_output><empty_output> | keras-io/guides/ipynb/keras_cv/segment_anything_in_keras_cv.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/keras_cv/segment_anything_in_keras_cv.ipynb",
"repo_id": "keras-io",
"token_count": 5826
} | 119 |
<jupyter_start><jupyter_text>Save, serialize, and export models**Authors:** Neel Kovelamudi, Francois Chollet**Date created:** 2023/06/14**Last modified:** 2023/06/30**Description:** Complete guide to saving, serializing, and exporting models. IntroductionA Keras model consists of multiple components:- The architecture, or configuration, which specifies what layers the modelcontain, and how they're connected.- A set of weights values (the "state of the model").- An optimizer (defined by compiling the model).- A set of losses and metrics (defined by compiling the model).The Keras API saves all of these pieces together in a unified format,marked by the `.keras` extension. This is a zip archive consisting of thefollowing:- A JSON-based configuration file (config.json): Records of model, layer, andother trackables' configuration.- A H5-based state file, such as `model.weights.h5` (for the whole model),with directory keys for layers and their weights.- A metadata file in JSON, storing things such as the current Keras version.Let's take a look at how this works. How to save and load a modelIf you only have 10 seconds to read this guide, here's what you need to know.**Saving a Keras model:**```pythonmodel = ... Get model (Sequential, Functional Model, or Model subclass)model.save('path/to/location.keras') The file needs to end with the .keras extension```**Loading the model back:**```pythonmodel = keras.models.load_model('path/to/location.keras')```Now, let's look at the details. Setup<jupyter_code>import numpy as np
import keras
from keras import ops<jupyter_output><empty_output><jupyter_text>SavingThis section is about saving an entire model to a single file. The file will include:- The model's architecture/config- The model's weight values (which were learned during training)- The model's compilation information (if `compile()` was called)- The optimizer and its state, if any (this enables you to restart trainingwhere you left) APIsYou can save a model with `model.save()` or `keras.models.save_model()` (which is equivalent).You can load it back with `keras.models.load_model()`.The only supported format in Keras 3 is the "Keras v3" format,which uses the `.keras` extension.**Example:**<jupyter_code>def get_model():
# Create a simple model.
inputs = keras.Input(shape=(32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
model.compile(optimizer=keras.optimizers.Adam(), loss="mean_squared_error")
return model
model = get_model()
# Train the model.
test_input = np.random.random((128, 32))
test_target = np.random.random((128, 1))
model.fit(test_input, test_target)
# Calling `save('my_model.keras')` creates a zip archive `my_model.keras`.
model.save("my_model.keras")
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model("my_model.keras")
# Let's check:
np.testing.assert_allclose(
model.predict(test_input), reconstructed_model.predict(test_input)
)<jupyter_output><empty_output><jupyter_text>Custom objectsThis section covers the basic workflows for handling custom layers, functions, andmodels in Keras saving and reloading.When saving a model that includes custom objects, such as a subclassed Layer,you **must** define a `get_config()` method on the object class.If the arguments passed to the constructor (`__init__()` method) of the custom objectaren't Python objects (anything other than base types like ints, strings,etc.), then you **must** also explicitly deserialize these arguments in the `from_config()`class method.Like this:```pythonclass CustomLayer(keras.layers.Layer): def __init__(self, sublayer, **kwargs): super().__init__(**kwargs) self.sublayer = layer def call(self, x): return self.sublayer(x) def get_config(self): base_config = super().get_config() config = { "sublayer": keras.saving.serialize_keras_object(self.sublayer), } return {**base_config, **config} @classmethod def from_config(cls, config): sublayer_config = config.pop("sublayer") sublayer = keras.saving.deserialize_keras_object(sublayer_config) return cls(sublayer, **config)```Please see the [Defining the config methods section](config_methods) for moredetails and examples.The saved `.keras` file is lightweight and does not store the Python code for customobjects. Therefore, to reload the model, `load_model` requires access to the definitionof any custom objects used through one of the following methods:1. Registering custom objects **(preferred)**,2. Passing custom objects directly when loading, or3. Using a custom object scopeBelow are examples of each workflow: Registering custom objects (**preferred**)This is the preferred method, as custom object registration greatly simplifies saving andloading code. Adding the `@keras.saving.register_keras_serializable` decorator to theclass definition of a custom object registers the object globally in a master list,allowing Keras to recognize the object when loading the model.Let's create a custom model involving both a custom layer and a custom activationfunction to demonstrate this.**Example:**<jupyter_code># Clear all previously registered custom objects
keras.saving.get_custom_objects().clear()
# Upon registration, you can optionally specify a package or a name.
# If left blank, the package defaults to `Custom` and the name defaults to
# the class name.
@keras.saving.register_keras_serializable(package="MyLayers")
class CustomLayer(keras.layers.Layer):
def __init__(self, factor):
super().__init__()
self.factor = factor
def call(self, x):
return x * self.factor
def get_config(self):
return {"factor": self.factor}
@keras.saving.register_keras_serializable(package="my_package", name="custom_fn")
def custom_fn(x):
return x**2
# Create the model.
def get_model():
inputs = keras.Input(shape=(4,))
mid = CustomLayer(0.5)(inputs)
outputs = keras.layers.Dense(1, activation=custom_fn)(mid)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="mean_squared_error")
return model
# Train the model.
def train_model(model):
input = np.random.random((4, 4))
target = np.random.random((4, 1))
model.fit(input, target)
return model
test_input = np.random.random((4, 4))
test_target = np.random.random((4, 1))
model = get_model()
model = train_model(model)
model.save("custom_model.keras")
# Now, we can simply load without worrying about our custom objects.
reconstructed_model = keras.models.load_model("custom_model.keras")
# Let's check:
np.testing.assert_allclose(
model.predict(test_input), reconstructed_model.predict(test_input)
)<jupyter_output><empty_output><jupyter_text>Passing custom objects to `load_model()`<jupyter_code>model = get_model()
model = train_model(model)
# Calling `save('my_model.keras')` creates a zip archive `my_model.keras`.
model.save("custom_model.keras")
# Upon loading, pass a dict containing the custom objects used in the
# `custom_objects` argument of `keras.models.load_model()`.
reconstructed_model = keras.models.load_model(
"custom_model.keras",
custom_objects={"CustomLayer": CustomLayer, "custom_fn": custom_fn},
)
# Let's check:
np.testing.assert_allclose(
model.predict(test_input), reconstructed_model.predict(test_input)
)<jupyter_output><empty_output><jupyter_text>Using a custom object scopeAny code within the custom object scope will be able to recognize the custom objectspassed to the scope argument. Therefore, loading the model within the scope will allowthe loading of our custom objects.**Example:**<jupyter_code>model = get_model()
model = train_model(model)
model.save("custom_model.keras")
# Pass the custom objects dictionary to a custom object scope and place
# the `keras.models.load_model()` call within the scope.
custom_objects = {"CustomLayer": CustomLayer, "custom_fn": custom_fn}
with keras.saving.custom_object_scope(custom_objects):
reconstructed_model = keras.models.load_model("custom_model.keras")
# Let's check:
np.testing.assert_allclose(
model.predict(test_input), reconstructed_model.predict(test_input)
)<jupyter_output><empty_output><jupyter_text>Model serializationThis section is about saving only the model's configuration, without its state.The model's configuration (or architecture) specifies what layers the modelcontains, and how these layers are connected. If you have the configuration of a model,then the model can be created with a freshly initialized state (no weights or compilationinformation). APIsThe following serialization APIs are available:- `keras.models.clone_model(model)`: make a (randomly initialized) copy of a model.- `get_config()` and `cls.from_config()`: retrieve the configuration of a layer or model, and recreatea model instance from its config, respectively.- `keras.models.model_to_json()` and `keras.models.model_from_json()`: similar, but as JSON strings.- `keras.saving.serialize_keras_object()`: retrieve the configuration any arbitrary Keras object.- `keras.saving.deserialize_keras_object()`: recreate an object instance from its configuration. In-memory model cloningYou can do in-memory cloning of a model via `keras.models.clone_model()`.This is equivalent to getting the config then recreating the model from its config(so it does not preserve compilation information or layer weights values).**Example:**<jupyter_code>new_model = keras.models.clone_model(model)<jupyter_output><empty_output><jupyter_text>`get_config()` and `from_config()`Calling `model.get_config()` or `layer.get_config()` will return a Python dict containingthe configuration of the model or layer, respectively. You should define `get_config()`to contain arguments needed for the `__init__()` method of the model or layer. At loading time,the `from_config(config)` method will then call `__init__()` with these arguments toreconstruct the model or layer.**Layer example:**<jupyter_code>layer = keras.layers.Dense(3, activation="relu")
layer_config = layer.get_config()
print(layer_config)<jupyter_output><empty_output><jupyter_text>Now let's reconstruct the layer using the `from_config()` method:<jupyter_code>new_layer = keras.layers.Dense.from_config(layer_config)<jupyter_output><empty_output><jupyter_text>**Sequential model example:**<jupyter_code>model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
config = model.get_config()
new_model = keras.Sequential.from_config(config)<jupyter_output><empty_output><jupyter_text>**Functional model example:**<jupyter_code>inputs = keras.Input((32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config)<jupyter_output><empty_output><jupyter_text>`to_json()` and `keras.models.model_from_json()`This is similar to `get_config` / `from_config`, except it turns the modelinto a JSON string, which can then be loaded without the original model class.It is also specific to models, it isn't meant for layers.**Example:**<jupyter_code>model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
json_config = model.to_json()
new_model = keras.models.model_from_json(json_config)<jupyter_output><empty_output><jupyter_text>Arbitrary object serialization and deserializationThe `keras.saving.serialize_keras_object()` and `keras.saving.deserialize_keras_object()`APIs are general-purpose APIs that can be used to serialize or deserialize any Kerasobject and any custom object. It is at the foundation of saving model architecture and isbehind all `serialize()`/`deserialize()` calls in keras.**Example**:<jupyter_code>my_reg = keras.regularizers.L1(0.005)
config = keras.saving.serialize_keras_object(my_reg)
print(config)<jupyter_output><empty_output><jupyter_text>Note the serialization format containing all the necessary information for properreconstruction:- `module` containing the name of the Keras module or other identifying module the objectcomes from- `class_name` containing the name of the object's class.- `config` with all the information needed to reconstruct the object- `registered_name` for custom objects. See [here](custom_object_serialization).Now we can reconstruct the regularizer.<jupyter_code>new_reg = keras.saving.deserialize_keras_object(config)<jupyter_output><empty_output><jupyter_text>Model weights savingYou can choose to only save & load a model's weights. This can be useful if:- You only need the model for inference: in this case you won't need torestart training, so you don't need the compilation information or optimizer state.- You are doing transfer learning: in this case you will be training a new modelreusing the state of a prior model, so you don't need the compilationinformation of the prior model. APIs for in-memory weight transferWeights can be copied between different objects by using `get_weights()`and `set_weights()`:* `keras.layers.Layer.get_weights()`: Returns a list of NumPy arrays of weight values.* `keras.layers.Layer.set_weights(weights)`: Sets the model weights to the valuesprovided (as NumPy arrays).Examples:***Transfering weights from one layer to another, in memory***<jupyter_code>def create_layer():
layer = keras.layers.Dense(64, activation="relu", name="dense_2")
layer.build((None, 784))
return layer
layer_1 = create_layer()
layer_2 = create_layer()
# Copy weights from layer 1 to layer 2
layer_2.set_weights(layer_1.get_weights())<jupyter_output><empty_output><jupyter_text>***Transferring weights from one model to another model with a compatible architecture, in memory***<jupyter_code># Create a simple functional model
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")
# Define a subclassed model with the same architecture
class SubclassedModel(keras.Model):
def __init__(self, output_dim, name=None):
super().__init__(name=name)
self.output_dim = output_dim
self.dense_1 = keras.layers.Dense(64, activation="relu", name="dense_1")
self.dense_2 = keras.layers.Dense(64, activation="relu", name="dense_2")
self.dense_3 = keras.layers.Dense(output_dim, name="predictions")
def call(self, inputs):
x = self.dense_1(inputs)
x = self.dense_2(x)
x = self.dense_3(x)
return x
def get_config(self):
return {"output_dim": self.output_dim, "name": self.name}
subclassed_model = SubclassedModel(10)
# Call the subclassed model once to create the weights.
subclassed_model(np.ones((1, 784)))
# Copy weights from functional_model to subclassed_model.
subclassed_model.set_weights(functional_model.get_weights())
assert len(functional_model.weights) == len(subclassed_model.weights)
for a, b in zip(functional_model.weights, subclassed_model.weights):
np.testing.assert_allclose(a.numpy(), b.numpy())<jupyter_output><empty_output><jupyter_text>***The case of stateless layers***Because stateless layers do not change the order or number of weights, models can have compatible architectures even if there are extra/missing stateless layers.<jupyter_code>inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
# Add a dropout layer, which does not contain any weights.
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
functional_model_with_dropout = keras.Model(
inputs=inputs, outputs=outputs, name="3_layer_mlp"
)
functional_model_with_dropout.set_weights(functional_model.get_weights())<jupyter_output><empty_output><jupyter_text>APIs for saving weights to disk & loading them back. Weights can be saved to disk by calling `model.save_weights(filepath)`. The filename should end in `.weights.h5`.**Example:**<jupyter_code># Runnable example
sequential_model = keras.Sequential(
[
keras.Input(shape=(784,), name="digits"),
keras.layers.Dense(64, activation="relu", name="dense_1"),
keras.layers.Dense(64, activation="relu", name="dense_2"),
keras.layers.Dense(10, name="predictions"),
]
)
sequential_model.save_weights("my_model.weights.h5")
sequential_model.load_weights("my_model.weights.h5")<jupyter_output><empty_output><jupyter_text>Note that changing `layer.trainable` may result in a different `layer.weights` ordering when the model contains nested layers.<jupyter_code>class NestedDenseLayer(keras.layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.dense_1 = keras.layers.Dense(units, name="dense_1")
self.dense_2 = keras.layers.Dense(units, name="dense_2")
def call(self, inputs):
return self.dense_2(self.dense_1(inputs))
nested_model = keras.Sequential([keras.Input((784,)), NestedDenseLayer(10, "nested")])
variable_names = [v.name for v in nested_model.weights]
print("variables: {}".format(variable_names))
print("\nChanging trainable status of one of the nested layers...")
nested_model.get_layer("nested").dense_1.trainable = False
variable_names_2 = [v.name for v in nested_model.weights]
print("\nvariables: {}".format(variable_names_2))
print("variable ordering changed:", variable_names != variable_names_2)<jupyter_output><empty_output><jupyter_text>**Transfer learning example**When loading pretrained weights from a weights file, it is recommended to loadthe weights into the original checkpointed model, and then extractthe desired weights/layers into a new model.**Example:**<jupyter_code>def create_functional_model():
inputs = keras.Input(shape=(784,), name="digits")
x = keras.layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = keras.layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = keras.layers.Dense(10, name="predictions")(x)
return keras.Model(inputs=inputs, outputs=outputs, name="3_layer_mlp")
functional_model = create_functional_model()
functional_model.save_weights("pretrained.weights.h5")
# In a separate program:
pretrained_model = create_functional_model()
pretrained_model.load_weights("pretrained.weights.h5")
# Create a new model by extracting layers from the original model:
extracted_layers = pretrained_model.layers[:-1]
extracted_layers.append(keras.layers.Dense(5, name="dense_3"))
model = keras.Sequential(extracted_layers)
model.summary()<jupyter_output><empty_output><jupyter_text>Appendix: Handling custom objects. Defining the config methods. Specifications:* `get_config()` should return a JSON-serializable dictionary in order to be compatible with the Keras architecture- and model-saving APIs.* `from_config(config)` (a `classmethod`) should return a new layer or model object that is created from the config. The default implementation returns `cls(**config)`.**NOTE**: If all your constructor arguments are already serializable, e.g. strings and ints, or non-custom Keras objects, overriding `from_config` is not necessary. However, for more complex objects such as layers or models passed to `__init__`, deserialization must be handled explicitly either in `__init__` itself or by overriding the `from_config()` method.**Example:**<jupyter_code>@keras.saving.register_keras_serializable(package="MyLayers", name="KernelMult")
class MyDense(keras.layers.Layer):
def __init__(
self,
units,
*,
kernel_regularizer=None,
kernel_initializer=None,
nested_model=None,
**kwargs
):
super().__init__(**kwargs)
self.hidden_units = units
self.kernel_regularizer = kernel_regularizer
self.kernel_initializer = kernel_initializer
self.nested_model = nested_model
def get_config(self):
config = super().get_config()
# Update the config with the custom layer's parameters
config.update(
{
"units": self.hidden_units,
"kernel_regularizer": self.kernel_regularizer,
"kernel_initializer": self.kernel_initializer,
"nested_model": self.nested_model,
}
)
return config
def build(self, input_shape):
input_units = input_shape[-1]
self.kernel = self.add_weight(
name="kernel",
shape=(input_units, self.hidden_units),
regularizer=self.kernel_regularizer,
initializer=self.kernel_initializer,
)
def call(self, inputs):
return ops.matmul(inputs, self.kernel)
layer = MyDense(units=16, kernel_regularizer="l1", kernel_initializer="ones")
layer3 = MyDense(units=64, nested_model=layer)
config = keras.layers.serialize(layer3)
print(config)
new_layer = keras.layers.deserialize(config)
print(new_layer)<jupyter_output><empty_output><jupyter_text>Note that overriding `from_config` is unnecessary above for `MyDense` because `hidden_units`, `kernel_initializer`, and `kernel_regularizer` are ints, strings, and a built-in Keras object, respectively. This means that the default `from_config` implementation of `cls(**config)` will work as intended. For more complex objects, such as layers and models passed to `__init__`, for example, you must explicitly deserialize these objects. Let's take a look at an example of a model where a `from_config` override is necessary.**Example:**<jupyter_code>@keras.saving.register_keras_serializable(package="ComplexModels")
class CustomModel(keras.layers.Layer):
def __init__(self, first_layer, second_layer=None, **kwargs):
super().__init__(**kwargs)
self.first_layer = first_layer
if second_layer is not None:
self.second_layer = second_layer
else:
self.second_layer = keras.layers.Dense(8)
def get_config(self):
config = super().get_config()
config.update(
{
"first_layer": self.first_layer,
"second_layer": self.second_layer,
}
)
return config
@classmethod
def from_config(cls, config):
# Note that you can also use `keras.saving.deserialize_keras_object` here
config["first_layer"] = keras.layers.deserialize(config["first_layer"])
config["second_layer"] = keras.layers.deserialize(config["second_layer"])
return cls(**config)
def call(self, inputs):
return self.first_layer(self.second_layer(inputs))
# Let's make our first layer the custom layer from the previous example (MyDense)
inputs = keras.Input((32,))
outputs = CustomModel(first_layer=layer)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config)<jupyter_output><empty_output><jupyter_text>How custom objects are serialized. The serialization format has a special key for custom objects registered via `@keras.saving.register_keras_serializable`. This `registered_name` key allows for easy retrieval at loading/deserialization time while also allowing users to add custom naming. Let's take a look at the config from serializing the custom layer `MyDense` we defined above.**Example**:<jupyter_code>layer = MyDense(
units=16,
kernel_regularizer=keras.regularizers.L1L2(l1=1e-5, l2=1e-4),
kernel_initializer="ones",
)
config = keras.layers.serialize(layer)
print(config)<jupyter_output><empty_output> | keras-io/guides/ipynb/serialization_and_saving.ipynb/0 | {
"file_path": "keras-io/guides/ipynb/serialization_and_saving.ipynb",
"repo_id": "keras-io",
"token_count": 7942
} | 120 |
# Semantic Segmentation with KerasCV
**Author:** [Divyashree Sreepathihalli](https://github.com/divyashreepathihalli), [Ian Stenbit](https://github.com/ianstenbit)<br>
**Date created:** 2023/08/22<br>
**Last modified:** 2023/08/24<br>
**Description:** Train and use DeepLabv3+ segmentation model with KerasCV.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/keras_cv/semantic_segmentation_deeplab_v3_plus.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/keras_cv/semantic_segmentation_deeplab_v3_plus.py)

---
## Background
Semantic segmentation is a type of computer vision task that involves assigning a
class label such as person, bike, or background to each individual pixel of an
image, effectively dividing the image into regions that correspond to different
object classes or categories.

KerasCV offers the DeepLabv3+ model developed by Google for semantic
segmentation. This guide demonstrates how to fine-tune and use the DeepLabv3+ model for
image semantic segmentation with KerasCV. Its architecture combines atrous convolutions,
contextual information aggregation, and powerful backbones to achieve accurate and
detailed semantic segmentation. The DeepLabv3+ model has been shown to achieve
state-of-the-art results on a variety of image segmentation benchmarks.
### References
[Encoder-Decoder with Atrous Separable Convolution for Semantic Image
Segmentation](https://arxiv.org/abs/1802.02611)<br>
[Rethinking Atrous Convolution for Semantic Image
Segmentation](https://arxiv.org/abs/1706.05587)
---
## Setup and Imports
Let's install the dependencies and import the necessary modules.
To run this tutorial, you will need to install the following packages:
* `keras-cv`
* `keras`
```python
!pip install -q --upgrade keras-cv
!pip install -q --upgrade keras # Upgrade to Keras 3.
```
After installing `keras-cv` and `keras`, set the Keras backend.
This guide can be run with any backend (TensorFlow, JAX, PyTorch).
```
import os
os.environ["KERAS_BACKEND"] = "jax"
```
```python
import keras
from keras import ops
import keras_cv
import numpy as np
from keras_cv.datasets.pascal_voc.segmentation import load as load_voc
```
---
## Perform semantic segmentation with a pretrained DeepLabv3+ model
The highest level API in the KerasCV semantic segmentation API is the `keras_cv.models`
API. This API includes fully pretrained semantic segmentation models, such as
`keras_cv.models.DeepLabV3Plus`.
Let's get started by constructing a DeepLabv3+ model pretrained on the Pascal VOC dataset.
```python
model = keras_cv.models.DeepLabV3Plus.from_preset(
"deeplab_v3_plus_resnet50_pascalvoc",
num_classes=21,
input_shape=[512, 512, 3],
)
```
Let us visualize the results of this pretrained model
```python
filepath = keras.utils.get_file(origin="https://i.imgur.com/gCNcJJI.jpg")
image = keras.utils.load_img(filepath)
resize = keras_cv.layers.Resizing(height=512, width=512)
image = resize(image)
image = keras.ops.expand_dims(np.array(image), axis=0)
preds = ops.expand_dims(ops.argmax(model(image), axis=-1), axis=-1)
keras_cv.visualization.plot_segmentation_mask_gallery(
image,
value_range=(0, 255),
num_classes=1,
y_true=None,
y_pred=preds,
scale=3,
rows=1,
cols=1,
)
```

---
## Train a custom semantic segmentation model
In this guide, we'll assemble a full training pipeline for a KerasCV DeepLabV3 semantic
segmentation model. This includes data loading, augmentation, training, metric
evaluation, and inference!
---
## Download the data
We download
[Pascal VOC dataset](https://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz)
with KerasCV datasets and split them into train dataset `train_ds` and `eval_ds`.
```python
train_ds = load_voc(split="sbd_train")
eval_ds = load_voc(split="sbd_eval")
```
---
## Preprocess the data
The `preprocess_tfds_inputs` utility function preprocesses the inputs to a dictionary of
`images` and `segmentation_masks`. The images and segmentation masks are resized to
512x512. The resulting dataset is then batched into groups of 4 image and segmentation
mask pairs.
A batch of this preprocessed input training data can be visualized using the
`keras_cv.visualization.plot_segmentation_mask_gallery` function. This function takes a
batch of images and segmentation masks as input and displays them in a grid.
```python
def preprocess_tfds_inputs(inputs):
def unpackage_tfds_inputs(tfds_inputs):
return {
"images": tfds_inputs["image"],
"segmentation_masks": tfds_inputs["class_segmentation"],
}
outputs = inputs.map(unpackage_tfds_inputs)
outputs = outputs.map(keras_cv.layers.Resizing(height=512, width=512))
outputs = outputs.batch(4, drop_remainder=True)
return outputs
train_ds = preprocess_tfds_inputs(train_ds)
batch = train_ds.take(1).get_single_element()
keras_cv.visualization.plot_segmentation_mask_gallery(
batch["images"],
value_range=(0, 255),
    num_classes=21,  # The 21 classes of the Pascal VOC dataset, including 1 class for the background.
y_true=batch["segmentation_masks"],
scale=3,
rows=2,
cols=2,
)
```

The preprocessing is applied to the evaluation dataset `eval_ds`.
```python
eval_ds = preprocess_tfds_inputs(eval_ds)
```
---
## Data Augmentation
KerasCV provides a variety of image augmentation options. In this example, we will use
the `RandomFlip` augmentation to augment the training dataset. The `RandomFlip`
layer randomly flips the images in the training dataset horizontally (its default
mode). This can help to improve the model's robustness to changes in the orientation
of the objects in the images.
```python
train_ds = train_ds.map(keras_cv.layers.RandomFlip())
batch = train_ds.take(1).get_single_element()
keras_cv.visualization.plot_segmentation_mask_gallery(
batch["images"],
value_range=(0, 255),
num_classes=21,
y_true=batch["segmentation_masks"],
scale=3,
rows=2,
cols=2,
)
```

---
## Model Configuration
Please feel free to modify the configurations for model training and note how the
training results change. This is a great exercise to get a better understanding of the
training pipeline.
The learning rate schedule is used by the optimizer to calculate the learning rate for
each epoch. The optimizer then uses the learning rate to update the weights of the model.
In this case, the learning rate schedule uses a cosine decay function. A cosine decay
function starts high and then decreases over time, eventually reaching zero. The
cardinality of the VOC dataset is 2124 with a batch size of 4. The dataset cardinality
is important for learning rate decay because it determines how many steps the model
will train for. The initial learning rate is scaled from the base value of 0.007 by the
batch size (`0.007 * BATCH_SIZE / 16`), and the decay steps are `EPOCHS * 2124`. This
means that the learning rate will start at `INITIAL_LR` and then
decrease to zero over 2124 steps.

```python
BATCH_SIZE = 4
INITIAL_LR = 0.007 * BATCH_SIZE / 16
EPOCHS = 1
NUM_CLASSES = 21
learning_rate = keras.optimizers.schedules.CosineDecay(
INITIAL_LR,
decay_steps=EPOCHS * 2124,
)
```
We instantiate a DeepLabV3+ model with a ResNet50 backbone pretrained on ImageNet classification:
`resnet50_v2_imagenet` pre-trained weights will be used as the backbone feature
extractor for the DeepLabV3Plus model. The `num_classes` parameter specifies the number of
classes that the model will be trained to segment.
```python
model = keras_cv.models.DeepLabV3Plus.from_preset(
"resnet50_v2_imagenet", num_classes=NUM_CLASSES
)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/keras-cv/models/resnet50v2/imagenet/classification-v2-notop.h5
94687928/94687928 ━━━━━━━━━━━━━━━━━━━━ 1s 0us/step
```
</div>
---
## Compile the model
The model.compile() function sets up the training process for the model. It defines the
- optimization algorithm - Stochastic Gradient Descent (SGD)
- the loss function - categorical cross-entropy
- the evaluation metrics - Mean IoU and categorical accuracy
Semantic segmentation evaluation metrics:
Mean Intersection over Union (MeanIoU):
MeanIoU measures how well a semantic segmentation model accurately identifies
and delineates different objects or regions in an image. It calculates the
overlap between predicted and actual object boundaries, providing a score
between 0 and 1, where 1 represents a perfect match.
Categorical Accuracy:
Categorical Accuracy measures the proportion of correctly classified pixels in
an image. It gives a simple percentage indicating how accurately the model
predicts the categories of pixels in the entire image.
In essence, MeanIoU emphasizes the accuracy of identifying specific object
boundaries, while Categorical Accuracy gives a broad overview of overall
pixel-level correctness.
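
To make MeanIoU concrete, here is a tiny standalone sketch (not part of the training
pipeline) that uses integer class IDs directly:

```python
# Two classes, four pixels; the prediction matches the label on 2 of the 4 pixels.
# Each class has IoU = 1 / (1 + 1 + 1) = 0.33, so the mean IoU is ~0.33.
miou = keras.metrics.MeanIoU(num_classes=2)
miou.update_state([0, 0, 1, 1], [0, 1, 0, 1])
print(float(miou.result()))  # ~0.33
```

In the compilation below we pass `sparse_y_true=False` and `sparse_y_pred=False`
because the targets are one-hot encoded and the model outputs per-class scores rather
than integer class IDs.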
```python
model.compile(
optimizer=keras.optimizers.SGD(
learning_rate=learning_rate, weight_decay=0.0001, momentum=0.9, clipnorm=10.0
),
loss=keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=[
keras.metrics.MeanIoU(
num_classes=NUM_CLASSES, sparse_y_true=False, sparse_y_pred=False
),
keras.metrics.CategoricalAccuracy(),
],
)
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "deep_lab_v3_plus_1"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Connected to </span>┃
┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━┩
│ input_layer_9 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ - │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ functional_11 │ [(<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">23,556…</span> │ input_layer_9[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>), │ │ │
│ │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ │ │
│ │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>)] │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ spatial_pyramid_po… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">15,538…</span> │ functional_11[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">1</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">SpatialPyramidPoo…</span> │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ encoder_output_ups… │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ spatial_pyramid_poo… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">UpSampling2D</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">256</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ sequential_14 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">12,480</span> │ functional_11[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Sequential</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">48</span>) │ │ │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ concatenate_1 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ encoder_output_upsa… │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Concatenate</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">304</span>) │ │ sequential_14[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
├─────────────────────┼───────────────────┼─────────┼──────────────────────┤
│ sequential_15 │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, │ <span style="color: #00af00; text-decoration-color: #00af00">84,224</span> │ concatenate_1[<span style="color: #00af00; text-decoration-color: #00af00">0</span>][<span style="color: #00af00; text-decoration-color: #00af00">0</span>] │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">Sequential</span>) │ <span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">21</span>) │ │ │
└─────────────────────┴───────────────────┴─────────┴──────────────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">39,191,488</span> (149.50 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">39,146,464</span> (149.33 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">45,024</span> (175.88 KB)
</pre>
The utility function `dict_to_tuple` effectively transforms the dictionaries of training
and validation datasets into tuples of images and one-hot encoded segmentation masks,
which are used during training and evaluation of the DeepLabv3+ model.
```python
def dict_to_tuple(x):
import tensorflow as tf
return x["images"], tf.one_hot(
tf.cast(tf.squeeze(x["segmentation_masks"], axis=-1), "int32"), 21
)
train_ds = train_ds.map(dict_to_tuple)
eval_ds = eval_ds.map(dict_to_tuple)
model.fit(train_ds, validation_data=eval_ds, epochs=EPOCHS)
```
<div class="k-default-codeblock">
```
2124/Unknown 735s 319ms/step - categorical_accuracy: 0.7026 - loss: 1.2143 - mean_io_u: 0.0706
/usr/lib/python3.10/contextlib.py:153: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset.
self.gen.throw(typ, value, traceback)
2124/2124 ━━━━━━━━━━━━━━━━━━━━ 813s 356ms/step - categorical_accuracy: 0.7026 - loss: 1.2143 - mean_io_u: 0.0706 - val_categorical_accuracy: 0.7768 - val_loss: 0.8223 - val_mean_io_u: 0.1593
<keras.src.callbacks.history.History at 0x7f261a534640>
```
</div>
---
## Predictions with trained model
Now that the training of the DeepLabv3+ model has completed, let's test it by making
predictions on a few sample images.
```python
test_ds = load_voc(split="sbd_eval")
test_ds = preprocess_tfds_inputs(test_ds)
images, masks = next(iter(train_ds.take(1)))
images = ops.convert_to_tensor(images)
masks = ops.convert_to_tensor(masks)
preds = ops.expand_dims(ops.argmax(model(images), axis=-1), axis=-1)
masks = ops.expand_dims(ops.argmax(masks, axis=-1), axis=-1)
keras_cv.visualization.plot_segmentation_mask_gallery(
images,
value_range=(0, 255),
num_classes=21,
y_true=masks,
y_pred=preds,
scale=3,
rows=1,
cols=4,
)
```

Here are some additional tips for using the KerasCV DeepLabv3+ model:
- The model can be trained on a variety of datasets, including the COCO dataset, the
PASCAL VOC dataset, and the Cityscapes dataset.
- The model can be fine-tuned on a custom dataset to improve its performance on a
specific task.
- The model can be used to perform real-time inference on images.
- Also, try out KerasCV's SegFormer model `keras_cv.models.segmentation.SegFormer`. The
SegFormer model is a newer model that has been shown to achieve state-of-the-art results
on a variety of image segmentation benchmarks. It is based on a hierarchical Transformer
encoder (MiT), and it is more efficient and accurate than many previous image
segmentation models. A minimal construction sketch is shown below.
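
As a starting point, a SegFormer can be constructed much like the DeepLabV3Plus model
above. This is only a minimal sketch: the preset name below is illustrative, so check
`keras_cv.models.segmentation.SegFormer.presets` for the identifiers available in your
KerasCV version.

```python
# Minimal sketch -- the preset name is illustrative; inspect `SegFormer.presets`
# for the presets shipped with your KerasCV version.
segformer = keras_cv.models.segmentation.SegFormer.from_preset(
    "segformer_b0", num_classes=NUM_CLASSES
)
# The model can then be compiled and fit on the same `train_ds` / `eval_ds`
# pipeline built above.
```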
| keras-io/guides/md/keras_cv/semantic_segmentation_deeplab_v3_plus.md/0 | {
"file_path": "keras-io/guides/md/keras_cv/semantic_segmentation_deeplab_v3_plus.md",
"repo_id": "keras-io",
"token_count": 7627
} | 121 |
# Transfer learning & fine-tuning
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2020/04/15<br>
**Last modified:** 2023/06/25<br>
**Description:** Complete guide to transfer learning & fine-tuning in Keras.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/transfer_learning.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/transfer_learning.py)
---
## Setup
```python
import numpy as np
import keras
from keras import layers
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
```
---
## Introduction
**Transfer learning** consists of taking features learned on one problem, and
leveraging them on a new, similar problem. For instance, features from a model that has
learned to identify raccoons may be useful to kick-start a model meant to identify
tanukis.
Transfer learning is usually done for tasks where your dataset has too little data to
train a full-scale model from scratch.
The most common incarnation of transfer learning in the context of deep learning is the
following workflow:
1. Take layers from a previously trained model.
2. Freeze them, so as to avoid destroying any of the information they contain during
future training rounds.
3. Add some new, trainable layers on top of the frozen layers. They will learn to turn
the old features into predictions on a new dataset.
4. Train the new layers on your dataset.
A last, optional step, is **fine-tuning**, which consists of unfreezing the entire
model you obtained above (or part of it), and re-training it on the new data with a
very low learning rate. This can potentially achieve meaningful improvements, by
incrementally adapting the pretrained features to the new data.
First, we will go over the Keras `trainable` API in detail, which underlies most
transfer learning & fine-tuning workflows.
Then, we'll demonstrate the typical workflow by taking a model pretrained on the
ImageNet dataset, and retraining it on the Kaggle "cats vs dogs" classification
dataset.
This is adapted from
[Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python)
and the 2016 blog post
["building powerful image classification models using very little data"](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html).
---
## Freezing layers: understanding the `trainable` attribute
Layers & models have three weight attributes:
- `weights` is the list of all weights variables of the layer.
- `trainable_weights` is the list of those that are meant to be updated (via gradient
descent) to minimize the loss during training.
- `non_trainable_weights` is the list of those that aren't meant to be trained.
Typically they are updated by the model during the forward pass.
**Example: the `Dense` layer has 2 trainable weights (kernel & bias)**
```python
layer = keras.layers.Dense(3)
layer.build((None, 4)) # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
```
<div class="k-default-codeblock">
```
weights: 2
trainable_weights: 2
non_trainable_weights: 0
```
</div>
In general, all weights are trainable weights. The only built-in layer that has
non-trainable weights is the `BatchNormalization` layer. It uses non-trainable weights
to keep track of the mean and variance of its inputs during training.
To learn how to use non-trainable weights in your own custom layers, see the
[guide to writing new layers from scratch](/guides/making_new_layers_and_models_via_subclassing/).
**Example: the `BatchNormalization` layer has 2 trainable weights and 2 non-trainable
weights**
```python
layer = keras.layers.BatchNormalization()
layer.build((None, 4)) # Create the weights
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
```
<div class="k-default-codeblock">
```
weights: 4
trainable_weights: 2
non_trainable_weights: 2
```
</div>
Layers & models also feature a boolean attribute `trainable`. Its value can be changed.
Setting `layer.trainable` to `False` moves all the layer's weights from trainable to
non-trainable. This is called "freezing" the layer: the state of a frozen layer won't
be updated during training (either when training with `fit()` or when training with
any custom loop that relies on `trainable_weights` to apply gradient updates).
**Example: setting `trainable` to `False`**
```python
layer = keras.layers.Dense(3)
layer.build((None, 4)) # Create the weights
layer.trainable = False # Freeze the layer
print("weights:", len(layer.weights))
print("trainable_weights:", len(layer.trainable_weights))
print("non_trainable_weights:", len(layer.non_trainable_weights))
```
<div class="k-default-codeblock">
```
weights: 2
trainable_weights: 0
non_trainable_weights: 2
```
</div>
When a trainable weight becomes non-trainable, its value is no longer updated during
training.
```python
# Make a model with 2 layers
layer1 = keras.layers.Dense(3, activation="relu")
layer2 = keras.layers.Dense(3, activation="sigmoid")
model = keras.Sequential([keras.Input(shape=(3,)), layer1, layer2])
# Freeze the first layer
layer1.trainable = False
# Keep a copy of the weights of layer1 for later reference
initial_layer1_weights_values = layer1.get_weights()
# Train the model
model.compile(optimizer="adam", loss="mse")
model.fit(np.random.random((2, 3)), np.random.random((2, 3)))
# Check that the weights of layer1 have not changed during training
final_layer1_weights_values = layer1.get_weights()
np.testing.assert_allclose(
initial_layer1_weights_values[0], final_layer1_weights_values[0]
)
np.testing.assert_allclose(
initial_layer1_weights_values[1], final_layer1_weights_values[1]
)
```
<div class="k-default-codeblock">
```
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 766ms/step - loss: 0.0615
```
</div>
Do not confuse the `layer.trainable` attribute with the argument `training` in
`layer.__call__()` (which controls whether the layer should run its forward pass in
inference mode or training mode). For more information, see the
[Keras FAQ](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
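
As a quick illustrative sketch of the difference: `trainable` decides which weights the
optimizer is allowed to update, while `training` decides how the forward pass behaves
for layers such as `Dropout` or `BatchNormalization`.

```python
dropout = keras.layers.Dropout(0.5)
x = np.ones((1, 4))

# `training` switches the forward-pass behavior:
print(dropout(x, training=False))  # inputs pass through unchanged
print(dropout(x, training=True))   # some entries are zeroed, the rest are rescaled

# `trainable` concerns weight updates only; Dropout has no weights, so this is a no-op:
dropout.trainable = False
```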
---
## Recursive setting of the `trainable` attribute
If you set `trainable = False` on a model or on any layer that has sublayers,
all children layers become non-trainable as well.
**Example:**
```python
inner_model = keras.Sequential(
[
keras.Input(shape=(3,)),
keras.layers.Dense(3, activation="relu"),
keras.layers.Dense(3, activation="relu"),
]
)
model = keras.Sequential(
[
keras.Input(shape=(3,)),
inner_model,
keras.layers.Dense(3, activation="sigmoid"),
]
)
model.trainable = False # Freeze the outer model
assert inner_model.trainable == False # All layers in `model` are now frozen
assert inner_model.layers[0].trainable == False # `trainable` is propagated recursively
```
---
## The typical transfer-learning workflow
This leads us to how a typical transfer learning workflow can be implemented in Keras:
1. Instantiate a base model and load pre-trained weights into it.
2. Freeze all layers in the base model by setting `trainable = False`.
3. Create a new model on top of the output of one (or several) layers from the base
model.
4. Train your new model on your new dataset.
Note that an alternative, more lightweight workflow could also be:
1. Instantiate a base model and load pre-trained weights into it.
2. Run your new dataset through it and record the output of one (or several) layers
from the base model. This is called **feature extraction**.
3. Use that output as input data for a new, smaller model.
A key advantage of that second workflow is that you only run the base model once on
your data, rather than once per epoch of training. So it's a lot faster & cheaper.
An issue with that second workflow, though, is that it doesn't allow you to dynamically
modify the input data of your new model during training, which is required when doing
data augmentation, for instance. Transfer learning is typically used for tasks when
your new dataset has too little data to train a full-scale model from scratch, and in
such scenarios data augmentation is very important. So in what follows, we will focus
on the first workflow.
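
For reference, a minimal sketch of that second workflow could look like the following
(the `new_images` / `new_labels` arrays are placeholders for your own data):

```python
base_model = keras.applications.Xception(
    weights="imagenet",
    input_shape=(150, 150, 3),
    include_top=False,
    pooling="avg",  # global average pooling, so features come out as vectors
)
base_model.trainable = False

# Run the base model once over the new data to extract features.
features = base_model.predict(new_images)  # shape: (num_samples, 2048)

# Train a small classifier on the extracted features.
classifier = keras.Sequential([keras.Input(shape=(2048,)), keras.layers.Dense(1)])
classifier.compile(
    optimizer=keras.optimizers.Adam(),
    loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
classifier.fit(features, new_labels, epochs=20)
```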
Here's what the first workflow looks like in Keras:
First, instantiate a base model with pre-trained weights.
```python
base_model = keras.applications.Xception(
weights='imagenet', # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False) # Do not include the ImageNet classifier at the top.
```
Then, freeze the base model.
```python
base_model.trainable = False
```
Create a new model on top.
```python
inputs = keras.Input(shape=(150, 150, 3))
# We make sure that the base_model is running in inference mode here,
# by passing `training=False`. This is important for fine-tuning, as you will
# learn in a few paragraphs.
x = base_model(inputs, training=False)
# Convert features of shape `base_model.output_shape[1:]` to vectors
x = keras.layers.GlobalAveragePooling2D()(x)
# A Dense classifier with a single unit (binary classification)
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
```
Train the model on new data.
```python
model.compile(optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
model.fit(new_dataset, epochs=20, callbacks=..., validation_data=...)
```
---
## Fine-tuning
Once your model has converged on the new data, you can try to unfreeze all or part of
the base model and retrain the whole model end-to-end with a very low learning rate.
This is an optional last step that can potentially give you incremental improvements.
It could also potentially lead to quick overfitting -- keep that in mind.
It is critical to only do this step *after* the model with frozen layers has been
trained to convergence. If you mix randomly-initialized trainable layers with
trainable layers that hold pre-trained features, the randomly-initialized layers will
cause very large gradient updates during training, which will destroy your pre-trained
features.
It's also critical to use a very low learning rate at this stage, because
you are training a much larger model than in the first round of training, on a dataset
that is typically very small.
As a result, you are at risk of overfitting very quickly if you apply large weight
updates. Here, you only want to readapt the pretrained weights in an incremental way.
This is how to implement fine-tuning of the whole base model:
```python
# Unfreeze the base model
base_model.trainable = True
# It's important to recompile your model after you make any changes
# to the `trainable` attribute of any inner layer, so that your changes
# are taken into account
model.compile(optimizer=keras.optimizers.Adam(1e-5), # Very low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()])
# Train end-to-end. Be careful to stop before you overfit!
model.fit(new_dataset, epochs=10, callbacks=..., validation_data=...)
```
**Important note about `compile()` and `trainable`**
Calling `compile()` on a model is meant to "freeze" the behavior of that model. This
implies that the `trainable`
attribute values at the time the model is compiled should be preserved throughout the
lifetime of that model,
until `compile` is called again. Hence, if you change any `trainable` value, make sure
to call `compile()` again on your
model for your changes to be taken into account.
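
A minimal sketch of that pattern:

```python
dense = keras.layers.Dense(2)
model = keras.Sequential([keras.Input(shape=(4,)), dense])
model.compile(optimizer="adam", loss="mse")

dense.trainable = False  # Changed *after* compile(): not picked up by fit() yet
model.compile(optimizer="adam", loss="mse")  # Recompile so the freeze takes effect
```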
**Important notes about `BatchNormalization` layer**
Many image models contain `BatchNormalization` layers. That layer is a special case on
every imaginable count. Here are a few things to keep in mind.
- `BatchNormalization` contains 2 non-trainable weights that get updated during
training. These are the variables tracking the mean and variance of the inputs.
- When you set `bn_layer.trainable = False`, the `BatchNormalization` layer will
run in inference mode, and will not update its mean & variance statistics. This is not
the case for other layers in general, as
[weight trainability & inference/training modes are two orthogonal concepts](
https://keras.io/getting_started/faq/#whats-the-difference-between-the-training-argument-in-call-and-the-trainable-attribute).
But the two are tied in the case of the `BatchNormalization` layer.
- When you unfreeze a model that contains `BatchNormalization` layers in order to do
fine-tuning, you should keep the `BatchNormalization` layers in inference mode by
passing `training=False` when calling the base model.
Otherwise the updates applied to the non-trainable weights will suddenly destroy
what the model has learned.
You'll see this pattern in action in the end-to-end example at the end of this guide.
---
## An end-to-end example: fine-tuning an image classification model on a cats vs. dogs dataset
To solidify these concepts, let's walk you through a concrete end-to-end transfer
learning & fine-tuning example. We will load the Xception model, pre-trained on
ImageNet, and use it on the Kaggle "cats vs. dogs" classification dataset.
### Getting the data
First, let's fetch the cats vs. dogs dataset using TFDS. If you have your own dataset,
you'll probably want to use the utility
`keras.utils.image_dataset_from_directory` to generate similar labeled
dataset objects from a set of images on disk filed into class-specific folders.
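
For instance, assuming a folder of images sorted into `cats/` and `dogs/`
subdirectories (the path below is illustrative), that could look like:

```python
# Illustrative only -- expects a layout like my_images/cats/*.jpg and my_images/dogs/*.jpg
new_train_ds = keras.utils.image_dataset_from_directory(
    "my_images",
    validation_split=0.2,
    subset="training",
    seed=1337,
    image_size=(150, 150),
    batch_size=64,
)
```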
Transfer learning is most useful when working with very small datasets. To keep our
dataset small, we will use 40% of the original training data (25,000 images) for
training, 10% for validation, and 10% for testing.
```python
tfds.disable_progress_bar()
train_ds, validation_ds, test_ds = tfds.load(
"cats_vs_dogs",
# Reserve 10% for validation and 10% for test
split=["train[:40%]", "train[40%:50%]", "train[50%:60%]"],
as_supervised=True, # Include labels
)
print(f"Number of training samples: {train_ds.cardinality()}")
print(f"Number of validation samples: {validation_ds.cardinality()}")
print(f"Number of test samples: {test_ds.cardinality()}")
```
<div class="k-default-codeblock">
```
Downloading and preparing dataset 786.68 MiB (download: 786.68 MiB, generated: Unknown size, total: 786.68 MiB) to /home/mattdangerw/tensorflow_datasets/cats_vs_dogs/4.0.0...
WARNING:absl:1738 images were corrupted and were skipped
Dataset cats_vs_dogs downloaded and prepared to /home/mattdangerw/tensorflow_datasets/cats_vs_dogs/4.0.0. Subsequent calls will reuse this data.
Number of training samples: 9305
Number of validation samples: 2326
Number of test samples: 2326
```
</div>
These are the first 9 images in the training dataset -- as you can see, they're all
different sizes.
```python
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
```

We can also see that label 1 is "dog" and label 0 is "cat".
### Standardizing the data
Our raw images have a variety of sizes. In addition, each pixel consists of 3 integer
values between 0 and 255 (RGB level values). This isn't a great fit for feeding a
neural network. We need to do 2 things:
- Standardize to a fixed image size. We pick 150x150.
- Normalize pixel values between -1 and 1. We'll do this using a `Normalization` layer as
part of the model itself.
In general, it's a good practice to develop models that take raw data as input, as
opposed to models that take already-preprocessed data. The reason being that, if your
model expects preprocessed data, any time you export your model to use it elsewhere
(in a web browser, in a mobile app), you'll need to reimplement the exact same
preprocessing pipeline. This gets very tricky very quickly. So we should do the least
possible amount of preprocessing before hitting the model.
Here, we'll do image resizing in the data pipeline (because a deep neural network can
only process contiguous batches of data), and we'll do the input value scaling as part
of the model, when we create it.
Let's resize images to 150x150:
```python
resize_fn = keras.layers.Resizing(150, 150)
train_ds = train_ds.map(lambda x, y: (resize_fn(x), y))
validation_ds = validation_ds.map(lambda x, y: (resize_fn(x), y))
test_ds = test_ds.map(lambda x, y: (resize_fn(x), y))
```
### Using random data augmentation
When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to
the training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
```python
augmentation_layers = [
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
]
def data_augmentation(x):
for layer in augmentation_layers:
x = layer(x)
return x
train_ds = train_ds.map(lambda x, y: (data_augmentation(x), y))
```
Let's batch the data and use prefetching to optimize loading speed.
```python
from tensorflow import data as tf_data
batch_size = 64
train_ds = train_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
validation_ds = validation_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
test_ds = test_ds.batch(batch_size).prefetch(tf_data.AUTOTUNE).cache()
```
Let's visualize what the first image of the first batch looks like after various random
transformations:
```python
for images, labels in train_ds.take(1):
plt.figure(figsize=(10, 10))
first_image = images[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(np.expand_dims(first_image, 0))
plt.imshow(np.array(augmented_image[0]).astype("int32"))
plt.title(int(labels[0]))
plt.axis("off")
```

---
## Build a model
Now let's build a model that follows the blueprint we've explained earlier.
Note that:
- We add a `Rescaling` layer to scale input values (initially in the `[0, 255]`
range) to the `[-1, 1]` range.
- We add a `Dropout` layer before the classification layer, for regularization.
- We make sure to pass `training=False` when calling the base model, so that
it runs in inference mode, so that batchnorm statistics don't get updated
even after we unfreeze the base model for fine-tuning.
```python
base_model = keras.applications.Xception(
weights="imagenet", # Load weights pre-trained on ImageNet.
input_shape=(150, 150, 3),
include_top=False,
) # Do not include the ImageNet classifier at the top.
# Freeze the base_model
base_model.trainable = False
# Create new model on top
inputs = keras.Input(shape=(150, 150, 3))
# Pre-trained Xception weights require that the input be scaled
# from (0, 255) to a range of (-1., +1.), the rescaling layer
# outputs: `(inputs * scale) + offset`
scale_layer = keras.layers.Rescaling(scale=1 / 127.5, offset=-1)
x = scale_layer(inputs)
# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x) # Regularize with dropout
outputs = keras.layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.summary(show_trainable=True)
```
<div class="k-default-codeblock">
```
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels_notop.h5
83683744/83683744 ━━━━━━━━━━━━━━━━━━━━ 0s 0us/step
```
</div>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_4"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Trai… </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━┩
│ input_layer_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ rescaling (<span style="color: #0087ff; text-decoration-color: #0087ff">Rescaling</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ xception (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">20,861…</span> │ <span style="color: #ff0000; text-decoration-color: #ff0000; font-weight: bold">N</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ global_average_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePooling2D</span>) │ │ │ │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ dense_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,049</span> │ <span style="color: #00af00; text-decoration-color: #00af00; font-weight: bold">Y</span> │
└─────────────────────────────┴──────────────────────────┴─────────┴───────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">20,863,529</span> (79.59 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">2,049</span> (8.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">20,861,480</span> (79.58 MB)
</pre>
---
## Train the top layer
```python
model.compile(
optimizer=keras.optimizers.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 2
print("Fitting the top layer of the model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
```
<div class="k-default-codeblock">
```
Fitting the top layer of the model
Epoch 1/2
78/146 ━━━━━━━━━━[37m━━━━━━━━━━ 15s 226ms/step - binary_accuracy: 0.7995 - loss: 0.4088
Corrupt JPEG data: 65 extraneous bytes before marker 0xd9
136/146 ━━━━━━━━━━━━━━━━━━[37m━━ 2s 231ms/step - binary_accuracy: 0.8430 - loss: 0.3298
Corrupt JPEG data: 239 extraneous bytes before marker 0xd9
143/146 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 231ms/step - binary_accuracy: 0.8464 - loss: 0.3235
Corrupt JPEG data: 1153 extraneous bytes before marker 0xd9
144/146 ━━━━━━━━━━━━━━━━━━━[37m━ 0s 231ms/step - binary_accuracy: 0.8468 - loss: 0.3226
Corrupt JPEG data: 228 extraneous bytes before marker 0xd9
146/146 ━━━━━━━━━━━━━━━━━━━━ 0s 260ms/step - binary_accuracy: 0.8478 - loss: 0.3209
Corrupt JPEG data: 2226 extraneous bytes before marker 0xd9
146/146 ━━━━━━━━━━━━━━━━━━━━ 54s 317ms/step - binary_accuracy: 0.8482 - loss: 0.3200 - val_binary_accuracy: 0.9667 - val_loss: 0.0877
Epoch 2/2
146/146 ━━━━━━━━━━━━━━━━━━━━ 7s 51ms/step - binary_accuracy: 0.9483 - loss: 0.1232 - val_binary_accuracy: 0.9705 - val_loss: 0.0786
<keras.src.callbacks.history.History at 0x7fc8b7f1db70>
```
</div>
---
## Do a round of fine-tuning of the entire model
Finally, let's unfreeze the base model and train the entire model end-to-end with a low
learning rate.
Importantly, although the base model becomes trainable, it is still running in
inference mode since we passed `training=False` when calling it when we built the
model. This means that the batch normalization layers inside won't update their batch
statistics. If they did, they would wreak havoc on the representations learned by the
model so far.
```python
# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary(show_trainable=True)
model.compile(
optimizer=keras.optimizers.Adam(1e-5), # Low learning rate
loss=keras.losses.BinaryCrossentropy(from_logits=True),
metrics=[keras.metrics.BinaryAccuracy()],
)
epochs = 1
print("Fitting the end-to-end model")
model.fit(train_ds, epochs=epochs, validation_data=validation_ds)
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "functional_4"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━┳━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃<span style="font-weight: bold"> Trai… </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━╇━━━━━━━┩
│ input_layer_4 (<span style="color: #0087ff; text-decoration-color: #0087ff">InputLayer</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ rescaling (<span style="color: #0087ff; text-decoration-color: #0087ff">Rescaling</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">150</span>, <span style="color: #00af00; text-decoration-color: #00af00">3</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ xception (<span style="color: #0087ff; text-decoration-color: #0087ff">Functional</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">5</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">20,861…</span> │ <span style="color: #00af00; text-decoration-color: #00af00; font-weight: bold">Y</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ global_average_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePooling2D</span>) │ │ │ │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">2048</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │ <span style="font-weight: bold">-</span> │
├─────────────────────────────┼──────────────────────────┼─────────┼───────┤
│ dense_7 (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">1</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">2,049</span> │ <span style="color: #00af00; text-decoration-color: #00af00; font-weight: bold">Y</span> │
└─────────────────────────────┴──────────────────────────┴─────────┴───────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">20,867,629</span> (79.60 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">20,809,001</span> (79.38 MB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">54,528</span> (213.00 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Optimizer params: </span><span style="color: #00af00; text-decoration-color: #00af00">4,100</span> (16.02 KB)
</pre>
<div class="k-default-codeblock">
```
Fitting the end-to-end model
146/146 ━━━━━━━━━━━━━━━━━━━━ 75s 327ms/step - binary_accuracy: 0.8487 - loss: 0.3760 - val_binary_accuracy: 0.9494 - val_loss: 0.1160
<keras.src.callbacks.history.History at 0x7fcd1c755090>
```
</div>
Even after a single epoch, fine-tuning gains us a nice improvement here.
Let's evaluate the model on the test dataset:
```python
print("Test dataset evaluation")
model.evaluate(test_ds)
```
<div class="k-default-codeblock">
```
Test dataset evaluation
11/37 ━━━━━[37m━━━━━━━━━━━━━━━ 1s 52ms/step - binary_accuracy: 0.9407 - loss: 0.1155
Corrupt JPEG data: 99 extraneous bytes before marker 0xd9
37/37 ━━━━━━━━━━━━━━━━━━━━ 2s 47ms/step - binary_accuracy: 0.9427 - loss: 0.1259
[0.13755160570144653, 0.941300630569458]
```
</div> | keras-io/guides/md/transfer_learning.md/0 | {
"file_path": "keras-io/guides/md/transfer_learning.md",
"repo_id": "keras-io",
"token_count": 11917
} | 122 |
"""
Title: Writing your own callbacks
Authors: Rick Chao, Francois Chollet
Date created: 2019/03/20
Last modified: 2023/06/25
Description: Complete guide to writing new Keras callbacks.
Accelerator: GPU
"""
"""
## Introduction
A callback is a powerful tool to customize the behavior of a Keras model during
training, evaluation, or inference. Examples include `keras.callbacks.TensorBoard`
to visualize training progress and results with TensorBoard, or
`keras.callbacks.ModelCheckpoint` to periodically save your model during training.
In this guide, you will learn what a Keras callback is, what it can do, and how you can
build your own. We provide a few demos of simple callback applications to get you
started.
"""
"""
## Setup
"""
import numpy as np
import keras
"""
## Keras callbacks overview
All callbacks subclass the `keras.callbacks.Callback` class, and
override a set of methods called at various stages of training, testing, and
predicting. Callbacks are useful to get a view on internal states and statistics of
the model during training.
You can pass a list of callbacks (as the keyword argument `callbacks`) to the following
model methods:
- `keras.Model.fit()`
- `keras.Model.evaluate()`
- `keras.Model.predict()`
"""
"""
## An overview of callback methods
### Global methods
#### `on_(train|test|predict)_begin(self, logs=None)`
Called at the beginning of `fit`/`evaluate`/`predict`.
#### `on_(train|test|predict)_end(self, logs=None)`
Called at the end of `fit`/`evaluate`/`predict`.
### Batch-level methods for training/testing/predicting
#### `on_(train|test|predict)_batch_begin(self, batch, logs=None)`
Called right before processing a batch during training/testing/predicting.
#### `on_(train|test|predict)_batch_end(self, batch, logs=None)`
Called at the end of training/testing/predicting a batch. Within this method, `logs` is
a dict containing the metrics results.
### Epoch-level methods (training only)
#### `on_epoch_begin(self, epoch, logs=None)`
Called at the beginning of an epoch during training.
#### `on_epoch_end(self, epoch, logs=None)`
Called at the end of an epoch during training.
"""
"""
## A basic example
Let's take a look at a concrete example. To get started, let's define a simple
Sequential Keras model:
"""
# Define the Keras model to add callbacks to
def get_model():
model = keras.Sequential()
model.add(keras.layers.Dense(1))
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=0.1),
loss="mean_squared_error",
metrics=["mean_absolute_error"],
)
return model
"""
Then, load the MNIST data for training and testing from the Keras datasets API:
"""
# Load example MNIST data and pre-process it
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# Limit the data to 1000 samples
x_train = x_train[:1000]
y_train = y_train[:1000]
x_test = x_test[:1000]
y_test = y_test[:1000]
"""
Now, define a simple custom callback that logs:
- When `fit`/`evaluate`/`predict` starts & ends
- When each epoch starts & ends
- When each training batch starts & ends
- When each evaluation (test) batch starts & ends
- When each inference (prediction) batch starts & ends
"""
class CustomCallback(keras.callbacks.Callback):
def on_train_begin(self, logs=None):
keys = list(logs.keys())
print("Starting training; got log keys: {}".format(keys))
def on_train_end(self, logs=None):
keys = list(logs.keys())
print("Stop training; got log keys: {}".format(keys))
def on_epoch_begin(self, epoch, logs=None):
keys = list(logs.keys())
print("Start epoch {} of training; got log keys: {}".format(epoch, keys))
def on_epoch_end(self, epoch, logs=None):
keys = list(logs.keys())
print("End epoch {} of training; got log keys: {}".format(epoch, keys))
def on_test_begin(self, logs=None):
keys = list(logs.keys())
print("Start testing; got log keys: {}".format(keys))
def on_test_end(self, logs=None):
keys = list(logs.keys())
print("Stop testing; got log keys: {}".format(keys))
def on_predict_begin(self, logs=None):
keys = list(logs.keys())
print("Start predicting; got log keys: {}".format(keys))
def on_predict_end(self, logs=None):
keys = list(logs.keys())
print("Stop predicting; got log keys: {}".format(keys))
def on_train_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: start of batch {}; got log keys: {}".format(batch, keys))
def on_train_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Training: end of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: start of batch {}; got log keys: {}".format(batch, keys))
def on_test_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Evaluating: end of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_begin(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: start of batch {}; got log keys: {}".format(batch, keys))
def on_predict_batch_end(self, batch, logs=None):
keys = list(logs.keys())
print("...Predicting: end of batch {}; got log keys: {}".format(batch, keys))
"""
Let's try it out:
"""
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=1,
verbose=0,
validation_split=0.5,
callbacks=[CustomCallback()],
)
res = model.evaluate(
x_test, y_test, batch_size=128, verbose=0, callbacks=[CustomCallback()]
)
res = model.predict(x_test, batch_size=128, callbacks=[CustomCallback()])
"""
### Usage of `logs` dict
The `logs` dict contains the loss value, and all the metrics at the end of a batch or
epoch. Examples include the loss and mean absolute error.
"""
class LossAndErrorPrintingCallback(keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_test_batch_end(self, batch, logs=None):
print(
"Up to batch {}, the average loss is {:7.2f}.".format(batch, logs["loss"])
)
def on_epoch_end(self, epoch, logs=None):
print(
"The average loss for epoch {} is {:7.2f} "
"and mean absolute error is {:7.2f}.".format(
epoch, logs["loss"], logs["mean_absolute_error"]
)
)
model = get_model()
model.fit(
x_train,
y_train,
batch_size=128,
epochs=2,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
res = model.evaluate(
x_test,
y_test,
batch_size=128,
verbose=0,
callbacks=[LossAndErrorPrintingCallback()],
)
"""
## Usage of `self.model` attribute
In addition to receiving log information when one of their methods is called,
callbacks have access to the model associated with the current round of
training/evaluation/inference: `self.model`.
Here are a few of the things you can do with `self.model` in a callback:
- Set `self.model.stop_training = True` to immediately interrupt training.
- Mutate hyperparameters of the optimizer (available as `self.model.optimizer`),
such as `self.model.optimizer.learning_rate`.
- Save the model at periodic intervals.
- Record the output of `model.predict()` on a few test samples at the end of each
epoch, to use as a sanity check during training.
- Extract visualizations of intermediate features at the end of each epoch, to monitor
what the model is learning over time.
- etc.
Let's see this in action in a couple of examples.
"""
"""
## Examples of Keras callback applications
### Early stopping at minimum loss
This first example shows the creation of a `Callback` that stops training when the
minimum of loss has been reached, by setting the attribute `self.model.stop_training`
(boolean). Optionally, you can provide an argument `patience` to specify how many
epochs we should wait before stopping after having reached a local minimum.
`keras.callbacks.EarlyStopping` provides a more complete and general implementation.
"""
class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
"""Stop training when the loss is at its min, i.e. the loss stops decreasing.
Arguments:
        patience: Number of epochs to wait after min has been hit. After this
        number of epochs with no improvement, training stops.
"""
def __init__(self, patience=0):
super().__init__()
self.patience = patience
# best_weights to store the weights at which the minimum loss occurs.
self.best_weights = None
def on_train_begin(self, logs=None):
        # The number of epochs waited since the loss was last at its minimum.
self.wait = 0
# The epoch the training stops at.
self.stopped_epoch = 0
# Initialize the best as infinity.
        self.best = np.inf
def on_epoch_end(self, epoch, logs=None):
current = logs.get("loss")
if np.less(current, self.best):
self.best = current
self.wait = 0
            # Record the best weights if the current result is better (lower).
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
print("Restoring model weights from the end of the best epoch.")
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print(f"Epoch {self.stopped_epoch + 1}: early stopping")
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=30,
verbose=0,
callbacks=[LossAndErrorPrintingCallback(), EarlyStoppingAtMinLoss()],
)
"""
### Learning rate scheduling
In this example, we show how a custom Callback can be used to dynamically change the
learning rate of the optimizer during the course of training.
See `callbacks.LearningRateScheduler` for a more general implementation.
"""
class CustomLearningRateScheduler(keras.callbacks.Callback):
"""Learning rate scheduler which sets the learning rate according to schedule.
Arguments:
schedule: a function that takes an epoch index
(integer, indexed from 0) and current learning rate
as inputs and returns a new learning rate as output (float).
"""
def __init__(self, schedule):
super().__init__()
self.schedule = schedule
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, "learning_rate"):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
# Get the current learning rate from model's optimizer.
lr = self.model.optimizer.learning_rate
# Call schedule function to get the scheduled learning rate.
scheduled_lr = self.schedule(epoch, lr)
# Set the value back to the optimizer before this epoch starts
self.model.optimizer.learning_rate = scheduled_lr
print(f"\nEpoch {epoch}: Learning rate is {float(np.array(scheduled_lr))}.")
LR_SCHEDULE = [
# (epoch to start, learning rate) tuples
(3, 0.05),
(6, 0.01),
(9, 0.005),
(12, 0.001),
]
def lr_schedule(epoch, lr):
"""Helper function to retrieve the scheduled learning rate based on epoch."""
if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
return lr
for i in range(len(LR_SCHEDULE)):
if epoch == LR_SCHEDULE[i][0]:
return LR_SCHEDULE[i][1]
return lr
model = get_model()
model.fit(
x_train,
y_train,
batch_size=64,
epochs=15,
verbose=0,
callbacks=[
LossAndErrorPrintingCallback(),
CustomLearningRateScheduler(lr_schedule),
],
)
"""
### Built-in Keras callbacks
Be sure to check out the existing Keras callbacks by
reading the [API docs](https://keras.io/api/callbacks/).
Applications include logging to CSV, saving
the model, visualizing metrics in TensorBoard, and a lot more!
"""
| keras-io/guides/writing_your_own_callbacks.py/0 | {
"file_path": "keras-io/guides/writing_your_own_callbacks.py",
"repo_id": "keras-io",
"token_count": 4591
} | 123 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/data_loading/timeseries/'" />
| keras-io/redirects/api/preprocessing/timeseries/index.html/0 | {
"file_path": "keras-io/redirects/api/preprocessing/timeseries/index.html",
"repo_id": "keras-io",
"token_count": 36
} | 124 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/layers/initializers/'" />
| keras-io/redirects/initializers/index.html/0 | {
"file_path": "keras-io/redirects/initializers/index.html",
"repo_id": "keras-io",
"token_count": 35
} | 125 |
<meta http-equiv="refresh" content="0; URL='https://keras.io/api/models/'" />
| keras-io/redirects/models/about-keras-models/index.html/0 | {
"file_path": "keras-io/redirects/models/about-keras-models/index.html",
"repo_id": "keras-io",
"token_count": 31
} | 126 |
import re
import string
import markdown
import copy
import pathlib
import os
def save_file(path, content):
parent = pathlib.Path(path).parent
if not os.path.exists(parent):
os.makedirs(parent)
f = open(path, "w", encoding="utf8")
f.write(content)
f.close()
def process_outline_title(title):
title = re.sub(r"`(.*?)`", r"<code>\1</code>", title)
title = re.sub(r"\[(.*?)\]\(.*?\)", r"\1", title)
return title
def turn_title_into_id(title):
title = title.lower()
    title = title.replace("&amp;", "amp")
    title = title.replace("&", "amp")
title = title.replace("<code>", "")
title = title.replace("</code>", "")
title = title.translate(str.maketrans("", "", string.punctuation))
title = title.replace(" ", "-")
return title
def make_outline(md_source):
lines = md_source.split("\n")
outline = []
in_code_block = False
for line in lines:
if line.startswith("```"):
in_code_block = not in_code_block
if in_code_block:
continue
if line.startswith("# "):
title = line[2:]
title = process_outline_title(title)
outline.append(
{
"title": title,
"url": "#" + turn_title_into_id(title),
"depth": 1,
}
)
if line.startswith("## "):
title = line[3:]
title = process_outline_title(title)
outline.append(
{
"title": title,
"url": "#" + turn_title_into_id(title),
"depth": 2,
}
)
if line.startswith("### "):
title = line[4:]
title = process_outline_title(title)
outline.append(
{
"title": title,
"url": "#" + turn_title_into_id(title),
"depth": 3,
}
)
return outline
def render_markdown_to_html(md_content):
return markdown.markdown(
md_content,
extensions=[
"fenced_code",
"tables",
"codehilite",
"mdx_truly_sane_lists",
"smarty",
],
extension_configs={
"codehilite": {
"guess_lang": False,
},
"smarty": {
"smart_dashes": True,
"smart_quotes": False,
"smart_angled_quotes": False,
"smart_ellipses": False,
},
},
)
def set_active_flag_in_nav_entry(entry, relative_url):
entry = copy.copy(entry)
if relative_url.startswith(entry["relative_url"]):
entry["active"] = True
else:
entry["active"] = False
children = [
set_active_flag_in_nav_entry(child, relative_url)
for child in entry.get("children", [])
]
entry["children"] = children
return entry
| keras-io/scripts/autogen_utils.py/0 | {
"file_path": "keras-io/scripts/autogen_utils.py",
"repo_id": "keras-io",
"token_count": 1602
} | 127 |
# Keras Applications
Keras Applications are deep learning models that are made available alongside pre-trained weights.
These models can be used for prediction, feature extraction, and fine-tuning.
Weights are downloaded automatically when instantiating a model. They are stored at `~/.keras/models/`.
Upon instantiation, the models will be built according to the image data format set in your Keras configuration file at `~/.keras/keras.json`.
For instance, if you have set `image_data_format=channels_last`,
then any model loaded from this repository will get built according to the data format convention "Height-Width-Depth".
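For example, the minimal snippet below (a sketch, not required reading for the table
that follows) triggers the weight download on first use and reuses the cached copy
afterwards:
```python
import keras
# The first call downloads the ImageNet weights to ~/.keras/models/;
# later calls reuse the cached file.
model = keras.applications.ResNet50(weights="imagenet")
```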
## Available models
| Model | Size (MB)| Top-1 Accuracy | Top-5 Accuracy | Parameters | Depth | Time (ms) per inference step (CPU) | Time (ms) per inference step (GPU) |
| ----- | -------: | -------------: | -------------: | --------: | ----: | ---------------------------------: | ---------------------------------: |
| [Xception](xception) | 88 | 79.0% | 94.5% | 22.9M | 81 | 109.4 | 8.1 |
| [VGG16](vgg/#vgg16-function) | 528 | 71.3% | 90.1% | 138.4M | 16 | 69.5 | 4.2 |
| [VGG19](vgg/#vgg19-function) | 549 | 71.3% | 90.0% | 143.7M | 19 | 84.8 | 4.4 |
| [ResNet50](resnet/#resnet50-function) | 98 | 74.9% | 92.1% | 25.6M | 107 | 58.2 | 4.6 |
| [ResNet50V2](resnet/#resnet50v2-function) | 98 | 76.0% | 93.0% | 25.6M | 103 | 45.6 | 4.4 |
| [ResNet101](resnet/#resnet101-function) | 171 | 76.4% | 92.8% | 44.7M | 209 | 89.6 | 5.2 |
| [ResNet101V2](resnet/#resnet101v2-function) | 171 | 77.2% | 93.8% | 44.7M | 205 | 72.7 | 5.4 |
| [ResNet152](resnet/#resnet152-function) | 232 | 76.6% | 93.1% | 60.4M | 311 | 127.4 | 6.5 |
| [ResNet152V2](resnet/#resnet152v2-function) | 232 | 78.0% | 94.2% | 60.4M | 307 | 107.5 | 6.6 |
| [InceptionV3](inceptionv3) | 92 | 77.9% | 93.7% | 23.9M | 189 | 42.2 | 6.9 |
| [InceptionResNetV2](inceptionresnetv2) | 215 | 80.3% | 95.3% | 55.9M | 449 | 130.2 | 10.0 |
| [MobileNet](mobilenet) | 16 | 70.4% | 89.5% | 4.3M | 55 | 22.6 | 3.4 |
| [MobileNetV2](mobilenet/#mobilenetv2-function) | 14 | 71.3% | 90.1% | 3.5M | 105 | 25.9 | 3.8 |
| [DenseNet121](densenet/#densenet121-function) | 33 | 75.0% | 92.3% | 8.1M | 242 | 77.1 | 5.4 |
| [DenseNet169](densenet/#densenet169-function) | 57 | 76.2% | 93.2% | 14.3M | 338 | 96.4 | 6.3 |
| [DenseNet201](densenet/#densenet201-function) | 80 | 77.3% | 93.6% | 20.2M | 402 | 127.2 | 6.7 |
| [NASNetMobile](nasnet/#nasnetmobile-function) | 23 | 74.4% | 91.9% | 5.3M | 389 | 27.0 | 6.7 |
| [NASNetLarge](nasnet/#nasnetlarge-function) | 343 | 82.5% | 96.0% | 88.9M | 533 | 344.5 | 20.0 |
| [EfficientNetB0](efficientnet/#efficientnetb0-function) | 29 | 77.1% | 93.3% | 5.3M | 132 | 46.0 | 4.9 |
| [EfficientNetB1](efficientnet/#efficientnetb1-function) | 31 | 79.1% | 94.4% | 7.9M | 186 | 60.2 | 5.6 |
| [EfficientNetB2](efficientnet/#efficientnetb2-function) | 36 | 80.1% | 94.9% | 9.2M | 186 | 80.8 | 6.5 |
| [EfficientNetB3](efficientnet/#efficientnetb3-function) | 48 | 81.6% | 95.7% | 12.3M | 210 | 140.0 | 8.8 |
| [EfficientNetB4](efficientnet/#efficientnetb4-function) | 75 | 82.9% | 96.4% | 19.5M | 258 | 308.3 | 15.1 |
| [EfficientNetB5](efficientnet/#efficientnetb5-function) | 118 | 83.6% | 96.7% | 30.6M | 312 | 579.2 | 25.3 |
| [EfficientNetB6](efficientnet/#efficientnetb6-function) | 166 | 84.0% | 96.8% | 43.3M | 360 | 958.1 | 40.4 |
| [EfficientNetB7](efficientnet/#efficientnetb7-function) | 256 | 84.3% | 97.0% | 66.7M | 438 | 1578.9 | 61.6 |
| [EfficientNetV2B0](efficientnet_v2/#efficientnetv2b0-function) | 29 | 78.7% | 94.3% | 7.2M | - | - | - |
| [EfficientNetV2B1](efficientnet_v2/#efficientnetv2b1-function) | 34 | 79.8% | 95.0% | 8.2M | - | - | - |
| [EfficientNetV2B2](efficientnet_v2/#efficientnetv2b2-function) | 42 | 80.5% | 95.1% | 10.2M | - | - | - |
| [EfficientNetV2B3](efficientnet_v2/#efficientnetv2b3-function) | 59 | 82.0% | 95.8% | 14.5M | - | - | - |
| [EfficientNetV2S](efficientnet_v2/#efficientnetv2s-function) | 88 | 83.9% | 96.7% | 21.6M | - | - | - |
| [EfficientNetV2M](efficientnet_v2/#efficientnetv2m-function) | 220 | 85.3% | 97.4% | 54.4M | - | - | - |
| [EfficientNetV2L](efficientnet_v2/#efficientnetv2l-function) | 479 | 85.7% | 97.5% | 119.0M | - | - | - |
| [ConvNeXtTiny](convnext/#convnexttiny-function) | 109.42 | 81.3% | - | 28.6M | - | - | - |
| [ConvNeXtSmall](convnext/#convnextsmall-function) | 192.29 | 82.3% | - | 50.2M | - | - | - |
| [ConvNeXtBase](convnext/#convnextbase-function) | 338.58 | 85.3% | - | 88.5M | - | - | - |
| [ConvNeXtLarge](convnext/#convnextlarge-function) | 755.07 | 86.3% | - | 197.7M | - | - | - |
| [ConvNeXtXLarge](convnext/#convnextxlarge-function) | 1310 | 86.7% | - | 350.1M | - | - | - |
The top-1 and top-5 accuracy refer to the model's performance on the ImageNet validation dataset.
Depth refers to the topological depth of the network. This includes activation layers, batch normalization layers etc.
Time per inference step is the average of 30 batches and 10 repetitions.
- CPU: AMD EPYC Processor (with IBPB) (92 core)
- RAM: 1.7T
- GPU: Tesla A100
- Batch size: 32
Depth counts the number of layers with parameters.
-----
## Usage examples for image classification models
### Classify ImageNet classes with ResNet50
```python
import keras
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet')
img_path = 'elephant.jpg'
img = keras.utils.load_img(img_path, target_size=(224, 224))
x = keras.utils.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])
# Predicted: [(u'n02504013', u'Indian_elephant', 0.82658225), (u'n01871265', u'tusker', 0.1122357), (u'n02504458', u'African_elephant', 0.061040461)]
```
### Extract features with VGG16
```python
import keras
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
import numpy as np
model = VGG16(weights='imagenet', include_top=False)
img_path = 'elephant.jpg'
img = keras.utils.load_img(img_path, target_size=(224, 224))
x = keras.utils.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
```
### Extract features from an arbitrary intermediate layer with VGG19
```python
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
base_model = VGG19(weights='imagenet')
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output)
img_path = 'elephant.jpg'
img = keras.utils.load_img(img_path, target_size=(224, 224))
x = keras.utils.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
block4_pool_features = model.predict(x)
```
### Fine-tune InceptionV3 on a new set of classes
```python
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
predictions = Dense(200, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# train the model on the new data for a few epochs
model.fit(...)
# at this point, the top layers are well trained and we can start fine-tuning
# convolutional layers from inception V3. We will freeze the bottom N layers
# and train the remaining top layers.
# let's visualize layer names and layer indices to see how many layers
# we should freeze:
for i, layer in enumerate(base_model.layers):
print(i, layer.name)
# we chose to train the top 2 inception blocks, i.e. we will freeze
# the first 249 layers and unfreeze the rest:
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
# we need to recompile the model for these modifications to take effect
# we use SGD with a low learning rate
from keras.optimizers import SGD
model.compile(optimizer=SGD(learning_rate=0.0001, momentum=0.9), loss='categorical_crossentropy')
# we train our model again (this time fine-tuning the top 2 inception blocks
# alongside the top Dense layers
model.fit(...)
```
### Build InceptionV3 over a custom input tensor
```python
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Input
# this could also be the output of a different Keras model or layer
input_tensor = Input(shape=(224, 224, 3))
model = InceptionV3(input_tensor=input_tensor, weights='imagenet', include_top=True)
```
| keras-io/templates/api/applications/index.md/0 | {
"file_path": "keras-io/templates/api/applications/index.md",
"repo_id": "keras-io",
"token_count": 3511
} | 128 |
# RoBERTa
Models, tokenizers, and preprocessing layers for RoBERTa,
as described in ["RoBERTa: A Robustly Optimized BERT Pretraining Approach"](https://arxiv.org/abs/1907.11692).
For a full list of available **presets**, see the
[models page](/api/keras_nlp/models).
{{toc}}
| keras-io/templates/api/keras_nlp/models/roberta/index.md/0 | {
"file_path": "keras-io/templates/api/keras_nlp/models/roberta/index.md",
"repo_id": "keras-io",
"token_count": 96
} | 129 |
# Metrics
A metric is a function that is used to judge the performance of your model.
Metric functions are similar to loss functions, except that the results from evaluating a metric are not used when training the model.
Note that you may use any loss function as a metric.
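For instance, mean absolute error can be tracked as a metric alongside a different
training loss (a minimal sketch):
```python
model.compile(
    optimizer='adam',
    loss='mean_squared_error',
    metrics=['mean_absolute_error'],  # a loss function used purely as a metric
)
```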
## Available metrics
{{toc}}
---
## Usage with `compile()` & `fit()`
The `compile()` method takes a `metrics` argument, which is a list of metrics:
```python
model.compile(
optimizer='adam',
loss='mean_squared_error',
metrics=[
metrics.MeanSquaredError(),
metrics.AUC(),
]
)
```
Metric values are displayed during `fit()` and logged to the `History` object returned
by `fit()`. They are also returned by `model.evaluate()`.
Note that the best way to monitor your metrics during training is via [TensorBoard](/api/callbacks/tensorboard).
To track metrics under a specific name, you can pass the `name` argument
to the metric constructor:
```python
model.compile(
optimizer='adam',
loss='mean_squared_error',
metrics=[
metrics.MeanSquaredError(name='my_mse'),
metrics.AUC(name='my_auc'),
]
)
```
All built-in metrics may also be passed via their string identifier (in this case,
default constructor argument values are used, including a default metric name):
```python
model.compile(
optimizer='adam',
loss='mean_squared_error',
metrics=[
'MeanSquaredError',
'AUC',
]
)
```
---
## Standalone usage
Unlike losses, metrics are stateful. You update their state using the `update_state()` method,
and you query the scalar metric result using the `result()` method:
```python
m = keras.metrics.AUC()
m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
print('Intermediate result:', float(m.result()))
m.update_state([1, 1, 1, 1], [0, 1, 1, 0])
print('Final result:', float(m.result()))
```
The internal state can be cleared via `metric.reset_states()`.
Here's how you would use a metric as part of a simple custom training loop:
```python
accuracy = keras.metrics.CategoricalAccuracy()
loss_fn = keras.losses.CategoricalCrossentropy(from_logits=True)
optimizer = keras.optimizers.Adam()
# Iterate over the batches of a dataset.
for step, (x, y) in enumerate(dataset):
with tf.GradientTape() as tape:
logits = model(x)
# Compute the loss value for this batch.
loss_value = loss_fn(y, logits)
# Update the state of the `accuracy` metric.
accuracy.update_state(y, logits)
# Update the weights of the model to minimize the loss value.
gradients = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
# Logging the current accuracy value so far.
if step % 100 == 0:
print('Step:', step)
print('Total running accuracy so far: %.3f' % accuracy.result())
```
---
## Creating custom metrics
### As simple callables (stateless)
Much like loss functions, any callable with signature `metric_fn(y_true, y_pred)`
that returns an array of losses (one per sample in the input batch) can be passed to `compile()` as a metric.
Note that sample weighting is automatically supported for any such metric.
Here's a simple example:
```python
from keras import ops
def my_metric_fn(y_true, y_pred):
squared_difference = ops.square(y_true - y_pred)
return ops.mean(squared_difference, axis=-1) # Note the `axis=-1`
model.compile(optimizer='adam', loss='mean_squared_error', metrics=[my_metric_fn])
```
In this case, the scalar metric value you are tracking during training and evaluation
is the average of the per-batch metric values for all batches seen during a given epoch
(or during a given call to `model.evaluate()`).
### As subclasses of `Metric` (stateful)
Not all metrics can be expressed via stateless callables, because
metrics are evaluated for each batch during training and evaluation, but in some cases
the average of the per-batch values is not what you are interested in.
Let's say that you want to compute AUC over a
given evaluation dataset: the average of the per-batch AUC values
isn't the same as the AUC over the entire dataset.
For such metrics, you're going to want to subclass the `Metric` class,
which can maintain a state across batches. It's easy:
- Create the state variables in `__init__`
- Update the variables given `y_true` and `y_pred` in `update_state()`
- Return the scalar metric result in `result()`
- Clear the state in `reset_states()`
Here's a simple example computing binary true positives:
```python
class BinaryTruePositives(keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super().__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = ops.cast(y_true, "bool")
y_pred = ops.cast(y_pred, "bool")
values = ops.logical_and(ops.equal(y_true, True), ops.equal(y_pred, True))
values = ops.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = ops.cast(sample_weight, self.dtype)
values = values * sample_weight
self.true_positives.assign_add(ops.sum(values))
def result(self):
return self.true_positives
def reset_states(self):
self.true_positives.assign(0)
m = BinaryTruePositives()
m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
print(f'Intermediate result: {m.result().numpy()}')
m.update_state([1, 1, 1, 1], [0, 1, 1, 0])
print(f'Intermediate result: {m.result().numpy()}')
```
| keras-io/templates/api/metrics/index.md/0 | {
"file_path": "keras-io/templates/api/metrics/index.md",
"repo_id": "keras-io",
"token_count": 1892
} | 130 |
# Introduction to Keras for engineers
**Author:** [fchollet](https://twitter.com/fchollet)<br>
**Date created:** 2023/07/10<br>
**Last modified:** 2023/07/10<br>
**Description:** First contact with Keras 3.
<img class="k-inline-icon" src="https://colab.research.google.com/img/colab_favicon.ico"/> [**View in Colab**](https://colab.research.google.com/github/keras-team/keras-io/blob/master/guides/ipynb/intro_to_keras_for_engineers.ipynb) <span class="k-dot">•</span><img class="k-inline-icon" src="https://github.com/favicon.ico"/> [**GitHub source**](https://github.com/keras-team/keras-io/blob/master/guides/intro_to_keras_for_engineers.py)
---
## Introduction
Keras 3 is a deep learning framework that
works with TensorFlow, JAX, and PyTorch interchangeably.
This notebook will walk you through key Keras 3 workflows.
---
## Setup
We're going to be using the JAX backend here -- but you can
edit the string below to `"tensorflow"` or `"torch"` and hit
"Restart runtime", and the whole notebook will run just the same!
This entire guide is backend-agnostic.
```python
import numpy as np
import os
os.environ["KERAS_BACKEND"] = "jax"
# Note that Keras should only be imported after the backend
# has been configured. The backend cannot be changed once the
# package is imported.
import keras
```
---
## A first example: A MNIST convnet
Let's start with the Hello World of ML: training a convnet
to classify MNIST digits.
Here's the data:
```python
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print("y_train shape:", y_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
```
<div class="k-default-codeblock">
```
x_train shape: (60000, 28, 28, 1)
y_train shape: (60000,)
60000 train samples
10000 test samples
```
</div>
Here's our model.
Different model-building options that Keras offers include:
- [The Sequential API](https://keras.io/guides/sequential_model/) (what we use below)
- [The Functional API](https://keras.io/guides/functional_api/) (most typical)
- [Writing your own models yourself via subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) (for advanced use cases)
```python
# Model parameters
num_classes = 10
input_shape = (28, 28, 1)
model = keras.Sequential(
[
keras.layers.Input(shape=input_shape),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dropout(0.5),
keras.layers.Dense(num_classes, activation="softmax"),
]
)
```
Here's our model summary:
```python
model.summary()
```
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold">Model: "sequential"</span>
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━┓
┃<span style="font-weight: bold"> Layer (type) </span>┃<span style="font-weight: bold"> Output Shape </span>┃<span style="font-weight: bold"> Param # </span>┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━┩
│ conv2d (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">26</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">640</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_1 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">24</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">36,928</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ max_pooling2d (<span style="color: #0087ff; text-decoration-color: #0087ff">MaxPooling2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">12</span>, <span style="color: #00af00; text-decoration-color: #00af00">12</span>, <span style="color: #00af00; text-decoration-color: #00af00">64</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_2 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">73,856</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ conv2d_3 (<span style="color: #0087ff; text-decoration-color: #0087ff">Conv2D</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">8</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">147,584</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ global_average_pooling2d │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
│ (<span style="color: #0087ff; text-decoration-color: #0087ff">GlobalAveragePooling2D</span>) │ │ │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dropout (<span style="color: #0087ff; text-decoration-color: #0087ff">Dropout</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">128</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">0</span> │
├─────────────────────────────────┼───────────────────────────┼────────────┤
│ dense (<span style="color: #0087ff; text-decoration-color: #0087ff">Dense</span>) │ (<span style="color: #00d7ff; text-decoration-color: #00d7ff">None</span>, <span style="color: #00af00; text-decoration-color: #00af00">10</span>) │ <span style="color: #00af00; text-decoration-color: #00af00">1,290</span> │
└─────────────────────────────────┴───────────────────────────┴────────────┘
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Total params: </span><span style="color: #00af00; text-decoration-color: #00af00">260,298</span> (1016.79 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">260,298</span> (1016.79 KB)
</pre>
<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace"><span style="font-weight: bold"> Non-trainable params: </span><span style="color: #00af00; text-decoration-color: #00af00">0</span> (0.00 B)
</pre>
We use the `compile()` method to specify the optimizer, loss function,
and the metrics to monitor. Note that with the JAX and TensorFlow backends,
XLA compilation is turned on by default.
```python
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
```
Let's train and evaluate the model. We'll set aside a validation split of 15%
of the data during training to monitor generalization on unseen data.
```python
batch_size = 128
epochs = 20
callbacks = [
keras.callbacks.ModelCheckpoint(filepath="model_at_epoch_{epoch}.keras"),
keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
]
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_split=0.15,
callbacks=callbacks,
)
score = model.evaluate(x_test, y_test, verbose=0)
```
<div class="k-default-codeblock">
```
Epoch 1/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 74s 184ms/step - acc: 0.4980 - loss: 1.3832 - val_acc: 0.9609 - val_loss: 0.1513
Epoch 2/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 74s 186ms/step - acc: 0.9245 - loss: 0.2487 - val_acc: 0.9702 - val_loss: 0.0999
Epoch 3/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 70s 175ms/step - acc: 0.9515 - loss: 0.1647 - val_acc: 0.9816 - val_loss: 0.0608
Epoch 4/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 69s 174ms/step - acc: 0.9622 - loss: 0.1247 - val_acc: 0.9833 - val_loss: 0.0541
Epoch 5/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 68s 171ms/step - acc: 0.9685 - loss: 0.1083 - val_acc: 0.9860 - val_loss: 0.0468
Epoch 6/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 70s 176ms/step - acc: 0.9710 - loss: 0.0955 - val_acc: 0.9897 - val_loss: 0.0400
Epoch 7/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 69s 172ms/step - acc: 0.9742 - loss: 0.0853 - val_acc: 0.9888 - val_loss: 0.0388
Epoch 8/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 68s 169ms/step - acc: 0.9789 - loss: 0.0738 - val_acc: 0.9902 - val_loss: 0.0387
Epoch 9/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 75s 187ms/step - acc: 0.9789 - loss: 0.0691 - val_acc: 0.9907 - val_loss: 0.0341
Epoch 10/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 77s 194ms/step - acc: 0.9806 - loss: 0.0636 - val_acc: 0.9907 - val_loss: 0.0348
Epoch 11/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 74s 186ms/step - acc: 0.9812 - loss: 0.0610 - val_acc: 0.9926 - val_loss: 0.0271
Epoch 12/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 219s 550ms/step - acc: 0.9820 - loss: 0.0590 - val_acc: 0.9912 - val_loss: 0.0294
Epoch 13/20
399/399 ━━━━━━━━━━━━━━━━━━━━ 70s 176ms/step - acc: 0.9843 - loss: 0.0504 - val_acc: 0.9918 - val_loss: 0.0316
```
</div>
During training, we were saving a model at the end of each epoch. You
can also save the model in its latest state like this:
```python
model.save("final_model.keras")
```
And reload it like this:
```python
model = keras.saving.load_model("final_model.keras")
```
Next, you can query predictions of class probabilities with `predict()`:
```python
predictions = model.predict(x_test)
```
<div class="k-default-codeblock">
```
313/313 ━━━━━━━━━━━━━━━━━━━━ 3s 9ms/step
```
</div>
That's it for the basics!
---
## Writing cross-framework custom components
Keras enables you to write custom Layers, Models, Metrics, Losses, and Optimizers
that work across TensorFlow, JAX, and PyTorch with the same codebase. Let's take a look
at custom layers first.
The `keras.ops` namespace contains:
- An implementation of the NumPy API, e.g. `keras.ops.stack` or `keras.ops.matmul`.
- A set of neural network specific ops that are absent from NumPy, such as `keras.ops.conv`
or `keras.ops.binary_crossentropy`.
Let's make a custom `Dense` layer that works with all backends:
```python
class MyDense(keras.layers.Layer):
def __init__(self, units, activation=None, name=None):
super().__init__(name=name)
self.units = units
self.activation = keras.activations.get(activation)
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=(input_dim, self.units),
initializer=keras.initializers.GlorotNormal(),
name="kernel",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=keras.initializers.Zeros(),
name="bias",
trainable=True,
)
def call(self, inputs):
# Use Keras ops to create backend-agnostic layers/metrics/etc.
x = keras.ops.matmul(inputs, self.w) + self.b
return self.activation(x)
```
Next, let's make a custom `Dropout` layer that relies on the `keras.random`
namespace:
```python
class MyDropout(keras.layers.Layer):
def __init__(self, rate, name=None):
super().__init__(name=name)
self.rate = rate
# Use seed_generator for managing RNG state.
# It is a state element and its seed variable is
# tracked as part of `layer.variables`.
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
# Use `keras.random` for random ops.
return keras.random.dropout(inputs, self.rate, seed=self.seed_generator)
```
Next, let's write a custom subclassed model that uses our two custom layers:
```python
class MyModel(keras.Model):
def __init__(self, num_classes):
super().__init__()
self.conv_base = keras.Sequential(
[
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.Conv2D(128, kernel_size=(3, 3), activation="relu"),
keras.layers.GlobalAveragePooling2D(),
]
)
self.dp = MyDropout(0.5)
self.dense = MyDense(num_classes, activation="softmax")
def call(self, x):
x = self.conv_base(x)
x = self.dp(x)
return self.dense(x)
```
Let's compile it and fit it:
```python
model = MyModel(num_classes=10)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=1, # For speed
validation_split=0.15,
)
```
<div class="k-default-codeblock">
```
399/399 ━━━━━━━━━━━━━━━━━━━━ 70s 174ms/step - acc: 0.5104 - loss: 1.3473 - val_acc: 0.9256 - val_loss: 0.2484
<keras.src.callbacks.history.History at 0x105608670>
```
</div>
---
## Training models on arbitrary data sources
All Keras models can be trained and evaluated on a wide variety of data sources,
independently of the backend you're using. This includes:
- NumPy arrays
- Pandas dataframes
- TensorFlow `tf.data.Dataset` objects
- PyTorch `DataLoader` objects
- Keras `PyDataset` objects
They all work whether you're using TensorFlow, JAX, or PyTorch as your Keras backend.
Let's try it out with PyTorch `DataLoaders`:
```python
import torch
# Create a TensorDataset
train_torch_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
val_torch_dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_test), torch.from_numpy(y_test)
)
# Create a DataLoader
train_dataloader = torch.utils.data.DataLoader(
train_torch_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = torch.utils.data.DataLoader(
val_torch_dataset, batch_size=batch_size, shuffle=False
)
model = MyModel(num_classes=10)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(train_dataloader, epochs=1, validation_data=val_dataloader)
```
<div class="k-default-codeblock">
```
469/469 ━━━━━━━━━━━━━━━━━━━━ 81s 172ms/step - acc: 0.5502 - loss: 1.2550 - val_acc: 0.9419 - val_loss: 0.1972
<keras.src.callbacks.history.History at 0x2b3385480>
```
</div>
Now let's try this out with `tf.data`:
```python
import tensorflow as tf
train_dataset = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.batch(batch_size)
.prefetch(tf.data.AUTOTUNE)
)
test_dataset = (
tf.data.Dataset.from_tensor_slices((x_test, y_test))
.batch(batch_size)
.prefetch(tf.data.AUTOTUNE)
)
model = MyModel(num_classes=10)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(),
optimizer=keras.optimizers.Adam(learning_rate=1e-3),
metrics=[
keras.metrics.SparseCategoricalAccuracy(name="acc"),
],
)
model.fit(train_dataset, epochs=1, validation_data=test_dataset)
```
<div class="k-default-codeblock">
```
469/469 ━━━━━━━━━━━━━━━━━━━━ 81s 172ms/step - acc: 0.5771 - loss: 1.1948 - val_acc: 0.9229 - val_loss: 0.2502
<keras.src.callbacks.history.History at 0x2b33e7df0>
```
</div>
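A `keras.utils.PyDataset` works the same way. Here is a minimal sketch (the class
below is illustrative and not part of the original example code):
```python
class MNISTPyDataset(keras.utils.PyDataset):
    def __init__(self, x, y, batch_size, **kwargs):
        super().__init__(**kwargs)
        self.x, self.y = x, y
        self.batch_size = batch_size
    def __len__(self):
        # Number of batches per epoch.
        return int(np.ceil(len(self.x) / self.batch_size))
    def __getitem__(self, idx):
        # Return one full batch of (inputs, targets).
        low = idx * self.batch_size
        high = min(low + self.batch_size, len(self.x))
        return self.x[low:high], self.y[low:high]
train_py_dataset = MNISTPyDataset(x_train, y_train, batch_size)
model.fit(train_py_dataset, epochs=1)
```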
---
## Further reading
This concludes our short overview of the new multi-backend capabilities
of Keras 3. Next, you can learn about:
### How to customize what happens in `fit()`
Want to implement a non-standard training algorithm yourself but still want to benefit from
the power and usability of `fit()`? It's easy to customize
`fit()` to support arbitrary use cases:
- [Customizing what happens in `fit()` with TensorFlow](http://keras.io/guides/custom_train_step_in_tensorflow/)
- [Customizing what happens in `fit()` with JAX](http://keras.io/guides/custom_train_step_in_jax/)
- [Customizing what happens in `fit()` with PyTorch](http://keras.io/guides/custom_train_step_in_torch/)
---
## How to write custom training loops
- [Writing a training loop from scratch in TensorFlow](http://keras.io/guides/writing_a_custom_training_loop_in_tensorflow/)
- [Writing a training loop from scratch in JAX](http://keras.io/guides/writing_a_custom_training_loop_in_jax/)
- [Writing a training loop from scratch in PyTorch](http://keras.io/guides/writing_a_custom_training_loop_in_torch/)
---
## How to distribute training
- [Guide to distributed training with TensorFlow](http://keras.io/guides/distributed_training_with_tensorflow/)
- [JAX distributed training example](https://github.com/keras-team/keras/blob/master/examples/demo_jax_distributed.py)
- [PyTorch distributed training example](https://github.com/keras-team/keras/blob/master/examples/demo_torch_multi_gpu.py)
Enjoy the library! 🚀
| keras-io/templates/getting_started/intro_to_keras_for_engineers.md/0 | {
"file_path": "keras-io/templates/getting_started/intro_to_keras_for_engineers.md",
"repo_id": "keras-io",
"token_count": 8148
} | 131 |
<div class='k-main-top'>
<script>
function displayDropdownMenu() {
e = document.getElementById("nav-menu");
if (e.style.display == "block") {
e.style.display = "none";
}
else {
e.style.display = "block";
document.getElementById("dropdown-nav").style.display = "block";
}
}
function resetMobileUI() {
if (window.innerWidth <= 840) {
document.getElementById("nav-menu").style.display = "none";
document.getElementById("dropdown-nav").style.display = "block";
}
else {
document.getElementById("nav-menu").style.display = "block";
document.getElementById("dropdown-nav").style.display = "none";
}
var navmenu = document.getElementById("nav-menu");
var menuheight = navmenu.clientHeight;
var kmain = document.getElementById("k-main-id");
kmain.style.minHeight = (menuheight + 100) + 'px';
}
window.onresize = resetMobileUI;
window.addEventListener("load", (event) => {
resetMobileUI()
});
</script>
<div id='dropdown-nav' onclick="displayDropdownMenu();">
<svg viewBox="-20 -20 120 120" width="60" height="60">
<rect width="100" height="20"></rect>
<rect y="30" width="100" height="20"></rect>
<rect y="60" width="100" height="20"></rect>
</svg>
</div>
<form class="bd-search d-flex align-items-center k-search-form" id="search-form">
<input type="search" class="k-search-input" id="search-input" placeholder="Search Keras documentation..." aria-label="Search Keras documentation..." autocomplete="off">
<button class="k-search-btn">
<svg width="13" height="13" viewBox="0 0 13 13"><title>search</title><path d="m4.8495 7.8226c0.82666 0 1.5262-0.29146 2.0985-0.87438 0.57232-0.58292 0.86378-1.2877 0.87438-2.1144 0.010599-0.82666-0.28086-1.5262-0.87438-2.0985-0.59352-0.57232-1.293-0.86378-2.0985-0.87438-0.8055-0.010599-1.5103 0.28086-2.1144 0.87438-0.60414 0.59352-0.8956 1.293-0.87438 2.0985 0.021197 0.8055 0.31266 1.5103 0.87438 2.1144 0.56172 0.60414 1.2665 0.8956 2.1144 0.87438zm4.4695 0.2115 3.681 3.6819-1.259 1.284-3.6817-3.7 0.0019784-0.69479-0.090043-0.098846c-0.87973 0.76087-1.92 1.1413-3.1207 1.1413-1.3553 0-2.5025-0.46363-3.4417-1.3909s-1.4088-2.0686-1.4088-3.4239c0-1.3553 0.4696-2.4966 1.4088-3.4239 0.9392-0.92727 2.0864-1.3969 3.4417-1.4088 1.3553-0.011889 2.4906 0.45771 3.406 1.4088 0.9154 0.95107 1.379 2.0924 1.3909 3.4239 0 1.2126-0.38043 2.2588-1.1413 3.1385l0.098834 0.090049z"></path></svg>
</button>
</form>
<script>
var form = document.getElementById('search-form');
form.onsubmit = function(e) {
e.preventDefault();
var query = document.getElementById('search-input').value;
window.location.href = '{{base_url}}search.html?query=' + query;
    return false;
}
</script>
</div>
<div class='k-main-inner' id='k-main-id'>
<div class='k-location-slug'>
<span class="k-location-slug-pointer">►</span> {% for part in location_history %}
<a href='{{part.url}}'>{{part.title}}</a> /
{% endfor %} {{title}}
</div>
<div class='k-content'>
{{content}}
</div>
{% if outline|length > 1 %}
<div class='k-outline'>
{% for entry in outline %}
<div class='k-outline-depth-{{entry.depth}}'>
{% if entry.depth == 2 %} ◆ {% endif %}
<a href='{{entry.url}}'>{{entry.title}}</a>
</div>
{% endfor %}
</div>
{% endif %}
</div>
| keras-io/theme/docs.html/0 | {
"file_path": "keras-io/theme/docs.html",
"repo_id": "keras-io",
"token_count": 2047
} | 132 |
pip install --upgrade pip
pip install -r requirements.txt
pip install -e .
echo "sh shell/lint.sh" > .git/hooks/pre-commit
chmod a+x .git/hooks/pre-commit
| keras-nlp/.devcontainer/setup.sh/0 | {
"file_path": "keras-nlp/.devcontainer/setup.sh",
"repo_id": "keras-nlp",
"token_count": 60
} | 133 |
# Contribution guide
KerasNLP is an actively growing project and community! We would love for you
to get involved. Below are instructions for how to plug into KerasNLP
development.
## Background reading
Before contributing code, please review our [Style Guide](STYLE_GUIDE.md) and
[API Design Guide](API_DESIGN_GUIDE.md).
Our [Roadmap](ROADMAP.md) contains an overview of the project goals and our
current focus areas.
We follow
[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
## Finding an issue
The fastest way to contribute is to find open issues that need an assignee. We
maintain two lists of GitHub tags for contributors:
- [good first issue](https://github.com/keras-team/keras-nlp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22):
a list of small, well defined issues for newcomers to the project.
- [contributions welcome](https://github.com/keras-team/keras-nlp/issues?q=is%3Aissue+is%3Aopen+label%3A%22contributions+welcome%22):
a larger list of issues that may range in complexity.
If you would like to propose a new symbol or feature, please first review our
design guide and roadmap linked above, and open an issue to discuss. If you have
a specific design in mind, please include a Colab notebook showing the proposed
design in an end-to-end example. Keep in mind that design for a new feature or
use case may take longer than contributing to an open issue with an
already-vetted design.
## Contributing code
Follow these steps to submit your code contribution.
### Step 1. Open an issue
Before making any changes, we recommend opening an issue (if one doesn't already
exist) and discussing your proposed changes. This way, we can give you feedback
and validate the proposed changes.
If your code change involves the fixing of a bug, please include a
[Colab](https://colab.research.google.com/) notebook that shows
how to reproduce the broken behavior.
If the changes are minor (simple bug fix or documentation fix), then feel free
to open a PR without discussion.
### Step 2. Make code changes
To make code changes, you need to fork the repository. You will need to set up a
development environment and run the unit tests. This is covered in the
"Setting up an Environment" section below.
### Step 3. Create a pull request
Once the change is ready, open a pull request from your branch in your fork to
the master branch in
[keras-team/keras-nlp](https://github.com/keras-team/keras-nlp).
### Step 4. Sign the Contributor License Agreement
After creating the pull request, you will need to sign the Google CLA agreement.
The agreement can be found at
[https://cla.developers.google.com/clas](https://cla.developers.google.com/clas).
### Step 5. Code review
CI tests will automatically be run directly on your pull request. Their
status will be reported back via GitHub actions.
There may be several rounds of comments and code changes before the pull
request gets approved by the reviewer.
### Step 6. Merging
Once the pull request is approved, a team member will take care of merging.
## Setting up an Environment
Python 3.9 or later is required.
Setting up your KerasNLP development environment requires you to fork the
KerasNLP repository and clone it locally. With the
[GitHub CLI](https://github.com/cli/cli) installed, you can do this as follows:
```shell
gh repo fork keras-team/keras-nlp --clone --remote
cd keras-nlp
```
Next we must set up a Python environment with the correct dependencies. We
recommend using `conda` to set up a base environment, and `pip` to install
python packages from PyPI. The exact method will depend on your OS.
**Note**: Be careful not to mix pre-packaged tensorflow and jax libraries in
`conda` with PyPI packages from `pip`. We recommend pulling *all* KerasNLP
dependencies via `pip` as described below.
### Linux (recommended)
For developing and unit testing the library, a CPU-only environment is often
sufficient. For any training or inference with the library, you will quickly
want accelerator support. The easiest way to get GPU support across all of our
backends is to set up a few different Python environments and pull in all CUDA
dependencies via `pip`.
The shell snippet below will install four conda environments: `keras-nlp-cpu`,
`keras-nlp-jax`, `keras-nlp-torch`, and `keras-nlp-tensorflow`. The cpu
environment supports all backends without CUDA, and each backend environment
has CUDA support.
```shell
conda create -y -n keras-nlp-cpu python=3.10
conda activate keras-nlp-cpu
pip install -r requirements.txt # install deps
python pip_build.py --install # install keras-nlp
for backend in "jax" "torch" "tensorflow"; do
conda create -y -n keras-nlp-${backend} python=3.10
conda activate keras-nlp-${backend}
pip install -r requirements-${backend}-cuda.txt # install deps
python pip_build.py --install # install keras-nlp
done
```
To activate the jax environment and set keras to use jax, run:
```shell
conda activate keras-nlp-jax && export KERAS_BACKEND=jax
```
### MacOS
`tensorflow-text` does not release precompiled binaries for MacOS M-series
chips, though the library does support building from source on MacOS.
We strongly recommend a Linux development environment for an easy contribution
experience. To build a dev environment from scratch on MacOS, see the following
guides:
- https://developer.apple.com/metal/tensorflow-plugin/
- https://github.com/tensorflow/text
### Windows
For the best experience developing on Windows, please install
[WSL](https://learn.microsoft.com/en-us/windows/wsl/install), and proceed with
the Linux installation instructions above.
To run the format and lint scripts, make sure you clone the repo with Linux
style line endings and change any line separator settings in your editor.
This is automatically done if you clone using git inside WSL.
Note that we do not support Windows Shell/PowerShell for any scripts in this
repository.
## Testing changes
KerasNLP is tested using [PyTest](https://docs.pytest.org/en/6.2.x/).
### Run a test file
To run a test file, run `pytest path/to/file` from the root directory of the
repository.
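For example, to run just the sine position encoding layer tests:
```shell
pytest keras_nlp/layers/modeling/sine_position_encoding_test.py
```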
### Run a single test case
To run a single test, you can use `-k=<your_regex>`
to use regular expression to match the test you want to run. For example, you
can use the following command to run all the tests in `import_test.py`
whose names contain `import`:
```shell
pytest keras_nlp/keras_nlp/integration_tests/import_test.py -k="import"
```
### Run the full test suite
You can run the default testing suite by simply invoking pytest:
```shell
pytest
```
We annotate tests that are slower or require a network connection as "large",
and by default `pytest` will skip these tests. We run large tests continuously
on GCP. You can specify these by running:
```shell
pytest --run_large
```
Finally, for tests that are very slow and resource intensive (e.g. downloading
a 5GB checkpoint), we use an "extra_large" annotation and do not run them
continuously at all. You can specify these by running:
```shell
pytest --run_extra_large
```
When running "extra_large" tests, we recommend also specify a specific test file
so you aren't waiting around forever!
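For example, to run only the extra large tests for a single preprocessor:
```shell
pytest keras_nlp/models/bloom/bloom_causal_lm_preprocessor_test.py --run_extra_large
```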
## Formatting Code
We use `flake8`, `isort` and `black` for code formatting. You can run
the following commands manually every time you want to format your code:
- Run `shell/format.sh` to format your code
- Run `shell/lint.sh` to check the result.
If after running these the CI flow is still failing, try updating `flake8`,
`isort` and `black`. This can be done by running `pip install --upgrade black`,
`pip install --upgrade flake8`, and `pip install --upgrade isort`.
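In practice, a typical formatting pass before opening a pull request looks like this:
```shell
sh shell/format.sh  # apply the formatters
sh shell/lint.sh    # verify the result passes the lint checks
```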
| keras-nlp/CONTRIBUTING.md/0 | {
"file_path": "keras-nlp/CONTRIBUTING.md",
"repo_id": "keras-nlp",
"token_count": 2177
} | 134 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbischof): remove in favor of presets with load_weights=False
MODEL_CONFIGS = {
"tiny": {
"num_layers": 2,
"hidden_dim": 128,
"dropout": 0.1,
"num_heads": 2,
"intermediate_dim": 512,
},
"mini": {
"num_layers": 4,
"hidden_dim": 256,
"dropout": 0.1,
"num_heads": 4,
"intermediate_dim": 1024,
},
"small": {
"num_layers": 4,
"hidden_dim": 512,
"dropout": 0.1,
"num_heads": 8,
"intermediate_dim": 2048,
},
"medium": {
"num_layers": 8,
"hidden_dim": 512,
"dropout": 0.1,
"num_heads": 8,
"intermediate_dim": 2048,
},
"base": {
"num_layers": 12,
"hidden_dim": 768,
"dropout": 0.1,
"num_heads": 12,
"intermediate_dim": 3072,
},
"large": {
"num_layers": 24,
"hidden_dim": 1024,
"dropout": 0.1,
"num_heads": 16,
"intermediate_dim": 4096,
},
}
# Currently we have the same set of training parameters for all configurations.
# We should see if we need to split this for different architecture sizes.
PREPROCESSING_CONFIG = {
"max_seq_length": 512,
"max_predictions_per_seq": 76,
"dupe_factor": 10,
"masked_lm_prob": 0.15,
"short_seq_prob": 0.1,
}
TRAINING_CONFIG = {
"batch_size": 256,
"epochs": 10,
"learning_rate": 1e-4,
"num_train_steps": 1_000_000,
# Percentage of training steps used for learning rate warmup.
"warmup_percentage": 0.1,
}
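# For reference, a training script consuming this config would typically derive
# the warmup step count from the two values above, for example (illustrative
# only, not used elsewhere in this module):
#
#     num_warmup_steps = int(
#         TRAINING_CONFIG["warmup_percentage"] * TRAINING_CONFIG["num_train_steps"]
#     )  # -> 100_000 steps with the defaults above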
| keras-nlp/examples/bert_pretraining/bert_config.py/0 | {
"file_path": "keras-nlp/examples/bert_pretraining/bert_config.py",
"repo_id": "keras-nlp",
"token_count": 951
} | 135 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create BERT wordpiece vocabularies.
This script will create wordpiece vocabularies suitable for pretraining BERT.
Usage:
python examples/tools/train_word_piece_vocabulary.py \
--input_files ~/datasets/bert-sentence-split-data/ \
--output_file vocab.txt
"""
import os
import sys
import tensorflow as tf
from absl import app
from absl import flags
from tensorflow_text.tools.wordpiece_vocab import bert_vocab_from_dataset
from examples.utils.scripting_utils import list_filenames_for_arg
FLAGS = flags.FLAGS
flags.DEFINE_string(
"input_files",
None,
"Comma seperated list of directories, files, or globs.",
)
flags.DEFINE_string(
"output_file", None, "Output file for the computed vocabulary."
)
flags.DEFINE_bool(
"do_lower_case",
True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.",
)
flags.DEFINE_string(
"reserved_tokens",
"[PAD],[UNK],[CLS],[SEP],[MASK]",
"Comma separated list of reserved tokens in the vocabulary.",
)
flags.DEFINE_integer("vocabulary_size", 30522, "Number of output files.")
def write_vocab_file(filepath, vocab):
with open(filepath, "w") as file:
for token in vocab:
file.write(token + "\n")
def main(_):
print(f"Reading input data from {FLAGS.input_files}")
input_filenames = list_filenames_for_arg(FLAGS.input_files)
if not input_filenames:
print("No input files found. Check `input_files` flag.")
sys.exit(1)
print(f"Outputting to {FLAGS.output_file}")
if os.path.exists(FLAGS.output_file):
print(f"File {FLAGS.output_file} already exists.")
sys.exit(1)
with open(FLAGS.output_file, "w") as file:
# TODO(mattdangerw): This is the slow and simple BERT vocabulary
# learner from tf text, we should try the faster flume option.
vocab = bert_vocab_from_dataset.bert_vocab_from_dataset(
tf.data.TextLineDataset(input_filenames).batch(1000).prefetch(2),
# The target vocabulary size
vocab_size=FLAGS.vocabulary_size,
# Reserved tokens that must be included in the vocabulary
reserved_tokens=FLAGS.reserved_tokens.split(","),
# Arguments for `text.BertTokenizer`
bert_tokenizer_params={"lower_case": FLAGS.do_lower_case},
)
for token in vocab:
file.write(token + "\n")
if __name__ == "__main__":
flags.mark_flag_as_required("input_files")
flags.mark_flag_as_required("output_file")
app.run(main)
| keras-nlp/examples/tools/train_word_piece_vocab.py/0 | {
"file_path": "keras-nlp/examples/tools/train_word_piece_vocab.py",
"repo_id": "keras-nlp",
"token_count": 1192
} | 136 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.backend import random
from keras_nlp.layers.modeling.sine_position_encoding import (
SinePositionEncoding,
)
from keras_nlp.tests.test_case import TestCase
class SinePositionEncodingTest(TestCase):
def test_layer_behaviors(self):
self.run_layer_test(
cls=SinePositionEncoding,
init_kwargs={
"max_wavelength": 10000,
},
input_data=random.uniform(shape=(2, 4, 6)),
expected_output_shape=(2, 4, 6),
)
def test_layer_behaviors_4d(self):
self.run_layer_test(
cls=SinePositionEncoding,
init_kwargs={
"max_wavelength": 10000,
},
input_data=random.uniform(shape=(1, 2, 4, 6)),
expected_output_shape=(1, 2, 4, 6),
)
def test_static_layer_output_shape(self):
pos_encoding = SinePositionEncoding()
seq_length = 100
hidden_size = 32
inputs = keras.Input(shape=(seq_length, hidden_size))
outputs = pos_encoding(inputs)
# When using static positional encoding shapes, the output is expected
# to be the same as the input shape in all dimensions.
expected_output_shape = (None, seq_length, hidden_size)
self.assertEqual(expected_output_shape, outputs.shape)
def test_dynamic_layer_output_shape(self):
pos_encoding = SinePositionEncoding()
hidden_size = 32
inputs = keras.Input(shape=(None, hidden_size))
outputs = pos_encoding(inputs)
# When using dynamic positional encoding shapes, the output is expected
# to be the same as the input shape in all dimensions but may be None.
expected_output_shape = (None, None, hidden_size)
self.assertEqual(expected_output_shape, outputs.shape)
    # Test with extra batch dimensions before the sequence length dimension.
def test_multi_dimension_layer_output_shape(self):
pos_encoding = SinePositionEncoding()
seq_length = 100
hidden_size = 32
inputs = keras.Input(shape=(None, seq_length, hidden_size))
outputs = pos_encoding(inputs)
# When using multiple dimensions before sequence length, the output is
# expected to be the same as the input shape in all dimensions.
expected_output_shape = (None, None, seq_length, hidden_size)
self.assertEqual(expected_output_shape, outputs.shape)
def test_output_correct_values(self):
pos_encoding = SinePositionEncoding()
model = keras.Sequential(
[
keras.Input(shape=(4, 6)),
pos_encoding,
]
)
input = random.uniform(shape=[1, 4, 6])
output = model(input)
        # Compare position encoding values for positions 0 and 3.
expected_0 = [0.0, 1.0, 0.0, 1.0, 0.0, 1.0]
expected_3 = [0.14112, -0.98999, 0.13879, 0.99032, 0.00646, 0.99997]
self.assertAllClose(output[0, 0, :], expected_0, atol=0.01, rtol=0.01)
self.assertAllClose(output[0, 3, :], expected_3, atol=0.01, rtol=0.01)
def test_start_index(self):
batch_size, seq_length, feature_size = 2, 3, 4
layer = SinePositionEncoding()
data = random.uniform(shape=(batch_size, seq_length, feature_size))
full_output = layer(data)
sequential_output = ops.zeros((batch_size, seq_length, feature_size))
for i in range(seq_length):
            partial_output = layer(data[:, i : i + 1, :], start_index=i)
            sequential_output = ops.slice_update(
                sequential_output, (0, i, 0), partial_output
)
self.assertAllClose(full_output, sequential_output)
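# For reference, the constants in `test_output_correct_values` follow the standard
# sinusoidal encoding formula. A minimal numpy sketch (illustrative only, not
# executed as part of the test suite) that reproduces `expected_3` for hidden
# size 6:
#
#     import numpy as np
#     pos, d = 3, 6
#     i = np.arange(d)
#     angles = pos / np.power(10000.0, (2 * (i // 2)) / d)
#     values = np.where(i % 2 == 0, np.sin(angles), np.cos(angles))
#     # -> approx [0.14112, -0.98999, 0.13879, 0.99032, 0.00646, 0.99998]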
| keras-nlp/keras_nlp/layers/modeling/sine_position_encoding_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/modeling/sine_position_encoding_test.py",
"repo_id": "keras-nlp",
"token_count": 1811
} | 137 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.backend import keras
from keras_nlp.layers.preprocessing.random_deletion import RandomDeletion
from keras_nlp.tests.test_case import TestCase
class RandomDeletionTest(TestCase):
def test_shape_and_output_from_word_deletion(self):
keras.utils.set_random_seed(1337)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
augmenter = RandomDeletion(rate=0.4, max_deletions=1, seed=42)
augmented = augmenter(split)
output = [
tf.strings.reduce_join(x, separator=" ", axis=-1) for x in augmented
]
exp_output = ["I like", "and Tensorflow"]
self.assertAllEqual(output, exp_output)
def test_shape_and_output_from_character_swaps(self):
keras.utils.set_random_seed(1337)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.unicode_split(inputs, "UTF-8")
augmenter = RandomDeletion(rate=0.4, max_deletions=1, seed=42)
augmented = augmenter(split)
output = [tf.strings.reduce_join(x, axis=-1) for x in augmented]
exp_output = ["Hey I lie", "Keras and Tensoflow"]
self.assertAllEqual(output, exp_output)
def test_with_integer_tokens(self):
keras.utils.set_random_seed(1337)
inputs = tf.constant([[1, 2], [3, 4]])
augmenter = RandomDeletion(rate=0.4, max_deletions=4, seed=42)
output = augmenter(inputs)
exp_output = [[2], [4]]
self.assertAllEqual(output, exp_output)
def test_skip_options(self):
keras.utils.set_random_seed(1337)
augmenter = RandomDeletion(
rate=0.4, max_deletions=1, seed=42, skip_list=["Tensorflow", "like"]
)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["I like", "and Tensorflow"]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
if word == "Tensorflow" or word == "like":
return True
return False
augmenter = RandomDeletion(
rate=0.4, max_deletions=1, seed=42, skip_fn=skip_fn
)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["Hey like", "Keras Tensorflow"]
self.assertAllEqual(output, exp_output)
def skip_py_fn(word):
if word == "Tensorflow" or word == "like":
return True
return False
augmenter = RandomDeletion(
rate=0.4, max_deletions=1, seed=42, skip_py_fn=skip_py_fn
)
augmented = augmenter(split)
output = tf.strings.reduce_join(augmented, separator=" ", axis=-1)
exp_output = ["Hey like", "Keras Tensorflow"]
def test_get_config_and_from_config(self):
augmenter = RandomDeletion(rate=0.4, max_deletions=1, seed=42)
expected_config_subset = {"max_deletions": 1, "rate": 0.4, "seed": 42}
config = augmenter.get_config()
self.assertEqual(config, {**config, **expected_config_subset})
restored_augmenter = RandomDeletion.from_config(
config,
)
self.assertEqual(
restored_augmenter.get_config(),
{**config, **expected_config_subset},
)
def test_augment_first_batch_second(self):
keras.utils.set_random_seed(1337)
augmenter = RandomDeletion(rate=0.4, max_deletions=1, seed=42)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [["I", "like"], ["Keras", "and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
return tf.strings.regex_full_match(word, r"\pP")
def skip_py_fn(word):
return len(word) < 4
augmenter = RandomDeletion(
rate=0.8, max_deletions=1, seed=42, skip_fn=skip_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [["I", "like"], ["and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
augmenter = RandomDeletion(
rate=0.8, max_deletions=1, seed=42, skip_py_fn=skip_py_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.map(augmenter)
ds = ds.apply(tf.data.experimental.dense_to_ragged_batch(2))
output = ds.take(1).get_single_element()
exp_output = [["Hey", "I", "like"], ["and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
def test_batch_first_augment_second(self):
keras.utils.set_random_seed(1337)
augmenter = RandomDeletion(rate=0.4, max_deletions=1, seed=42)
inputs = ["Hey I like", "Keras and Tensorflow"]
split = tf.strings.split(inputs)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(5).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [["I", "like"], ["and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
def skip_fn(word):
return tf.strings.regex_full_match(word, r"\pP")
def skip_py_fn(word):
return len(word) < 4
augmenter = RandomDeletion(
rate=0.8, max_deletions=1, seed=42, skip_fn=skip_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(5).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [["I", "like"], ["and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
augmenter = RandomDeletion(
rate=0.8, max_deletions=1, seed=42, skip_py_fn=skip_py_fn
)
ds = tf.data.Dataset.from_tensor_slices(split)
ds = ds.batch(5).map(augmenter)
output = ds.take(1).get_single_element()
exp_output = [["Hey", "I", "like"], ["and", "Tensorflow"]]
self.assertAllEqual(output, exp_output)
| keras-nlp/keras_nlp/layers/preprocessing/random_deletion_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/layers/preprocessing/random_deletion_test.py",
"repo_id": "keras-nlp",
"token_count": 3274
} | 138 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import config
from keras_nlp.backend import keras
from keras_nlp.utils.preset_utils import check_preset_class
from keras_nlp.utils.preset_utils import load_from_preset
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring
@keras.saving.register_keras_serializable(package="keras_nlp")
class Backbone(keras.Model):
def __init__(self, *args, dtype=None, **kwargs):
super().__init__(*args, **kwargs)
self._functional_layer_ids = set(
id(layer) for layer in self._flatten_layers()
)
self._initialized = True
def __dir__(self):
if config.keras_3():
return super().__dir__()
# Temporary fixes for Keras 2 saving. This mimics the following PR for
# older version of Keras: https://github.com/keras-team/keras/pull/18982
def filter_fn(attr):
if attr in [
"_layer_checkpoint_dependencies",
"transformer_layers",
"encoder_transformer_layers",
"decoder_transformer_layers",
]:
return False
return id(getattr(self, attr)) not in self._functional_layer_ids
return filter(filter_fn, super().__dir__())
def __setattr__(self, name, value):
# Work around setattr issues for Keras 2 and Keras 3 torch backend.
# Since all our state is covered by functional model we can route
# around custom setattr calls.
is_property = isinstance(getattr(type(self), name, None), property)
        is_uninitialized = not hasattr(self, "_initialized")
        is_torch = config.backend() == "torch"
        is_keras_2 = not config.keras_3()
        if is_torch and (is_property or is_uninitialized):
            return object.__setattr__(self, name, value)
        if is_keras_2 and is_uninitialized:
return object.__setattr__(self, name, value)
return super().__setattr__(name, value)
@property
def token_embedding(self):
"""A `keras.layers.Embedding` instance for embedding token ids.
This layer embeds integer token ids to the hidden dim of the model.
"""
return self._token_embedding
@token_embedding.setter
def token_embedding(self, value):
self._token_embedding = value
def get_config(self):
# Don't chain to super here. `get_config()` for functional models is
# a nested layer config and cannot be passed to Backbone constructors.
return {
"name": self.name,
"trainable": self.trainable,
}
@classmethod
def from_config(cls, config):
# The default `from_config()` for functional models will return a
# vanilla `keras.Model`. We override it to get a subclass instance back.
return cls(**config)
@classproperty
def presets(cls):
return {}
@classmethod
def from_preset(
cls,
preset,
load_weights=True,
**kwargs,
):
"""Instantiate {{model_name}} model from preset architecture and weights.
Args:
preset: string. Must be one of "{{preset_names}}".
load_weights: Whether to load pre-trained weights into model.
Defaults to `True`.
Examples:
```python
# Load architecture and weights from preset
model = keras_nlp.models.{{model_name}}.from_preset(
"{{example_preset_name}}"
)
# Load randomly initialized model from preset architecture
model = keras_nlp.models.{{model_name}}.from_preset(
"{{example_preset_name}}",
load_weights=False
)
```
"""
# We support short IDs for official presets, e.g. `"bert_base_en"`.
# Map these to a Kaggle Models handle.
if preset in cls.presets:
preset = cls.presets[preset]["kaggle_handle"]
check_preset_class(preset, cls)
return load_from_preset(
preset,
load_weights=load_weights,
config_overrides=kwargs,
)
def __init_subclass__(cls, **kwargs):
# Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs)
# If the subclass does not define from_preset, assign a wrapper so that
# each class can have a distinct docstring.
if "from_preset" not in cls.__dict__:
def from_preset(calling_cls, *args, **kwargs):
return super(cls, calling_cls).from_preset(*args, **kwargs)
cls.from_preset = classmethod(from_preset)
# Format and assign the docstring unless the subclass has overridden it.
if cls.from_preset.__doc__ is None:
cls.from_preset.__func__.__doc__ = Backbone.from_preset.__doc__
format_docstring(
model_name=cls.__name__,
example_preset_name=next(iter(cls.presets), ""),
preset_names='", "'.join(cls.presets),
)(cls.from_preset.__func__)
def enable_lora(self, rank):
"""Enable Lora on the backbone.
Calling this method will freeze all weights on the backbone,
while enabling Lora on the query & value `EinsumDense` layers
of the attention layers.
"""
target_names = ["query_dense", "value_dense", "query", "value"]
self.trainable = True
self._lora_enabled_layers = []
self._lora_rank = rank
for layer in self._flatten_layers(include_self=False):
layer.trainable = False
all_layers = self._flatten_layers(include_self=False)
all_layers = [lyr for lyr in all_layers if lyr.weights]
for i, layer in enumerate(all_layers):
for name in target_names:
if layer.name == name:
if hasattr(layer, "enable_lora"):
layer.trainable = True
layer.enable_lora(rank)
self._lora_enabled_layers.append(i)
def save_lora_weights(self, filepath):
if not getattr(self, "_lora_enabled_layers", []):
raise ValueError(
"There are no lora-enabled layers in this model. "
"Make sure to call `.enable_lora(rank)` first."
)
if not str(filepath).endswith(".lora.h5"):
raise ValueError(
"The filename must end in `.lora.h5`. "
f"Received: filepath={filepath}"
)
store = keras.src.saving.saving_lib.H5IOStore(filepath, mode="w")
lora_store = store.make("lora")
lora_store["rank"] = self._lora_rank
# We cannot identify layers by name since names are non-unique,
# so we identify them by index in the topologically sorted list
# of layers that have weights.
all_layers = self._flatten_layers(include_self=False)
all_layers = [lyr for lyr in all_layers if lyr.weights]
for layer_index in self._lora_enabled_layers:
            # We only apply LoRA to the EinsumDense layers,
            # so the factored weights are always named `kernel`.
layer = all_layers[layer_index]
inner_store = store.make(f"lora/{layer_index}")
inner_store["lora_kernel_a"] = layer.lora_kernel_a
inner_store["lora_kernel_b"] = layer.lora_kernel_b
store.close()
def load_lora_weights(self, filepath):
store = keras.src.saving.saving_lib.H5IOStore(filepath, mode="r")
lora_store = store.get("lora")
rank = int(lora_store["rank"][()])
if not getattr(self, "_lora_enabled_layers", []):
self.enable_lora(rank)
else:
if self._lora_rank != rank:
raise ValueError(
f"The Lora rank expected by file '{filepath}' "
f"is rank={rank}, but the model was called with "
f"`.enable_lora(rank={self._lora_rank})`. "
"Both ranks must match."
)
all_layers = self._flatten_layers(include_self=False)
all_layers = [lyr for lyr in all_layers if lyr.weights]
for layer_index in self._lora_enabled_layers:
layer = all_layers[layer_index]
lora_kernel_a = store.get(f"lora/{layer_index}")["lora_kernel_a"]
lora_kernel_b = store.get(f"lora/{layer_index}")["lora_kernel_b"]
layer.lora_kernel_a.assign(lora_kernel_a)
layer.lora_kernel_b.assign(lora_kernel_b)
store.close()
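# Example usage of the LoRA helpers above (a minimal sketch; `MyBackbone` and the
# preset name are hypothetical placeholders, not part of this module):
#
#     backbone = MyBackbone.from_preset("some_preset")
#     backbone.enable_lora(rank=4)       # freeze base weights, enable LoRA factors
#     ...                                # fine-tune as usual
#     backbone.save_lora_weights("finetuned.lora.h5")
#     backbone.load_lora_weights("finetuned.lora.h5")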
| keras-nlp/keras_nlp/models/backbone.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/backbone.py",
"repo_id": "keras-nlp",
"token_count": 4125
} | 139 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.bloom.bloom_causal_lm_preprocessor import (
BloomCausalLMPreprocessor,
)
from keras_nlp.models.bloom.bloom_tokenizer import BloomTokenizer
from keras_nlp.tests.test_case import TestCase
class BloomCausalLMPreprocessorTest(TestCase):
def setUp(self):
self.vocab = ["<pad>", "<s>", "</s>"]
self.vocab += ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.tokenizer = BloomTokenizer(
vocabulary=self.vocab,
merges=self.merges,
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"sequence_length": 8,
}
self.input_data = ["airplane at airport"]
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=BloomCausalLMPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output=(
{
"token_ids": [[1, 4, 6, 7, 5, 8, 2, 0]],
"padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
},
[[4, 6, 7, 5, 8, 2, 0, 0]], # Pass through labels.
[[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights.
),
)
def test_no_start_end_token(self):
input_data = ["airplane at airport"] * 4
preprocessor = BloomCausalLMPreprocessor(
**self.init_kwargs,
add_start_token=False,
add_end_token=False,
)
x, y, sw = preprocessor(input_data)
self.assertAllEqual(x["token_ids"], [[4, 6, 7, 5, 8, 0, 0, 0]] * 4)
self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4)
self.assertAllEqual(y, [[6, 7, 5, 8, 0, 0, 0, 0]] * 4)
self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4)
def test_generate_preprocess(self):
input_data = "airplane at airport"
preprocessor = BloomCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_preprocess(input_data)
self.assertAllEqual(x["token_ids"], [1, 4, 6, 7, 5, 8, 0, 0])
self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0])
def test_generate_postprocess(self):
input_data = {
"token_ids": [1, 4, 6, 7, 5, 8, 0, 0],
"padding_mask": [1, 1, 1, 1, 1, 1, 0, 0],
}
preprocessor = BloomCausalLMPreprocessor(**self.init_kwargs)
x = preprocessor.generate_postprocess(input_data)
self.assertAllEqual(x, "airplane at airport")
@pytest.mark.extra_large
def test_all_presets(self):
for preset in BloomCausalLMPreprocessor.presets:
self.run_preset_test(
cls=BloomCausalLMPreprocessor,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/bloom/bloom_causal_lm_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/bloom/bloom_causal_lm_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1757
} | 140 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone
from keras_nlp.models.distil_bert.distil_bert_masked_lm import (
DistilBertMaskedLM,
)
from keras_nlp.models.distil_bert.distil_bert_masked_lm_preprocessor import (
DistilBertMaskedLMPreprocessor,
)
from keras_nlp.models.distil_bert.distil_bert_tokenizer import (
DistilBertTokenizer,
)
from keras_nlp.tests.test_case import TestCase
class DistilBertMaskedLMTest(TestCase):
def setUp(self):
# Setup model.
self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
self.vocab += ["the", "quick", "brown", "fox", "."]
self.preprocessor = DistilBertMaskedLMPreprocessor(
DistilBertTokenizer(vocabulary=self.vocab),
# Simplify our testing by masking every available token.
mask_selection_rate=1.0,
mask_token_rate=1.0,
random_token_rate=0.0,
mask_selection_length=5,
sequence_length=5,
)
self.backbone = DistilBertBackbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_heads=2,
hidden_dim=2,
intermediate_dim=4,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = (
["the quick brown fox.", "the slow brown fox."], # Features.
)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_masked_lm_basics(self):
self.run_task_test(
cls=DistilBertMaskedLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 5, 10),
)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=DistilBertMaskedLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in DistilBertMaskedLM.presets:
self.run_preset_test(
cls=DistilBertMaskedLM,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 1339
} | 141 |
# Copyright 2024 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import patch
import keras
import pytest
from keras_nlp.backend import ops
from keras_nlp.models.gemma.gemma_backbone import GemmaBackbone
from keras_nlp.models.gemma.gemma_causal_lm import GemmaCausalLM
from keras_nlp.models.gemma.gemma_causal_lm_preprocessor import (
GemmaCausalLMPreprocessor,
)
from keras_nlp.models.gemma.gemma_tokenizer import GemmaTokenizer
from keras_nlp.tests.test_case import TestCase
@pytest.mark.keras_3_only
class GemmaCausalLMTest(TestCase):
def setUp(self):
self.tokenizer = GemmaTokenizer(
proto=os.path.join(
self.get_test_data_dir(), "gemma_test_vocab.spm"
),
)
self.preprocessor = GemmaCausalLMPreprocessor(
self.tokenizer,
sequence_length=8,
)
self.backbone = GemmaBackbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_query_heads=2,
num_key_value_heads=1,
hidden_dim=4,
intermediate_dim=8,
head_dim=2,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = (["the quick brown fox", "the quick brown fox"],)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_causal_lm_basics(self):
self.run_task_test(
cls=GemmaCausalLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 8, 11),
)
def test_generate(self):
causal_lm = GemmaCausalLM(**self.init_kwargs)
# String input.
prompt = "the quick brown fox"
output = causal_lm.generate("the quick brown fox")
self.assertTrue(prompt in output)
# Int tensor input.
prompt_ids = self.preprocessor.generate_preprocess([prompt])
causal_lm.preprocessor = None
outputs = causal_lm.generate(prompt_ids)
# Assert prompt is in output in token id space.
self.assertAllEqual(
outputs["token_ids"][:, :4],
prompt_ids["token_ids"][:, :4],
)
self.assertAllEqual(
outputs["padding_mask"][:, :4],
prompt_ids["padding_mask"][:, :4],
)
def test_generate_with_bfloat16(self):
original_floatx = keras.config.floatx()
keras.config.set_floatx("float16")
try:
causal_lm = GemmaCausalLM(**self.init_kwargs)
# String input.
prompt = "the quick brown fox"
output = causal_lm.generate("the quick brown fox")
self.assertTrue(prompt in output)
# Int tensor input.
prompt_ids = self.preprocessor.generate_preprocess([prompt])
causal_lm.preprocessor = None
outputs = causal_lm.generate(prompt_ids)
# Assert prompt is in output in token id space.
self.assertAllEqual(
outputs["token_ids"][:, :4],
prompt_ids["token_ids"][:, :4],
)
self.assertAllEqual(
outputs["padding_mask"][:, :4],
prompt_ids["padding_mask"][:, :4],
)
finally:
# Restore floatx to the original value to prevent impact on other
# tests even if there is an exception.
keras.config.set_floatx(original_floatx)
def test_early_stopping(self):
causal_lm = GemmaCausalLM(**self.init_kwargs)
call_with_cache = causal_lm.call_with_cache
def wrapper(*args, **kwargs):
"""Modify output logits to always favor end_token_id"""
logits, hidden_states, cache = call_with_cache(*args, **kwargs)
index = self.preprocessor.tokenizer.end_token_id
update = ops.ones_like(logits)[:, :, index] * 1.0e9
update = ops.expand_dims(update, axis=-1)
logits = ops.slice_update(logits, (0, 0, index), update)
return logits, hidden_states, cache
with patch.object(causal_lm, "call_with_cache", wraps=wrapper):
prompt = ["the quick brown fox", "the quick"]
output = causal_lm.generate(prompt)
# We should immediately abort and output the prompt.
self.assertEqual(prompt, output)
def test_generate_compilation(self):
causal_lm = GemmaCausalLM(**self.init_kwargs)
# Assert we do not recompile with successive calls.
causal_lm.generate("the quick brown fox")
first_fn = causal_lm.generate_function
causal_lm.generate("the quick brown fox")
second_fn = causal_lm.generate_function
self.assertEqual(first_fn, second_fn)
# Assert we do recompile after compile is called.
causal_lm.compile(sampler="greedy")
self.assertIsNone(causal_lm.generate_function)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=GemmaCausalLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in GemmaCausalLM.presets:
self.run_preset_test(
cls=GemmaCausalLM,
preset=preset,
input_data=self.input_data,
)
def test_score_logits(self):
# Setup prompts, models, and associated expected shapes.
prompts = ["the quick brown fox", "the quick brown fox"]
causal_lm = GemmaCausalLM(**self.init_kwargs)
expected_score_shape = (2, 8, 11)
# Preprocess prompts to get tokenized representations and padding masks.
preprocessed_prompts = causal_lm.preprocessor.generate_preprocess(
prompts
)
token_ids = preprocessed_prompts["token_ids"]
padding_mask = preprocessed_prompts["padding_mask"]
# Get the scores and assert their shape.
scores = causal_lm.score(
token_ids=token_ids,
padding_mask=padding_mask,
scoring_mode="logits",
)
self.assertEqual(ops.shape(scores), expected_score_shape)
def test_score_loss(self):
# Setup prompts, models, and associated expected shapes.
prompts = ["the quick brown fox", "the quick brown fox"]
causal_lm = GemmaCausalLM(**self.init_kwargs)
expected_score_shape = (2, 8)
# Preprocess prompts to get tokenized representations and padding masks.
preprocessed_prompts = causal_lm.preprocessor.generate_preprocess(
prompts
)
token_ids = preprocessed_prompts["token_ids"]
padding_mask = preprocessed_prompts["padding_mask"]
target_ids = keras.ops.roll(token_ids, shift=-1, axis=1)
# Get the scores and assert their shape.
scores = causal_lm.score(
token_ids=token_ids,
padding_mask=padding_mask,
scoring_mode="loss",
target_ids=target_ids,
)
self.assertEqual(ops.shape(scores), expected_score_shape)
def test_score_layer_intercept_fn_exfiltration(self):
# Setup prompts, models, and associated expected shapes.
prompts = ["the quick brown fox", "the quick brown fox"]
causal_lm = GemmaCausalLM(**self.init_kwargs)
expected_embedded_shape = (2, 8, 4)
expected_score_shape = (2, 8, 11)
# Preprocess prompts to get tokenized representations and padding masks.
preprocessed_prompts = causal_lm.preprocessor.generate_preprocess(
prompts
)
token_ids = preprocessed_prompts["token_ids"]
padding_mask = preprocessed_prompts["padding_mask"]
        # Set up a custom intercept function that extracts the embeddings to a
        # variable from the embeddings layer and otherwise asserts on shapes.
embedded_prompts = None
def layer_intercept_fn_for_testing(x, i):
if i == -1:
nonlocal embedded_prompts
embedded_prompts = x
else:
nonlocal expected_embedded_shape
self.assertEqual(ops.shape(x), expected_embedded_shape)
return x
# Get the scores.
scores = causal_lm.score(
token_ids=token_ids,
padding_mask=padding_mask,
scoring_mode="logits",
layer_intercept_fn=layer_intercept_fn_for_testing,
)
# Assert shapes for info exfiltrated into the parent context.
self.assertEqual(ops.shape(embedded_prompts), expected_embedded_shape)
self.assertEqual(ops.shape(scores), expected_score_shape)
| keras-nlp/keras_nlp/models/gemma/gemma_causal_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gemma/gemma_causal_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 4232
} | 142 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from keras_nlp.backend import ops
from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone
from keras_nlp.models.gpt2.gpt2_causal_lm import GPT2CausalLM
from keras_nlp.models.gpt2.gpt2_causal_lm_preprocessor import (
GPT2CausalLMPreprocessor,
)
from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer
from keras_nlp.tests.test_case import TestCase
class GPT2CausalLMTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<|endoftext|>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.preprocessor = GPT2CausalLMPreprocessor(
GPT2Tokenizer(vocabulary=self.vocab, merges=self.merges),
sequence_length=8,
)
self.backbone = GPT2Backbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_heads=2,
hidden_dim=4,
intermediate_dim=8,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = ([" airplane at airport", " airplane at airport"],)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_causal_lm_basics(self):
self.run_task_test(
cls=GPT2CausalLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 8, 7),
)
def test_generate(self):
causal_lm = GPT2CausalLM(**self.init_kwargs)
# String input.
prompt = " airplane at airport"
output = causal_lm.generate(" airplane at airport")
self.assertTrue(prompt in output)
# Int tensor input.
prompt_ids = self.preprocessor.generate_preprocess([prompt])
causal_lm.preprocessor = None
outputs = causal_lm.generate(prompt_ids)
# Assert prompt is in output in token id space.
self.assertAllEqual(
outputs["token_ids"][:, :5],
prompt_ids["token_ids"][:, :5],
)
self.assertAllEqual(
outputs["padding_mask"][:, :5],
prompt_ids["padding_mask"][:, :5],
)
def test_early_stopping(self):
causal_lm = GPT2CausalLM(**self.init_kwargs)
call_with_cache = causal_lm.call_with_cache
def wrapper(*args, **kwargs):
"""Modify output logits to always favor end_token_id"""
logits, hidden_states, cache = call_with_cache(*args, **kwargs)
index = self.preprocessor.tokenizer.end_token_id
update = ops.ones_like(logits)[:, :, index] * 1.0e9
update = ops.expand_dims(update, axis=-1)
logits = ops.slice_update(logits, (0, 0, index), update)
return logits, hidden_states, cache
with patch.object(causal_lm, "call_with_cache", wraps=wrapper):
prompt = [" airplane at airport", " airplane"]
output = causal_lm.generate(prompt)
# We should immediately abort and output the prompt.
self.assertEqual(prompt, output)
def test_generate_compilation(self):
causal_lm = GPT2CausalLM(**self.init_kwargs)
# Assert we do not recompile with successive calls.
causal_lm.generate(" airplane at airport")
first_fn = causal_lm.generate_function
causal_lm.generate(" airplane at airport")
second_fn = causal_lm.generate_function
self.assertEqual(first_fn, second_fn)
# Assert we do recompile after compile is called.
causal_lm.compile(sampler="greedy")
self.assertIsNone(causal_lm.generate_function)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=GPT2CausalLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in GPT2CausalLM.presets:
self.run_preset_test(
cls=GPT2CausalLM,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/gpt2/gpt2_causal_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt2/gpt2_causal_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 2316
} | 143 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.models.gpt_neo_x.gpt_neo_x_preprocessor import (
GPTNeoXPreprocessor,
)
from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer
from keras_nlp.tests.test_case import TestCase
class GPTNeoXPreprocessorTest(TestCase):
def setUp(self):
self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab += ["<|endoftext|>"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.tokenizer = GPTNeoXTokenizer(
vocabulary=self.vocab,
merges=self.merges,
)
self.init_kwargs = {
"tokenizer": self.tokenizer,
"sequence_length": 8,
}
self.input_data = ["airplane at airport"]
def test_preprocessor_basics(self):
self.run_preprocessor_test(
cls=GPTNeoXPreprocessor,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
expected_output={
"token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]],
"padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
},
)
def test_no_start_end_token(self):
input_data = ["airplane at airport"] * 4
preprocessor = GPTNeoXPreprocessor(
tokenizer=GPTNeoXTokenizer(
vocabulary=self.vocab,
merges=self.merges,
),
sequence_length=8,
add_start_token=False,
add_end_token=False,
)
x = preprocessor(input_data)
self.assertAllEqual(x["token_ids"], [[1, 3, 4, 2, 5, 0, 0, 0]] * 4)
self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4)
def test_sequence_length_override(self):
input_data = "airplane at airport"
preprocessor = GPTNeoXPreprocessor(**self.init_kwargs)
x = preprocessor(input_data, sequence_length=4)
self.assertAllEqual(x["token_ids"], [6, 1, 3, 6])
| keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py",
"repo_id": "keras-nlp",
"token_count": 1270
} | 144 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import pytest
from keras_nlp.backend import ops
from keras_nlp.models.opt.opt_backbone import OPTBackbone
from keras_nlp.models.opt.opt_causal_lm import OPTCausalLM
from keras_nlp.models.opt.opt_causal_lm_preprocessor import (
OPTCausalLMPreprocessor,
)
from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer
from keras_nlp.tests.test_case import TestCase
class OPTCausalLMTest(TestCase):
def setUp(self):
self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"]
self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
self.merges += ["Ġai r", "Ġa i", "pla ne"]
self.preprocessor = OPTCausalLMPreprocessor(
OPTTokenizer(vocabulary=self.vocab, merges=self.merges),
sequence_length=8,
)
self.backbone = OPTBackbone(
vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(),
num_layers=2,
num_heads=2,
hidden_dim=4,
intermediate_dim=8,
max_sequence_length=self.preprocessor.sequence_length,
)
self.init_kwargs = {
"preprocessor": self.preprocessor,
"backbone": self.backbone,
}
self.train_data = ([" airplane at airport", " airplane at airport"],)
self.input_data = self.preprocessor(*self.train_data)[0]
def test_causal_lm_basics(self):
self.run_task_test(
cls=OPTCausalLM,
init_kwargs=self.init_kwargs,
train_data=self.train_data,
expected_output_shape=(2, 8, 7),
)
def test_generate(self):
causal_lm = OPTCausalLM(**self.init_kwargs)
# String input.
prompt = " airplane at airport"
output = causal_lm.generate(" airplane at airport")
self.assertTrue(prompt in output)
# Int tensor input.
prompt_ids = self.preprocessor.generate_preprocess([prompt])
causal_lm.preprocessor = None
outputs = causal_lm.generate(prompt_ids)
# Assert prompt is in output in token id space.
self.assertAllEqual(
outputs["token_ids"][:, :5],
prompt_ids["token_ids"][:, :5],
)
self.assertAllEqual(
outputs["padding_mask"][:, :5],
prompt_ids["padding_mask"][:, :5],
)
def test_early_stopping(self):
causal_lm = OPTCausalLM(**self.init_kwargs)
call_with_cache = causal_lm.call_with_cache
def wrapper(*args, **kwargs):
"""Modify output logits to always favor end_token_id"""
logits, hidden_states, cache = call_with_cache(*args, **kwargs)
index = self.preprocessor.tokenizer.end_token_id
update = ops.ones_like(logits)[:, :, index] * 1.0e9
update = ops.expand_dims(update, axis=-1)
logits = ops.slice_update(logits, (0, 0, index), update)
return logits, hidden_states, cache
with patch.object(causal_lm, "call_with_cache", wraps=wrapper):
prompt = [" airplane at airport", " airplane"]
output = causal_lm.generate(prompt)
# We should immediately abort and output the prompt.
self.assertEqual(prompt, output)
def test_generate_compilation(self):
causal_lm = OPTCausalLM(**self.init_kwargs)
# Assert we do not recompile with successive calls.
causal_lm.generate(" airplane at airport")
first_fn = causal_lm.generate_function
causal_lm.generate(" airplane at airport")
second_fn = causal_lm.generate_function
self.assertEqual(first_fn, second_fn)
# Assert we do recompile after compile is called.
causal_lm.compile(sampler="greedy")
self.assertIsNone(causal_lm.generate_function)
@pytest.mark.large
def test_saved_model(self):
self.run_model_saving_test(
cls=OPTCausalLM,
init_kwargs=self.init_kwargs,
input_data=self.input_data,
)
@pytest.mark.extra_large
def test_all_presets(self):
for preset in OPTCausalLM.presets:
self.run_preset_test(
cls=OPTCausalLM,
preset=preset,
input_data=self.input_data,
)
| keras-nlp/keras_nlp/models/opt/opt_causal_lm_test.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/opt/opt_causal_lm_test.py",
"repo_id": "keras-nlp",
"token_count": 2273
} | 145 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.layers.preprocessing.multi_segment_packer import (
MultiSegmentPacker,
)
from keras_nlp.models.preprocessor import Preprocessor
from keras_nlp.models.roberta.roberta_presets import backbone_presets
from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer
from keras_nlp.utils.keras_utils import (
convert_inputs_to_list_of_tensor_segments,
)
from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
from keras_nlp.utils.python_utils import classproperty
@keras_nlp_export("keras_nlp.models.RobertaPreprocessor")
class RobertaPreprocessor(Preprocessor):
"""A RoBERTa preprocessing layer which tokenizes and packs inputs.
This preprocessing layer will do three things:
1. Tokenize any number of input segments using the `tokenizer`.
2. Pack the inputs together with the appropriate `"<s>"`, `"</s>"` and
`"<pad>"` tokens, i.e., adding a single `"<s>"` at the start of the
entire sequence, `"</s></s>"` at the end of each segment, save the last
and a `"</s>"` at the end of the entire sequence.
3. Construct a dictionary with keys `"token_ids"`, `"padding_mask"` that
can be passed directly to a RoBERTa model.
This layer can be used directly with `tf.data.Dataset.map` to preprocess
string data in the `(x, y, sample_weight)` format used by
`keras.Model.fit`.
Args:
tokenizer: A `keras_nlp.models.RobertaTokenizer` instance.
sequence_length: The length of the packed inputs.
truncate: string. The algorithm to truncate a list of batched segments
to fit within `sequence_length`. The value can be either
`round_robin` or `waterfall`:
- `"round_robin"`: Available space is assigned one token at a
time in a round-robin fashion to the inputs that still need
some, until the limit is reached.
- `"waterfall"`: The allocation of the budget is done using a
"waterfall" algorithm that allocates quota in a
left-to-right manner and fills up the buckets until we run
out of budget. It supports an arbitrary number of segments.
Call arguments:
x: A tensor of single string sequences, or a tuple of multiple
tensor sequences to be packed together. Inputs may be batched or
unbatched. For single sequences, raw python inputs will be converted
to tensors. For multiple sequences, pass tensors directly.
y: Any label data. Will be passed through unaltered.
sample_weight: Any label weight data. Will be passed through unaltered.
Examples:
Directly calling the layer on data.
```python
preprocessor = keras_nlp.models.RobertaPreprocessor.from_preset(
"roberta_base_en"
)
# Tokenize and pack a single sentence.
preprocessor("The quick brown fox jumped.")
# Tokenize a batch of single sentences.
preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
# Preprocess a batch of sentence pairs.
# When handling multiple sequences, always convert to tensors first!
first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
second = tf.constant(["The fox tripped.", "Oh look, a whale."])
preprocessor((first, second))
# Custom vocabulary.
vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<mask>": 3}
vocab = {**vocab, "a": 4, "Ġquick": 5, "Ġfox": 6}
merges = ["Ġ q", "u i", "c k", "ui ck", "Ġq uick", "Ġ f", "o x", "Ġf ox"]
tokenizer = keras_nlp.models.RobertaTokenizer(
vocabulary=vocab,
merges=merges
)
preprocessor = keras_nlp.models.RobertaPreprocessor(tokenizer)
preprocessor("a quick fox")
```
Mapping with `tf.data.Dataset`.
```python
preprocessor = keras_nlp.models.RobertaPreprocessor.from_preset(
"roberta_base_en"
)
first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
second = tf.constant(["The fox tripped.", "Oh look, a whale."])
label = tf.constant([1, 1])
# Map labeled single sentences.
ds = tf.data.Dataset.from_tensor_slices((first, label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled single sentences.
ds = tf.data.Dataset.from_tensor_slices(first)
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map labeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
# Map unlabeled sentence pairs.
ds = tf.data.Dataset.from_tensor_slices((first, second))
# Watch out for tf.data's default unpacking of tuples here!
# Best to invoke the `preprocessor` directly in this case.
ds = ds.map(
lambda first, second: preprocessor(x=(first, second)),
num_parallel_calls=tf.data.AUTOTUNE,
)
```
"""
def __init__(
self,
tokenizer,
sequence_length=512,
truncate="round_robin",
**kwargs,
):
super().__init__(**kwargs)
self.tokenizer = tokenizer
self.packer = None
self.truncate = truncate
self.sequence_length = sequence_length
def build(self, input_shape):
# Defer packer creation to `build()` so that we can be sure tokenizer
# assets have loaded when restoring a saved model.
self.packer = MultiSegmentPacker(
start_value=self.tokenizer.start_token_id,
end_value=self.tokenizer.end_token_id,
sep_value=[self.tokenizer.end_token_id] * 2,
pad_value=self.tokenizer.pad_token_id,
truncate=self.truncate,
sequence_length=self.sequence_length,
)
self.built = True
def call(self, x, y=None, sample_weight=None):
x = convert_inputs_to_list_of_tensor_segments(x)
x = [self.tokenizer(segment) for segment in x]
token_ids, _ = self.packer(x)
x = {
"token_ids": token_ids,
"padding_mask": token_ids != self.tokenizer.pad_token_id,
}
return pack_x_y_sample_weight(x, y, sample_weight)
def get_config(self):
config = super().get_config()
config.update(
{
"sequence_length": self.sequence_length,
"truncate": self.truncate,
}
)
return config
@property
def sequence_length(self):
"""The padded length of model input sequences."""
return self._sequence_length
@sequence_length.setter
def sequence_length(self, value):
self._sequence_length = value
if self.packer is not None:
self.packer.sequence_length = value
@classproperty
def tokenizer_cls(cls):
return RobertaTokenizer
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)
| keras-nlp/keras_nlp/models/roberta/roberta_preprocessor.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/roberta/roberta_preprocessor.py",
"repo_id": "keras-nlp",
"token_count": 3037
} | 146 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_nlp.backend import keras
from keras_nlp.backend import ops
from keras_nlp.models.xlnet.relative_attention import TwoStreamRelativeAttention
def xlnet_kernel_initializer(stddev=0.02):
return keras.initializers.TruncatedNormal(stddev=stddev)
class XLNetEncoder(keras.layers.Layer):
"""
XLNet Encoder.
This class follows the architecture of the transformer encoder layer in the
paper [Attention is All You Need](https://arxiv.org/abs/1706.03762). Users
can instantiate multiple instances of this class to stack up an encoder.
    Unlike the single hidden state used in the paper mentioned above, this
    encoder maintains two hidden states, the content state and the query
    state, and computes two-stream relative attention over both of them.
    See the reference below for details.
Args:
num_heads: int, the number of heads in the
`keras.layers.TwoStreamRelativeAttention` layer.
        hidden_dim: int, the size of the hidden states.
head_dim: int, the size of each attention head.
intermediate_dim: int, the hidden size of feedforward network.
        dropout: float, defaults to 0.0. The dropout value, shared by
            `keras.layers.TwoStreamRelativeAttention` and the feedforward
            network.
        activation: string or `keras.activations`, defaults to "gelu". The
            activation function of the feedforward network.
layer_norm_epsilon: float, defaults to 1e-12. The epsilon value in layer
normalization components.
        kernel_initializer_range: float, defaults to 0.02. The kernel initializer
range for the dense and relative attention layers.
bias_initializer: string or `keras.initializers` initializer,
defaults to "zeros". The bias initializer for
the dense and multiheaded relative attention layers.
name: string, defaults to None. The name of the layer.
**kwargs: other keyword arguments.
References:
- [XLNet: Generalized Autoregressive Pretraining for Language Understanding]
(https://arxiv.org/abs/1906.08237)
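    Example (a minimal wiring sketch; `hidden_states`, `padding_mask`,
    `segment_ids` and `pos_emb` are placeholder tensors produced elsewhere in
    an XLNet backbone and are not part of this layer's API):
    ```python
    mask_layer = XLNetAttentionMaskLayer(
        hidden_dim=768, kernel_initializer_range=0.02
    )
    seg_mat_layer = XLNetSegmentMatrixLayer()
    attn_mask_content, attn_mask_query = mask_layer(padding_mask)
    seg_mat = seg_mat_layer(segment_ids)
    block = XLNetEncoder(
        num_heads=12, hidden_dim=768, head_dim=64, intermediate_dim=3072
    )
    output_content, _ = block(
        output_content=hidden_states,
        attn_mask_content=attn_mask_content,
        attn_mask_query=attn_mask_query,
        pos_emb=pos_emb,
        seg_mat=seg_mat,
    )
    ```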
"""
def __init__(
self,
num_heads,
hidden_dim,
head_dim,
intermediate_dim,
dropout=0.0,
activation="gelu",
layer_norm_epsilon=1e-12,
kernel_initializer_range=0.02,
bias_initializer="zeros",
name=None,
**kwargs
):
super().__init__(name=name, **kwargs)
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.head_dim = head_dim
self.intermediate_dim = intermediate_dim
self.dropout = dropout
self.activation = activation
self.layer_norm_epsilon = layer_norm_epsilon
self.kernel_initializer_range = kernel_initializer_range
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_initializer = xlnet_kernel_initializer(
self.kernel_initializer_range
)
def build(self, input_shape):
# Attention Part
self.relative_attention = TwoStreamRelativeAttention(
num_heads=self.num_heads,
key_dim=self.head_dim,
kernel_initializer=self.kernel_initializer,
bias_initializer=self.bias_initializer,
dtype=self.dtype_policy,
name="rel_attn",
)
self.layer_norm = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="layer_norm_rel_attn",
)
self.layer_norm.build(input_shape)
self.dropout_attn = keras.layers.Dropout(
self.dropout,
dtype=self.dtype_policy,
)
# Feed-Forward Part
self.layer_norm_ff = keras.layers.LayerNormalization(
epsilon=self.layer_norm_epsilon,
dtype=self.dtype_policy,
name="layer_norm_ff",
)
self.layer_norm_ff.build(input_shape)
self.feedforward_intermediate_dense = keras.layers.Dense(
self.intermediate_dim,
kernel_initializer=self.kernel_initializer,
dtype=self.dtype_policy,
name="feedforward_intermediate_dense",
)
self.feedforward_intermediate_dense.build(input_shape)
self.feedforward_output_dense = keras.layers.Dense(
self.hidden_dim,
kernel_initializer=self.kernel_initializer,
dtype=self.dtype_policy,
name="feedforward_output_dense",
)
self.feedforward_output_dense.build(
self.feedforward_intermediate_dense.compute_output_shape(
input_shape
)
)
self.dropout_ff = keras.layers.Dropout(
self.dropout,
dtype=self.dtype_policy,
)
self.activation_function_ff = keras.activations.get(self.activation)
self.content_attention_bias = self.add_weight(
shape=(self.num_heads, self.head_dim),
initializer=self.bias_initializer,
trainable=True,
name="content_attention_bias",
)
self.positional_attention_bias = self.add_weight(
shape=(self.num_heads, self.head_dim),
initializer=self.bias_initializer,
trainable=True,
name="positional_attention_bias",
)
self.segment_attention_bias = self.add_weight(
shape=(self.num_heads, self.head_dim),
initializer=self.bias_initializer,
trainable=True,
name="segment_attention_bias",
)
self.segment_encoding = self.add_weight(
shape=(2, self.num_heads, self.head_dim),
initializer=self.kernel_initializer,
trainable=True,
name="segment_encoding",
)
super().build(input_shape)
def call(
self,
output_content,
attn_mask_content,
attn_mask_query,
pos_emb,
seg_mat,
output_query=None,
mems=None,
target_mapping=None,
):
# rel_attn
attn_out_content, attn_out_query = self.relative_attention(
content_stream=output_content,
query_stream=output_query,
content_attention_mask=attn_mask_content,
query_attention_mask=attn_mask_query,
relative_position_encoding=pos_emb,
content_attention_bias=self.content_attention_bias,
positional_attention_bias=self.positional_attention_bias,
segment_attention_bias=self.segment_attention_bias,
segment_matrix=seg_mat,
segment_encoding=self.segment_encoding,
target_mapping=target_mapping,
state=mems,
)
attn_out_content = self.dropout_attn(attn_out_content)
attn_out_content = attn_out_content + output_content
attn_out_content = self.layer_norm(attn_out_content)
if attn_out_query is not None:
attn_out_query = self.dropout_attn(attn_out_query)
attn_out_query = attn_out_query + output_query
attn_out_query = self.layer_norm(attn_out_query)
# feed-forward
ff_out_content = attn_out_content
ff_out_content = self.feedforward_intermediate_dense(ff_out_content)
ff_out_content = self.activation_function_ff(ff_out_content)
ff_out_content = self.dropout_ff(ff_out_content)
ff_out_content = self.feedforward_output_dense(ff_out_content)
ff_out_content = self.dropout_ff(ff_out_content)
ff_out_content = self.layer_norm_ff(ff_out_content + attn_out_content)
if attn_out_query is not None:
ff_out_query = attn_out_query
ff_out_query = self.feedforward_intermediate_dense(ff_out_query)
ff_out_query = self.activation_function_ff(ff_out_query)
ff_out_query = self.dropout_ff(ff_out_query)
ff_out_query = self.feedforward_output_dense(ff_out_query)
ff_out_query = self.dropout_ff(ff_out_query)
ff_out_query = self.layer_norm_ff(ff_out_query + attn_out_query)
return ff_out_content, ff_out_query
return ff_out_content, None
def compute_output_shape(
self,
output_content_shape,
pos_emb_shape,
attn_mask_content_shape,
attn_mask_query_shape,
seg_mat_shape,
output_query_shape=None,
):
return [output_content_shape, output_content_shape]
class XLNetAttentionMaskLayer(keras.layers.Layer):
"""
Attention Mask Layer for XLNet Encoder Block.
This layer processes attention masks for both content state and query state
during the forward pass.
Args:
        hidden_dim: int, the size of the hidden states.
        kernel_initializer_range: float, defaults to 0.02. The kernel initializer
range for the dense and relative attention layers.
**kwargs: other keyword arguments.
"""
def __init__(self, hidden_dim, kernel_initializer_range, **kwargs):
super().__init__(**kwargs)
self.hidden_dim = hidden_dim
self.kernel_initializer_range = kernel_initializer_range
self.kernel_initializer = xlnet_kernel_initializer(
self.kernel_initializer_range
)
def build(self, inputs_shape):
self.mask_emb = self.add_weight(
shape=(1, 1, self.hidden_dim),
initializer=self.kernel_initializer,
trainable=True,
name="mask_emb",
)
self.built = True
def call(self, inputs, mlen=None):
bsz, qlen = ops.shape(inputs)[0], ops.shape(inputs)[1]
mlen = 0 if mlen is None else mlen
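        # Flip the incoming padding mask (assumed to mark real tokens with 1
        # and padding with 0) so that, below, 1 marks positions that should
        # not be attended to.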
inputs = 1 - inputs
inputs = ops.reshape(
inputs,
[ops.shape(inputs)[1], ops.shape(inputs)[0]],
)
data_mask = ops.expand_dims(inputs, 0)
if mlen > 0:
mems_mask = ops.zeros([ops.shape(data_mask)[0], mlen, bsz])
data_mask = ops.concatenate(
[ops.cast(mems_mask, dtype="int32"), data_mask], axis=1
)
attn_mask_query = ops.expand_dims(data_mask, -1)
attn_mask_query = ops.cast(
attn_mask_query > 0, dtype=attn_mask_query.dtype
)
        # Since ops.eye doesn't accept a tensorflow Tensor as its size
        # argument, we build the diagonal (identity-like) mask manually here.
n = ops.expand_dims(ops.arange(qlen), -1)
m = ops.arange(qlen)
attn_mask_content = -ops.cast(
ops.where(n == m, 1, 0), attn_mask_query.dtype
)
if mlen > 0:
attn_mask_content = ops.concatenate(
[
ops.zeros([qlen, mlen], dtype=attn_mask_content.dtype),
attn_mask_content,
],
axis=-1,
)
attn_mask_content = ops.cast(
(
attn_mask_query
+ ops.expand_dims(ops.expand_dims(attn_mask_content, -1), -1)
)
> 0,
dtype=attn_mask_content.dtype,
)
        # Make sure the masks are in a form suitable for
        # TwoStreamRelativeAttention.
attn_mask_content = 1.0 - ops.cast(
ops.transpose(ops.squeeze(attn_mask_content, -1), [2, 0, 1]),
"float32",
)
attn_mask_query = 1.0 - ops.cast(
ops.transpose(ops.squeeze(attn_mask_query, -1), [2, 0, 1]),
"float32",
)
return attn_mask_content, attn_mask_query
def compute_output_shape(self, padding_mask_shape):
return [padding_mask_shape, padding_mask_shape]
class XLNetSegmentMatrixLayer(keras.layers.Layer):
"""
    This layer creates the segment matrix for the XLNet encoder, marking
    which pairs of token positions belong to different segments.
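    Example (an illustrative sketch, not a doctest; with
    `segment_ids = [[0, 0, 1, 1]]` the single batch entry of the output is
    `False` for pairs of positions in the same segment and `True` for pairs
    that cross the segment boundary):
    ```python
    seg_mat_layer = XLNetSegmentMatrixLayer()
    # Boolean tensor of shape
    # (batch_size, sequence_length, sequence_length) when `mlen` is not used.
    seg_mat = seg_mat_layer(segment_ids)
    ```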
"""
def call(self, segment_ids, mlen=None):
bsz = ops.shape(segment_ids)[0]
mlen = 0 if mlen is None else mlen
# Prepare seg_mat
segment_ids = ops.transpose(segment_ids, [1, 0])
if mlen > 0:
mem_pad = ops.zeros([mlen, bsz], dtype=segment_ids.dtype)
cat_ids = ops.concatenate([mem_pad, segment_ids], 0)
else:
cat_ids = segment_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = ops.cast(
ops.logical_not(ops.equal(segment_ids[:, None], cat_ids[None, :])),
dtype=segment_ids.dtype,
)
        # Make sure the segment matrix is in a form suitable for
        # TwoStreamRelativeAttention.
seg_mat = ops.cast(ops.transpose(seg_mat, [2, 0, 1]), dtype="bool")
return seg_mat
def compute_output_shape(self, segment_ids_shape):
return segment_ids_shape
| keras-nlp/keras_nlp/models/xlnet/xlnet_encoder.py/0 | {
"file_path": "keras-nlp/keras_nlp/models/xlnet/xlnet_encoder.py",
"repo_id": "keras-nlp",
"token_count": 6171
} | 147 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_nlp.api_export import keras_nlp_export
from keras_nlp.tokenizers.word_piece_tokenizer import pretokenize
from keras_nlp.utils.tensor_utils import assert_tf_text_installed
try:
from tensorflow_text.tools.wordpiece_vocab import (
wordpiece_tokenizer_learner_lib as learner,
)
except ImportError:
learner = None
@keras_nlp_export("keras_nlp.tokenizers.compute_word_piece_vocabulary")
def compute_word_piece_vocabulary(
data,
vocabulary_size,
vocabulary_output_file=None,
lowercase=False,
strip_accents=False,
split=True,
split_on_cjk=True,
suffix_indicator="##",
reserved_tokens=["[PAD]", "[CLS]", "[SEP]", "[UNK]", "[MASK]"],
):
r"""A utility to train a WordPiece vocabulary.
Trains a WordPiece vocabulary from an input dataset or a list of filenames.
For custom data loading and pretokenization (`split=False`), the input
`data` should be a `tf.data.Dataset`. If `data` is a list of filenames,
    the files must be plain text files, which will be read line by line during
    training.
Args:
data: A `tf.data.Dataset`, or a list of filenames.
vocabulary_size: int. The maximum size of a vocabulary to be trained.
vocabulary_output_file: str. The location to write a
            vocabulary file. Defaults to `None`.
lowercase: bool. If `True`, the input text will be
lowercased before tokenization. Defaults to `False`.
strip_accents: bool. If `True`, all accent marks will
be removed from text before tokenization. Defaults to `False`.
split: bool. If `True`, input will be split on
whitespace and punctuation marks, and all punctuation marks will be
kept as tokens. If `False`, input should be split ("pre-tokenized")
before calling the tokenizer, and passed as a dense or ragged tensor
of whole words. `split` is required to be `True` when `data` is a
list of filenames. Defaults to `True`.
split_on_cjk: bool. If `True`, input will be split
on CJK characters, i.e., Chinese, Japanese, Korean and Vietnamese
characters (https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)).
Note that this is applicable only when `split` is `True`.
Defaults to `True`.
suffix_indicator: str. The characters prepended to a
WordPiece to indicate that it is a suffix to another subword.
E.g. `"##ing"`. Defaults to `"##"`.
reserved_tokens: list of strings. A list of tokens that must be included in the vocabulary.
Returns:
        A list of vocabulary terms.
Examples:
Basic Usage (from Dataset).
>>> inputs = tf.data.Dataset.from_tensor_slices(["bat sat pat mat rat"])
>>> vocab = compute_word_piece_vocabulary(inputs, 13)
>>> vocab
['[PAD]', '[CLS]', '[SEP]', '[UNK]', '[MASK]', 'a', 'b', 'm', 'p', 'r', 's', 't', '##at']
>>> tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab, oov_token="[UNK]")
>>> outputs = inputs.map(tokenizer.tokenize)
>>> for x in outputs:
... print(x)
tf.Tensor([ 6 12 10 12 8 12 7 12 9 12], shape=(10,), dtype=int32)
Basic Usage (from filenames).
```python
with open("test.txt", "w+") as f:
f.write("bat sat pat mat rat\n")
inputs = ["test.txt"]
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(inputs, 13)
```
Custom Split Usage (from Dataset).
>>> def normalize_and_split(x):
... "Strip punctuation and split on whitespace."
... x = tf.strings.regex_replace(x, r"\p{P}", "")
... return tf.strings.split(x)
>>> inputs = tf.data.Dataset.from_tensor_slices(["bat sat: pat mat rat.\n"])
>>> split_inputs = inputs.map(normalize_and_split)
>>> vocab = compute_word_piece_vocabulary(
... split_inputs, 13, split=False,
... )
>>> vocab
['[PAD]', '[CLS]', '[SEP]', '[UNK]', '[MASK]', 'a', 'b', 'm', 'p', 'r', 's', 't', '##at']
>>> tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)
>>> inputs.map(tokenizer.tokenize)
Custom Split Usage (from filenames).
```python
def normalize_and_split(x):
"Strip punctuation and split on whitespace."
x = tf.strings.regex_replace(x, r"\p{P}", "")
return tf.strings.split(x)
with open("test.txt", "w+") as f:
f.write("bat sat: pat mat rat.\n")
inputs = tf.data.TextLineDataset(["test.txt"])
split_inputs = inputs.map(normalize_and_split)
vocab = keras_nlp.tokenizers.compute_word_piece_vocabulary(
split_inputs, 13, split=False
)
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)
inputs.map(tokenizer.tokenize)
```
"""
assert_tf_text_installed(compute_word_piece_vocabulary.__name__)
# Read data files.
if not isinstance(data, (list, tf.data.Dataset)):
raise ValueError(
"The `data` argument must be either `tf.data.Dataset` or `list`. "
f"Received: {type(data)}."
)
if isinstance(data, list):
# Processing list of file paths.
if not split:
raise ValueError(
"When learning a vocab from files, `split` must be `True`. "
"To compute a vocabulary with custom split rules, load your "
"data as a dataset, split it, and pass it to "
"`compute_word_piece_vocabulary()` with split=False."
)
path_ds = tf.data.Dataset.from_tensor_slices(data)
        # Use `map` to read the contents of each file.
data = path_ds.map(
lambda path: tf.io.read_file(path),
num_parallel_calls=tf.data.AUTOTUNE,
)
words_data = data.map(
lambda text: pretokenize(
text, lowercase, strip_accents, split, split_on_cjk
),
num_parallel_calls=tf.data.AUTOTUNE,
)
word_counts = learner.count_words(words_data)
# Train tokenizer.
vocab = learner.learn(
word_counts,
vocab_size=vocabulary_size,
reserved_tokens=reserved_tokens,
include_joiner_token=True,
joiner=suffix_indicator,
)
if len(vocab) > vocabulary_size:
vocab = vocab[:vocabulary_size]
if vocabulary_output_file is not None:
vocab_text = "".join([line + "\n" for line in vocab])
# Write vocab to file.
with open(vocabulary_output_file, "w", encoding="utf-8") as vocab_file:
vocab_file.write(vocab_text)
    # Return the vocabulary in all cases, even when it was also written to
    # `vocabulary_output_file`, to match the documented return value.
    return vocab
| keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py/0 | {
"file_path": "keras-nlp/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py",
"repo_id": "keras-nlp",
"token_count": 2986
} | 148 |
<jupyter_start><jupyter_text>Install deps<jupyter_code>!pip install git+https://github.com/abheesht17/keras-nlp.git@bert-large-vars tensorflow tf-models-official tensorflow_hub --upgrade --quiet
import json
import os
import keras_nlp
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
MODEL_TYPE = "bert_large"
MODEL_SUFFIX = "cased"
MODEL_SPEC_STR = "L-24_H-1024_A-16"
MODEL_NAME = f"{MODEL_TYPE}_{MODEL_SUFFIX}"
VOCAB_SIZE = 28996
NUM_LAYERS = 24
NUM_ATTN_HEADS = 16
EMBEDDING_SIZE = 1024
# BERT ckpt https://github.com/google-research/bert/blob/master/README.md.
zip_path = f"""https://storage.googleapis.com/bert_models/2018_10_18/{MODEL_SUFFIX}_{MODEL_SPEC_STR}.zip"""
zip_file = keras.utils.get_file(
f"""/content/{MODEL_NAME}""",
zip_path,
extract=True,
archive_format="zip",
)
!unzip """{MODEL_NAME}"""
# BERT paths.
extract_dir = f"/content/{MODEL_SUFFIX}_{MODEL_SPEC_STR}"
vocab_path = os.path.join(extract_dir, "vocab.txt")
checkpoint_path = os.path.join(extract_dir, "bert_model.ckpt")
config_path = os.path.join(extract_dir, "bert_config.json")
vars = tf.train.list_variables(checkpoint_path)
weights = {}
for name, shape in vars:
print(name, shape)
weight = tf.train.load_variable(checkpoint_path, name)
weights[name] = weight<jupyter_output>bert/embeddings/LayerNorm/beta [1024]
bert/embeddings/LayerNorm/gamma [1024]
bert/embeddings/position_embeddings [512, 1024]
bert/embeddings/token_type_embeddings [2, 1024]
bert/embeddings/word_embeddings [28996, 1024]
bert/encoder/layer_0/attention/output/LayerNorm/beta [1024]
bert/encoder/layer_0/attention/output/LayerNorm/gamma [1024]
bert/encoder/layer_0/attention/output/dense/bias [1024]
bert/encoder/layer_0/attention/output/dense/kernel [1024, 1024]
bert/encoder/layer_0/attention/self/key/bias [1024]
bert/encoder/layer_0/attention/self/key/kernel [1024, 1024]
bert/encoder/layer_0/attention/self/query/bias [1024]
bert/encoder/layer_0/attention/self/query/kernel [1024, 1024]
bert/encoder/layer_0/attention/self/value/bias [1024]
bert/encoder/layer_0/attention/self/value/kernel [1024, 1024]
bert/encoder/layer_0/intermediate/dense/bias [4096]
bert/encoder/layer_0/intermediate/dense/kernel [1024, 4096]
bert/encoder/layer_0/output/LayerNorm/beta [1024]
bert/encoder/layer_0/output/LayerN[...]<jupyter_text>Load BertLarge model with KerasNLP.<jupyter_code>model = keras_nlp.models.BertLarge(vocabulary_size=VOCAB_SIZE)
model.summary()<jupyter_output>Model: "bert_custom"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
token_ids (InputLayer) [(None, None)] 0 []
token_embedding (Embedding) (None, None, 1024) 29691904 ['token_ids[0][0]']
segment_ids (InputLayer) [(None, None)] 0 []
position_embedding (PositionEm (None, None, 1024) 524288 ['token_embedding[0][0][...]<jupyter_text>Convert Weights<jupyter_code>model.get_layer("token_embedding").embeddings.assign(
weights["bert/embeddings/word_embeddings"]
)
model.get_layer("position_embedding").position_embeddings.assign(
weights["bert/embeddings/position_embeddings"]
)
model.get_layer("segment_embedding").embeddings.assign(
weights["bert/embeddings/token_type_embeddings"]
)
model.get_layer("embeddings_layer_norm").gamma.assign(
weights["bert/embeddings/LayerNorm/gamma"]
)
model.get_layer("embeddings_layer_norm").beta.assign(
weights["bert/embeddings/LayerNorm/beta"]
)
for i in range(model.num_layers):
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/key/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/key/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/query/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/query/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/attention/self/value/kernel"].reshape(
(EMBEDDING_SIZE, NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/self/value/bias"].reshape(
(NUM_ATTN_HEADS, -1)
)
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
weights[
f"bert/encoder/layer_{i}/attention/output/dense/kernel"
].reshape((NUM_ATTN_HEADS, -1, EMBEDDING_SIZE))
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/attention/output/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
weights[f"bert/encoder/layer_{i}/attention/output/LayerNorm/gamma"]
)
model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(
weights[f"bert/encoder/layer_{i}/attention/output/LayerNorm/beta"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/intermediate/dense/kernel"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/intermediate/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
weights[f"bert/encoder/layer_{i}/output/dense/kernel"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
weights[f"bert/encoder/layer_{i}/output/dense/bias"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
weights[f"bert/encoder/layer_{i}/output/LayerNorm/gamma"]
)
model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(
weights[f"bert/encoder/layer_{i}/output/LayerNorm/beta"]
)
model.get_layer("pooled_dense").kernel.assign(
weights["bert/pooler/dense/kernel"]
)
model.get_layer("pooled_dense").bias.assign(weights["bert/pooler/dense/bias"])
pass<jupyter_output><empty_output><jupyter_text>Load Bert Large from TF-Hub.These weights have been ratified by the authors of BERT: https://github.com/google-research/bert/blob/master/README.md. BERT README statement:"***** New February 7th, 2019: TfHub Module *****BERT has been uploaded to TensorFlow Hub. See run_classifier_with_tfhub.py for an example of how to use the TF Hub module, or run an example in the browser on Colab." TF Hub statement:"The weights of this model are those released by the original BERT authors."<jupyter_code>text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
preprocessor = hub.load(
"https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3"
)
tokenizer = hub.KerasLayer(preprocessor.tokenize, name="tokenizer")
tokenized_text = tokenizer(text_input)
packer = hub.KerasLayer(
preprocessor.bert_pack_inputs, arguments=dict(seq_length=512), name="packer"
)
encoder_inputs = packer([tokenized_text])
encoder = hub.KerasLayer(
f"https://tfhub.dev/tensorflow/bert_en_cased_{MODEL_SPEC_STR}/4",
trainable=True,
)
outputs = encoder(encoder_inputs)
pooled_output = outputs["pooled_output"] # [batch_size, 1024].
sequence_output = outputs["sequence_output"] # [batch_size, seq_length, 1024].
embedding_model = tf.keras.Model(text_input, (pooled_output, sequence_output))
def preprocess(x):
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(
vocabulary=vocab_path, lowercase=False
)
packer = keras_nlp.layers.MultiSegmentPacker(
sequence_length=model.max_sequence_length,
start_value=tokenizer.token_to_id("[CLS]"),
end_value=tokenizer.token_to_id("[SEP]"),
)
return packer(tokenizer(x))
token_ids, segment_ids = preprocess(["The quick brown fox."])
keras_nlp_output = model(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)
orig_pooled_output, orig_sequence_output = embedding_model(
tf.constant(["The quick brown fox."])
)
keras_nlp_output["pooled_output"][0, :10], orig_pooled_output[0, :10]
# Very close! Though not 100% exact.
(
tf.reduce_mean(keras_nlp_output["pooled_output"] - orig_pooled_output),
tf.reduce_mean(keras_nlp_output["sequence_output"] - orig_sequence_output),
)
# Save BertLarge checkpoint
model.save_weights(f"""{MODEL_NAME}.h5""")
model2 = keras_nlp.models.BertLarge(vocabulary_size=VOCAB_SIZE)
model2.load_weights(f"""{MODEL_NAME}.h5""")
# Same output from loaded checkpoint
keras_nlp_output2 = model2(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)
(
tf.reduce_mean(
keras_nlp_output["pooled_output"] - keras_nlp_output2["pooled_output"]
),
tf.reduce_mean(
keras_nlp_output["sequence_output"]
- keras_nlp_output2["sequence_output"]
),
)
# Save vocab file as well
vocab_info = tf.io.gfile.GFile(vocab_path).read()
f = open("vocab.txt", "w")
f.write(vocab_info)
# Get MD5 of model
!md5sum """{MODEL_NAME}.h5"""
# Upload model to drive
# from google.colab import drive
# drive.mount('/content/drive')
# Check uploaded model once added to repo
model_cloud = keras_nlp.models.BertLarge(weights="cased_en")
# Same output from cloud model
keras_nlp_output_cloud = model_cloud(
{
"token_ids": token_ids,
"segment_ids": segment_ids,
"padding_mask": token_ids != 0,
}
)["pooled_output"]
tf.reduce_mean(keras_nlp_output["pooled_output"] - keras_nlp_output_cloud)
keras_nlp_output_cloud[0, :10]<jupyter_output><empty_output> | keras-nlp/tools/checkpoint_conversion/bert_large_cased_en.ipynb/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/bert_large_cased_en.ipynb",
"repo_id": "keras-nlp",
"token_count": 4981
} | 149 |
# Copyright 2023 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import numpy as np
import tensorflow as tf
import transformers
from absl import app
from absl import flags
from checkpoint_conversion_utils import get_md5_checksum
import keras_nlp
PRESET_MAP = {
"opt_125m_en": "facebook/opt-125m",
"opt_1.3b_en": "facebook/opt-1.3b",
"opt_2.7b_en": "facebook/opt-2.7b",
"opt_6.7b_en": "facebook/opt-6.7b",
}
FLAGS = flags.FLAGS
flags.DEFINE_string(
"preset", None, f'Must be one of {",".join(PRESET_MAP.keys())}'
)
def convert_weights(hf_model):
print("\n-> Convert original weights to KerasNLP format.")
# Load PyTorch OPT checkpoint.
keras_nlp_model = keras_nlp.models.OPTBackbone.from_preset(
FLAGS.preset, load_weights=False
)
# Token embedding.
keras_nlp_model.get_layer("embeddings").token_embedding.embeddings.assign(
hf_model.model.decoder.embed_tokens.weight
)
# Position embedding.
keras_nlp_model.get_layer(
"embeddings"
).position_embedding.position_embeddings.assign(
hf_model.model.decoder.embed_positions.weight[2:, :]
)
num_heads = keras_nlp_model.num_heads
hidden_dim = keras_nlp_model.hidden_dim
# Transformer layers.
for i in range(keras_nlp_model.num_layers):
# Self-attention.
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.kernel.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.q_proj.kernel,
(hidden_dim, num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._query_dense.bias.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.q_proj.bias,
(num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.kernel.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.k_proj.kernel,
(hidden_dim, num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._key_dense.bias.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.k_proj.bias,
(num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.kernel.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.v_proj.kernel,
(hidden_dim, num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._value_dense.bias.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.v_proj.bias,
(num_heads, -1),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.kernel.assign(
tf.reshape(
hf_model.model.decoder.layers[i].self_attn.out_proj.kernel,
(num_heads, -1, hidden_dim),
)
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer._output_dense.bias.assign(
hf_model.model.decoder.layers[i].self_attn.out_proj.bias,
)
# Attention LayerNorm
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.gamma.assign(
hf_model.model.decoder.layers[i].self_attn_layer_norm.gamma
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._self_attention_layer_norm.beta.assign(
hf_model.model.decoder.layers[i].self_attn_layer_norm.beta
)
# Intermediate FF layer
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.kernel.assign(
hf_model.model.decoder.layers[i].fc1.kernel
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_intermediate_dense.bias.assign(
hf_model.model.decoder.layers[i].fc1.bias
)
# Output dense layer
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.kernel.assign(
hf_model.model.decoder.layers[i].fc2.kernel
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_output_dense.bias.assign(
hf_model.model.decoder.layers[i].fc2.bias
)
# FF LayerNorm
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.gamma.assign(
hf_model.model.decoder.layers[i].final_layer_norm.gamma
)
keras_nlp_model.get_layer(
f"transformer_layer_{i}"
)._feedforward_layer_norm.beta.assign(
hf_model.model.decoder.layers[i].final_layer_norm.beta
)
# Output LayerNorm
keras_nlp_model.get_layer("layer_norm").gamma.assign(
hf_model.model.decoder.final_layer_norm.gamma
)
keras_nlp_model.get_layer("layer_norm").beta.assign(
hf_model.model.decoder.final_layer_norm.beta
)
# Save the model.
model_path = f"./{FLAGS.preset}/model.h5"
print(f"-> Save KerasNLP model weights to `{model_path}`.")
keras_nlp_model.save_weights(model_path)
print("-> Print MD5 checksum of the model weights files.")
print(f"`{model_path}` md5sum: ", get_md5_checksum(model_path))
return keras_nlp_model
def extract_vocab(hf_tokenizer):
vocabulary_path = f"./{FLAGS.preset}/vocab.json"
merges_path = f"./{FLAGS.preset}/merges.txt"
print(f"\n-> Save KerasNLP vocab to `{vocabulary_path}`.")
print(f"-> Save KerasNLP merges to `{merges_path}`.")
# Huggingface has a save_vocabulary function but it's not byte-for-byte
# with the source. Instead copy the original downloaded file directly.
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "vocab.json"
),
vocabulary_path,
)
shutil.copyfile(
transformers.utils.hub.get_file_from_repo(
hf_tokenizer.name_or_path, "merges.txt"
),
merges_path,
)
keras_nlp_tokenizer = keras_nlp.models.OPTTokenizer(
vocabulary=vocabulary_path, merges=merges_path
)
print("-> Print MD5 checksum of the vocab files.")
print(f"`{vocabulary_path}` md5sum: ", get_md5_checksum(vocabulary_path))
print(f"`{merges_path}` md5sum: ", get_md5_checksum(merges_path))
return keras_nlp_tokenizer
def check_output(
keras_nlp_model,
keras_nlp_tokenizer,
hf_model,
hf_tokenizer,
):
print("\n-> Check the outputs.")
input_str = ["the quick brown fox ran, galloped and jumped."]
sequence_length = 16
packer = keras_nlp.layers.StartEndPacker(
sequence_length=sequence_length,
start_value=keras_nlp_tokenizer.start_token_id,
pad_value=keras_nlp_tokenizer.pad_token_id,
)
# KerasNLP
token_ids = packer(keras_nlp_tokenizer(input_str))
padding_mask = token_ids != keras_nlp_tokenizer.pad_token_id
keras_nlp_inputs = {
"token_ids": token_ids,
"padding_mask": padding_mask,
}
keras_nlp_output = keras_nlp_model(keras_nlp_inputs)
# HF
hf_inputs = hf_tokenizer(
input_str,
padding="max_length",
max_length=sequence_length,
return_tensors="tf",
)
hf_output = hf_model(
**hf_inputs, return_dict=True, output_hidden_states=True
)
    # Compare tokenized inputs. This should be a complete match.
print("KerasNLP inputs:", keras_nlp_inputs)
print("HF inputs:", hf_inputs)
# Compare outputs, this should match closely, though not exactly.
hf_output = hf_output.last_hidden_state
print("KerasNLP output:", keras_nlp_output[0, 0, :5])
print("HF output:", hf_output[0, 0, :5])
difference = keras_nlp_output - hf_output
difference_non_padding = tf.gather_nd(difference, tf.where(padding_mask))
print("Difference:", np.mean(difference_non_padding))
def main(_):
hf_id = PRESET_MAP[FLAGS.preset]
os.mkdir(f"./{FLAGS.preset}")
print("\n-> Load HF model.")
hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_id)
hf_model = transformers.TFAutoModel.from_pretrained(hf_id)
keras_nlp_tokenizer = extract_vocab(hf_tokenizer)
keras_nlp_model = convert_weights(hf_model)
check_output(
keras_nlp_model,
keras_nlp_tokenizer,
hf_model,
hf_tokenizer,
)
if __name__ == "__main__":
flags.mark_flag_as_required("preset")
app.run(main)
| keras-nlp/tools/checkpoint_conversion/convert_opt_checkpoints.py/0 | {
"file_path": "keras-nlp/tools/checkpoint_conversion/convert_opt_checkpoints.py",
"repo_id": "keras-nlp",
"token_count": 4670
} | 150 |
"""Enables dynamic setting of underlying Keras module.
"""
# flake8: noqa:F401
from .affine_transformations import *
from .dataframe_iterator import DataFrameIterator
from .directory_iterator import DirectoryIterator
from .image_data_generator import ImageDataGenerator
from .iterator import Iterator
from .numpy_array_iterator import NumpyArrayIterator
from .utils import *
| keras-preprocessing/keras_preprocessing/image/__init__.py/0 | {
"file_path": "keras-preprocessing/keras_preprocessing/image/__init__.py",
"repo_id": "keras-preprocessing",
"token_count": 96
} | 151 |
from keras_preprocessing.image import iterator
def test_iterator_empty_directory():
# Testing with different batch sizes
for batch_size in [0, 32]:
data_iterator = iterator.Iterator(0, batch_size, False, 0)
ret = next(data_iterator.index_generator)
assert ret.size == 0
| keras-preprocessing/tests/image/iterator_test.py/0 | {
"file_path": "keras-preprocessing/tests/image/iterator_test.py",
"repo_id": "keras-preprocessing",
"token_count": 107
} | 152 |
coverage:
status:
patch:
default:
target: 100%
| keras-tuner/codecov.yml/0 | {
"file_path": "keras-tuner/codecov.yml",
"repo_id": "keras-tuner",
"token_count": 34
} | 153 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hypertunable version of EfficientNet based on Keras.applications."""
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.backend import keras
from keras_tuner.backend.keras import layers
from keras_tuner.engine import hypermodel
efficientnet = keras.applications.efficientnet
EFFICIENTNET_MODELS = {
"B0": efficientnet.EfficientNetB0,
"B1": efficientnet.EfficientNetB1,
"B2": efficientnet.EfficientNetB2,
"B3": efficientnet.EfficientNetB3,
"B4": efficientnet.EfficientNetB4,
"B5": efficientnet.EfficientNetB5,
"B6": efficientnet.EfficientNetB6,
"B7": efficientnet.EfficientNetB7,
}
EFFICIENTNET_IMG_SIZE = {
"B0": 224,
"B1": 240,
"B2": 260,
"B3": 300,
"B4": 380,
"B5": 456,
"B6": 528,
"B7": 600,
}
@keras_tuner_export("keras_tuner.applications.HyperEfficientNet")
class HyperEfficientNet(hypermodel.HyperModel):
"""An EfficientNet hypermodel.
Models built by `HyperEfficientNet` take images with shape (height, width,
    channels) as input. The outputs are one-hot encoded with the length matching
the number of classes specified by the `classes` argument.
Args:
input_shape: Optional shape tuple, e.g. `(256, 256, 3)`. One of
`input_shape` or `input_tensor` must be specified.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model. One of `input_shape` or
`input_tensor` must be specified.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
augmentation_model: Optional `Model` or `HyperModel` instance for image
augmentation.
**kwargs: Additional keyword arguments that apply to all hypermodels.
See `keras_tuner.HyperModel`.
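    Example (a minimal usage sketch; `x_train`, `y_train`, `x_val` and `y_val`
    are placeholder arrays, and the labels are assumed to be one-hot encoded):
    ```python
    import keras_tuner
    hypermodel = keras_tuner.applications.HyperEfficientNet(
        input_shape=(32, 32, 3), classes=10
    )
    tuner = keras_tuner.RandomSearch(
        hypermodel, objective="val_accuracy", max_trials=2
    )
    tuner.search(x_train, y_train, validation_data=(x_val, y_val))
    ```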
"""
def __init__(
self,
input_shape=None,
input_tensor=None,
classes=None,
augmentation_model=None,
**kwargs,
):
if not isinstance(
augmentation_model, (hypermodel.HyperModel, keras.Model, type(None))
):
raise ValueError(
"Keyword augmentation_model should be "
"a `HyperModel`, a Keras `Model` or "
f"empty. Received {augmentation_model}."
)
if not classes:
raise ValueError("You must specify `classes`.")
if input_shape is None and input_tensor is None:
raise ValueError(
"You must specify either `input_shape` or `input_tensor`."
)
self.input_shape = input_shape
self.input_tensor = input_tensor
self.classes = classes
self.augmentation_model = augmentation_model
super().__init__(**kwargs)
def build(self, hp):
if self.input_tensor is not None:
inputs = keras.utils.get_source_inputs(self.input_tensor)
x = self.input_tensor
else:
inputs = layers.Input(shape=self.input_shape)
x = inputs
if self.augmentation_model:
if isinstance(self.augmentation_model, hypermodel.HyperModel):
augmentation_model = self.augmentation_model.build(hp)
elif isinstance(self.augmentation_model, keras.models.Model):
augmentation_model = self.augmentation_model
x = augmentation_model(x)
        # Select one of the pre-trained EfficientNet variants as the feature
        # extractor.
version = hp.Choice(
"version", [f"B{i}" for i in range(8)], default="B0"
)
img_size = EFFICIENTNET_IMG_SIZE[version]
x = layers.Resizing(img_size, img_size, interpolation="bilinear")(x)
efficientnet_model = EFFICIENTNET_MODELS[version](
include_top=False, input_tensor=x
)
# Rebuild top layers of the model.
x = efficientnet_model.output
pooling = hp.Choice("pooling", ["avg", "max"], default="avg")
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
top_dropout_rate = hp.Float(
"top_dropout_rate",
min_value=0.2,
max_value=0.8,
step=0.2,
default=0.2,
)
x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
x = layers.Dense(self.classes, activation="softmax", name="probs")(x)
# compile
model = keras.Model(inputs, x, name="EfficientNet")
self._compile(model, hp)
return model
def _compile(self, model, hp):
"""Compile model using hyperparameters in hp.
When subclassing the hypermodel, this may be overridden to change
behavior of compiling.
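        For example, a subclass could swap in a different optimizer (an
        illustrative sketch; the hyperparameter name is arbitrary):
        ```python
        class MyHyperEfficientNet(HyperEfficientNet):
            def _compile(self, model, hp):
                learning_rate = hp.Float(
                    "learning_rate", 1e-4, 1e-2, sampling="log"
                )
                model.compile(
                    optimizer=keras.optimizers.Adam(learning_rate),
                    loss="categorical_crossentropy",
                    metrics=["accuracy"],
                )
        ```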
"""
learning_rate = hp.Choice(
"learning_rate", [0.1, 0.01, 0.001], default=0.01
)
optimizer = keras.optimizers.SGD(
momentum=0.1, learning_rate=learning_rate
)
model.compile(
optimizer=optimizer,
loss="categorical_crossentropy",
metrics=["accuracy"],
)
| keras-tuner/keras_tuner/applications/efficientnet.py/0 | {
"file_path": "keras-tuner/keras_tuner/applications/efficientnet.py",
"repo_id": "keras-tuner",
"token_count": 2548
} | 154 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the OracleServicer class."""
import os
import keras_tuner
from keras_tuner.distribute import oracle_chief
from keras_tuner.distribute import oracle_client
from keras_tuner.engine import metrics_tracking
from keras_tuner.test_utils import mock_distribute
from keras_tuner.tuners import randomsearch
def test_get_space(tmp_path):
def _test_get_space():
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 10, default=3)
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
retrieved_hps = client.get_space()
assert retrieved_hps.values == {"a": 3}
assert len(retrieved_hps.space) == 1
mock_distribute.mock_distribute(_test_get_space)
def test_update_space(tmp_path):
def _test_update_space():
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"), max_trials=10
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 10, default=5)
hps.Choice("b", [1, 2, 3])
client.update_space(hps)
retrieved_hps = client.get_space()
assert len(retrieved_hps.space) == 2
assert retrieved_hps.values["a"] == 5
assert retrieved_hps.values["b"] == 1
mock_distribute.mock_distribute(_test_update_space)
def test_create_trial(tmp_path):
def _test_create_trial():
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 10, default=5)
hps.Choice("b", [1, 2, 3])
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
trial = client.create_trial(tuner_id)
assert trial.status == "RUNNING"
a = trial.hyperparameters.get("a")
assert a >= 0 and a <= 10
b = trial.hyperparameters.get("b")
assert b in {1, 2, 3}
mock_distribute.mock_distribute(_test_create_trial)
def test_update_trial(tmp_path):
def _test_update_trial():
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 10, default=5)
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
trial = client.create_trial(tuner_id)
assert "score" not in trial.metrics.metrics
trial_id = trial.trial_id
client.update_trial(trial_id, {"score": 1}, step=2)
updated_trial = client.get_trial(trial_id)
assert updated_trial.metrics.get_history("score") == [
metrics_tracking.MetricObservation([1], step=2)
]
mock_distribute.mock_distribute(_test_update_trial)
def test_end_trial(tmp_path):
def _test_end_trial():
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 10, default=5)
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", "max"),
max_trials=10,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
trial = client.create_trial(tuner_id)
trial_id = trial.trial_id
client.update_trial(trial_id, {"score": 1}, step=2)
trial.status = "FAILED"
client.end_trial(trial)
updated_trial = client.get_trial(trial_id)
assert updated_trial.status == "FAILED"
mock_distribute.mock_distribute(_test_end_trial)
def test_get_best_trials(tmp_path):
def _test_get_best_trials():
hps = keras_tuner.HyperParameters()
hps.Int("a", 0, 100, default=5)
hps.Int("b", 0, 100, default=6)
oracle = randomsearch.RandomSearchOracle(
objective=keras_tuner.Objective("score", direction="max"),
max_trials=10,
hyperparameters=hps,
)
oracle._set_project_dir(tmp_path, "untitled")
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "chief" in tuner_id:
oracle_chief.start_server(oracle)
else:
client = oracle_client.OracleClient(oracle)
trial_scores = {}
for score in range(10):
trial = client.create_trial(tuner_id)
assert trial.status == "RUNNING"
assert "a" in trial.hyperparameters.values
assert "b" in trial.hyperparameters.values
trial_id = trial.trial_id
client.update_trial(trial_id, {"score": score})
trial.status = "COMPLETED"
client.end_trial(trial)
trial_scores[trial_id] = score
best_trials = client.get_best_trials(3)
best_scores = [t.score for t in best_trials]
assert best_scores == [9, 8, 7]
# Check that trial_ids are correctly mapped to scores.
for t in best_trials:
assert trial_scores[t.trial_id] == t.score
mock_distribute.mock_distribute(_test_get_best_trials, num_workers=1)
| keras-tuner/keras_tuner/distribute/oracle_chief_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/distribute/oracle_chief_test.py",
"repo_id": "keras-tuner",
"token_count": 3321
} | 155 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from keras_tuner.engine import hyperparameters as hp_module
def test_choice():
choice = hp_module.Choice("choice", [1, 2, 3], default=2)
choice = hp_module.Choice.from_config(choice.get_config())
assert choice.default == 2
assert choice.random_sample() in [1, 2, 3]
assert choice.random_sample(123) == choice.random_sample(123)
assert abs(choice.value_to_prob(1) - 1.0 / 6) < 1e-4
# No default
choice = hp_module.Choice("choice", [1, 2, 3])
assert choice.default == 1
with pytest.raises(ValueError, match="default value should be"):
hp_module.Choice("choice", [1, 2, 3], default=4)
@pytest.mark.parametrize(
"values,ordered_arg,ordered_val",
[
([1, 2, 3], True, True),
([1, 2, 3], False, False),
([1, 2, 3], None, True),
(["a", "b", "c"], False, False),
(["a", "b", "c"], None, False),
],
)
def test_choice_ordered(values, ordered_arg, ordered_val):
choice = hp_module.Choice("choice", values, ordered=ordered_arg)
assert choice.ordered == ordered_val
choice_new = hp_module.Choice(**choice.get_config())
assert choice_new.ordered == ordered_val
def test_choice_ordered_invalid():
with pytest.raises(ValueError, match="must be `False`"):
hp_module.Choice("a", ["a", "b"], ordered=True)
def test_choice_types():
values1 = ["a", "b", 0]
with pytest.raises(TypeError, match="can contain only one"):
hp_module.Choice("a", values1)
values2 = [{"a": 1}, {"a": 2}]
with pytest.raises(TypeError, match="can contain only `int`"):
hp_module.Choice("a", values2)
def test_choice_value_not_provided_error():
with pytest.raises(ValueError, match="`values` must be provided"):
hp_module.Choice("a", [])
def test_choice_repr():
assert repr(hp_module.Choice("a", [1, 2, 3])) == repr(
hp_module.Choice("a", [1, 2, 3])
)
def test_choice_none_as_default():
hp = hp_module.Choice("a", [1, 2], default=None)
assert hp.default == 1
def test_choice_default_not_none():
hp = hp_module.Choice("a", [1, 2], default=2)
assert hp.default == 2
def test_choice_proto():
hp = hp_module.Choice("a", [2.3, 4.5, 6.3], ordered=True)
proto = hp.to_proto()
assert proto.name == "a"
assert proto.ordered
assert np.allclose([v.float_value for v in proto.values], [2.3, 4.5, 6.3])
# Proto stores the implicit default.
assert np.isclose(proto.default.float_value, 2.3)
new_hp = hp_module.Choice.from_proto(proto)
assert new_hp.name == "a"
assert np.allclose(new_hp.values, hp.values)
assert new_hp.ordered
assert np.isclose(new_hp._default, 2.3)
# Test int values.
int_choice = hp_module.Choice("b", [1, 2, 3], ordered=False, default=2)
new_int_choice = hp_module.Choice.from_proto(int_choice.to_proto())
assert int_choice.get_config() == new_int_choice.get_config()
# Test float values.
float_choice = hp_module.Choice(
"b", [0.5, 2.5, 4.0], ordered=False, default=2.5
)
new_float_choice = hp_module.Choice.from_proto(float_choice.to_proto())
assert float_choice.get_config() == new_float_choice.get_config()
def test_prob_one_choice():
hp = hp_module.Choice("a", [0, 1, 2])
# Check that boundaries are valid.
value = hp.prob_to_value(1)
assert value == 2
value = hp.prob_to_value(0)
assert value == 0
def test_choice_values_property():
assert list(hp_module.Choice("choice", [0, 1, 2]).values) == [0, 1, 2]
| keras-tuner/keras_tuner/engine/hyperparameters/hp_types/choice_hp_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/hyperparameters/hp_types/choice_hp_test.py",
"repo_id": "keras-tuner",
"token_count": 1607
} | 156 |
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keras_tuner.api_export import keras_tuner_export
from keras_tuner.engine import metrics_tracking
@keras_tuner_export(
["keras_tuner.Objective", "keras_tuner.engine.oracle.Objective"]
)
class Objective:
"""The objective for optimization during tuning.
Args:
name: String. The name of the objective.
direction: String. The value should be "min" or "max" indicating
whether the objective value should be minimized or maximized.
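    Example (a minimal sketch; `build_model` and the metric name are
    placeholders for your own model-building function and logged metric):
    ```python
    objective = keras_tuner.Objective("val_f1_score", direction="max")
    tuner = keras_tuner.RandomSearch(
        build_model, objective=objective, max_trials=10
    )
    ```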
"""
def __init__(self, name, direction):
self.name = name
self.direction = direction
def has_value(self, logs):
"""Check if objective value exists in logs.
Args:
logs: A dictionary with the metric names as the keys and the metric
values as the values, which is in the same format as the `logs`
argument for `Callback.on_epoch_end()`.
Returns:
Boolean, whether we can compute objective value from the logs.
"""
return self.name in logs
def get_value(self, logs):
"""Get the objective value from the metrics logs.
Args:
logs: A dictionary with the metric names as the keys and the metric
values as the values, which is in the same format as the `logs`
argument for `Callback.on_epoch_end()`.
Returns:
The objective value.
"""
return logs[self.name]
def better_than(self, a, b):
"""Whether the first objective value is better than the second.
Args:
a: A float, an objective value to compare.
b: A float, another objective value to compare.
Returns:
Boolean, whether the first objective value is better than the
second.
"""
return (a > b and self.direction == "max") or (
a < b and self.direction == "min"
)
def __eq__(self, obj):
return self.name == obj.name and self.direction == obj.direction
def __str__(self):
return f'Objective(name="{self.name}", direction="{self.direction}")'
class DefaultObjective(Objective):
"""Default objective to minimize if not provided by the user."""
def __init__(self):
super().__init__(name="default_objective", direction="min")
class MultiObjective(Objective):
"""A container for a list of objectives.
Args:
objectives: A list of `Objective`s.
"""
def __init__(self, objectives):
super().__init__(name="multi_objective", direction="min")
self.objectives = objectives
self.name_to_direction = {
objective.name: objective.direction for objective in self.objectives
}
def has_value(self, logs):
return all(key in logs for key in self.name_to_direction)
def get_value(self, logs):
obj_value = 0
for metric_name, metric_value in logs.items():
if metric_name not in self.name_to_direction:
continue
if self.name_to_direction[metric_name] == "min":
obj_value += metric_value
else:
obj_value -= metric_value
return obj_value
def __eq__(self, obj):
if self.name_to_direction.keys() != obj.name_to_direction.keys():
return False
return sorted(self.objectives, key=lambda x: x.name) == sorted(
obj.objectives, key=lambda x: x.name
)
def __str__(self):
return (
"Multi"
+ super().__str__()
+ f": [{', '.join(map(lambda x: str(x), self.objectives))}]"
)
def create_objective(objective):
if objective is None:
return DefaultObjective()
if isinstance(objective, list):
return MultiObjective([create_objective(obj) for obj in objective])
if isinstance(objective, Objective):
return objective
if not isinstance(objective, str):
raise TypeError(
"`objective` not understood, expected str or "
f"`Objective` object, found: {objective}"
)
direction = metrics_tracking.infer_metric_direction(objective)
if direction is None:
error_msg = (
'Could not infer optimization direction ("min" or "max") '
'for unknown metric "{obj}". Please specify the objective as'
"a `keras_tuner.Objective`, for example `keras_tuner.Objective("
'"{obj}", direction="min")`.'
)
error_msg = error_msg.format(obj=objective)
raise ValueError(error_msg)
return Objective(name=objective, direction=direction)
| keras-tuner/keras_tuner/engine/objective.py/0 | {
"file_path": "keras-tuner/keras_tuner/engine/objective.py",
"repo_id": "keras-tuner",
"token_count": 2079
} | 157 |
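A short usage sketch for the objective helpers above; the metric names and values are illustrative only.
from keras_tuner.engine import objective as objective_module
# A single objective: `direction` decides what "better" means.
obj = objective_module.Objective(name="val_loss", direction="min")
assert obj.better_than(0.10, 0.25)
# A list of objectives becomes a MultiObjective whose value is minimized:
# "min" metrics are added, "max" metrics are subtracted.
multi = objective_module.create_objective(
    [
        objective_module.Objective("val_loss", "min"),
        objective_module.Objective("val_accuracy", "max"),
    ]
)
logs = {"val_loss": 0.3, "val_accuracy": 0.9, "lr": 1e-3}
assert multi.has_value(logs)
print(multi.get_value(logs))  # 0.3 - 0.9, i.e. roughly -0.6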
# Copyright 2019 The KerasTuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test mock running KerasTuner in a distributed tuning setting."""
import os
import time
import pytest
from keras_tuner.test_utils import mock_distribute
def test_mock_distribute(tmp_path):
def process_fn():
assert "KERASTUNER_ORACLE_IP" in os.environ
# Wait, to test that other threads aren't overriding env vars.
time.sleep(1)
assert isinstance(os.environ, mock_distribute.MockEnvVars)
tuner_id = os.environ["KERASTUNER_TUNER_ID"]
if "worker" in tuner_id:
# Give the chief process time to write its value,
# as we do not join on the chief since it will run
# a server.
time.sleep(2)
fname = os.path.join(str(tmp_path), tuner_id)
with open(fname, "w") as f:
f.write(tuner_id)
mock_distribute.mock_distribute(process_fn, num_workers=3)
for tuner_id in {"chief", "worker0", "worker1", "worker2"}:
fname = os.path.join(str(tmp_path), tuner_id)
with open(fname, "r") as f:
assert f.read() == tuner_id
def test_exception_raising():
def worker_error_fn():
if "worker" in os.environ["KERASTUNER_TUNER_ID"]:
raise ValueError("Found a worker error")
with pytest.raises(ValueError, match="Found a worker error"):
mock_distribute.mock_distribute(worker_error_fn, num_workers=2)
def chief_error_fn():
if "chief" in os.environ["KERASTUNER_TUNER_ID"]:
raise ValueError("Found a chief error")
with pytest.raises(ValueError, match="Found a chief error"):
mock_distribute.mock_distribute(chief_error_fn, num_workers=2)
| keras-tuner/keras_tuner/test_utils/mock_distribute_test.py/0 | {
"file_path": "keras-tuner/keras_tuner/test_utils/mock_distribute_test.py",
"repo_id": "keras-tuner",
"token_count": 872
} | 158 |
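A minimal sketch of how `mock_distribute` is used in a test: it runs the function once as the chief and once per worker, each thread seeing its own mocked environment variables.
import os
from keras_tuner.test_utils import mock_distribute
def process_fn():
    # Each thread gets its own KERASTUNER_* environment variables.
    print(os.environ["KERASTUNER_TUNER_ID"], os.environ["KERASTUNER_ORACLE_IP"])
mock_distribute.mock_distribute(process_fn, num_workers=2)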
[tool.black]
line-length = 80
[tool.isort]
profile = "black"
known_first_party = ["keras_tuner", "tests"]
default_section = "THIRDPARTY"
line_length = 80
skip_glob = "keras_tuner/protos/*"
force_single_line = "True" | keras-tuner/pyproject.toml/0 | {
"file_path": "keras-tuner/pyproject.toml",
"repo_id": "keras-tuner",
"token_count": 87
} | 159 |
import numpy as np
from keras import Model
from keras import layers
from keras import losses
from keras import metrics
from keras import optimizers
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = layers.Dense(hidden_dim, activation="relu")
self.dense2 = layers.Dense(hidden_dim, activation="relu")
self.dense3 = layers.Dense(output_dim)
def call(self, x):
x = self.dense1(x)
x = self.dense2(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 6
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(
x, y, batch_size=batch_size, epochs=epochs, validation_split=0.2
)
print("History:")
print(history.history)
model.summary()
| keras/examples/demo_subclass.py/0 | {
"file_path": "keras/examples/demo_subclass.py",
"repo_id": "keras",
"token_count": 402
} | 160 |
from keras.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.backend.common.dtypes import result_type
from keras.backend.common.keras_tensor import KerasTensor
from keras.backend.common.keras_tensor import any_symbolic_tensors
from keras.backend.common.keras_tensor import is_keras_tensor
from keras.backend.common.name_scope import name_scope
from keras.backend.common.stateless_scope import StatelessScope
from keras.backend.common.stateless_scope import get_stateless_scope
from keras.backend.common.stateless_scope import in_stateless_scope
from keras.backend.common.variables import AutocastScope
from keras.backend.common.variables import get_autocast_scope
from keras.backend.common.variables import is_float_dtype
from keras.backend.common.variables import is_int_dtype
from keras.backend.common.variables import standardize_dtype
from keras.backend.common.variables import standardize_shape
from keras.backend.config import epsilon
from keras.backend.config import floatx
from keras.backend.config import image_data_format
from keras.backend.config import set_epsilon
from keras.backend.config import set_floatx
from keras.backend.config import set_image_data_format
from keras.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
| keras/keras/backend/__init__.py/0 | {
"file_path": "keras/keras/backend/__init__.py",
"repo_id": "keras",
"token_count": 618
} | 161 |
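A quick sketch of selecting the backend that the dispatch above resolves; this relies on the standard `KERAS_BACKEND` environment variable being set before the first `keras` import.
import os
os.environ["KERAS_BACKEND"] = "jax"  # must happen before importing keras
from keras import backend
print(backend.backend())  # "jax"
print(backend.floatx())   # default float dtype, e.g. "float32"
print(backend.epsilon())  # e.g. 1e-07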
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
from keras import backend
from keras import initializers
from keras.backend.common.variables import ALLOWED_DTYPES
from keras.backend.common.variables import AutocastScope
from keras.backend.common.variables import KerasVariable
from keras.backend.common.variables import shape_equal
from keras.backend.common.variables import standardize_dtype
from keras.backend.common.variables import standardize_shape
from keras.testing import test_case
class VariableInitializationTest(test_case.TestCase):
"""Tests for KerasVariable.__init__()"""
def test_deferred_initialization(self):
"""Tests deferred initialization of variables."""
with backend.StatelessScope():
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
# Variables can nevertheless be accessed
_ = v + 1
self.assertEqual(v._value.shape, (2, 2))
with self.assertRaisesRegex(ValueError, "while in a stateless scope"):
with backend.StatelessScope():
v = backend.Variable(initializer=0)
def test_variable_initialization_with_non_callable(self):
"""Test variable init with non-callable initializer."""
v = backend.Variable(initializer=np.ones((2, 2)))
self.assertAllClose(v.value, np.ones((2, 2)))
def test_variable_initialization_with_strings(self):
"""Test variable init with non-callable initializer."""
v = backend.Variable(initializer="ones", shape=(2, 2))
self.assertAllClose(v.value, np.ones((2, 2)))
def test_variable_initialization_with_non_trainable(self):
"""Test variable initialization with non-trainable flag."""
v = backend.Variable(initializer=np.ones((2, 2)), trainable=False)
self.assertFalse(v.trainable)
def test_variable_initialization_without_shape(self):
"""Test variable init without a shape."""
with self.assertRaisesRegex(
ValueError,
"When creating a Variable from an initializer, the `shape` ",
):
backend.Variable(initializer=initializers.RandomNormal())
def test_deferred_initialize_already_initialized(self):
"""Test deferred init on an already initialized variable."""
v = backend.Variable(initializer=np.ones((2, 2)))
with self.assertRaisesRegex(
ValueError, f"Variable {v.path} is already initialized."
):
v._deferred_initialize()
def test_variable_initialize(self):
"""Test initializing a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
init_value = np.array([4, 5, 6])
v._initialize(value=init_value)
self.assertAllClose(v.value, init_value)
def test_variable_without_shape_from_callable_initializer(self):
"""Test that KerasVariable raises error
if shape is not provided for callable initializer."""
with self.assertRaisesRegex(
ValueError, "When creating a Variable from an initializer"
):
KerasVariable(initializer=lambda: np.ones((2, 2)))
class VariablePropertiesTest(test_case.TestCase, parameterized.TestCase):
"""Tests for KerasVariable._deferred_initialize
KerasVariable._maybe_autocast"""
def test_deferred_assignment(self):
"""Tests deferred assignment to variables."""
with backend.StatelessScope() as scope:
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
v.assign(np.zeros((2, 2)))
v.assign_add(2 * np.ones((2, 2)))
v.assign_sub(np.ones((2, 2)))
out = scope.get_current_value(v)
self.assertAllClose(out, np.ones((2, 2)))
def test_trainable_setter(self):
"""Tests the trainable setter."""
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
)
self.assertTrue(v.trainable)
v.trainable = False
self.assertFalse(v.trainable)
if backend.backend() == "torch":
v.trainable = True
self.assertTrue(v._value.requires_grad)
v.trainable = False
self.assertFalse(v._value.requires_grad)
def test_autocasting(self):
"""Tests autocasting of float variables."""
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
print("open scope")
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
# Test non-float variables are not affected
v = backend.Variable(
initializer=initializers.Ones(),
shape=(2, 2),
dtype="int32",
trainable=False,
)
self.assertEqual(v.dtype, "int32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
with AutocastScope("float16"):
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
@parameterized.parameters(
*(dtype for dtype in ALLOWED_DTYPES if dtype != "string")
)
def test_standardize_dtype(self, dtype):
"""Tests standardize_dtype for all ALLOWED_DTYPES except string."""
if backend.backend() == "torch" and dtype in (
"uint16",
"uint32",
"uint64",
):
self.skipTest(f"torch backend does not support dtype {dtype}")
if backend.backend() == "jax":
import jax
if not jax.config.x64_enabled and "64" in dtype:
self.skipTest(
f"jax backend does not support {dtype} without x64 enabled"
)
x = backend.convert_to_tensor(np.zeros(()), dtype)
actual = standardize_dtype(x.dtype)
self.assertEqual(actual, dtype)
def test_standardize_dtype_with_torch_dtype(self):
"""Tests dtype standardization with PyTorch dtypes."""
import torch
x = torch.randn(4, 4)
backend.standardize_dtype(x.dtype)
def test_name_validation(self):
"""Tests validation of variable names."""
with self.assertRaisesRegex(
ValueError, "Argument `name` must be a string"
):
KerasVariable(initializer=initializers.RandomNormal(), name=12345)
with self.assertRaisesRegex(ValueError, "cannot contain character `/`"):
KerasVariable(
initializer=initializers.RandomNormal(), name="invalid/name"
)
def test_standardize_shape_with_none(self):
"""Tests standardizing shape with None."""
with self.assertRaisesRegex(
ValueError, "Undefined shapes are not supported."
):
standardize_shape(None)
def test_standardize_shape_with_non_iterable(self):
"""Tests shape standardization with non-iterables."""
with self.assertRaisesRegex(
ValueError, "Cannot convert '42' to a shape."
):
standardize_shape(42)
def test_standardize_shape_with_valid_input(self):
"""Tests standardizing shape with valid input."""
shape = [3, 4, 5]
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_negative_entry(self):
"""Tests standardizing shape with negative entries."""
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
standardize_shape([3, 4, -5])
def test_autocast_scope_with_non_float_dtype(self):
"""Tests autocast scope with non-float dtype."""
with self.assertRaisesRegex(
ValueError,
"`AutocastScope` can only be used with a floating-point",
):
_ = AutocastScope("int32")
def test_variable_path_creation(self):
"""Test path creation for a variable."""
v = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
self.assertEqual(v.path, "test_var")
class VariableNumpyValueAndAssignmentTest(test_case.TestCase):
"""tests for KerasVariable.numpy(), KerasVariable.value()
and KerasVariable.assign()"""
def test_variable_numpy(self):
"""Test retrieving the value of a variable as a numpy array."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertIsInstance(v.numpy(), np.ndarray)
self.assertAllClose(v.numpy(), np.array([1, 2, 3]))
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Tests for MirroredVariable under tf backend",
)
def test_variable_numpy_scalar(self):
from keras.utils.module_utils import tensorflow as tf
strategy = tf.distribute.MirroredStrategy(["cpu:0", "cpu:1"])
with strategy.scope():
v = backend.Variable(initializer=0.0)
np_value = backend.convert_to_numpy(v)
self.assertIsInstance(np_value, np.ndarray)
self.assertAllClose(np_value, 0.0)
def test_variable_value(self):
"""Test retrieving the value of a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v.value, np.array([1, 2, 3]))
def test_variable_assign(self):
"""Test assigning a new value to a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
v.assign(np.array([4, 5, 6]))
self.assertAllClose(v.value, np.array([4, 5, 6]))
def test_variable_assign_add(self):
"""Test the assign_add method on a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
v.assign_add(np.array([1, 1, 1]))
self.assertAllClose(v.value, np.array([2, 3, 4]))
def test_variable_assign_sub(self):
"""Test the assign_sub method on a variable."""
v = backend.Variable(initializer=np.array([2, 3, 4]))
v.assign_sub(np.array([1, 1, 1]))
self.assertAllClose(v.value, np.array([1, 2, 3]))
def test_deferred_initialize_within_stateless_scope(self):
"""Test deferred init within a stateless scope."""
with backend.StatelessScope():
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
with self.assertRaisesRegex(
ValueError,
"You are attempting to initialize a variable "
"while in a stateless scope. This is disallowed.",
):
v._deferred_initialize()
class VariableDtypeShapeNdimRepr(test_case.TestCase):
"""tests for dtype, shape, ndim, __repr__"""
def test_variable_dtype(self):
"""Test retrieving the dtype of a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertEqual(v.dtype, "float32")
def test_variable_shape(self):
"""Test retrieving the shape of a variable."""
v = backend.Variable(initializer=np.array([[1, 2], [3, 4]]))
self.assertEqual(v.shape, (2, 2))
def test_variable_ndim(self):
"""Test retrieving the number of dimensions of a variable."""
v = backend.Variable(initializer=np.array([[1, 2], [3, 4]]))
self.assertEqual(v.ndim, 2)
def test_variable_repr(self):
"""Test the string representation of a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]), name="test_var")
expected_repr = (
"<KerasVariable shape=(3,), dtype=float32, path=test_var>"
)
self.assertEqual(repr(v), expected_repr)
def test_variable_getitem(self):
"""Test getting an item from a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertEqual(v[0], 1)
def test_variable_initialize(self):
"""Test initializing a variable."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
init_value = np.array([4, 5, 6])
v._initialize(value=init_value)
self.assertAllClose(v.value, init_value)
def test_variable_convert_to_tensor(self):
"""Test converting a variable to a tensor."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v._convert_to_tensor(v.value), np.array([1, 2, 3]))
def test_variable_convert_to_tensor_with_dtype(self):
"""Test converting a variable to a tensor with a dtype."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(
v._convert_to_tensor(v.value, dtype="float32"), np.array([1, 2, 3])
)
def test_variable_array(self):
"""Test converting a variable to an array."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v.__array__(), np.array([1, 2, 3]))
class VariableOperationsTest(test_case.TestCase):
"""Tests for operations on KerasVariable."""
def test_variable_as_boolean(self):
"""Test converting a variable to boolean."""
v = backend.Variable(initializer=np.ones((2, 2)))
with self.assertRaisesRegex(
TypeError, "A Keras Variable cannot be used as a boolean."
):
bool(v)
def test__neg__(self):
"""Test negating a variable."""
v = backend.Variable(initializer=np.array([-1, 2]), trainable=False)
self.assertAllClose(v.__neg__(), np.array([1, -2]))
def test__abs__(self):
"""Test absolute value on a variable."""
v = backend.Variable(initializer=np.array([-1, 2]), trainable=False)
self.assertAllClose(v.__abs__(), np.array([1, 2]))
def test__invert__(self):
"""Test bitwise not on a variable."""
v = backend.Variable(
initializer=np.array([True, False]), trainable=False, dtype="bool"
)
self.assertAllClose(v.__invert__(), np.array([False, True]))
def test__eq__(self):
"""Test equality comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(v.__eq__(np.array([1, 2])), np.array([True, True]))
def test__ne__(self):
"""Test inequality comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(
v.__ne__(np.array([1, 2])), np.array([False, False])
)
def test__lt__(self):
"""Test less than comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(
v.__lt__(np.array([1, 2])), np.array([False, False])
)
def test__le__(self):
"""Test less than or equal to comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(v.__le__(np.array([1, 2])), np.array([True, True]))
def test__gt__(self):
"""Test greater than comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(
v.__gt__(np.array([1, 2])), np.array([False, False])
)
def test__ge__(self):
"""Test greater than or equal to comparison on a variable."""
v = backend.Variable(initializer=np.array([1, 2]), trainable=False)
self.assertAllClose(v.__ge__(np.array([1, 2])), np.array([True, True]))
def test__add__(self):
"""Test addition operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__add__(v2), np.array([5, 7, 9]))
def test__radd__(self):
"""Test reverse addition operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__radd__(v2), np.array([5, 7, 9]))
def test__sub__(self):
"""Test subtraction operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__sub__(v2), np.array([-3, -3, -3]))
def test__rsub__(self):
"""Test reverse subtraction operation on a variable."""
v1 = backend.Variable(initializer=np.array([4, 5, 6]))
v2 = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v1.__rsub__(v2), np.array([-3, -3, -3]))
def test__mul__(self):
"""Test multiplication operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__mul__(v2), np.array([4, 10, 18]))
def test__rmul__(self):
"""Test reverse multiplication operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__rmul__(v2), np.array([4, 10, 18]))
def test__truediv__(self):
"""Test true division operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
self.assertAllClose(v1.__truediv__(v2), np.array([0.25, 0.4, 0.5]))
def test__rtruediv__(self):
"""Test reverse true division operation on a variable."""
v1 = backend.Variable(initializer=np.array([4, 5, 6]))
v2 = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v1.__rtruediv__(v2), np.array([0.25, 0.4, 0.5]))
def test__floordiv__(self):
"""Test floordiv operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([-4, 5, 6]))
self.assertAllClose(v1.__floordiv__(v2), np.array([-1, 0, 0]))
def test__rfloordiv__(self):
"""Test reverse floordiv operation on a variable."""
v1 = backend.Variable(initializer=np.array([-4, 5, 6]))
v2 = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v1.__rfloordiv__(v2), np.array([-1, 0, 0]))
def test__mod__(self):
"""Test mod operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([-4, 5, 6]))
self.assertAllClose(v1.__mod__(v2), np.array([-3, 2, 3]))
def test__rmod__(self):
"""Test reverse mod operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v1.__rmod__(v2), np.array([0, 0, 0]))
def test__pow__(self):
"""Test pow operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([-4, 5, 6]))
self.assertAllClose(v1.__pow__(v2), np.array([1, 32, 729]))
def test__rpow__(self):
"""Test reverse power operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([1, 2, 3]))
self.assertAllClose(v1.__rpow__(v2), np.array([1, 4, 27]))
def test__matmul__(self):
"""Test matmul operation on a variable."""
v1 = backend.Variable(initializer=np.array([[1, 2], [3, 4]]))
v2 = backend.Variable(initializer=np.array([[5, 6], [7, 8]]))
self.assertAllClose(v1.__matmul__(v2), np.array([[19, 22], [43, 50]]))
def test__rmatmul__(self):
"""Test reverse matmul operation on a variable."""
v1 = backend.Variable(initializer=np.array([[1, 2], [3, 4]]))
v2 = backend.Variable(initializer=np.array([[5, 6], [7, 8]]))
self.assertAllClose(v1.__rmatmul__(v2), np.array([[23, 34], [31, 46]]))
def test__and__(self):
"""Test bitwise and operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__and__(v2), np.array([True, False]))
def test__rand__(self):
"""Test reverse bitwise and operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__rand__(v2), np.array([True, False]))
def test__or__(self):
"""Test bitwise or operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__or__(v2), np.array([True, True]))
def test__ror__(self):
"""Test reverse bitwise or operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__ror__(v2), np.array([True, True]))
def test__xor__(self):
"""Test bitwise xor operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__xor__(v2), np.array([False, True]))
def test__rxor__(self):
"""Test reverse bitwise xor operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__rxor__(v2), np.array([False, True]))
def test__pos__(self):
"""Test unary plus on a variable."""
v = backend.Variable(initializer=np.array([-1, 2]), trainable=False)
self.assertAllClose(v.__pos__(), np.array([-1, 2]))
def test_variable_pow(self):
"""Test pow operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
result = v1**v2
self.assertAllClose(result, np.array([1, 32, 729]))
def test_variable_rpow(self):
"""Test reverse power operation on a variable."""
v1 = backend.Variable(initializer=np.array([1, 2, 3]))
v2 = backend.Variable(initializer=np.array([4, 5, 6]))
result = v2**v1
self.assertAllClose(result, np.array([4, 25, 216]))
class VariableBinaryOperationsTest(test_case.TestCase):
"""Tests for binary operations on KerasVariable."""
def test_variable_bool(self):
"""Test converting a variable to boolean."""
v = backend.Variable(initializer=np.array([1, 2, 3]))
with self.assertRaises(TypeError):
bool(v)
def test_variable_neg(self):
"""Test negating a variable."""
v = backend.Variable(initializer=np.array([-1, 2]))
neg_v = -v
self.assertAllClose(neg_v, np.array([1, -2]))
def test_variable_abs(self):
"""Test absolute value of a variable."""
v = backend.Variable(initializer=np.array([-1, 2]))
abs_v = abs(v)
self.assertAllClose(abs_v, np.array([1, 2]))
def test_invalid_dtype(self):
"""Test invalid dtype standardization."""
invalid_dtype = "invalid_dtype"
with self.assertRaisesRegex(
ValueError, f"Invalid dtype: {invalid_dtype}"
):
standardize_dtype(invalid_dtype)
@patch("keras.backend.config.backend", return_value="jax")
def test_jax_backend_b_dimension(self, mock_backend):
"""Test 'b' dimension handling with JAX backend."""
shape = (3, "b", 5)
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, shape)
def test_negative_shape_entry(self):
"""Test negative shape entry."""
shape = (3, -1, 5)
with self.assertRaisesRegex(
ValueError,
"Negative dimensions are not allowed",
):
standardize_shape(shape)
def test_shape_equal_length_mismatch(self):
"""Test mismatch in lengths of shapes."""
self.assertFalse(shape_equal((3, 2), (3, 2, 4)))
self.assertFalse(shape_equal((), (3,)))
self.assertFalse(shape_equal((3, 2, 4, 5), (3, 2, 4)))
@pytest.mark.skipif(
backend.backend() != "torch",
reason="Tests for standardize_shape with Torch backend",
)
class TestStandardizeShapeWithTorch(test_case.TestCase):
"""Tests for standardize_shape with Torch backend."""
def test_standardize_shape_with_torch_size_containing_negative_value(self):
"""Tests shape with a negative value."""
shape_with_negative_value = (3, 4, -5)
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
_ = standardize_shape(shape_with_negative_value)
def test_standardize_shape_with_torch_size_valid(self):
"""Tests a valid shape."""
shape_valid = (3, 4, 5)
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_torch_size_multidimensional(self):
"""Tests shape of a multi-dimensional tensor."""
import torch
tensor = torch.randn(3, 4, 5)
shape = tensor.size()
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_torch_size_single_dimension(self):
"""Tests shape of a single-dimensional tensor."""
import torch
tensor = torch.randn(10)
shape = tensor.size()
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (10,))
def test_standardize_shape_with_torch_size_with_valid_1_dimension(self):
"""Tests a valid shape."""
shape_valid = [3]
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3,))
def test_standardize_shape_with_torch_size_with_valid_2_dimension(self):
"""Tests a valid shape."""
shape_valid = [3, 4]
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4))
def test_standardize_shape_with_torch_size_with_valid_3_dimension(self):
"""Tests a valid shape."""
shape_valid = [3, 4, 5]
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_torch_size_with_negative_value(self):
"""Tests shape with a negative value appended."""
import torch
tensor = torch.randn(3, 4, 5)
shape = tuple(tensor.size())
shape_with_negative = shape + (-1,)
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Negative dimensions are not",
):
_ = standardize_shape(shape_with_negative)
def test_standardize_shape_with_non_integer_entry(self):
"""Tests shape with a non-integer value."""
with self.assertRaisesRegex(
# different error message for torch
ValueError,
r"invalid literal for int\(\) with base 10: 'a'",
):
standardize_shape([3, 4, "a"])
def test_standardize_shape_with_negative_entry(self):
"""Tests shape with a negative value."""
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
standardize_shape([3, 4, -5])
def test_standardize_shape_with_valid_not_tuple(self):
"""Tests a valid shape."""
shape_valid = [3, 4, 5]
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4, 5))
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Tests for standardize_shape with others backend",
)
class TestStandardizeShapeWithOutTorch(test_case.TestCase):
"""Tests for standardize_shape with others backend."""
def test_standardize_shape_with_out_torch_negative_value(self):
"""Tests shape with a negative value."""
shape_with_negative_value = (3, 4, -5)
with self.assertRaisesRegex(
ValueError,
"Cannot convert '\\(3, 4, -5\\)' to a shape. Negative dimensions",
):
_ = standardize_shape(shape_with_negative_value)
def test_standardize_shape_with_out_torch_string(self):
"""Tests shape with a string value."""
shape_with_string = (3, 4, "5")
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Found invalid entry '5'.",
):
_ = standardize_shape(shape_with_string)
def test_standardize_shape_with_out_torch_float(self):
"""Tests shape with a float value."""
shape_with_float = (3, 4, 5.0)
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Found invalid entry '5.0'.",
):
_ = standardize_shape(shape_with_float)
def test_standardize_shape_with_out_torch_valid(self):
"""Tests a valid shape."""
shape_valid = (3, 4, 5)
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_out_torch_valid_not_tuple(self):
"""Tests a valid shape."""
shape_valid = [3, 4, 5]
standardized_shape = standardize_shape(shape_valid)
self.assertEqual(standardized_shape, (3, 4, 5))
| keras/keras/backend/common/variables_test.py/0 | {
"file_path": "keras/keras/backend/common/variables_test.py",
"repo_id": "keras",
"token_count": 13573
} | 162 |
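A condensed sketch of the variable behaviors covered by the tests above: float variables are read back in the scope dtype inside an `AutocastScope`, and assignments made under a `StatelessScope` are captured by the scope instead of mutating the variable in place.
import numpy as np
from keras import backend
from keras.backend.common.variables import AutocastScope
v = backend.Variable(initializer=np.ones((2, 2)), dtype="float32")
with AutocastScope("float16"):
    assert backend.standardize_dtype(v.value.dtype) == "float16"
assert backend.standardize_dtype(v.value.dtype) == "float32"
with backend.StatelessScope() as scope:
    v.assign(np.zeros((2, 2)))       # recorded in the scope, not applied
print(scope.get_current_value(v))    # the staged value (zeros)
print(v.numpy())                     # still all ones outside the scope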
import functools
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from keras.utils import jax_utils
def axis_shape_dims_for_broadcast_in_dim(axis, input_shape, insert_dims):
"""Turn the `axis` argument to the arguments needed by `broadcast_in_dim`.
Args:
axis: single int or a tuple of ints for the axis argument. The list of
dimensions to reduce or insert.
input_shape: the shape of the input as a tuple ints.
insert_dims: `False` turns dimensions in `axis` to 1s (use case:
reduction along `axis` with `keep_dims=True`). `True`, inserts 1s
according to `axis` (use case: `expand_dims`).
Returns:
A tuple of three lists
- The canonical value for `axis`: always a list, negative values have
been resolved and values are sorted in ascending order.
- The output shape: `input_shape` with 1s at the indices in `axis`, for
use as the `shape` argument of `broadcast_in_dim`.
- The broadcast dimensions: list of dimensions not in `axis`, for use as
the `broadcast_dimensions` argument of `broadcast_in_dim`.
"""
if axis is None:
raise ValueError("Received `None` value for `axis`")
if isinstance(axis, int):
axis = (axis,)
# Check uniqueness.
if len(set(axis)) != len(axis):
raise ValueError(f"Repeated axis in `axis`: {axis}")
result_dims = len(input_shape)
if insert_dims:
result_dims += len(axis)
# Resolve negative values.
canonical_axis = []
for a in axis:
if not -result_dims <= a < result_dims:
raise ValueError(
f"In `axis`, axis {a} is out of bounds for array "
f"of dimension {result_dims}"
)
if a < 0:
a = a + result_dims
canonical_axis.append(a)
# Check uniqueness again after resolving negative values.
if len(set(canonical_axis)) != len(canonical_axis):
raise ValueError(f"Repeated axis in `axis`: {canonical_axis}")
canonical_axis = sorted(canonical_axis)
# Compute output shape.
output_shape = list(input_shape)
for i in canonical_axis:
if insert_dims:
output_shape.insert(i, 1)
else:
output_shape[i] = 1
broadcast_dims = [i for i in range(result_dims) if i not in canonical_axis]
return canonical_axis, output_shape, broadcast_dims
def bcoo_add_indices(x1, x2, sum_duplicates):
"""Add the indices of `x2` to `x1` with zero values.
Args:
x1: `BCOO` tensor to add indices to.
x2: `BCOO` tensor to take the indices to add to x1.
sum_duplicates: if `True` calls `bcoo_sum_duplicates` on the output.
Returns:
a `BCOO` tensor equal to `x1` but with extra zeros at indices in `x2`
that were missing in `x1`.
"""
x2_zeros = jnp.zeros(x2.data.shape, x1.data.dtype)
concat_axis = len(x1.indices.shape) - 2
output_indices = jnp.concatenate([x1.indices, x2.indices], axis=concat_axis)
output_data = jnp.concatenate([x1.data, x2_zeros], axis=concat_axis)
output = jax_sparse.BCOO((output_data, output_indices), shape=x1.shape)
if sum_duplicates:
output = jax_sparse.bcoo_sum_duplicates(output)
return output
def densifying_unary(func):
"""Decorator to add support for `JAXSparse` tensors (including `BCOO`) to a
non-zero-preserving element-wise unary operator.
There are requirements on the operator for this decorator to work correctly:
- The operator must be element-wise
- The operator must be unary (one input tensor and one output tensor)
- The operator must return a tensor of the same shape.
Additional arguments to the function (besides the input tensor) are
supported. The returned result is a dense tensor.
Args:
func: The unary operator to wrap.
Returns:
Wrapped function that supports `JAXSparse` tensors.
"""
@functools.wraps(func)
def sparse_wrapper(x, *args, **kwargs):
if isinstance(x, jax_sparse.JAXSparse):
x = x.todense()
return func(x, *args, **kwargs)
return sparse_wrapper
def elementwise_unary(linear):
"""Decorator to add support for `BCOO` sparse tensors to a zero-preserving
element-wise unary operator.
There are requirements on the operator for this decorator to work correctly:
- The operator must be element-wise
- The operator must be unary (one input tensor and one output tensor)
- The operator must return a tensor of the same shape, and if it is a
`BCOO` tensor, the indices of the result must be the same. Therefore:
- Reduction operations are not supported (e.g. `mean`).
- Operations for which the result may be dense (e.g. `reciprocal`), or
the sparse indices depend on the inputs are not supported (e.g.
`clip`). This implies that `func(0)` must be 0.
Additional arguments to the function (besides the input tensor) are
supported as long as they cannot change the indices of the result. For
instance,`round` is supported, but `clip` is not supported as
`clip(x, 1.0, 2.0)` would always return a dense tensor.
Note that if an input sparse tensor contains zero values, the indices and
the zero values are preserved.
Args:
linear: if `True`, means that the operation is such that
`op(a + b) == op(a) + op(b)`.
Returns:
Wrapped function that supports `BCOO` sparse tensors.
"""
def wrap_elementwise_unary(func):
@functools.wraps(func)
def sparse_wrapper(x, *args, **kwargs):
if isinstance(x, jax_sparse.BCOO):
if not linear and not x.unique_indices:
x = jax_sparse.bcoo_sum_duplicates(x)
return jax_sparse.BCOO(
(func(x.data, *args, **kwargs), x.indices), shape=x.shape
)
else:
return func(x, *args, **kwargs)
return sparse_wrapper
return wrap_elementwise_unary
def elementwise_binary_union(linear, use_sparsify):
"""Decorator to add support for `JAXSparse` tensors (including `BCOO`) to an
element-wise binary operator such that the indices present in the result are
the union of the indices in the two operands.
The primary use case for this is the `add` and `subtract` operators.
There are requirements on the operator for this decorator to work correctly:
- The operator must be element-wise.
- The operator must be binary (two input tensors and one output tensor).
- Both inputs must be of the same shape or one input must be a scalar.
- The output must be of the same shape as the (non scalar) inputs.
- The indices of the output must be the union of the indices of the inputs.
This implies that func(0, 0) must be 0. As a result, if one operand is
dense or a scalar, then the result will be dense.
Additional arguments to the function (besides the input tensors) are not
supported.
Note that if the result of the operation is zero at some indices, including
because the operands were zero at these indices, the zeros and indices are
preserved.
The `BCOO` format is the only supported one in all cases. Other formats are
not supported when `use_sparsify` is `False`.
Args:
use_sparsify: indicates that the JAX `sparsify` transform supports this
operation.
linear: if `True`, means that the operation is such that
`op(a + b, c) == op(a, c) + op(b, c)` and
`op(a, c + d) == op(a, c) + op(a, d)`.
Returns:
Wrapped function that supports `JAXSparse`.
"""
def wrap_elementwise_binary_union(func):
sparse_func = jax_sparse.sparsify(func) if use_sparsify else None
@functools.wraps(func)
def sparse_wrapper(x1, x2):
if isinstance(x1, jax_sparse.JAXSparse):
if isinstance(x2, jax_sparse.JAXSparse):
# x1 and x2 are sparse.
# The way we use `sparsify` it cannot know that the indices
# are the same, so we optimize this case here.
if (
x1.indices is x2.indices
and isinstance(x1, jax_sparse.BCOO)
and isinstance(x2, jax_sparse.BCOO)
):
if not linear and not x1.unique_indices:
x1 = jax_sparse.bcoo_sum_duplicates(x1)
x2 = jax_sparse.bcoo_sum_duplicates(x2)
return jax_sparse.BCOO(
(func(x1.data, x2.data), x1.indices),
shape=x1.shape,
indices_sorted=x1.indices_sorted,
unique_indices=x1.unique_indices,
)
elif use_sparsify:
return sparse_func(x1, x2)
elif isinstance(x1, jax_sparse.BCOO) and isinstance(
x2, jax_sparse.BCOO
):
x1 = bcoo_add_indices(x1, x2, sum_duplicates=not linear)
x2 = bcoo_add_indices(x2, x1, sum_duplicates=not linear)
return jax_sparse.BCOO(
(func(x1.data, x2.data), x1.indices),
shape=x1.shape,
indices_sorted=True,
unique_indices=True,
)
else:
raise ValueError(
"Unsupported sparse format: "
f"{x1.__class__} and {x2.__class__}"
)
else:
# x1 is sparse, x2 is dense, densify x2.
x1 = x1.todense()
elif isinstance(x2, jax_sparse.JAXSparse):
# x1 is dense, x2 is sparse, densify x2.
x2 = x2.todense()
return func(x1, x2)
return sparse_wrapper
return wrap_elementwise_binary_union
def elementwise_division(func):
"""Decorator to add support for `BCOO` sparse tensors to element-wise binary
division and related operators.
This decorator is designed for operations related to the division of
two operands (e.g. `divide`). It accepts `BCOO` tensors for both the
dividend and the divisor, but handles them differently based on whether they
are the dividend or the divisor.
- If the divisor is sparse, it is densified and the result is dense because
the result contains Inf or Nan outside of the indices of the dividend.
- If the dividend is sparse and the divisor is dense, it finds occurrences
of zeros and NaNs in the divisor. The result may therefore have more
indices than there were in the dividend to return correct values where the
divisor was zero or NaN.
- If the dividend is sparse and the divisor is a scalar, it does the
division element-wise. Note that the result is incorrectly sparse if the
scalar divisor is zero.
Args:
func: The function to wrap.
Returns:
Wrapped function that supports `BCOO` sparse tensors.
"""
sparse_func = jax_sparse.sparsify(func)
@functools.wraps(func)
def sparse_wrapper(x1, x2):
if isinstance(x1, jax_sparse.JAXSparse):
if isinstance(x2, jax_sparse.JAXSparse):
# x1 is sparse and x2 is sparse.
# Divisor is sparse, meaning we're doing divisions by zero
# outside of x2.indices, so the result is dense. Densify both.
x1 = x1.todense()
x2 = x2.todense()
elif isinstance(x1, jax_sparse.BCOO):
if not hasattr(x2, "shape") or len(x2.shape) == 0:
# x1 is sparse BCOO, x2 is scalar, apply func element-wise.
return jax_sparse.BCOO(
(func(x1.data, x2), x1.indices),
shape=x1.shape,
indices_sorted=x1.indices_sorted,
unique_indices=x1.unique_indices,
)
else:
# x1 is sparse BCOO, x2 is dense.
if not jax_utils.is_in_jax_tracing_scope(x2):
# Find zeros and nans in x2 and add indices to x1.
# 1. Create a dense mask for zeros and nans.
x2_zeros_and_nans = jnp.equal(x2, 0)
if not jnp.issubdtype(x2.dtype, jnp.integer):
x2_zeros_and_nans = jnp.logical_or(
x2_zeros_and_nans, jnp.isnan(x2)
)
# 2. Make it a BCOO of True values.
x2_zeros_and_nans = jax_sparse.bcoo_fromdense(
x2_zeros_and_nans,
n_batch=x1.n_batch,
n_dense=x1.n_dense,
index_dtype=x1.indices.dtype,
)
# 3. Add the indices to x1.
x1 = bcoo_add_indices(
x1, x2_zeros_and_nans, sum_duplicates=True
)
return sparse_func(x1, x2)
else:
raise ValueError(f"Unsupported sparse format: {x1.__class__}")
elif isinstance(x2, jax_sparse.JAXSparse):
# x1 is dense, x2 is sparse, densify x2
x2 = x2.todense()
return func(x1, x2)
return sparse_wrapper
| keras/keras/backend/jax/sparse.py/0 | {
"file_path": "keras/keras/backend/jax/sparse.py",
"repo_id": "keras",
"token_count": 6522
} | 163 |
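A small sketch of the wrappers above, applied to `jnp.sin` (zero-preserving, so sparsity and indices survive) and `jnp.cos` (not zero-preserving, so the densifying wrapper is appropriate). The input values are illustrative only.
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from keras.backend.jax import sparse as sparse_utils
# Wrap a zero-preserving op; the result keeps the BCOO indices of the input.
sparse_sin = sparse_utils.elementwise_unary(linear=False)(jnp.sin)
x = jax_sparse.BCOO.fromdense(jnp.array([[0.0, 1.0], [2.0, 0.0]]))
y = sparse_sin(x)             # still a BCOO with the same indices
print(y.todense())
# Non-zero-preserving ops densify the input and return a dense result.
dense_cos = sparse_utils.densifying_unary(jnp.cos)
print(dense_cos(x))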
"""!!!DO NOT USE!!!
Distribution related class for Tensorflow backend.
This is just a prototype and we might want to unify it
with other backends in the future.
"""
import tensorflow as tf
from tensorflow.experimental import dtensor
def list_devices(device_type=None):
"""Return all the available devices based on the device type.
Note that this should return the global devices in a distributed setting.
Args:
device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Default to `gpu` or
`tpu` if available when device_type is not provided. Otherwise will
return the `cpu` devices.
Return:
List of devices that are available for distribute computation.
"""
device_type = device_type.upper() if device_type else "CPU"
# DTensor doesn't support getting global devices, even when knowing the
# Mesh. Use TF API instead to get global devices. Coordinator service is
# enabled by default with DTensor, so that list_logical_devices() returns
# a list of global devices. More context can be found in b/254911601.
tf_devices = tf.config.list_logical_devices(device_type=device_type)
return [
f"{device.device_type.lower()}:{device.name.split(':')[-1]}"
for device in tf_devices
]
def distribute_value(value, tensor_layout):
# TODO
pass
def _to_dtensor_mesh(device_mesh):
"""Convert the DeviceMesh to Tensorflow backend specific Mesh.
Args:
device_mesh: DeviceMesh instance to convert.
Returns:
A `tf.dtensor.Mesh` instance.
"""
mesh_dims = list(zip(device_mesh.axis_names, device_mesh.shape))
return dtensor.create_distributed_mesh(
mesh_dims=mesh_dims, local_devices=device_mesh.devices.flatten()
)
def _to_dtensor_layout(tensor_layout):
"""Convert the TensorLayout to Tensorflow backend specific Sharding.
Args:
tensor_layout: TensorLayout instance to convert.
Returns:
A `tf.dtensor.Layout` instance.
"""
if tensor_layout.device_mesh is None:
raise ValueError(
"Cannot create sharding when device mesh is not set for "
"TensorLayout."
)
sharding_specs = [
axis if axis else dtensor.UNSHARDED for axis in tensor_layout.axes
]
dtensor_mesh = _to_dtensor_mesh(tensor_layout.device_mesh)
return dtensor.Layout(sharding_specs=sharding_specs, mesh=dtensor_mesh)
| keras/keras/backend/tensorflow/distribution_lib.py/0 | {
"file_path": "keras/keras/backend/tensorflow/distribution_lib.py",
"repo_id": "keras",
"token_count": 896
} | 164 |
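For illustration, a minimal call to the device-listing helper above; the printed output is an assumption for a single-CPU host.
from keras.backend.tensorflow import distribution_lib
print(distribution_lib.list_devices("cpu"))  # e.g. ['cpu:0']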
import contextlib
import warnings
import numpy as np
import tensorflow as tf
import tree
from packaging.version import Version
from tensorflow.python.eager import context as tf_context
from keras import callbacks as callbacks_module
from keras import metrics as metrics_module
from keras import optimizers as optimizers_module
from keras.trainers import trainer as base_trainer
from keras.trainers.data_adapters import data_adapter_utils
from keras.trainers.epoch_iterator import EpochIterator
from keras.utils import traceback_utils
class TensorFlowTrainer(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.train_function = None
self.test_function = None
self.predict_function = None
# The model must be created under the scope of the distribution
# strategy it will be trained with.
if tf.distribute.has_strategy():
self._distribute_strategy = tf.distribute.get_strategy()
else:
self._distribute_strategy = None
self._distribute_reduction_method = None
self._supports_reduce_retracing = Version(tf.__version__) >= Version(
"2.9.0"
)
@property
def distribute_strategy(self):
return self._distribute_strategy or tf.distribute.get_strategy()
@property
def distribute_reduction_method(self):
return self._distribute_reduction_method or "auto"
@distribute_reduction_method.setter
def distribute_reduction_method(self, value):
self._distribute_reduction_method = value
def train_step(self, data):
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
# Forward pass
with tf.GradientTape() as tape:
if self._call_has_training_arg:
y_pred = self(x, training=True)
else:
y_pred = self(x)
loss = self.compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight
)
self._loss_tracker.update_state(loss)
if self.optimizer is not None:
loss = self.optimizer.scale_loss(loss)
# Compute gradients
if self.trainable_weights:
trainable_weights = self.trainable_weights
gradients = tape.gradient(loss, trainable_weights)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_weights))
else:
warnings.warn("The model does not have any trainable weights.")
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def test_step(self, data):
x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
loss = self.compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight
)
self._loss_tracker.update_state(loss)
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
return y_pred
def make_train_function(self, force=False):
if self.train_function is not None and not force:
return self.train_function
@tf.autograph.experimental.do_not_convert
def one_step_on_data(data):
"""Runs a single training step on a batch of data."""
return self.train_step(data)
if not self.run_eagerly:
kwargs = {"jit_compile": self.jit_compile}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
one_step_on_data = tf.function(one_step_on_data, **kwargs)
@tf.autograph.experimental.do_not_convert
def one_step_on_iterator(iterator):
"""Runs a single training step given a Dataset iterator."""
data = next(iterator)
outputs = self.distribute_strategy.run(
one_step_on_data, args=(data,)
)
outputs = reduce_per_replica(
outputs,
self.distribute_strategy,
reduction=self.distribute_reduction_method,
)
return outputs
@tf.autograph.experimental.do_not_convert
def multi_step_on_iterator(iterator):
for _ in range(self.steps_per_execution):
outputs = one_step_on_iterator(iterator)
return outputs
if self.steps_per_execution > 1:
train_function = multi_step_on_iterator
else:
train_function = one_step_on_iterator
if not self.run_eagerly:
kwargs = {}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
train_function = tf.function(train_function, **kwargs)
self.train_function = train_function
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
@tf.autograph.experimental.do_not_convert
def one_step_on_data(data):
"""Runs a single test step on a batch of data."""
return self.test_step(data)
if not self.run_eagerly and self.jit_compile:
kwargs = {"jit_compile": True}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
one_step_on_data = tf.function(one_step_on_data, **kwargs)
@tf.autograph.experimental.do_not_convert
def one_step_on_iterator(iterator):
"""Runs a single test step given a Dataset iterator."""
data = next(iterator)
outputs = self.distribute_strategy.run(
one_step_on_data, args=(data,)
)
outputs = reduce_per_replica(
outputs,
self.distribute_strategy,
reduction=self.distribute_reduction_method,
)
return outputs
@tf.autograph.experimental.do_not_convert
def multi_step_on_iterator(iterator):
for _ in range(self.steps_per_execution):
outputs = one_step_on_iterator(iterator)
return outputs
if self.steps_per_execution > 1:
test_function = multi_step_on_iterator
else:
test_function = one_step_on_iterator
if not self.run_eagerly:
kwargs = {}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
test_function = tf.function(test_function, **kwargs)
self.test_function = test_function
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
@tf.autograph.experimental.do_not_convert
def one_step_on_data(data):
"""Runs a predict test step on a batch of data."""
return self.predict_step(data)
if not self.run_eagerly and self.jit_compile:
kwargs = {"jit_compile": True}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
one_step_on_data = tf.function(one_step_on_data, **kwargs)
@tf.autograph.experimental.do_not_convert
def one_step_on_data_distributed(data):
data = data[0]
outputs = self.distribute_strategy.run(
one_step_on_data, args=(data,)
)
outputs = reduce_per_replica(
outputs,
self.distribute_strategy,
reduction=self.distribute_reduction_method,
)
return outputs
@tf.autograph.experimental.do_not_convert
def multi_step_on_data(data):
outputs = one_step_on_data_distributed(data[:1])
for single_step_data in data[1:]:
step_outputs = one_step_on_data_distributed([single_step_data])
outputs = tf.nest.map_structure(
lambda t1, t2: concat([t1, t2]), outputs, step_outputs
)
return outputs
if self.steps_per_execution > 1:
predict_function = multi_step_on_data
else:
predict_function = one_step_on_data_distributed
if not self.run_eagerly:
kwargs = {}
if self._supports_reduce_retracing:
kwargs.update({"reduce_retracing": True})
predict_function = tf.function(predict_function, **kwargs)
self.predict_function = predict_function
@traceback_utils.filter_traceback
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
self._assert_compile_called("fit")
# TODO: respect compiled trainable state
self._eval_epoch_iterator = None
if validation_split and validation_data is None:
# Create the validation data using the training data. Only supported
# for TF/numpy/jax arrays.
(
x,
y,
sample_weight,
), validation_data = data_adapter_utils.train_validation_split(
(x, y, sample_weight), validation_split=validation_split
)
if validation_data is not None:
(
val_x,
val_y,
val_sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
# Create an iterator that yields batches for one epoch.
epoch_iterator = TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
shuffle=shuffle,
class_weight=class_weight,
distribute_strategy=self.distribute_strategy,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=epochs,
steps=epoch_iterator.num_batches,
model=self,
)
self.stop_training = False
self.make_train_function()
callbacks.on_train_begin()
training_logs = None
logs = None
for epoch in range(initial_epoch, epochs):
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
with epoch_iterator.catch_stop_iteration():
for step, iterator in epoch_iterator.enumerate_epoch():
callbacks.on_train_batch_begin(step)
logs = self.train_function(iterator)
callbacks.on_train_batch_end(
step, self._pythonify_logs(logs)
)
if self.stop_training:
break
# Override with model metrics instead of last step logs
epoch_logs = self.get_metrics_result()
# Run validation.
if validation_data is not None and self._should_eval(
epoch, validation_freq
):
# Create EpochIterator for evaluation and cache it.
if getattr(self, "_eval_epoch_iterator", None) is None:
self._eval_epoch_iterator = TFEpochIterator(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
distribute_strategy=self.distribute_strategy,
steps_per_execution=self.steps_per_execution,
steps_per_epoch=validation_steps,
shuffle=False,
)
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
return_dict=True,
_use_cached_eval_dataset=True,
)
val_logs = {
"val_" + name: val for name, val in val_logs.items()
}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
training_logs = epoch_logs
if self.stop_training:
break
if (
isinstance(self.optimizer, optimizers_module.Optimizer)
and epochs > 0
):
self.optimizer.finalize_variable_values(self.trainable_weights)
# If _eval_epoch_iterator exists, delete it after all epochs are done.
if getattr(self, "_eval_epoch_iterator", None) is not None:
del self._eval_epoch_iterator
callbacks.on_train_end(logs=training_logs)
return self.history
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
self._assert_compile_called("evaluate")
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
distribute_strategy=self.distribute_strategy,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = None
self.reset_metrics()
with epoch_iterator.catch_stop_iteration():
for step, iterator in epoch_iterator.enumerate_epoch():
callbacks.on_test_batch_begin(step)
logs = self.test_function(iterator)
callbacks.on_test_batch_end(step, self._pythonify_logs(logs))
if self.stop_evaluating:
break
logs = self.get_metrics_result()
callbacks.on_test_end(logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = TFEpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
distribute_strategy=self.distribute_strategy,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tf.nest.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
def get_data(iterator):
"""Returns data for the next execution."""
data = []
for _ in range(self.steps_per_execution):
try:
single_step_data = next(iterator)
except (StopIteration, tf.errors.OutOfRangeError) as e:
if hasattr(data, "__len__") and len(data) > 0:
                        # Suppress the error when we still have data remaining.
return data
else:
# Re-raise the error for
# TFEpochIterator.catch_stop_iteration() to catch when
# no data left.
raise e
data.append(single_step_data)
return data
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
outputs = None
with epoch_iterator.catch_stop_iteration():
for step, iterator in epoch_iterator.enumerate_epoch():
callbacks.on_predict_batch_begin(step)
data = get_data(iterator)
batch_outputs = self.predict_function(data)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(step, {"outputs": batch_outputs})
if self.stop_predicting:
break
callbacks.on_predict_end()
outputs = tree.map_structure_up_to(
batch_outputs, potentially_ragged_concat, outputs
)
return tf.nest.map_structure(convert_to_np_if_not_ragged, outputs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
self._assert_compile_called("train_on_batch")
self.make_train_function()
if class_weight is not None:
if sample_weight is not None:
raise ValueError(
"Arguments `sample_weight` and `class_weight` "
"cannot be specified at the same time. "
f"Received: sample_weight={sample_weight}, "
f"class_weight={class_weight}"
)
sample_weight = data_adapter_utils.class_weight_to_sample_weights(
y, class_weight
)
def data():
yield (x, y, sample_weight)
logs = self.train_function(data())
logs = tf.nest.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
self.make_test_function()
def data():
yield (x, y, sample_weight)
logs = self.test_function(data())
logs = tf.nest.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
self.make_predict_function()
batch_outputs = self.predict_function([(x,)])
batch_outputs = tf.nest.map_structure(
convert_to_np_if_not_ragged, batch_outputs
)
return batch_outputs
# Backwards compatibility shims.
@property
def compiled_metrics(self):
class DeprecatedCompiledMetric:
def update_state(_, y, y_pred, sample_weight=None):
return self._compiled_metrics_update_state(
y, y_pred, sample_weight=sample_weight
)
return DeprecatedCompiledMetric()
def _compiled_metrics_update_state(self, y, y_pred, sample_weight=None):
warnings.warn(
"`model.compiled_metrics()` is deprecated. "
"Instead, use e.g.:\n"
"```\n"
"for metric in self.metrics:\n"
" metric.update_state(y, y_pred)\n"
"```\n",
stacklevel=2,
)
for metric in self.metrics:
if isinstance(metric, metrics_module.Mean):
metric.update_state(y_pred, sample_weight=sample_weight)
else:
metric.update_state(y, y_pred, sample_weight=sample_weight)
def compiled_loss(
self, y, y_pred, sample_weight=None, regularization_losses=None
):
warnings.warn(
"`model.compiled_loss()` is deprecated. "
"Instead, use `model.compute_loss(x, y, y_pred, sample_weight)`.",
)
return self.compute_loss(
x=None, y=y, y_pred=y_pred, sample_weight=sample_weight
)
def loss(self, y, y_pred, sample_weight=None):
warnings.warn(
"`model.loss` is deprecated. "
"Instead, use `model.compute_loss(x, y, y_pred, sample_weight)`.",
)
return self.compute_loss(
x=None, y=y, y_pred=y_pred, sample_weight=sample_weight
)
class TFEpochIterator(EpochIterator):
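    """EpochIterator that distributes its data with a `tf.distribute` strategy.
    The dataset is distributed across replicas at construction time, and
    `enumerate_epoch()` yields `(step, iterator)` pairs in increments of
    `steps_per_execution`.
    """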
def __init__(self, distribute_strategy=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._distribute_strategy = distribute_strategy
dataset = self._get_iterator()
if not isinstance(dataset, tf.distribute.DistributedDataset):
dataset = self._distribute_strategy.experimental_distribute_dataset(
dataset
)
self._distributed_dataset = dataset
self._steps_seen = 0
def _get_iterator(self):
return self.data_adapter.get_tf_dataset()
def enumerate_epoch(self):
if self.steps_per_epoch:
if not self._current_iterator:
self._current_iterator = iter(self._distributed_dataset)
for step in range(
0, self.steps_per_epoch, self.steps_per_execution
):
yield step, self._current_iterator
else:
iterator = iter(self._distributed_dataset)
if self.num_batches:
for step in range(
0, self.num_batches, self.steps_per_execution
):
yield step, iterator
else:
step = -1
while True:
step += self.steps_per_execution
self._steps_seen = step + 1
yield step, iterator
self.data_adapter.on_epoch_end()
def tf_sync(self):
tf_context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
self.tf_sync()
except (StopIteration, tf.errors.OutOfRangeError):
if self._num_batches is None:
self._num_batches = self._steps_seen
warnings.warn(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate "
"at least `steps_per_epoch * epochs` batches. "
"You may need to use the `.repeat()` "
"function when building your dataset.",
stacklevel=2,
)
self._current_iterator = None
self.data_adapter.on_epoch_end()
def reduce_per_replica(values, strategy, reduction):
"""Attempt to reduce the structure `values` to single values.
Given `values` (a `tf.Tensor` or a `PerReplica` structure),
which represents the values across all the replicas, `reduce_per_replica`
attempts to "reduce" those values and returns the corresponding structure
that represents only single values.
Currently, `reduce_per_replica` is only used for reducing the metric results
from `tf.distribute.Strategy.run()`. Depending on the underlying
`Strategy` implementation, `values` may be a `PerReplica` object,
which can be thought of as a collection of values across the replicas,
or a `tf.Tensor`, if the strategy has already conducted the reduction
for the downstream library.
There are five possible outcomes of reduction:
    1) if `values` is a structure of simple `tf.Tensor`s, meaning that
reduction is not actually needed, `reduce_per_replica` returns the
structure as-is.
2) else, if `reduction="auto"`, then the best reduction strategy is
chosen based on the current environment. This should only be used
for training cases (`fit()`).
3) else, if `reduction="first"`, then `reduce_per_replica`
returns the values of the first replica. This is used in the case of
training and evaluation, where `values` is expected to hold the same
value across the replicas as a result of `Strategy`'s synchronization
across the replicas.
`reduce_per_replica` does not synchronize the values.
4) else, if `reduction="sum"`, then `reduce_per_replica` returns the sum
of values for all replicas. This may be used in the custom training loop
       case, where each replica contains different values which are not
synchronized.
5) else, if `reduction="concat"`, then `reduce_per_replica`
returns the concatenation of the values across the replicas, along the
axis of dimension 0. This is used in the inference case (`predict()`).
Args:
values: Structure of `PerReplica` objects or `tf.Tensor`s. `tf.Tensor`s
are returned as-is.
strategy: `tf.distribute.Strategy` object.
reduction: One of `"auto"`, `"first"`, `"concat"`, or `"sum"`.
`"auto"` will select `"first"` when used under a TPUStrategy, or
`"sum"` otherwise.
Returns:
Structure of `Tensor`s, representing the result of reduction.
Raises:
ValueError: if the reduction method is not supported.
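    Example:
    A minimal sketch of the trivial case: under the default (single-replica)
    strategy the values are plain tensors, so they are returned unchanged
    (outcome 1 above).
    ```python
    strategy = tf.distribute.get_strategy()
    logs = {"loss": tf.constant(1.0)}
    reduce_per_replica(logs, strategy, reduction="auto")  # returns `logs` unchanged
    ```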
"""
if reduction == "auto":
reduction = "sum" # Ignore TPU strategy which should default to "first"
def _reduce(v):
"""Reduce a single `PerReplica` object."""
if _collective_all_reduce_multi_worker(strategy):
if reduction == "concat":
return _multi_worker_concat(v, strategy)
elif reduction == "sum":
return strategy.reduce("SUM", v, axis=None)
if not _is_per_replica_instance(v):
return v
elif reduction == "first":
return strategy.experimental_local_results(v)[0]
elif reduction == "concat":
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.experimental_local_results(v))
elif reduction == "sum":
return tf.reduce_sum(strategy.experimental_local_results(v))
else:
raise ValueError(
'`reduction` must be "first", "concat", "sum", or "auto". '
f"Received: reduction={reduction}."
)
return tf.nest.map_structure(_reduce, values)
def _multi_worker_concat(v, strategy):
"""Order PerReplica objects for CollectiveAllReduceStrategy and concat."""
replicas = strategy.gather(v, axis=0)
# v might not have the same shape on different replicas
if _is_per_replica_instance(v):
shapes = tf.concat(
[
tf.expand_dims(tf.shape(single_value)[0], axis=0)
for single_value in v.values
],
axis=0,
)
all_shapes = strategy.gather(shapes, axis=0)
else:
# v is a tensor. This may happen when, say, we have 2x1 multi-worker.
all_shapes = strategy.gather(
tf.expand_dims(tf.shape(v)[0], axis=0), axis=0
)
replicas = tf.split(
replicas,
num_or_size_splits=all_shapes,
num=strategy.num_replicas_in_sync,
)
ordered_replicas = []
num_replicas_per_worker = len(strategy.extended.worker_devices)
for replica_id in range(num_replicas_per_worker):
ordered_replicas += replicas[replica_id::num_replicas_per_worker]
return concat(ordered_replicas)
def concat(tensors, axis=0):
"""Concats `tensor`s along `axis`."""
if isinstance(tensors[0], tf.SparseTensor):
return tf.sparse.concat(axis=axis, sp_inputs=tensors)
elif _is_scalar(tensors[0]):
return tf.stack(tensors, axis=axis)
else:
return tf.concat(tensors, axis=axis)
def _tpu_multi_host_concat(v, strategy):
"""Correctly order TPU PerReplica objects."""
replicas = strategy.experimental_local_results(v)
# When distributed datasets are created from Tensors / NumPy,
# TPUStrategy.experimental_distribute_dataset shards data in
# (Replica, Host) order, and TPUStrategy.experimental_local_results returns
# it in (Host, Replica) order.
num_replicas_per_host = strategy.extended.num_replicas_per_host
ordered_replicas = []
for replica_id in range(num_replicas_per_host):
ordered_replicas += replicas[replica_id::num_replicas_per_host]
return concat(ordered_replicas)
def _collective_all_reduce_multi_worker(strategy):
return (
isinstance(strategy, tf.distribute.MultiWorkerMirroredStrategy)
) and strategy.extended._in_multi_worker_mode()
def _is_per_replica_instance(obj):
return isinstance(obj, tf.distribute.DistributedValues) and isinstance(
obj, tf.__internal__.CompositeTensor
)
def _is_scalar(x):
return isinstance(x, (tf.Tensor, tf.Variable)) and x.shape.rank == 0
def _is_tpu_multi_host(strategy):
return _is_tpu_strategy(strategy) and strategy.extended.num_hosts > 1
def _is_tpu_strategy(strategy):
return _is_tpu_strategy_class(strategy.__class__)
def _is_tpu_strategy_class(clz):
def is_tpu_strat(k):
return k.__name__.startswith("TPUStrategy")
if is_tpu_strat(clz):
return True
return any(map(_is_tpu_strategy_class, clz.__bases__))
def convert_to_np_if_not_ragged(x):
if isinstance(x, tf.RaggedTensor):
return x
return x.numpy()
def potentially_ragged_concat(tensors):
"""Concats `Tensor`s along their first dimension.
Args:
tensors: List of `Tensor`s.
Returns:
Concatenation of the inputs along the first dimension -- of type
`np.ndarray` if all input shapes are compatible, or `tf.RaggedTensor`
if not.
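    Example:
    A minimal sketch (shapes are illustrative):
    ```python
    # Identical non-batch dims: dense concatenation along axis 0 -> shape (6, 3).
    potentially_ragged_concat([tf.ones((2, 3)), tf.ones((4, 3))])
    # Mismatched non-batch dims: the result is a `tf.RaggedTensor`.
    potentially_ragged_concat([tf.ones((2, 3)), tf.ones((2, 5))])
    ```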
"""
if len(tensors) == 1:
return tensors[0]
elif isinstance(tensors[0], tf.SparseTensor):
return tf.sparse.concat(axis=0, sp_inputs=tensors)
elif isinstance(tensors[0], tf.RaggedTensor):
return tf.concat(tensors, axis=0)
non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])
constant_dims = tf.math.reduce_all(
non_batch_shapes == non_batch_shapes[:1], axis=0
)
if tf.math.reduce_all(constant_dims).numpy().item():
# All non-batch dims are constant
if _is_scalar(tensors[0]):
return tf.stack(tensors, axis=0)
else:
return tf.concat(tensors, axis=0)
# First, identify constant inner dimensions by finding the
# rightmost dimension that is not constant
constant_inner_dimensions = (
constant_dims.numpy().tolist()[::-1].index(False)
)
# If there are constant inner dimensions, define a constant inner shape
if constant_inner_dimensions == 0:
constant_inner_shape = None
else:
constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]
return tf.ragged.constant(
[tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape
).merge_dims(0, 1)
| keras/keras/backend/tensorflow/trainer.py/0 | {
"file_path": "keras/keras/backend/tensorflow/trainer.py",
"repo_id": "keras",
"token_count": 16034
} | 165 |
from keras import optimizers
from keras.backend.torch.optimizers import torch_adam
class AdamW(torch_adam.Adam, optimizers.AdamW):
pass
| keras/keras/backend/torch/optimizers/torch_adamw.py/0 | {
"file_path": "keras/keras/backend/torch/optimizers/torch_adamw.py",
"repo_id": "keras",
"token_count": 50
} | 166 |
import collections
import csv
import numpy as np
from keras.api_export import keras_export
from keras.callbacks.callback import Callback
from keras.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
"""
def __init__(self, filename, separator=",", append=False):
super().__init__()
self.sep = separator
self.filename = file_utils.path_to_string(filename)
self.append = append
self.writer = None
self.keys = None
self.append_header = True
def on_train_begin(self, logs=None):
if self.append:
if file_utils.exists(self.filename):
with file_utils.File(self.filename, "r") as f:
self.append_header = not bool(len(f.readline()))
mode = "a"
else:
mode = "w"
self.csv_file = file_utils.File(self.filename, mode)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, str):
return k
elif (
isinstance(k, collections.abc.Iterable)
and not is_zero_dim_ndarray
):
return f"\"[{', '.join(map(str, k))}]\""
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
            # When validation_freq > 1, `val_` keys are not in the first
            # epoch's logs. Add the `val_` keys so that they are part of
            # the writer's fieldnames.
val_keys_found = False
for key in self.keys:
if key.startswith("val_"):
val_keys_found = True
break
if not val_keys_found:
self.keys.extend(["val_" + k for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ["epoch"] + self.keys
self.writer = csv.DictWriter(
self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({"epoch": epoch})
row_dict.update(
(key, handle_value(logs.get(key, "NA"))) for key in self.keys
)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
| keras/keras/callbacks/csv_logger.py/0 | {
"file_path": "keras/keras/callbacks/csv_logger.py",
"repo_id": "keras",
"token_count": 1516
} | 167 |
from keras import backend
from keras import ops
from keras.api_export import keras_export
from keras.callbacks.callback import Callback
@keras_export("keras.callbacks.SwapEMAWeights")
class SwapEMAWeights(Callback):
"""Swaps model weights and EMA weights before and after evaluation.
This callbacks replaces the model's weight values with the values of
the optimizer's EMA weights (the exponential moving average of the past
model weights values, implementing "Polyak averaging") before model
evaluation, and restores the previous weights after evaluation.
The `SwapEMAWeights` callback is to be used in conjunction with
an optimizer that sets `use_ema=True`.
Note that the weights are swapped in-place in order to save memory.
The behavior is undefined if you modify the EMA weights
or model weights in other callbacks.
Example:
```python
# Remember to set `use_ema=True` in the optimizer
optimizer = SGD(use_ema=True)
model.compile(optimizer=optimizer, loss=..., metrics=...)
# Metrics will be computed with EMA weights
model.fit(X_train, Y_train, callbacks=[SwapEMAWeights()])
# If you want to save model checkpoint with EMA weights, you can set
# `swap_on_epoch=True` and place ModelCheckpoint after SwapEMAWeights.
model.fit(
X_train,
Y_train,
callbacks=[SwapEMAWeights(swap_on_epoch=True), ModelCheckpoint(...)]
)
```
Args:
swap_on_epoch: whether to perform swapping at `on_epoch_begin()`
and `on_epoch_end()`. This is useful if you want to use
EMA weights for other callbacks such as `ModelCheckpoint`.
Defaults to `False`.
"""
def __init__(self, swap_on_epoch=False):
super().__init__()
self.swap_on_epoch = swap_on_epoch
self._ema_weights_in_model = False
def _tf_swap_variables(self, optimizer):
for var, average_var in zip(
self.model.trainable_variables,
optimizer._model_variables_moving_average,
):
if isinstance(var, backend.Variable):
var = var.value
if isinstance(average_var, backend.Variable):
average_var = average_var.value
# swap using addition to prevent variable creation
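            # (var, ema) -> (var + ema, ema) -> (var + ema, var) -> (ema, var)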
optimizer._distribution_strategy.extended.update(
var,
lambda a, b: a.assign_add(b),
args=(average_var,),
)
optimizer._distribution_strategy.extended.update(
var,
lambda a, b: b.assign(a - b),
args=(average_var,),
)
optimizer._distribution_strategy.extended.update(
var,
lambda a, b: a.assign(a - b),
args=(average_var,),
)
def _backend_swap_variables(self, optimizer):
for var, average_var in zip(
self.model.trainable_variables,
optimizer._model_variables_moving_average,
):
temporary_variable = ops.convert_to_numpy(var)
var.assign(average_var)
average_var.assign(temporary_variable)
def _tf_finalize_ema_values(self, optimizer):
for var, average_var in zip(
self.model.trainable_variables,
optimizer._model_variables_moving_average,
):
if isinstance(var, backend.Variable):
var = var.value
if isinstance(average_var, backend.Variable):
average_var = average_var.value
optimizer._distribution_strategy.extended.update(
average_var,
lambda a, b: a.assign(b),
args=(var,),
)
def _backend_finalize_ema_values(self, optimizer):
for var, average_var in zip(
self.model.trainable_variables,
optimizer._model_variables_moving_average,
):
average_var.assign(var)
def _swap_variables(self):
if hasattr(self.model.optimizer, "inner_optimizer"):
# LossScaleOptimizer
optimizer = self.model.optimizer.inner_optimizer
else:
optimizer = self.model.optimizer
if not hasattr(optimizer, "_model_variables_moving_average"):
raise ValueError(
"SwapEMAWeights must be used when "
"`use_ema=True` is set on the optimizer. "
f"Received: use_ema={optimizer.use_ema}"
)
if backend.backend() == "tensorflow":
self._tf_swap_variables(optimizer)
else:
self._backend_swap_variables(optimizer)
def _finalize_ema_values(self):
if hasattr(self.model.optimizer, "inner_optimizer"):
# LossScaleOptimizer
optimizer = self.model.optimizer.inner_optimizer
else:
optimizer = self.model.optimizer
if not hasattr(optimizer, "_model_variables_moving_average"):
raise ValueError(
"SwapEMAWeights must be used when "
"`use_ema=True` is set on the optimizer. "
f"Received: use_ema={optimizer.use_ema}"
)
if backend.backend() == "tensorflow":
self._tf_finalize_ema_values(optimizer)
else:
self._backend_finalize_ema_values(optimizer)
def on_epoch_begin(self, epoch, logs=None):
if self.swap_on_epoch and self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = False
def on_epoch_end(self, epoch, logs=None):
if self.swap_on_epoch and not self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = True
# We need to recover EMA weights from the previously swapped weights
        # in the last epoch. This is because, at the end of fitting,
# `finalize_variable_values` will be called to assign
# `_model_variables_moving_average` to `trainable_variables`.
if epoch == self.params["epochs"] - 1:
self._finalize_ema_values()
def on_test_begin(self, logs=None):
if not self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = True
def on_test_end(self, logs=None):
if self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = False
def on_predict_begin(self, logs=None):
if not self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = True
def on_predict_end(self, logs=None):
if not self._ema_weights_in_model:
self._swap_variables()
self._ema_weights_in_model = False
| keras/keras/callbacks/swap_ema_weights.py/0 | {
"file_path": "keras/keras/callbacks/swap_ema_weights.py",
"repo_id": "keras",
"token_count": 3121
} | 168 |
import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras import constraints
from keras import layers
from keras import models
from keras import saving
from keras import testing
class EinsumDenseTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{
"testcase_name": "_1d_end_weight",
"equation": "ab,b->a",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (),
"expected_kernel_shape": (32,),
"expected_bias_shape": None,
"expected_output_shape": (2,),
},
{
"testcase_name": "_2d_middle_weight",
"equation": "ab,bc->ac",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (64),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 64),
},
{
"testcase_name": "_3d_bert",
"equation": "abc,cde->abde",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (4,),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_2_bias",
"equation": "abc,cde->abde",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (1, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 1),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_1_3_bias",
"equation": "abc,cde->abde",
"bias_axes": "be",
"input_shape": (2, 7, 2),
"output_shape": (7, 3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (7, 1, 4),
"expected_output_shape": (2, 7, 3, 4),
},
{
"testcase_name": "_3d_bert_projection",
"equation": "BFNH,NHD->BFD",
"bias_axes": None,
"input_shape": (2, 1, 2, 3),
"output_shape": (1, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 4),
},
{
"testcase_name": "_2d_bert",
"equation": "abc,cd->abd",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (1, 4),
"expected_kernel_shape": (2, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 4),
},
{
"testcase_name": "_embedding_1d",
"equation": "i,d->id",
"bias_axes": None,
"input_shape": (2,),
"output_shape": (2,),
"expected_kernel_shape": (2,),
"expected_bias_shape": None,
"expected_output_shape": (2, 2),
},
{
"testcase_name": "_xlnet_lm",
"equation": "ibd,nd->ibn",
"bias_axes": None,
"input_shape": (2, 2, 1),
"output_shape": (2, 2),
"expected_kernel_shape": (2, 1),
"expected_bias_shape": None,
"expected_output_shape": (2, 2, 2),
},
{
"testcase_name": "_2d_precast",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (2, 32),
"output_shape": (64,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 64),
},
{
"testcase_name": "_2d_precast_elided_input_used_in_output",
"equation": "...bc,bc->...b",
"bias_axes": None,
"input_shape": (2, 32, 64),
"output_shape": (32,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 32),
},
{
"testcase_name": "_2d_precast_multiple_elided_dims",
"equation": "...b,bc->...c",
"bias_axes": None,
"input_shape": (2, 3, 32),
"output_shape": (64,),
"expected_kernel_shape": (32, 64),
"expected_bias_shape": None,
"expected_output_shape": (2, 3, 64),
},
{
"testcase_name": "_3d_precast",
"equation": "...c,cde->...de",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (4,),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_bias",
"equation": "...c,cde->...de",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 1),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_3d_precast_2_3_bias",
"equation": "...c,cde->...de",
"bias_axes": "de",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (2, 3, 4),
"expected_bias_shape": (3, 4),
"expected_output_shape": (2, 1, 3, 4),
},
{
"testcase_name": "_2d_postcast",
"equation": "bc...,cd->bd...",
"bias_axes": None,
"input_shape": (2, 1, 2, 3),
"output_shape": (4,),
"expected_kernel_shape": (1, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 4, 2, 3),
},
{
"testcase_name": "_3d_postcast",
"equation": "bc...,cde->bde...",
"bias_axes": None,
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": None,
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "d",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (3, 1, 1),
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "e",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (4, 1),
"expected_output_shape": (2, 3, 4, 2),
},
{
"testcase_name": "_3d_postcast_1_2_bias",
"equation": "bc...,cde->bde...",
"bias_axes": "de",
"input_shape": (2, 1, 2),
"output_shape": (3, 4),
"expected_kernel_shape": (1, 3, 4),
"expected_bias_shape": (3, 4, 1),
"expected_output_shape": (2, 3, 4, 2),
},
)
@pytest.mark.requires_trainable_backend
def test_einsum_dense_basics(
self,
equation,
bias_axes,
input_shape,
output_shape,
expected_kernel_shape,
expected_bias_shape,
expected_output_shape,
):
self.run_layer_test(
layers.EinsumDense,
init_kwargs={
"equation": equation,
"output_shape": output_shape,
"bias_axes": bias_axes,
},
input_shape=input_shape,
expected_output_shape=expected_output_shape,
expected_num_trainable_weights=(
2 if expected_bias_shape is not None else 1
),
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
layer = layers.EinsumDense(
equation, output_shape=output_shape, bias_axes=bias_axes
)
layer.build(input_shape)
self.assertEqual(layer.kernel.shape, expected_kernel_shape)
if expected_bias_shape is not None:
self.assertEqual(layer.bias.shape, expected_bias_shape)
def test_einsum_dense_constraints(self):
layer = layers.EinsumDense(
"abc,cde->abde", (1, 3, 4), kernel_constraint="non_neg"
)
layer.build((2, 1, 2))
self.assertIsInstance(layer.kernel.constraint, constraints.NonNeg)
layer = layers.EinsumDense(
"ab,b->a", (1, 3, 4), bias_axes="a", bias_constraint="non_neg"
)
layer.build((2, 1, 2))
self.assertIsInstance(layer.bias.constraint, constraints.NonNeg)
@pytest.mark.requires_trainable_backend
def test_enable_lora(self):
layer = layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 32),
bias_axes=None,
)
layer.build((None, 3))
layer.enable_lora(2)
self.assertLen(layer.trainable_weights, 2)
self.assertLen(layer.non_trainable_weights, 1)
# Try eager call
x = np.random.random((64, 3))
y = np.random.random((64, 8, 32))
_ = layer(x[:2])
init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
# Try calling fit()
model = models.Sequential(
[
layer,
]
)
model.compile(optimizer="sgd", loss="mse")
model.fit(x, y, epochs=2)
final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
diff_a = np.max(
np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
)
diff_b = np.max(
np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
)
self.assertGreater(diff_a, 0.0)
self.assertGreater(diff_b, 0.0)
# Try saving and reloading the model
temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertFalse(new_model.layers[0].lora_enabled)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try saving and reloading the model's weights only
temp_filepath = os.path.join(
self.get_temp_dir(), "lora_model.weights.h5"
)
model.save_weights(temp_filepath)
# Load the file into a fresh, non-lora model
new_model = models.Sequential(
[
layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 32),
bias_axes=None,
),
]
)
new_model.build((None, 3))
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Try loading a normal checkpoint into a lora model
new_model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@pytest.mark.requires_trainable_backend
def test_lora_rank_argument(self):
self.run_layer_test(
layers.EinsumDense,
init_kwargs={
"equation": "ab,bcd->acd",
"output_shape": (8, 32),
"bias_axes": None,
"lora_rank": 2,
},
input_shape=(2, 3),
expected_output_shape=(2, 8, 32),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
| keras/keras/layers/core/einsum_dense_test.py/0 | {
"file_path": "keras/keras/layers/core/einsum_dense_test.py",
"repo_id": "keras",
"token_count": 7323
} | 169 |
import numpy as np
import pytest
from keras import backend
from keras import initializers
from keras import layers
from keras import testing
class ConvLSTM1DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
channels_last = backend.config.image_data_format() == "channels_last"
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 3) if channels_last else (3, 2, 3, 4),
expected_output_shape=(3, 4, 5) if channels_last else (3, 5, 4),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 3) if channels_last else (3, 2, 3, 8),
call_kwargs={"training": True},
expected_output_shape=(3, 6, 5) if channels_last else (3, 5, 6),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM1D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 3) if channels_last else (3, 2, 3, 8),
expected_output_shape=(
(3, 2, 6, 5) if channels_last else (3, 2, 5, 6)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(120).reshape((2, 3, 4, 5)).astype("float32") / 10
expected_output = np.array(
[
[[0.40807986, 0.40807986], [0.46421072, 0.46421072]],
[[0.80933154, 0.80933154], [0.8233646, 0.8233646]],
]
)
if backend.config.image_data_format() == "channels_first":
sequence = sequence.transpose((0, 1, 3, 2))
expected_output = expected_output.transpose((0, 2, 1))
layer = layers.ConvLSTM1D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
expected_output,
output,
)
| keras/keras/layers/rnn/conv_lstm1d_test.py/0 | {
"file_path": "keras/keras/layers/rnn/conv_lstm1d_test.py",
"repo_id": "keras",
"token_count": 1491
} | 170 |
from keras.api_export import keras_export
from keras.losses.losses import categorical_hinge
from keras.losses.losses import hinge
from keras.losses.losses import squared_hinge
from keras.metrics import reduction_metrics
@keras_export("keras.metrics.Hinge")
class Hinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.Hinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.3
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.1
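    Usage with `compile()` API:
    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[keras.metrics.Hinge()])
    ```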
"""
def __init__(self, name="hinge", dtype=None):
super().__init__(fn=hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.SquaredHinge")
class SquaredHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the hinge metric between `y_true` and `y_pred`.
`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.SquaredHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
1.86
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.46
"""
def __init__(self, name="squared_hinge", dtype=None):
super().__init__(fn=squared_hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.CategoricalHinge")
class CategoricalHinge(reduction_metrics.MeanMetricWrapper):
"""Computes the categorical hinge metric between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = keras.metrics.CategoricalHinge()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result()
    1.4
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
1.2
"""
def __init__(self, name="categorical_hinge", dtype=None):
super().__init__(fn=categorical_hinge, name=name, dtype=dtype)
# Metric should be minimized during optimization.
self._direction = "down"
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
| keras/keras/metrics/hinge_metrics.py/0 | {
"file_path": "keras/keras/metrics/hinge_metrics.py",
"repo_id": "keras",
"token_count": 1405
} | 171 |
import copy
import inspect
import warnings
import tree
from keras import backend
from keras import ops
from keras.backend.common import global_state
from keras.layers.core.input_layer import Input
from keras.layers.core.input_layer import InputLayer
from keras.layers.input_spec import InputSpec
from keras.layers.layer import Layer
from keras.legacy.saving import saving_utils
from keras.legacy.saving import serialization as legacy_serialization
from keras.models.model import Model
from keras.ops.function import Function
from keras.ops.function import _build_map
from keras.ops.function import make_node_key
from keras.ops.node import Node
from keras.saving import serialization_lib
from keras.utils import tracking
from keras.utils.nest import pack_sequence_as
class Functional(Function, Model):
"""A `Functional` model is a `Model` defined as a directed graph of layers.
Three types of `Model` exist: subclassed `Model`, `Functional` model,
and `Sequential` (a special case of `Functional`).
A `Functional` model can be instantiated by passing two arguments to
`__init__()`. The first argument is the `keras.Input` objects
that represent the inputs to the model.
The second argument specifies the output tensors that represent
the outputs of this model. Both arguments can be a nested structure
of tensors.
Example:
    ```python
inputs = {'x1': keras.Input(shape=(10,), name='x1'),
'x2': keras.Input(shape=(1,), name='x2')}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2']])
model = keras.Model(inputs, outputs)
```
A `Functional` model constructed using the Functional API can also
include raw Keras 3 ops.
Example:
```python
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = ops.nn.relu(x)
model = keras.Model(inputs, outputs)
```
A new `Functional` model can also be created by using the
intermediate tensors. This enables you to quickly extract sub-components
of the model.
Example:
```python
inputs = keras.Input(shape=(None, None, 3))
processed = keras.layers.RandomCrop(width=32, height=32)(inputs)
conv = keras.layers.Conv2D(filters=2, kernel_size=3)(processed)
pooling = keras.layers.GlobalAveragePooling2D()(conv)
feature = keras.layers.Dense(10)(pooling)
full_model = keras.Model(inputs, feature)
backbone = keras.Model(processed, conv)
activations = keras.Model(conv, feature)
```
Note that the `backbone` and `activations` models are not
created with `keras.Input` objects, but with the tensors
that are originated from `keras.Input` objects.
Under the hood, the layers and weights will
be shared across these models, so that user can train the `full_model`, and
use `backbone` or `activations` to do feature extraction.
The inputs and outputs of the model can be nested structures of tensors as
well, and the created models are standard `Functional` model that support
all the existing API.
Args:
inputs: List of input tensors (must be created via `keras.Input()`
or originated from `keras.Input()`).
outputs: List of output tensors.
name: String, optional. Name of the model.
trainable: Boolean, optional. If the model's variables should be
trainable.
"""
@tracking.no_automatic_dependency_tracking
def __init__(self, inputs, outputs, name=None, **kwargs):
if isinstance(inputs, dict):
for k, v in inputs.items():
if not isinstance(v, backend.KerasTensor):
raise ValueError(
"When providing `inputs` as a dict, all values in the "
f"dict must be KerasTensors. Received: inputs={inputs} "
f"including invalid value {v} of type {type(v)}"
)
if k != v.name:
warnings.warn(
"When providing `inputs` as a dict, all keys in the "
"dict must match the names of the corresponding "
f"tensors. Received key '{k}' mapping to value {v} "
f"which has name '{v.name}'. Change the tensor name to "
f"'{k}' (via `Input(..., name='{k}')`)"
)
elif isinstance(inputs, (list, tuple)):
for x in inputs:
if not isinstance(x, backend.KerasTensor):
raise ValueError(
"When providing `inputs` as a list/tuple, all values "
f"in the list/tuple must be KerasTensors. Received: "
f"inputs={inputs} including invalid value {x} of type "
f"{type(x)}"
)
elif not isinstance(inputs, backend.KerasTensor):
raise ValueError(
f"Unrecognized type for `inputs`: {inputs} "
f"(of type {type(inputs)})"
)
if isinstance(outputs, dict):
for k, v in outputs.items():
if not isinstance(v, backend.KerasTensor):
raise ValueError(
"When providing `outputs` as a dict, all values in the "
f"dict must be KerasTensors. Received: "
f"outputs={outputs} including invalid value {v} of "
f"type {type(v)}"
)
elif isinstance(outputs, (list, tuple)):
for x in outputs:
if not isinstance(x, backend.KerasTensor):
raise ValueError(
"When providing `outputs` as a list/tuple, all values "
f"in the list/tuple must be KerasTensors. Received: "
f"outputs={outputs} including invalid value {x} of "
f"type {type(x)}"
)
elif not isinstance(outputs, backend.KerasTensor):
raise ValueError(
f"Unrecognized type for `outputs`: {outputs} "
f"(of type {type(outputs)})"
)
trainable = kwargs.pop("trainable", None)
if not all([is_input_keras_tensor(t) for t in tree.flatten(inputs)]):
inputs, outputs = clone_graph_nodes(inputs, outputs)
Function.__init__(self, inputs, outputs, name=name, **kwargs)
if trainable is not None:
self.trainable = trainable
self._layers = self.layers
self.build(None)
# We will convert directly (to the correct dtype per input).
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
output_layers = [x._keras_history[0] for x in self.outputs]
self.output_names = [x.name for x in output_layers]
def _lock_state(self):
# Unlike other layers, we allow Functional state to be mutable after
# build. E.g. to attach a layer to a model that is not part of the
# functional DAG.
pass
@property
def layers(self):
layers = []
for operation in self._operations:
if isinstance(operation, Layer):
layers.append(operation)
return layers
def call(self, inputs, training=None, mask=None):
        # Add support for training and masking.
inputs = self._standardize_inputs(inputs)
if mask is None:
masks = [None] * len(inputs)
else:
masks = self._flatten_to_reference_inputs(mask)
for x, mask in zip(inputs, masks):
if mask is not None:
x._keras_mask = mask
outputs = self._run_through_graph(
inputs, operation_fn=lambda op: operation_fn(op, training=training)
)
return unpack_singleton(outputs)
def compute_output_spec(self, inputs, training=None, mask=None):
# From Function
return super().compute_output_spec(inputs)
def build(self, input_shape):
self.built = True
@property
def input_shape(self):
input_shapes = tree.map_structure(lambda x: x.shape, self.inputs)
if isinstance(input_shapes, list) and len(input_shapes) == 1:
return input_shapes[0]
return input_shapes
@property
def output_shape(self):
output_shapes = tree.map_structure(lambda x: x.shape, self.outputs)
if isinstance(output_shapes, list) and len(output_shapes) == 1:
return output_shapes[0]
return output_shapes
def _assert_input_compatibility(self, *args):
return super(Model, self)._assert_input_compatibility(*args)
def _flatten_to_reference_inputs(self, inputs, allow_extra_keys=True):
if isinstance(inputs, dict):
ref_inputs = self._inputs_struct
if not tree.is_nested(ref_inputs):
ref_inputs = [self._inputs_struct]
if isinstance(ref_inputs, dict):
# In the case that the graph is constructed with dict input
                # tensors, we will use the original dict keys to match the
# keys in the input data. Note that the model.inputs is using
# tree.flatten to process the input tensors, which means the
# dict input tensors are ordered by their keys.
ref_input_names = sorted(ref_inputs.keys())
else:
ref_input_names = [
inp._keras_history.operation.name for inp in ref_inputs
]
            # Raise a warning if there is more input data than there are
            # input tensors.
if not allow_extra_keys and len(inputs) > len(ref_input_names):
warnings.warn(
"Input dict contained keys {} which did not match any "
"model input. They will be ignored by the model.".format(
[n for n in inputs.keys() if n not in ref_input_names]
),
stacklevel=2,
)
# Flatten in the order `Input`s were passed during Model
# construction.
return [inputs[n] for n in ref_input_names]
# Otherwise both ref inputs and inputs will already be in same order.
return tree.flatten(inputs)
def _convert_inputs_to_tensors(self, flat_inputs):
converted = []
for x, input in zip(flat_inputs, self._inputs):
converted.append(
ops.convert_to_tensor(x, dtype=input.dtype, sparse=input.sparse)
)
return converted
def _adjust_input_rank(self, flat_inputs):
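        """Squeeze or expand a trailing size-1 axis on each input so it
        matches the rank of the corresponding reference input."""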
flat_ref_shapes = [x.shape for x in self._inputs]
adjusted = []
for x, ref_shape in zip(flat_inputs, flat_ref_shapes):
x_rank = len(x.shape)
ref_rank = len(ref_shape)
if x_rank == ref_rank:
adjusted.append(x)
continue
if x_rank == ref_rank + 1:
if x.shape[-1] == 1:
adjusted.append(ops.squeeze(x, axis=-1))
continue
if x_rank == ref_rank - 1:
if ref_shape[-1] == 1:
adjusted.append(ops.expand_dims(x, axis=-1))
continue
raise ValueError(
f"Invalid input shape for input {x}. Expected shape "
f"{ref_shape}, but input has incompatible shape {x.shape}"
)
# Add back metadata.
for i in range(len(flat_inputs)):
if hasattr(flat_inputs[i], "_keras_history"):
adjusted[i]._keras_history = flat_inputs[i]._keras_history
if hasattr(flat_inputs[i], "_keras_mask"):
adjusted[i]._keras_mask = flat_inputs[i]._keras_mask
return adjusted
def _standardize_inputs(self, inputs):
flat_inputs = self._flatten_to_reference_inputs(inputs)
flat_inputs = self._convert_inputs_to_tensors(flat_inputs)
return self._adjust_input_rank(flat_inputs)
@property
def input(self):
# For backwards compatibility,
        # override `input` to retrieve the user-provided
# constructor inputs
return self._inputs_struct
@property
def output(self):
return self._outputs_struct
def add_loss(self, loss):
# Symbolic only. TODO
raise NotImplementedError
@property
def input_spec(self):
if hasattr(self, "_manual_input_spec"):
return self._manual_input_spec
def shape_with_no_batch_size(x):
x = list(x)
if x:
x[0] = None
return tuple(x)
if isinstance(self._inputs_struct, dict):
# Case where `_nested_inputs` is a plain dict of Inputs.
names = sorted(self._inputs_struct.keys())
return [
InputSpec(
shape=shape_with_no_batch_size(
self._inputs_struct[name].shape
),
allow_last_axis_squeeze=True,
name=name,
)
for name in names
]
else:
# Single input, or list/tuple of inputs.
# The data may be passed as a dict keyed by input name.
return [
InputSpec(
shape=shape_with_no_batch_size(x.shape),
allow_last_axis_squeeze=True,
name=x._keras_history[0].name,
)
for x in self._inputs
]
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
def get_config(self):
if not functional_like_constructor(self.__class__):
# Subclassed networks are not serializable
# (unless serialization is implemented by
# the author of the subclassed network).
return Model.get_config(self)
config = {
"name": self.name,
"trainable": self.trainable,
}
# Build a map from a layer unique name (make_node_key)
# to the index of the nodes that are saved in the config.
# Only nodes in network_nodes are saved.
node_reindexing_map = {}
for operation in self.operations:
if issubclass(operation.__class__, Functional):
# Functional models start with a pre-existing node
# linking their input to output.
kept_nodes = 1
else:
kept_nodes = 0
for original_node_index, node in enumerate(
operation._inbound_nodes
):
node_key = make_node_key(operation, original_node_index)
if node_key in self._nodes:
# i.e. we mark it to be saved
node_reindexing_map[node_key] = kept_nodes
kept_nodes += 1
# serialize and save the layers in layer_configs
layer_configs = []
for operation in self.operations: # From the earliest layers on.
filtered_inbound_nodes = []
for original_node_index, node in enumerate(
operation._inbound_nodes
):
node_key = make_node_key(operation, original_node_index)
if node_key in self._nodes:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
node_data = serialize_node(node, node_reindexing_map)
if node_data is not None:
filtered_inbound_nodes.append(node_data)
serialize_obj_fn = serialization_lib.serialize_keras_object
if global_state.get_global_attribute("use_legacy_config", False):
# Legacy format serialization used for H5 and SavedModel
serialize_obj_fn = legacy_serialization.serialize_keras_object
layer_config = serialize_obj_fn(operation)
layer_config["name"] = operation.name
layer_config["inbound_nodes"] = filtered_inbound_nodes
layer_configs.append(layer_config)
config["layers"] = layer_configs
# Gather info about inputs and outputs.
def get_tensor_config(tensor):
operation = tensor._keras_history[0]
node_index = tensor._keras_history[1]
tensor_index = tensor._keras_history[2]
node_key = make_node_key(operation, node_index)
assert node_key in self._nodes
new_node_index = node_reindexing_map[node_key]
return [operation.name, new_node_index, tensor_index]
def map_tensors(tensors):
if isinstance(tensors, dict):
return {k: get_tensor_config(v) for k, v in tensors.items()}
if isinstance(tensors, (list, tuple)):
return [get_tensor_config(v) for v in tensors]
else:
return [get_tensor_config(tensors)]
config["input_layers"] = map_tensors(self._inputs_struct)
config["output_layers"] = map_tensors(self._outputs_struct)
return copy.deepcopy(config)
def functional_from_config(cls, config, custom_objects=None):
"""Instantiates a Functional model from its config (from `get_config()`).
Args:
cls: Class of the model, e.g. a custom subclass of `Model`.
config: Output of `get_config()` for the original model instance.
custom_objects: Optional dict of custom objects.
Returns:
An instance of `cls`.
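    Example:
    A minimal sketch of the round trip (assumes `model` is an existing
    `Functional` instance):
    ```python
    config = model.get_config()
    restored = functional_from_config(Functional, config)
    ```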
"""
# Layer instances created during
# the graph reconstruction process
created_layers = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
"""Add node to layer list
Arg:
layer: layer object
node_data: Node data specifying layer call
"""
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def process_node(layer, node_data):
"""Reconstruct node by linking to inbound layers
Args:
layer: Layer to process
            node_data: Node data specifying the layer call
"""
args, kwargs = deserialize_node(node_data, created_layers)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
layer(*args, **kwargs)
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Args:
layer_data: layer config dict.
"""
layer_name = layer_data["name"]
# Instantiate layer.
if "module" not in layer_data:
# Legacy format deserialization (no "module" key)
# used for H5 and SavedModel formats
layer = saving_utils.model_from_config(
layer_data, custom_objects=custom_objects
)
else:
layer = serialization_lib.deserialize_keras_object(
layer_data, custom_objects=custom_objects
)
created_layers[layer_name] = layer
# Gather layer inputs.
inbound_nodes_data = layer_data["inbound_nodes"]
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config["layers"]:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config["layers"]:
layer = created_layers[layer_data["name"]]
# Process all nodes in layer, if not yet processed
if layer in unprocessed_nodes:
node_data_list = unprocessed_nodes[layer]
# Process nodes in order
node_index = 0
while node_index < len(node_data_list):
node_data = node_data_list[node_index]
try:
process_node(layer, node_data)
# If the node does not have all inbound layers
# available, stop processing and continue later
except IndexError:
break
node_index += 1
# If not all nodes processed then store unprocessed nodes
if node_index < len(node_data_list):
unprocessed_nodes[layer] = node_data_list[node_index:]
# If all nodes processed remove the layer
else:
del unprocessed_nodes[layer]
    # Create lists of input and output tensors and return the new class.
name = config.get("name")
trainable = config.get("trainable")
def get_tensor(layer_name, node_index, tensor_index):
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
return layer_output_tensors[tensor_index]
def map_tensors(tensors):
if isinstance(tensors, dict):
return {k: get_tensor(*v) for k, v in tensors.items()}
else:
return [get_tensor(*v) for v in tensors]
input_tensors = map_tensors(config["input_layers"])
output_tensors = map_tensors(config["output_layers"])
return cls(
inputs=input_tensors,
outputs=output_tensors,
name=name,
trainable=trainable,
)
def operation_fn(operation, training):
def call(*args, **kwargs):
if (
hasattr(operation, "_call_has_training_arg")
and operation._call_has_training_arg
and training is not None
):
kwargs["training"] = training
return operation(*args, **kwargs)
return call
def functional_like_constructor(cls):
init_args = inspect.getfullargspec(cls.__init__).args[1:]
functional_init_args = inspect.getfullargspec(Functional.__init__).args[1:]
if init_args == functional_init_args:
return True
return False
def unpack_singleton(x):
if isinstance(x, (list, tuple)) and len(x) == 1:
return x[0]
return x
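# For example, unpack_singleton([x]) returns x, while unpack_singleton([x, y])
# returns [x, y] unchanged.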
def serialize_node(node, node_reindexing_map):
if not node.input_tensors:
# Does not need to be serialized.
return
args = node.arguments.args
kwargs = node.arguments.kwargs
return {
"args": serialization_lib.serialize_keras_object(args),
"kwargs": serialization_lib.serialize_keras_object(kwargs),
}
def deserialize_node(node_data, created_layers):
"""Return (args, kwargs) for calling the node layer."""
if not node_data:
return [], {}
if isinstance(node_data, list):
# Legacy case.
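        # Each legacy entry is expected to be a list of the form
        # [inbound_layer_name, inbound_node_index, inbound_tensor_index],
        # optionally followed by a kwargs dict as a fourth element.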
input_tensors = []
for input_data in node_data:
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
else:
raise ValueError(
"Cannot deserialize the model (invalid config data?)"
)
inbound_layer = created_layers[inbound_layer_name]
# Raise an error if the corresponding layer node
# has not yet been created
if len(inbound_layer._inbound_nodes) <= inbound_node_index:
raise IndexError(
"Layer node index out of bounds.\n"
f"inbound_layer = {inbound_layer}\n"
"inbound_layer._inbound_nodes = "
f"{inbound_layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
inbound_node.output_tensors[inbound_tensor_index]
)
return [unpack_singleton(input_tensors)], kwargs
args = serialization_lib.deserialize_keras_object(node_data["args"])
kwargs = serialization_lib.deserialize_keras_object(node_data["kwargs"])
def convert_revived_tensor(x):
if isinstance(x, backend.KerasTensor):
history = x._pre_serialization_keras_history
if history is None:
return x
layer = created_layers.get(history[0], None)
if layer is None:
raise ValueError(f"Unknown layer: {history[0]}")
inbound_node_index = history[1]
inbound_tensor_index = history[2]
if len(layer._inbound_nodes) <= inbound_node_index:
raise ValueError(
"Layer node index out of bounds.\n"
f"inbound_layer = {layer}\n"
f"inbound_layer._inbound_nodes = {layer._inbound_nodes}\n"
f"inbound_node_index = {inbound_node_index}"
)
inbound_node = layer._inbound_nodes[inbound_node_index]
return inbound_node.output_tensors[inbound_tensor_index]
return x
args = tree.map_structure(convert_revived_tensor, args)
kwargs = tree.map_structure(convert_revived_tensor, kwargs)
return args, kwargs
def is_input_keras_tensor(x):
(
operation,
node_index,
_,
) = x._keras_history
node = operation._inbound_nodes[node_index]
return node.is_input
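# Illustrative sketch (assumed names): for `x = Input((4,))`,
# `is_input_keras_tensor(x)` is True, while for `y = layers.Dense(2)(x)`,
# `is_input_keras_tensor(y)` is False.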
def clone_single_keras_tensor(x):
return backend.KerasTensor(
shape=x.shape, dtype=x.dtype, sparse=x.sparse, name=x.name + "_clone"
)
def clone_keras_tensors(tensors, kt_id_mapping):
def swap(x):
if not isinstance(x, backend.KerasTensor):
return x
if id(x) in kt_id_mapping:
return kt_id_mapping[id(x)]
new_x = clone_single_keras_tensor(x)
kt_id_mapping[id(x)] = new_x
return new_x
return tree.map_structure(swap, tensors)
def find_nodes_by_inputs_and_outputs(inputs, outputs):
nodes, _ = _build_map(inputs, outputs)
return nodes
def clone_graph_nodes(inputs, outputs):
"""Clone the `Node` between the inputs and output tensors.
This function is used to create a new functional model from any intermediate
Keras tensors. The clone of the nodes mimic the behavior of reconstructing
the functional graph network by re-executing all the `__call__()` methods.
The cloned nodes will be appended to the layers.
Note that a new `keras.Input` will be created for any items in the
`inputs`
Args:
inputs: A nested structure of `KerasTensor` instances.
outputs: A nested structure of `KerasTensor` instances.
Returns:
A pair of inputs and outputs, with cloned `KerasTensor` instances.
They can be used to create a new functional model.
"""
nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)
cloned_inputs = []
cloned_outputs = []
    # We not only need to create copies of Nodes (to mimic the calls), but we
    # also need to clone the Keras tensors to avoid overriding the
    # _keras_history attached to them. The following dict is used to track any
    # Keras tensor we have cloned: the key is the id() of the original Keras
    # tensor, and the value is the cloned Keras tensor instance.
kt_id_mapping = {}
op_id_mapping = {}
for kt_input in tree.flatten(inputs):
if is_input_keras_tensor(kt_input):
# For any existing Keras tensor from keras.Input, leave them as is.
cloned_inputs.append(kt_input)
kt_id_mapping[id(kt_input)] = kt_input
else:
# We need to create a new Keras tensor for any intermediate tensor
cloned_input = Input(
batch_shape=kt_input.shape,
dtype=kt_input.dtype,
sparse=kt_input.sparse,
name=kt_input.name + "CLONE",
)
cloned_inputs.append(cloned_input)
kt_id_mapping[id(kt_input)] = cloned_input
op_id_mapping[id(kt_input._keras_history[0])] = (
cloned_input._keras_history[0]
)
cloned_inputs = pack_sequence_as(inputs, cloned_inputs)
for kt_output in tree.flatten(outputs):
cpy = clone_single_keras_tensor(kt_output)
# We reuse the _keras_history here, which contains the old information.
cpy._keras_history = kt_output._keras_history
cloned_outputs.append(cpy)
kt_id_mapping[id(kt_output)] = cpy
cloned_outputs = pack_sequence_as(outputs, cloned_outputs)
for node in nodes_to_clone:
if id(node.operation) in op_id_mapping:
operation = op_id_mapping[id(node.operation)]
else:
operation = node.operation
# Clone any Keras tensor to avoid override of _keras_history
# Or reuse an existing Keras tensor if it has already been cloned.
output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping)
if not isinstance(operation, InputLayer):
call_args_copy = clone_keras_tensors(
node.arguments.args, kt_id_mapping
)
call_kwargs_copy = clone_keras_tensors(
node.arguments.kwargs, kt_id_mapping
)
else:
call_args_copy = ()
call_kwargs_copy = {}
# Creating new nodes based on the existing node information. Node wires
# itself to inbound and outbound layers. The Node constructor actually
# updates this layer's self._inbound_nodes, sets _keras_history on the
# outputs, and adds itself to the `_outbound_nodes` of the layers that
# produced the inputs to this layer call.
Node(
operation,
call_args=call_args_copy,
call_kwargs=call_kwargs_copy,
outputs=output_copy,
)
return cloned_inputs, cloned_outputs
| keras/keras/models/functional.py/0 | {
"file_path": "keras/keras/models/functional.py",
"repo_id": "keras",
"token_count": 14354
} | 172 |
import numpy as np
from absl.testing import parameterized
from keras import backend
from keras import ops
from keras import testing
from keras.backend.common.keras_tensor import KerasTensor
from keras.ops import linalg
from keras.testing.test_utils import named_product
class LinalgOpsDynamicShapeTest(testing.TestCase):
def test_cholesky(self):
x = KerasTensor([None, 20, 20])
out = linalg.cholesky(x)
self.assertEqual(out.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.cholesky(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.cholesky(x)
def test_det(self):
x = KerasTensor([None, 20, 20])
out = linalg.det(x)
self.assertEqual(out.shape, (None,))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.det(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.det(x)
def test_eig(self):
x = KerasTensor([None, 20, 20])
w, v = linalg.eig(x)
self.assertEqual(w.shape, (None, 20))
self.assertEqual(v.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.eig(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.eig(x)
def test_inv(self):
x = KerasTensor([None, 20, 20])
out = linalg.inv(x)
self.assertEqual(out.shape, (None, 20, 20))
x = KerasTensor([None, None, 20])
with self.assertRaises(ValueError):
linalg.inv(x)
x = KerasTensor([None, 20, 15])
with self.assertRaises(ValueError):
linalg.inv(x)
def test_lu_factor(self):
x = KerasTensor([None, 4, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (None, 4, 3))
self.assertEqual(p.shape, (None, 3))
x = KerasTensor([None, 2, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (None, 2, 3))
self.assertEqual(p.shape, (None, 2))
def test_norm(self):
x = KerasTensor((None, 3))
self.assertEqual(linalg.norm(x).shape, ())
x = KerasTensor((None, 3, 3))
self.assertEqual(linalg.norm(x, axis=1).shape, (None, 3))
self.assertEqual(
linalg.norm(x, axis=1, keepdims=True).shape, (None, 1, 3)
)
def test_qr(self):
x = KerasTensor((None, 4, 3), dtype="float32")
q, r = linalg.qr(x, mode="reduced")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="reduced")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
q, r = linalg.qr(x, mode="complete")
qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="complete")
qref_shape = (None,) + qref.shape[1:]
rref_shape = (None,) + rref.shape[1:]
self.assertEqual(q.shape, qref_shape)
self.assertEqual(r.shape, rref_shape)
def test_solve(self):
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, None, 20])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
a = KerasTensor([None, 20, 15])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, None, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
def test_solve_triangular(self):
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20, 5])
out = linalg.solve_triangular(a, b, lower=True)
self.assertEqual(out.shape, (None, 20, 5))
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, 20])
out = linalg.solve_triangular(a, b, lower=True)
self.assertEqual(out.shape, (None, 20))
a = KerasTensor([None, 20, 15])
b = KerasTensor([None, 20, 5])
with self.assertRaises(ValueError):
linalg.solve_triangular(a, b)
a = KerasTensor([None, 20, 20])
b = KerasTensor([None, None, 5])
with self.assertRaises(ValueError):
linalg.solve_triangular(a, b)
def test_svd(self):
x = KerasTensor((None, 3, 2))
u, s, v = linalg.svd(x)
self.assertEqual(u.shape, (None, 3, 3))
self.assertEqual(s.shape, (None, 2))
self.assertEqual(v.shape, (None, 2, 2))
u, s, v = linalg.svd(x, full_matrices=False)
self.assertEqual(u.shape, (None, 3, 2))
self.assertEqual(s.shape, (None, 2))
self.assertEqual(v.shape, (None, 2, 2))
s = linalg.svd(x, compute_uv=False)
self.assertEqual(s.shape, (None, 2))
class LinalgOpsStaticShapeTest(testing.TestCase):
def test_cholesky(self):
x = KerasTensor([4, 3, 3])
out = linalg.cholesky(x)
self.assertEqual(out.shape, (4, 3, 3))
x = KerasTensor([10, 20, 15])
with self.assertRaises(ValueError):
linalg.cholesky(x)
def test_det(self):
x = KerasTensor([4, 3, 3])
out = linalg.det(x)
self.assertEqual(out.shape, (4,))
x = KerasTensor([10, 20, 15])
with self.assertRaises(ValueError):
linalg.det(x)
def test_eig(self):
x = KerasTensor([4, 3, 3])
w, v = linalg.eig(x)
self.assertEqual(w.shape, (4, 3))
self.assertEqual(v.shape, (4, 3, 3))
x = KerasTensor([10, 20, 15])
with self.assertRaises(ValueError):
linalg.eig(x)
def test_inv(self):
x = KerasTensor([4, 3, 3])
out = linalg.inv(x)
self.assertEqual(out.shape, (4, 3, 3))
x = KerasTensor([10, 20, 15])
with self.assertRaises(ValueError):
linalg.inv(x)
def test_lu_factor(self):
x = KerasTensor([10, 4, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (10, 4, 3))
self.assertEqual(p.shape, (10, 3))
x = KerasTensor([10, 2, 3])
lu, p = linalg.lu_factor(x)
self.assertEqual(lu.shape, (10, 2, 3))
self.assertEqual(p.shape, (10, 2))
def test_norm(self):
x = KerasTensor((10, 3))
self.assertEqual(linalg.norm(x).shape, ())
x = KerasTensor((10, 3, 3))
self.assertEqual(linalg.norm(x, axis=1).shape, (10, 3))
self.assertEqual(
linalg.norm(x, axis=1, keepdims=True).shape, (10, 1, 3)
)
def test_qr(self):
x = KerasTensor((4, 3), dtype="float32")
q, r = linalg.qr(x, mode="reduced")
qref, rref = np.linalg.qr(np.ones((4, 3)), mode="reduced")
self.assertEqual(q.shape, qref.shape)
self.assertEqual(r.shape, rref.shape)
q, r = linalg.qr(x, mode="complete")
qref, rref = np.linalg.qr(np.ones((4, 3)), mode="complete")
self.assertEqual(q.shape, qref.shape)
self.assertEqual(r.shape, rref.shape)
with self.assertRaises(ValueError):
linalg.qr(x, mode="invalid")
def test_solve(self):
a = KerasTensor([4, 3, 3])
b = KerasTensor([4, 3, 5])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (4, 3, 5))
a = KerasTensor([4, 3, 3])
b = KerasTensor([4, 3])
out = linalg.solve(a, b)
self.assertEqual(out.shape, (4, 3))
a = KerasTensor([10, 20, 15])
b = KerasTensor([10, 20, 5])
with self.assertRaises(ValueError):
linalg.solve(a, b)
a = KerasTensor([20, 20])
b = KerasTensor([])
with self.assertRaises(ValueError):
linalg.solve(a, b)
def test_solve_triangular(self):
a = KerasTensor([4, 3, 3])
b = KerasTensor([4, 3, 5])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (4, 3, 5))
a = KerasTensor([4, 3, 3])
b = KerasTensor([4, 3])
out = linalg.solve_triangular(a, b)
self.assertEqual(out.shape, (4, 3))
a = KerasTensor([10, 20, 15])
b = KerasTensor([10, 20, 5])
with self.assertRaises(ValueError):
linalg.solve_triangular(a, b)
def test_svd(self):
x = KerasTensor((10, 3, 2))
u, s, v = linalg.svd(x)
self.assertEqual(u.shape, (10, 3, 3))
self.assertEqual(s.shape, (10, 2))
self.assertEqual(v.shape, (10, 2, 2))
u, s, v = linalg.svd(x, full_matrices=False)
self.assertEqual(u.shape, (10, 3, 2))
self.assertEqual(s.shape, (10, 2))
self.assertEqual(v.shape, (10, 2, 2))
s = linalg.svd(x, compute_uv=False)
self.assertEqual(s.shape, (10, 2))
class LinalgOpsCorrectnessTest(testing.TestCase, parameterized.TestCase):
def test_cholesky(self):
x = np.random.rand(4, 3, 3).astype("float32")
with self.assertRaises(ValueError):
linalg.cholesky(x)
x_psd = x @ x.transpose((0, 2, 1)) + 1e-5 * np.eye(3)
out = linalg.cholesky(x_psd)
self.assertAllClose(out, np.linalg.cholesky(x_psd), atol=1e-4)
def test_det(self):
x = np.random.rand(4, 3, 3)
out = linalg.det(x)
self.assertAllClose(out, np.linalg.det(x), atol=1e-5)
with self.assertRaises(ValueError):
x = np.random.rand(4, 3, 4)
linalg.det(x)
def test_eig(self):
x = np.random.rand(2, 3, 3)
x = x @ x.transpose((0, 2, 1))
if backend.backend() == "jax":
import jax
if jax.default_backend() == "gpu":
# eig not implemented for jax on gpu backend
with self.assertRaises(NotImplementedError):
linalg.eig(x)
return
w, v = map(ops.convert_to_numpy, linalg.eig(x))
x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1))
self.assertAllClose(x_reconstructed, x, atol=1e-4)
def test_inv(self):
x = np.random.rand(4, 3, 3)
x_inv = ops.convert_to_numpy(linalg.inv(x))
x_reconstructed = x @ x_inv
# high tolerance due to numerical instability
self.assertAllClose(
x_reconstructed, np.repeat(np.eye(3)[None], 4, 0), atol=1e-3
)
def test_lu_factor(self):
def _pivot_matrix(pivots, n):
p_matrix = np.eye(n)
for i, p in enumerate(pivots):
identity = np.eye(n, n)
q = identity[i, :].copy()
identity[i, :] = identity[p, :]
identity[p, :] = q
p_matrix = np.dot(p_matrix, identity)
return p_matrix
def _reconstruct(lu, pivots, m, n):
lower = np.tril(lu[:, : min(m, n)], -1) + np.eye(m, min(m, n))
upper = np.triu(lu[: min(m, n)])
# pivots are defined differently in tensorflow
# compared to the other backends
if backend.backend() == "tensorflow":
p_matrix = np.eye(m)[pivots]
else:
p_matrix = _pivot_matrix(pivots, m)
out = p_matrix @ lower @ upper
return out
m, n = 4, 4
x = np.random.rand(m, n)
lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
x_reconstructed = _reconstruct(lu, pivots, m, n)
self.assertAllClose(x_reconstructed, x, atol=1e-5)
m, n = 4, 3
x = np.random.rand(m, n)
if backend.backend() == "tensorflow":
with self.assertRaises(ValueError):
linalg.lu_factor(x)
else:
lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
x_reconstructed = _reconstruct(lu, pivots, m, n)
self.assertAllClose(x_reconstructed, x, atol=1e-5)
# batched case
m, n = 3, 4
x = np.random.rand(2, m, n)
if backend.backend() == "tensorflow":
with self.assertRaises(ValueError):
linalg.lu_factor(x)
else:
lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
for i in range(2):
self.assertAllClose(
_reconstruct(lu[i], pivots[i], m, n), x[i], atol=1e-5
)
@parameterized.named_parameters(
named_product(
ord=[None, "fro", "nuc", -np.inf, -2, -1, 0, 1, 2, np.inf, 3],
axis=[None, 1, -1],
keepdims=[False, True],
)
)
def test_norm_vectors(self, ord, axis, keepdims):
if axis is None:
x = np.random.random((5,))
else:
x = np.random.random((5, 6))
if ord in ("fro", "nuc"):
error = RuntimeError if backend.backend() == "torch" else ValueError
with self.assertRaises(error):
linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
return
output = linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
expected_result = np.linalg.norm(
x, ord=ord, axis=axis, keepdims=keepdims
)
self.assertAllClose(output, expected_result)
def test_qr(self):
x = np.random.random((4, 5))
q, r = linalg.qr(x, mode="reduced")
qref, rref = np.linalg.qr(x, mode="reduced")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
q, r = linalg.qr(x, mode="complete")
qref, rref = np.linalg.qr(x, mode="complete")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
def test_solve(self):
x1 = np.array([[1, 2], [4, 5]], dtype="float32")
x2 = np.array([[2, 4], [8, 10]], dtype="float32")
output = linalg.solve(x1, x2)
expected_result = np.array([[2, 0], [0, 2]], dtype="float32")
self.assertAllClose(output, expected_result)
def test_solve_triangular(self):
# 2d-case
x1 = np.array([[1, 2], [0, 5]], dtype="float32")
x2 = np.array([2, 10], dtype="float32")
output = linalg.solve_triangular(x1, x2, lower=True)
expected_result = np.array([2, 2], dtype="float32")
self.assertAllClose(output, expected_result)
output = linalg.solve_triangular(x1, x2, lower=False)
expected_result = np.array([-2, 2], dtype="float32")
self.assertAllClose(output, expected_result)
# batched case
x1 = np.array([[[1, 2], [0, 5]], [[1, 2], [0, 5]]], dtype="float32")
x2 = np.array([[2, 10], [2, 10]], dtype="float32")
output = linalg.solve_triangular(x1, x2, lower=True)
expected_result = np.array([[2, 2], [2, 2]], dtype="float32")
self.assertAllClose(output, expected_result)
def test_svd(self):
x = np.random.rand(4, 30, 20)
u, s, vh = linalg.svd(x)
x_reconstructed = (u[..., :, : s.shape[-1]] * s[..., None, :]) @ vh[
..., : s.shape[-1], :
]
self.assertAllClose(x_reconstructed, x, atol=1e-4)
class QrOpTest(testing.TestCase):
def test_qr_init_mode_reduced(self):
qr_op = linalg.Qr(mode="reduced")
self.assertIsNotNone(qr_op)
def test_qr_init_mode_complete(self):
qr_op = linalg.Qr(mode="complete")
self.assertIsNotNone(qr_op)
def test_qr_init_invalid_mode(self):
invalid_mode = "invalid_mode"
expected_error = (
r"`mode` argument value not supported. "
r"Expected one of \{'reduced', 'complete'\}. "
f"Received: mode={invalid_mode}"
)
with self.assertRaisesRegex(ValueError, expected_error):
linalg.Qr(mode=invalid_mode)
def test_compute_output_spec_low_rank(self):
qr_op = linalg.Qr(mode="reduced")
low_rank_input = np.random.rand(3)
with self.assertRaisesRegex(
ValueError, r"Input should have rank >= 2. Received: .*"
):
qr_op.compute_output_spec(low_rank_input)
def test_compute_output_spec_undefined_dimensions(self):
qr_op = linalg.Qr(mode="reduced")
undefined_dim_input = KerasTensor(shape=(None, 4), dtype="float32")
with self.assertRaisesRegex(
ValueError,
r"Input should have its last 2 dimensions "
r"fully-defined. Received: .*",
):
qr_op.compute_output_spec(undefined_dim_input)
def test_qr_call_mode_reduced(self):
qr_op = linalg.Qr(mode="reduced")
test_input = np.random.rand(10, 10)
q, r = qr_op.call(test_input)
self.assertEqual(q.shape, (10, 10))
self.assertEqual(r.shape, (10, 10))
def test_qr_call_mode_complete(self):
qr_op = linalg.Qr(mode="complete")
test_input = np.random.rand(10, 10)
q, r = qr_op.call(test_input)
self.assertEqual(q.shape, (10, 10))
self.assertEqual(r.shape, (10, 10))
| keras/keras/ops/linalg_test.py/0 | {
"file_path": "keras/keras/ops/linalg_test.py",
"repo_id": "keras",
"token_count": 9333
} | 173 |
from keras.random.random import categorical
from keras.random.random import dropout
from keras.random.random import gamma
from keras.random.random import normal
from keras.random.random import randint
from keras.random.random import shuffle
from keras.random.random import truncated_normal
from keras.random.random import uniform
from keras.random.seed_generator import SeedGenerator
| keras/keras/random/__init__.py/0 | {
"file_path": "keras/keras/random/__init__.py",
"repo_id": "keras",
"token_count": 101
} | 174 |
"""Tests for serialization_lib."""
import json
import numpy as np
import pytest
import keras
from keras import ops
from keras import testing
from keras.saving import serialization_lib
def custom_fn(x):
return x**2
class CustomLayer(keras.layers.Layer):
def __init__(self, factor):
super().__init__()
self.factor = factor
def call(self, x):
return x * self.factor
def get_config(self):
return {"factor": self.factor}
class NestedCustomLayer(keras.layers.Layer):
def __init__(self, factor, dense=None, activation=None):
super().__init__()
self.factor = factor
if dense is None:
self.dense = keras.layers.Dense(1, activation=custom_fn)
else:
self.dense = serialization_lib.deserialize_keras_object(dense)
self.activation = serialization_lib.deserialize_keras_object(activation)
def call(self, x):
return self.dense(x * self.factor)
def get_config(self):
return {
"factor": self.factor,
"dense": self.dense,
"activation": self.activation,
}
class WrapperLayer(keras.layers.Wrapper):
def call(self, x):
return self.layer(x)
class SerializationLibTest(testing.TestCase):
def roundtrip(self, obj, custom_objects=None, safe_mode=True):
serialized = serialization_lib.serialize_keras_object(obj)
json_data = json.dumps(serialized)
json_data = json.loads(json_data)
deserialized = serialization_lib.deserialize_keras_object(
json_data, custom_objects=custom_objects, safe_mode=safe_mode
)
reserialized = serialization_lib.serialize_keras_object(deserialized)
return serialized, deserialized, reserialized
def test_simple_objects(self):
for obj in [
"hello",
b"hello",
np.array([0, 1]),
np.array([0.0, 1.0]),
np.float32(1.0),
["hello", 0, "world", 1.0, True],
{"1": "hello", "2": 0, "3": True},
{"1": "hello", "2": [True, False]},
slice(None, 20, 1),
slice(None, np.array([0, 1]), 1),
]:
serialized, _, reserialized = self.roundtrip(obj)
self.assertEqual(serialized, reserialized)
def test_builtin_layers(self):
layer = keras.layers.Dense(
3,
name="foo",
trainable=False,
dtype="float16",
)
serialized, restored, reserialized = self.roundtrip(layer)
self.assertEqual(serialized, reserialized)
self.assertEqual(layer.name, restored.name)
self.assertEqual(layer.trainable, restored.trainable)
self.assertEqual(layer.compute_dtype, restored.compute_dtype)
def test_tensors_and_shapes(self):
x = ops.random.normal((2, 2), dtype="float64")
obj = {"x": x}
_, new_obj, _ = self.roundtrip(obj)
self.assertAllClose(x, new_obj["x"], atol=1e-5)
obj = {"x.shape": x.shape}
_, new_obj, _ = self.roundtrip(obj)
self.assertEqual(tuple(x.shape), tuple(new_obj["x.shape"]))
def test_custom_fn(self):
obj = {"activation": custom_fn}
serialized, _, reserialized = self.roundtrip(
obj, custom_objects={"custom_fn": custom_fn}
)
self.assertEqual(serialized, reserialized)
# Test inside layer
dense = keras.layers.Dense(1, activation=custom_fn)
dense.build((None, 2))
_, new_dense, _ = self.roundtrip(
dense, custom_objects={"custom_fn": custom_fn}
)
x = ops.random.normal((2, 2))
y1 = dense(x)
_ = new_dense(x)
new_dense.set_weights(dense.get_weights())
y2 = new_dense(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_custom_layer(self):
layer = CustomLayer(factor=2)
x = ops.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer, custom_objects={"CustomLayer": CustomLayer}
)
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
layer = NestedCustomLayer(factor=2)
x = ops.random.normal((2, 2))
y1 = layer(x)
_, new_layer, _ = self.roundtrip(
layer,
custom_objects={
"NestedCustomLayer": NestedCustomLayer,
"custom_fn": custom_fn,
},
)
_ = new_layer(x)
new_layer.set_weights(layer.get_weights())
y2 = new_layer(x)
self.assertAllClose(y1, y2, atol=1e-5)
def test_lambda_fn(self):
obj = {"activation": lambda x: x**2}
with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
self.roundtrip(obj, safe_mode=True)
_, new_obj, _ = self.roundtrip(obj, safe_mode=False)
self.assertEqual(obj["activation"](3), new_obj["activation"](3))
# TODO
# def test_lambda_layer(self):
# lmbda = keras.layers.Lambda(lambda x: x**2)
# with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
# self.roundtrip(lmbda, safe_mode=True)
# _, new_lmbda, _ = self.roundtrip(lmbda, safe_mode=False)
# x = ops.random.normal((2, 2))
# y1 = lmbda(x)
# y2 = new_lmbda(x)
# self.assertAllClose(y1, y2, atol=1e-5)
# def test_safe_mode_scope(self):
# lmbda = keras.layers.Lambda(lambda x: x**2)
# with serialization_lib.SafeModeScope(safe_mode=True):
# with self.assertRaisesRegex(
# ValueError, "arbitrary code execution"
# ):
# self.roundtrip(lmbda)
# with serialization_lib.SafeModeScope(safe_mode=False):
# _, new_lmbda, _ = self.roundtrip(lmbda)
# x = ops.random.normal((2, 2))
# y1 = lmbda(x)
# y2 = new_lmbda(x)
# self.assertAllClose(y1, y2, atol=1e-5)
@pytest.mark.requires_trainable_backend
def test_dict_inputs_outputs(self):
input_foo = keras.Input((2,), name="foo")
input_bar = keras.Input((2,), name="bar")
dense = keras.layers.Dense(1)
output_foo = dense(input_foo)
output_bar = dense(input_bar)
model = keras.Model(
{"foo": input_foo, "bar": input_bar},
{"foo": output_foo, "bar": output_bar},
)
_, new_model, _ = self.roundtrip(model)
original_output = model(
{"foo": np.zeros((2, 2)), "bar": np.zeros((2, 2))}
)
restored_output = model(
{"foo": np.zeros((2, 2)), "bar": np.zeros((2, 2))}
)
self.assertAllClose(original_output["foo"], restored_output["foo"])
self.assertAllClose(original_output["bar"], restored_output["bar"])
@pytest.mark.requires_trainable_backend
def test_shared_inner_layer(self):
with serialization_lib.ObjectSharingScope():
input_1 = keras.Input((2,))
input_2 = keras.Input((2,))
shared_layer = keras.layers.Dense(1)
output_1 = shared_layer(input_1)
wrapper_layer = WrapperLayer(shared_layer)
output_2 = wrapper_layer(input_2)
model = keras.Model([input_1, input_2], [output_1, output_2])
_, new_model, _ = self.roundtrip(
model, custom_objects={"WrapperLayer": WrapperLayer}
)
self.assertIs(model.layers[2], model.layers[3].layer)
self.assertIs(new_model.layers[2], new_model.layers[3].layer)
@pytest.mark.requires_trainable_backend
def test_functional_subclass(self):
class PlainFunctionalSubclass(keras.Model):
pass
inputs = keras.Input((2,), batch_size=3)
outputs = keras.layers.Dense(1)(inputs)
model = PlainFunctionalSubclass(inputs, outputs)
x = ops.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={"PlainFunctionalSubclass": PlainFunctionalSubclass},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, PlainFunctionalSubclass)
class FunctionalSubclassWCustomInit(keras.Model):
def __init__(self, num_units=2):
inputs = keras.Input((2,), batch_size=3)
outputs = keras.layers.Dense(num_units)(inputs)
super().__init__(inputs, outputs)
self.num_units = num_units
def get_config(self):
return {"num_units": self.num_units}
model = FunctionalSubclassWCustomInit(num_units=3)
x = ops.random.normal((2, 2))
y1 = model(x)
_, new_model, _ = self.roundtrip(
model,
custom_objects={
"FunctionalSubclassWCustomInit": FunctionalSubclassWCustomInit
},
)
new_model.set_weights(model.get_weights())
y2 = new_model(x)
self.assertAllClose(y1, y2, atol=1e-5)
self.assertIsInstance(new_model, FunctionalSubclassWCustomInit)
def test_shared_object(self):
class MyLayer(keras.layers.Layer):
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
if isinstance(activation, dict):
self.activation = (
serialization_lib.deserialize_keras_object(activation)
)
else:
self.activation = activation
def call(self, x):
return self.activation(x)
def get_config(self):
config = super().get_config()
config["activation"] = self.activation
return config
class SharedActivation:
def __call__(self, x):
return x**2
def get_config(self):
return {}
@classmethod
def from_config(cls, config):
return cls()
shared_act = SharedActivation()
layer_1 = MyLayer(activation=shared_act)
layer_2 = MyLayer(activation=shared_act)
layers = [layer_1, layer_2]
with serialization_lib.ObjectSharingScope():
serialized, new_layers, reserialized = self.roundtrip(
layers,
custom_objects={
"MyLayer": MyLayer,
"SharedActivation": SharedActivation,
},
)
self.assertIn("shared_object_id", serialized[0]["config"]["activation"])
obj_id = serialized[0]["config"]["activation"]
self.assertIn("shared_object_id", serialized[1]["config"]["activation"])
self.assertEqual(obj_id, serialized[1]["config"]["activation"])
self.assertIs(layers[0].activation, layers[1].activation)
self.assertIs(new_layers[0].activation, new_layers[1].activation)
@keras.saving.register_keras_serializable()
class MyDense(keras.layers.Layer):
def __init__(
self,
units,
*,
kernel_regularizer=None,
kernel_initializer=None,
**kwargs
):
super().__init__(**kwargs)
self._units = units
self._kernel_regularizer = kernel_regularizer
self._kernel_initializer = kernel_initializer
def get_config(self):
return dict(
units=self._units,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
**super().get_config()
)
def build(self, input_shape):
_, input_units = input_shape
self._kernel = self.add_weight(
name="kernel",
shape=[input_units, self._units],
dtype="float32",
regularizer=self._kernel_regularizer,
initializer=self._kernel_initializer,
)
def call(self, inputs):
return ops.matmul(inputs, self._kernel)
@keras.saving.register_keras_serializable()
class MyWrapper(keras.layers.Layer):
def __init__(self, wrapped, **kwargs):
super().__init__(**kwargs)
self._wrapped = wrapped
def get_config(self):
return dict(wrapped=self._wrapped, **super().get_config())
@classmethod
def from_config(cls, config):
config["wrapped"] = keras.saving.deserialize_keras_object(
config["wrapped"]
)
return cls(**config)
def call(self, inputs):
return self._wrapped(inputs)
| keras/keras/saving/serialization_lib_test.py/0 | {
"file_path": "keras/keras/saving/serialization_lib_test.py",
"repo_id": "keras",
"token_count": 6161
} | 175 |
import math
import time
import jax
import numpy as np
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras import testing
from keras.testing.test_utils import named_product
from keras.trainers.data_adapters import py_dataset_adapter
from keras.utils.rng_utils import set_random_seed
class ExamplePyDataset(py_dataset_adapter.PyDataset):
def __init__(
self, x_set, y_set, sample_weight=None, batch_size=32, delay=0, **kwargs
):
super().__init__(**kwargs)
self.x, self.y = x_set, y_set
self.batch_size = batch_size
self.sample_weight = sample_weight
self.delay = delay
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
# Create artificial delay to test multiprocessing
time.sleep(self.delay)
# Return x, y for batch idx.
low = idx * self.batch_size
# Cap upper bound at array length; the last batch may be smaller
# if the total number of items is not a multiple of batch size.
high = min(low + self.batch_size, len(self.x))
batch_x = self.x[low:high]
batch_y = self.y[low:high]
if self.sample_weight is not None:
return batch_x, batch_y, self.sample_weight[low:high]
return batch_x, batch_y
class DictPyDataset(py_dataset_adapter.PyDataset):
def __init__(self, inputs, batch_size=32, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.inputs["x"]) / self.batch_size)
def __getitem__(self, idx):
# Return x, y for batch idx.
low = idx * self.batch_size
# Cap upper bound at array length; the last batch may be smaller
# if the total number of items is not a multiple of batch size.
high = min(low + self.batch_size, len(self.inputs["x"]))
batch_x = self.inputs["x"][low:high]
batch_y = self.inputs["y"][low:high]
batch = {"x": batch_x, "y": batch_y}
return batch
class PyDatasetAdapterTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
named_product(
[
{
"testcase_name": "multiprocessing",
"workers": 2,
"use_multiprocessing": True,
"max_queue_size": 10,
"dataset_type": "np",
},
{
"testcase_name": "multithreading",
"workers": 2,
"use_multiprocessing": False,
"max_queue_size": 10,
"dataset_type": "np",
},
{
"testcase_name": "single_np",
"dataset_type": "np",
},
{
"testcase_name": "single_tf",
"dataset_type": "tf",
},
{
"testcase_name": "single_jax",
"dataset_type": "jax",
},
{
"testcase_name": "single_torch",
"dataset_type": "torch",
},
],
iterator_type=["np", "tf", "jax", "torch"],
shuffle=[True, False],
)
)
def test_basic_flow(
self,
shuffle,
dataset_type,
iterator_type,
workers=0,
use_multiprocessing=False,
max_queue_size=0,
):
set_random_seed(1337)
x = np.random.random((64, 4)).astype("float32")
y = np.array([[i, i] for i in range(64)], dtype="float32")
if dataset_type == "tf":
x, y = tf.constant(x), tf.constant(y)
elif dataset_type == "jax":
x, y = jax.numpy.array(x), jax.numpy.array(y)
elif dataset_type == "torch":
x, y = torch.as_tensor(x), torch.as_tensor(y)
py_dataset = ExamplePyDataset(
x,
y,
batch_size=16,
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
)
adapter = py_dataset_adapter.PyDatasetAdapter(
py_dataset, shuffle=shuffle
)
if iterator_type == "np":
it = adapter.get_numpy_iterator()
expected_class = np.ndarray
elif iterator_type == "tf":
it = adapter.get_tf_dataset()
expected_class = tf.Tensor
elif iterator_type == "jax":
it = adapter.get_jax_iterator()
expected_class = jax.Array
elif iterator_type == "torch":
it = adapter.get_torch_dataloader()
expected_class = torch.Tensor
sample_order = []
for batch in it:
self.assertEqual(len(batch), 2)
bx, by = batch
self.assertIsInstance(bx, expected_class)
self.assertIsInstance(by, expected_class)
self.assertEqual(bx.dtype, by.dtype)
self.assertContainsExactSubsequence(str(bx.dtype), "float32")
self.assertEqual(bx.shape, (16, 4))
self.assertEqual(by.shape, (16, 2))
for i in range(by.shape[0]):
sample_order.append(by[i, 0])
if shuffle:
self.assertNotAllClose(sample_order, list(range(64)))
else:
self.assertAllClose(sample_order, list(range(64)))
# TODO: test class_weight
# TODO: test sample weights
# TODO: test inference mode (single output)
def test_speedup(self):
x = np.random.random((40, 4))
y = np.random.random((40, 2))
no_speedup_py_dataset = ExamplePyDataset(
x,
y,
batch_size=4,
delay=0.5,
)
adapter = py_dataset_adapter.PyDatasetAdapter(
no_speedup_py_dataset, shuffle=False
)
gen = adapter.get_numpy_iterator()
t0 = time.time()
for batch in gen:
pass
no_speedup_time = time.time() - t0
speedup_py_dataset = ExamplePyDataset(
x,
y,
batch_size=4,
workers=4,
            # TODO: the GitHub Actions runner may have performance issues with
# multiprocessing
# use_multiprocessing=True,
max_queue_size=8,
delay=0.5,
)
adapter = py_dataset_adapter.PyDatasetAdapter(
speedup_py_dataset, shuffle=False
)
gen = adapter.get_numpy_iterator()
t0 = time.time()
for batch in gen:
pass
speedup_time = time.time() - t0
self.assertLess(speedup_time, no_speedup_time)
def test_dict_inputs(self):
inputs = {
"x": np.random.random((40, 4)),
"y": np.random.random((40, 2)),
}
py_dataset = DictPyDataset(inputs, batch_size=4)
adapter = py_dataset_adapter.PyDatasetAdapter(py_dataset, shuffle=False)
gen = adapter.get_numpy_iterator()
for batch in gen:
self.assertEqual(len(batch), 2)
bx, by = batch["x"], batch["y"]
self.assertIsInstance(bx, np.ndarray)
self.assertIsInstance(by, np.ndarray)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(bx.shape, (4, 4))
self.assertEqual(by.shape, (4, 2))
ds = adapter.get_tf_dataset()
for batch in ds:
self.assertEqual(len(batch), 2)
bx, by = batch["x"], batch["y"]
self.assertIsInstance(bx, tf.Tensor)
self.assertIsInstance(by, tf.Tensor)
self.assertEqual(bx.dtype, by.dtype)
self.assertEqual(tuple(bx.shape), (4, 4))
self.assertEqual(tuple(by.shape), (4, 2))
| keras/keras/trainers/data_adapters/py_dataset_adapter_test.py/0 | {
"file_path": "keras/keras/trainers/data_adapters/py_dataset_adapter_test.py",
"repo_id": "keras",
"token_count": 4206
} | 176 |
import tree
def pack_sequence_as(structure, flat_sequence, sequence_fn=None):
"""Implements sequence packing, i.e. nest.pack_sequence_as()."""
is_nested_fn = tree.is_nested
sequence_fn = sequence_fn or tree._sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
try:
final_index, packed = packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = tree.flatten(structure)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. "
f"Structure had {len(flat_structure)} atoms, but "
f"flat_sequence had {len(flat_sequence)} items. "
f"Structure: {structure}, flat_sequence: {flat_sequence}."
)
return sequence_fn(structure, packed)
def packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should
be treated as a nested structure.
        sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat`
having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
"""
packed = []
sequence_fn = sequence_fn or tree._sequence_like
for s in yield_value(structure):
if is_nested_fn(s):
new_index, child = packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def yield_value(iterable):
for _, v in tree._yield_sorted_items(iterable):
yield v
def lists_to_tuples(structure):
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return tree._sequence_like(instance, args)
return pack_sequence_as(
structure,
tree.flatten(structure),
sequence_fn=sequence_fn,
)
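# Illustrative example (assumed input, not part of the original module):
#   lists_to_tuples({"x": [1, [2, 3]]})  # -> {"x": (1, (2, 3))}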
| keras/keras/utils/nest.py/0 | {
"file_path": "keras/keras/utils/nest.py",
"repo_id": "keras",
"token_count": 1664
} | 177 |
import numpy as np
from keras.api_export import keras_export
from keras.utils.module_utils import tensorflow as tf
@keras_export(
[
"keras.utils.timeseries_dataset_from_array",
"keras.preprocessing.timeseries_dataset_from_array",
]
)
def timeseries_dataset_from_array(
data,
targets,
sequence_length,
sequence_stride=1,
sampling_rate=1,
batch_size=128,
shuffle=False,
seed=None,
start_index=None,
end_index=None,
):
"""Creates a dataset of sliding windows over a timeseries provided as array.
This function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
    length of the sequences/windows, spacing between two sequences/windows, etc.,
to produce batches of timeseries inputs and targets.
Args:
data: Numpy array or eager tensor
containing consecutive data points (timesteps).
Axis 0 is expected to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
`targets[i]` should be the target
corresponding to the window that starts at index `i`
(see example 2 below).
Pass `None` if you don't have target data (in this case the dataset
will only yield the input data).
sequence_length: Length of the output sequences
(in number of timesteps).
sequence_stride: Period between successive output sequences.
For stride `s`, output samples would
start at index `data[i]`, `data[i + s]`, `data[i + 2 * s]`, etc.
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i], data[i + r], ... data[i + sequence_length]`
are used for creating a sample sequence.
batch_size: Number of timeseries samples in each batch
(except maybe the last one). If `None`, the data will not be batched
(the dataset will yield individual samples).
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
seed: Optional int; random seed for shuffling.
start_index: Optional int; data points earlier (exclusive)
than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Optional int; data points later (exclusive) than `end_index`
will not be used in the output sequences.
This is useful to reserve part of the data for test or validation.
Returns:
A `tf.data.Dataset` instance. If `targets` was passed, the dataset yields
tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
only `batch_of_sequences`.
Example 1:
Consider indices `[0, 1, ... 98]`.
With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
`shuffle=False`, the dataset will yield batches of sequences
composed of the following indices:
```
First sequence: [0 2 4 6 8 10 12 14 16 18]
Second sequence: [3 5 7 9 11 13 15 17 19 21]
Third sequence: [6 8 10 12 14 16 18 20 22 24]
...
Last sequence: [78 80 82 84 86 88 90 92 94 96]
```
In this case the last 2 data points are discarded since no full sequence
can be generated to include them (the next sequence would have started
at index 81, and thus its last step would have gone over 98).
Example 2: Temporal regression.
Consider an array `data` of scalar values, of shape `(steps,)`.
To generate a dataset that uses the past 10
timesteps to predict the next timestep, you would use:
```python
input_data = data[:-10]
targets = data[10:]
dataset = timeseries_dataset_from_array(
input_data, targets, sequence_length=10)
for batch in dataset:
inputs, targets = batch
assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9]
# Corresponding target: step 10
assert np.array_equal(targets[0], data[10])
break
```
Example 3: Temporal regression for many-to-many architectures.
Consider two arrays of scalar values `X` and `Y`,
    both of shape `(100,)`. The resulting dataset should consist of samples
    with 20 timesteps each. The samples should not overlap.
To generate a dataset that uses the current timestamp
to predict the corresponding target timestep, you would use:
```python
X = np.arange(100)
Y = X*2
sample_length = 20
input_dataset = timeseries_dataset_from_array(
X, None, sequence_length=sample_length, sequence_stride=sample_length)
target_dataset = timeseries_dataset_from_array(
Y, None, sequence_length=sample_length, sequence_stride=sample_length)
for batch in zip(input_dataset, target_dataset):
inputs, targets = batch
assert np.array_equal(inputs[0], X[:sample_length])
    # second sample equals output timesteps 20-40
assert np.array_equal(targets[1], Y[sample_length:2*sample_length])
break
```
"""
if start_index:
if start_index < 0:
raise ValueError(
"`start_index` must be 0 or greater. Received: "
f"start_index={start_index}"
)
if start_index >= len(data):
raise ValueError(
"`start_index` must be lower than the length of the "
f"data. Received: start_index={start_index}, for data "
f"of length {len(data)}"
)
if end_index:
if start_index and end_index <= start_index:
raise ValueError(
"`end_index` must be higher than `start_index`. "
f"Received: start_index={start_index}, and "
f"end_index={end_index} "
)
if end_index >= len(data):
raise ValueError(
"`end_index` must be lower than the length of the "
f"data. Received: end_index={end_index}, for data of "
f"length {len(data)}"
)
if end_index <= 0:
raise ValueError(
"`end_index` must be higher than 0. "
f"Received: end_index={end_index}"
)
# Validate strides
if sampling_rate <= 0:
raise ValueError(
"`sampling_rate` must be higher than 0. Received: "
f"sampling_rate={sampling_rate}"
)
if sampling_rate >= len(data):
raise ValueError(
"`sampling_rate` must be lower than the length of the "
f"data. Received: sampling_rate={sampling_rate}, for data "
f"of length {len(data)}"
)
if sequence_stride <= 0:
raise ValueError(
"`sequence_stride` must be higher than 0. Received: "
f"sequence_stride={sequence_stride}"
)
if sequence_stride >= len(data):
raise ValueError(
"`sequence_stride` must be lower than the length of the "
f"data. Received: sequence_stride={sequence_stride}, for "
f"data of length {len(data)}"
)
if start_index is None:
start_index = 0
if end_index is None:
end_index = len(data)
# Determine the lowest dtype to store start positions (to lower memory
# usage).
num_seqs = end_index - start_index - (sequence_length - 1) * sampling_rate
if targets is not None:
num_seqs = min(num_seqs, len(targets))
if num_seqs < 2147483647:
index_dtype = "int32"
else:
index_dtype = "int64"
# Generate start positions
start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
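    # For instance, with 99 timesteps, sequence_length=10, sampling_rate=2 and
    # sequence_stride=3 (Example 1 in the docstring above), num_seqs is 81 and
    # the start positions are [0, 3, 6, ..., 78].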
if shuffle:
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(start_positions)
sequence_length = tf.cast(sequence_length, dtype=index_dtype)
sampling_rate = tf.cast(sampling_rate, dtype=index_dtype)
positions_ds = tf.data.Dataset.from_tensors(start_positions).repeat()
# For each initial window position, generates indices of the window elements
indices = tf.data.Dataset.zip(
(tf.data.Dataset.range(len(start_positions)), positions_ds)
).map(
lambda i, positions: tf.range(
positions[i],
positions[i] + sequence_length * sampling_rate,
sampling_rate,
),
num_parallel_calls=tf.data.AUTOTUNE,
)
dataset = sequences_from_indices(data, indices, start_index, end_index)
if targets is not None:
indices = tf.data.Dataset.zip(
(tf.data.Dataset.range(len(start_positions)), positions_ds)
).map(
lambda i, positions: positions[i],
num_parallel_calls=tf.data.AUTOTUNE,
)
target_ds = sequences_from_indices(
targets, indices, start_index, end_index
)
dataset = tf.data.Dataset.zip((dataset, target_ds))
dataset = dataset.prefetch(tf.data.AUTOTUNE)
if batch_size is not None:
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
else:
if shuffle:
dataset = dataset.shuffle(buffer_size=1024, seed=seed)
return dataset
def sequences_from_indices(array, indices_ds, start_index, end_index):
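    # Each element of `indices_ds` is a vector of timestep positions; the map
    # below gathers those rows from `array[start_index:end_index]` to build one
    # window per element.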
dataset = tf.data.Dataset.from_tensors(array[start_index:end_index])
dataset = tf.data.Dataset.zip((dataset.repeat(), indices_ds)).map(
lambda steps, inds: tf.gather(steps, inds),
num_parallel_calls=tf.data.AUTOTUNE,
)
return dataset
| keras/keras/utils/timeseries_dataset_utils.py/0 | {
"file_path": "keras/keras/utils/timeseries_dataset_utils.py",
"repo_id": "keras",
"token_count": 4076
} | 178 |
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
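# Illustrative example (assuming keras/version.py contains a line such as
# `__version__ = "3.0.0"`):
#   get_version("keras/version.py")  # -> "3.0.0"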
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
if os.path.exists("keras/version.py"):
VERSION = get_version("keras/version.py")
else:
VERSION = get_version("keras/__init__.py")
setup(
name="keras",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras",
author="Keras team",
author_email="[email protected]",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"dm-tree",
"ml-dtypes",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
| keras/setup.py/0 | {
"file_path": "keras/setup.py",
"repo_id": "keras",
"token_count": 774
} | 179 |
py_library(
name = "expect_absl_installed",
    # This is a dummy rule used as an absl dependency in open-source.
    # We expect absl to already be installed on the system, e.g. via
    # `pip install absl-py`
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_h5py_installed",
# This is a dummy rule used as a h5 dependency in open-source.
# We expect h5py to already be installed on the system, e.g. via
# `pip install h5py'
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_numpy_installed",
# This is a dummy rule used as a numpy dependency in open-source.
# We expect numpy to already be installed on the system, e.g. via
# `pip install numpy`
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_pandas_installed",
# This is a dummy rule used as a pandas dependency in open-source.
# We expect pandas to already be installed on the system, e.g. via
# `pip install pandas'
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_pillow_installed",
# This is a dummy rule used as a pillow dependency in open-source.
# We expect pillow to already be installed on the system, e.g. via
# `pip install Pillow'
visibility = ["//visibility:public"],
deps = [],
)
# Note that this dependency is for testing only.
py_library(
name = "expect_portpicker_installed",
    # This is a dummy rule used as a portpicker dependency in open-source.
# We expect portpicker to already be installed on the system, e.g. via
# `pip install portpicker'
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_pydot_installed",
# This is a dummy rule used as a pydot dependency in open-source.
# We expect pydot to already be installed on the system, e.g. via
# `pip install pydot'
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_scipy_installed",
# This is a dummy rule used as a scipy dependency in open-source.
# We expect scipy to already be installed on the system, e.g. via
# `pip install scipy'
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_six_installed",
# This is a dummy rule used as a six dependency in open-source.
# We expect six to already be installed on the system, e.g. via
# `pip install six`
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_tensorboard_installed",
# This is a dummy rule used as a tensorboard dependency in open-source.
# We expect tensorboard to already be installed on the system, e.g. via
# `pip install tensorflow`
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_tensorflow_installed",
# This is a dummy rule used as a tensorflow dependency in open-source.
# We expect tensorflow to already be installed on the system, e.g. via
# `pip install tensorflow`
visibility = ["//visibility:public"],
deps = [],
)
py_library(
name = "expect_yaml_installed",
# This is a dummy rule used as a yaml dependency in open-source.
# We expect yaml to already be installed on the system, e.g. via
# `pip install yaml`
visibility = ["//visibility:public"],
deps = [],
)
# Note that this dependency is for testing only.
py_library(
name = "expect_tensorflow_io_installed",
# This is a dummy rule used as a tensorflow_io dependency in open-source.
# We expect tensorflow_io to already be installed on the system, e.g. via
# `pip install tensorflow-io`
visibility = ["//visibility:public"],
deps = [],
)
| tf-keras/BUILD/0 | {
"file_path": "tf-keras/BUILD",
"repo_id": "tf-keras",
"token_count": 1350
} | 180 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-Keras activation functions."""
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import tf_keras.layers.activation as activation_layers
from tf_keras import activations
from tf_keras import backend
from tf_keras.layers import core
from tf_keras.layers import serialization
from tf_keras.testing_infra import test_combinations
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
def _ref_softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
@test_combinations.generate(test_combinations.combine(mode=["graph", "eager"]))
class KerasActivationsTest(tf.test.TestCase, parameterized.TestCase):
def test_serialization(self):
all_activations = [
"softmax",
"relu",
"elu",
"tanh",
"sigmoid",
"hard_sigmoid",
"linear",
"softplus",
"softsign",
"selu",
"gelu",
"relu6",
"mish",
]
for name in all_activations:
fn = activations.get(name)
ref_fn = getattr(activations, name)
assert fn == ref_fn
config = activations.serialize(fn)
fn = activations.deserialize(config)
assert fn == ref_fn
def test_serialization_v2(self):
activation_map = {tf.math.softmax: "softmax"}
for fn_v2_key in activation_map:
fn_v2 = activations.get(fn_v2_key)
config = activations.serialize(fn_v2)
fn = activations.deserialize(config)
assert fn.__name__ == activation_map[fn_v2_key]
def test_serialization_with_layers(self):
activation = activation_layers.LeakyReLU(alpha=0.1)
layer = core.Dense(3, activation=activation)
config = serialization.serialize(layer)
# with custom objects
deserialized_layer = serialization.deserialize(
config, custom_objects={"LeakyReLU": activation}
)
self.assertEqual(
deserialized_layer.__class__.__name__, layer.__class__.__name__
)
self.assertEqual(
deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__,
)
# without custom objects
deserialized_layer = serialization.deserialize(config)
self.assertEqual(
deserialized_layer.__class__.__name__, layer.__class__.__name__
)
self.assertEqual(
deserialized_layer.activation.__class__.__name__,
activation.__class__.__name__,
)
def test_softmax(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softmax(test_values[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
x = backend.placeholder(ndim=1)
with self.assertRaises(ValueError):
activations.softmax(x)
def test_softmax_2d_axis_0(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softmax(x, axis=0)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_softmax(test_values[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_3d_axis_tuple(self):
x = backend.placeholder(ndim=3)
f = backend.function([x], [activations.softmax(x, axis=(1, 2))])
test_values = np.random.random((2, 3, 5))
result = f([test_values])[0]
expected = np.zeros((2, 3, 5))
for i in range(2):
expected[i, :, :] = _ref_softmax(test_values[i, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_softmax(self):
x = backend.placeholder(shape=(2, 2, 3))
f = backend.function([x], [activations.softmax(x)])
test_values = np.random.random((2, 2, 3)) * 10
result = f([test_values])[0]
expected = _ref_softmax(test_values[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.selu(x)])
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=backend.floatx())
result = f([positive_values])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softplus(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = _ref_softplus(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softsign(self):
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.softsign(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = softsign(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sigmoid(self):
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_sigmoid(self):
def ref_hard_sigmoid(x):
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.hard_sigmoid(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.relu(x)])
positive_values = np.random.random((2, 5))
result = f([positive_values])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
negative_values = np.random.uniform(-1, 0, (2, 5))
result = f([negative_values])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
def test_gelu(self):
def gelu(x, approximate=False):
if approximate:
return (
0.5
* x
* (
1.0
+ np.tanh(
np.sqrt(2.0 / np.pi)
* (x + 0.044715 * np.power(x, 3))
)
)
)
else:
from scipy.stats import norm
return x * norm.cdf(x)
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.gelu(x)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
f = backend.function([x], [activations.gelu(x, True)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
expected = gelu(test_values, True)
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = backend.placeholder(ndim=2)
f = backend.function([x], [activations.elu(x, 0.5)])
test_values = np.random.random((2, 5))
result = f([test_values])[0]
self.assertAllClose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
self.assertAllClose(result, true_result)
def test_tanh(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.tanh(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_exponential(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
exp = activations.exponential(x)
f = backend.function([x], [exp])
result = f([test_values])[0]
expected = np.exp(test_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_mish(self):
test_values = np.random.random((2, 5))
x = backend.placeholder(ndim=2)
output = activations.mish(x)
f = backend.function([x], [output])
result = f([test_values])[0]
expected = test_values * np.tanh(_ref_softplus(test_values))
self.assertAllClose(result, expected, rtol=1e-05)
def test_linear(self):
x = np.random.random((10, 5))
self.assertAllClose(x, activations.linear(x))
def test_invalid_usage(self):
with self.assertRaises(ValueError):
activations.get("unknown")
# The following should be possible but should raise a warning:
activations.get(activation_layers.LeakyReLU())
if __name__ == "__main__":
tf.test.main()
| tf-keras/tf_keras/activations_test.py/0 | {
"file_path": "tf-keras/tf_keras/activations_test.py",
"repo_id": "tf-keras",
"token_count": 5138
} | 181 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""EfficientNet V2 models for TF-Keras.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](
https://arxiv.org/abs/2104.00298) (ICML 2021)
"""
import copy
import math
import tensorflow.compat.v2 as tf
from tf_keras import backend
from tf_keras import layers
from tf_keras.applications import imagenet_utils
from tf_keras.engine import training
from tf_keras.utils import data_utils
from tf_keras.utils import layer_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/" # noqa: E501
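# For each variant, the first hash corresponds to the full (`include_top=True`)
# weights file and the second to the "_notop" file; see the weight-loading
# logic at the end of `EfficientNetV2` below.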
WEIGHTS_HASHES = {
"b0": (
"21ecbf6da12460d5c40bb2f29ceb2188",
"893217f2bb855e2983157299931e43ff",
),
"b1": (
"069f0534ff22adf035c89e2d9547a9dc",
"0e80663031ca32d657f9caa404b6ec37",
),
"b2": (
"424e49f28180edbde1e94797771950a7",
"1dfe2e7a5d45b6632553a8961ea609eb",
),
"b3": (
"1f1fc43bd98a6e4fd8fdfd551e02c7a0",
"f6abf7b5849ac99a89b50dd3fd532856",
),
"-s": (
"e1d88a8495beba45748fedd0cecbe016",
"af0682fb74e8c54910f2d4393339c070",
),
"-m": (
"a3bf6aa3276309f4fc6a34aa114c95cd",
"1b8dc055df72dde80d614482840fe342",
),
"-l": (
"27e6d408b53c7ebc868fefa357689935",
"b0b66b5c863aef5b46e8608fe1711615",
),
}
DEFAULT_BLOCKS_ARGS = {
"efficientnetv2-s": [
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 24,
"output_filters": 24,
"expand_ratio": 1,
"se_ratio": 0.0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 4,
"input_filters": 24,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0.0,
"strides": 2,
"conv_type": 1,
},
{
"conv_type": 1,
"expand_ratio": 4,
"input_filters": 48,
"kernel_size": 3,
"num_repeat": 4,
"output_filters": 64,
"se_ratio": 0,
"strides": 2,
},
{
"conv_type": 0,
"expand_ratio": 4,
"input_filters": 64,
"kernel_size": 3,
"num_repeat": 6,
"output_filters": 128,
"se_ratio": 0.25,
"strides": 2,
},
{
"conv_type": 0,
"expand_ratio": 6,
"input_filters": 128,
"kernel_size": 3,
"num_repeat": 9,
"output_filters": 160,
"se_ratio": 0.25,
"strides": 1,
},
{
"conv_type": 0,
"expand_ratio": 6,
"input_filters": 160,
"kernel_size": 3,
"num_repeat": 15,
"output_filters": 256,
"se_ratio": 0.25,
"strides": 2,
},
],
"efficientnetv2-m": [
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 24,
"output_filters": 24,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 24,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 48,
"output_filters": 80,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 80,
"output_filters": 160,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 14,
"input_filters": 160,
"output_filters": 176,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 18,
"input_filters": 176,
"output_filters": 304,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 304,
"output_filters": 512,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
],
"efficientnetv2-l": [
{
"kernel_size": 3,
"num_repeat": 4,
"input_filters": 32,
"output_filters": 32,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 32,
"output_filters": 64,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 64,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 10,
"input_filters": 96,
"output_filters": 192,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 19,
"input_filters": 192,
"output_filters": 224,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 25,
"input_filters": 224,
"output_filters": 384,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 7,
"input_filters": 384,
"output_filters": 640,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
],
"efficientnetv2-b0": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b1": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b2": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
"efficientnetv2-b3": [
{
"kernel_size": 3,
"num_repeat": 1,
"input_filters": 32,
"output_filters": 16,
"expand_ratio": 1,
"se_ratio": 0,
"strides": 1,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 16,
"output_filters": 32,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 2,
"input_filters": 32,
"output_filters": 48,
"expand_ratio": 4,
"se_ratio": 0,
"strides": 2,
"conv_type": 1,
},
{
"kernel_size": 3,
"num_repeat": 3,
"input_filters": 48,
"output_filters": 96,
"expand_ratio": 4,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 5,
"input_filters": 96,
"output_filters": 112,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 1,
"conv_type": 0,
},
{
"kernel_size": 3,
"num_repeat": 8,
"input_filters": 112,
"output_filters": 192,
"expand_ratio": 6,
"se_ratio": 0.25,
"strides": 2,
"conv_type": 0,
},
],
}
CONV_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 2.0,
"mode": "fan_out",
"distribution": "truncated_normal",
},
}
DENSE_KERNEL_INITIALIZER = {
"class_name": "VarianceScaling",
"config": {
"scale": 1.0 / 3.0,
"mode": "fan_out",
"distribution": "uniform",
},
}
BASE_DOCSTRING = """Instantiates the {name} architecture.
Reference:
- [EfficientNetV2: Smaller Models and Faster Training](
https://arxiv.org/abs/2104.00298) (ICML 2021)
This function returns a TF-Keras image classification model,
optionally loaded with weights pre-trained on ImageNet.
For image classification use cases, see
[this page for detailed examples](
https://keras.io/api/applications/#usage-examples-for-image-classification-models).
For transfer learning use cases, make sure to read the
[guide to transfer learning & fine-tuning](
https://keras.io/guides/transfer_learning/).
Note: each TF-Keras Application expects a specific kind of input
preprocessing. For EfficientNetV2, by default input preprocessing is included
as a part of the model (as a `Rescaling` layer), and thus
`tf.keras.applications.efficientnet_v2.preprocess_input` is actually a
pass-through function. In this use case, EfficientNetV2 models expect their
inputs to be float tensors of pixels with values in the [0-255] range.
At the same time, preprocessing as a part of the model (i.e. the `Rescaling`
layer) can be disabled by setting the `include_preprocessing` argument to
`False`. With preprocessing disabled, EfficientNetV2 models expect their inputs
to be float tensors of pixels with values in the [-1, 1] range.
Args:
include_top: Boolean, whether to include the fully-connected
layer at the top of the network. Defaults to `True`.
weights: One of `None` (random initialization),
`"imagenet"` (pre-training on ImageNet),
or the path to the weights file to be loaded. Defaults to `"imagenet"`.
input_tensor: Optional TF-Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: Optional shape tuple, only to be specified
if `include_top` is False.
    It should have exactly 3 input channels.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `"avg"` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `"max"` means that global max pooling will
be applied.
Defaults to `None`.
  classes: Optional number of classes to classify images
    into, only to be specified if `include_top` is True, and
    if no `weights` argument is specified. Defaults to `1000`
    (the number of ImageNet classes).
classifier_activation: A string or callable. The activation function to use
on the `"top"` layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
When loading pretrained weights, `classifier_activation` can only
be `None` or `"softmax"`.
Defaults to `"softmax"`.
Returns:
A `keras.Model` instance.
"""
def round_filters(filters, width_coefficient, min_depth, depth_divisor):
"""Round number of filters based on depth multiplier."""
filters *= width_coefficient
minimum_depth = min_depth or depth_divisor
new_filters = max(
minimum_depth,
int(filters + depth_divisor / 2) // depth_divisor * depth_divisor,
)
return int(new_filters)
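# Illustrative example (assumed values): round_filters(32, width_coefficient=1.2,
# min_depth=8, depth_divisor=8) scales 32 to 38.4, rounds to the nearest
# multiple of 8, and clamps to at least `min_depth`, returning 40.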
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
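# Illustrative example: round_repeats(4, depth_coefficient=1.4) == ceil(5.6) == 6.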
def MBConvBlock(
input_filters: int,
output_filters: int,
expand_ratio=1,
kernel_size=3,
strides=1,
se_ratio=0.0,
bn_momentum=0.9,
activation="swish",
survival_probability: float = 0.8,
name=None,
):
"""MBConv block: Mobile Inverted Residual Bottleneck."""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
if name is None:
name = backend.get_uid("block0")
def apply(inputs):
# Expansion phase
filters = input_filters * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters=filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format=backend.image_data_format(),
use_bias=False,
name=name + "expand_conv",
)(inputs)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
name=name + "expand_bn",
)(x)
x = layers.Activation(activation, name=name + "expand_activation")(
x
)
else:
x = inputs
# Depthwise conv
x = layers.DepthwiseConv2D(
kernel_size=kernel_size,
strides=strides,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format=backend.image_data_format(),
use_bias=False,
name=name + "dwconv2",
)(x)
x = layers.BatchNormalization(
axis=bn_axis, momentum=bn_momentum, name=name + "bn"
)(x)
x = layers.Activation(activation, name=name + "activation")(x)
# Squeeze and excite
if 0 < se_ratio <= 1:
filters_se = max(1, int(input_filters * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
if bn_axis == 1:
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_reduce",
)(se)
se = layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_expand",
)(se)
x = layers.multiply([x, se], name=name + "se_excite")
# Output phase
x = layers.Conv2D(
filters=output_filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format=backend.image_data_format(),
use_bias=False,
name=name + "project_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis, momentum=bn_momentum, name=name + "project_bn"
)(x)
if strides == 1 and input_filters == output_filters:
if survival_probability:
x = layers.Dropout(
survival_probability,
noise_shape=(None, 1, 1, 1),
name=name + "drop",
)(x)
x = layers.add([x, inputs], name=name + "add")
return x
return apply
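# Usage sketch (illustrative): MBConvBlock returns a closure that is applied to
# a 4D feature map, e.g.
#   block = MBConvBlock(input_filters=24, output_filters=24, expand_ratio=4,
#                       se_ratio=0.25, strides=1, name="example_block_")
#   y = block(x)
# The residual connection (and optional stochastic-depth dropout) is only added
# when strides == 1 and input_filters == output_filters.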
def FusedMBConvBlock(
input_filters: int,
output_filters: int,
expand_ratio=1,
kernel_size=3,
strides=1,
se_ratio=0.0,
bn_momentum=0.9,
activation="swish",
survival_probability: float = 0.8,
name=None,
):
"""Fused MBConv Block: Fusing the proj conv1x1 and depthwise_conv into a
conv2d."""
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
if name is None:
name = backend.get_uid("block0")
def apply(inputs):
filters = input_filters * expand_ratio
if expand_ratio != 1:
x = layers.Conv2D(
filters,
kernel_size=kernel_size,
strides=strides,
kernel_initializer=CONV_KERNEL_INITIALIZER,
data_format=backend.image_data_format(),
padding="same",
use_bias=False,
name=name + "expand_conv",
)(inputs)
x = layers.BatchNormalization(
axis=bn_axis, momentum=bn_momentum, name=name + "expand_bn"
)(x)
x = layers.Activation(
activation=activation, name=name + "expand_activation"
)(x)
else:
x = inputs
# Squeeze and excite
if 0 < se_ratio <= 1:
filters_se = max(1, int(input_filters * se_ratio))
se = layers.GlobalAveragePooling2D(name=name + "se_squeeze")(x)
if bn_axis == 1:
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = layers.Reshape(se_shape, name=name + "se_reshape")(se)
se = layers.Conv2D(
filters_se,
1,
padding="same",
activation=activation,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_reduce",
)(se)
se = layers.Conv2D(
filters,
1,
padding="same",
activation="sigmoid",
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=name + "se_expand",
)(se)
x = layers.multiply([x, se], name=name + "se_excite")
# Output phase:
x = layers.Conv2D(
output_filters,
kernel_size=1 if expand_ratio != 1 else kernel_size,
strides=1 if expand_ratio != 1 else strides,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
use_bias=False,
name=name + "project_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis, momentum=bn_momentum, name=name + "project_bn"
)(x)
if expand_ratio == 1:
x = layers.Activation(
activation=activation, name=name + "project_activation"
)(x)
# Residual:
if strides == 1 and input_filters == output_filters:
if survival_probability:
x = layers.Dropout(
survival_probability,
noise_shape=(None, 1, 1, 1),
name=name + "drop",
)(x)
x = layers.add([x, inputs], name=name + "add")
return x
return apply
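# FusedMBConvBlock is used for the early stages (conv_type == 1 in
# DEFAULT_BLOCKS_ARGS): it collapses the separate expansion 1x1 and depthwise
# convolutions of MBConvBlock into a single regular convolution.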
def EfficientNetV2(
width_coefficient,
depth_coefficient,
default_size,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
min_depth=8,
bn_momentum=0.9,
activation="swish",
blocks_args="default",
model_name="efficientnetv2",
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
"""Instantiates the EfficientNetV2 architecture using given scaling
coefficients.
Args:
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_size: integer, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: integer, a unit of network width.
min_depth: integer, minimum number of filters.
bn_momentum: float. Momentum parameter for Batch Normalization layers.
activation: activation function.
blocks_args: list of dicts, parameters to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected layer at the top of
the network.
weights: one of `None` (random initialization), `"imagenet"` (pre-training
on ImageNet), or the path to the weights file to be loaded.
input_tensor: optional TF-Keras tensor (i.e. output of `layers.Input()`)
or numpy array to use as image input for the model.
input_shape: optional shape tuple, only to be specified if `include_top`
      is False. It should have exactly 3 input channels.
pooling: optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` means that the output of the model will be the 4D tensor output
of the last convolutional layer.
- "avg" means that global average pooling will be applied to the output
of the last convolutional layer, and thus the output of the model will
be a 2D tensor.
- `"max"` means that global max pooling will be applied.
classes: optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified.
classifier_activation: A string or callable. The activation function to
use on the `"top"` layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the `"top"` layer.
include_preprocessing: Boolean, whether to include the preprocessing layer
(`Rescaling`) at the bottom of the network. Defaults to `True`.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `"softmax"` or `None` when
using a pretrained top layer.
"""
if blocks_args == "default":
blocks_args = DEFAULT_BLOCKS_ARGS[model_name]
if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
raise ValueError(
"The `weights` argument should be either "
"`None` (random initialization), `imagenet` "
"(pre-training on ImageNet), "
"or the path to the weights file to be loaded."
f"Received: weights={weights}"
)
if weights == "imagenet" and include_top and classes != 1000:
raise ValueError(
"If using `weights` as `'imagenet'` with `include_top`"
" as true, `classes` should be 1000"
f"Received: classes={classes}"
)
# Determine proper input shape
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights,
)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == "channels_last" else 1
x = img_input
if include_preprocessing:
# Apply original V1 preprocessing for Bx variants
# if number of channels allows it
num_channels = input_shape[bn_axis - 1]
if model_name.split("-")[-1].startswith("b") and num_channels == 3:
x = layers.Rescaling(scale=1.0 / 255)(x)
x = layers.Normalization(
mean=[0.485, 0.456, 0.406],
variance=[0.229**2, 0.224**2, 0.225**2],
axis=bn_axis,
)(x)
else:
x = layers.Rescaling(scale=1.0 / 128.0, offset=-1)(x)
# Build stem
stem_filters = round_filters(
filters=blocks_args[0]["input_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = layers.Conv2D(
filters=stem_filters,
kernel_size=3,
strides=2,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
use_bias=False,
name="stem_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
name="stem_bn",
)(x)
x = layers.Activation(activation, name="stem_activation")(x)
# Build blocks
blocks_args = copy.deepcopy(blocks_args)
b = 0
blocks = float(sum(args["num_repeat"] for args in blocks_args))
for i, args in enumerate(blocks_args):
assert args["num_repeat"] > 0
# Update block input and output filters based on depth multiplier.
args["input_filters"] = round_filters(
filters=args["input_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
args["output_filters"] = round_filters(
filters=args["output_filters"],
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
# Determine which conv type to use:
block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop("conv_type")]
repeats = round_repeats(
repeats=args.pop("num_repeat"), depth_coefficient=depth_coefficient
)
for j in range(repeats):
# The first block needs to take care of stride and filter size
# increase.
if j > 0:
args["strides"] = 1
args["input_filters"] = args["output_filters"]
x = block(
activation=activation,
bn_momentum=bn_momentum,
survival_probability=drop_connect_rate * b / blocks,
name=f"block{i + 1}{chr(j + 97)}_",
**args,
)(x)
b += 1
# Build top
top_filters = round_filters(
filters=1280,
width_coefficient=width_coefficient,
min_depth=min_depth,
depth_divisor=depth_divisor,
)
x = layers.Conv2D(
filters=top_filters,
kernel_size=1,
strides=1,
kernel_initializer=CONV_KERNEL_INITIALIZER,
padding="same",
data_format=backend.image_data_format(),
use_bias=False,
name="top_conv",
)(x)
x = layers.BatchNormalization(
axis=bn_axis,
momentum=bn_momentum,
name="top_bn",
)(x)
x = layers.Activation(activation=activation, name="top_activation")(x)
if include_top:
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
if dropout_rate > 0:
x = layers.Dropout(dropout_rate, name="top_dropout")(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(
classes,
activation=classifier_activation,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
bias_initializer=tf.constant_initializer(0),
name="predictions",
)(x)
else:
if pooling == "avg":
x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
elif pooling == "max":
x = layers.GlobalMaxPooling2D(name="max_pool")(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name=model_name)
# Load weights.
if weights == "imagenet":
if include_top:
file_suffix = ".h5"
file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
else:
file_suffix = "_notop.h5"
file_hash = WEIGHTS_HASHES[model_name[-2:]][1]
file_name = model_name + file_suffix
weights_path = data_utils.get_file(
file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir="models",
file_hash=file_hash,
)
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2B0",
"keras.applications.EfficientNetV2B0",
)
def EfficientNetV2B0(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.0,
depth_coefficient=1.0,
default_size=224,
model_name="efficientnetv2-b0",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2B1",
"keras.applications.EfficientNetV2B1",
)
def EfficientNetV2B1(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.0,
depth_coefficient=1.1,
default_size=240,
model_name="efficientnetv2-b1",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2B2",
"keras.applications.EfficientNetV2B2",
)
def EfficientNetV2B2(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.1,
depth_coefficient=1.2,
default_size=260,
model_name="efficientnetv2-b2",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2B3",
"keras.applications.EfficientNetV2B3",
)
def EfficientNetV2B3(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.2,
depth_coefficient=1.4,
default_size=300,
model_name="efficientnetv2-b3",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2S",
"keras.applications.EfficientNetV2S",
)
def EfficientNetV2S(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.0,
depth_coefficient=1.0,
default_size=384,
model_name="efficientnetv2-s",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2M",
"keras.applications.EfficientNetV2M",
)
def EfficientNetV2M(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.0,
depth_coefficient=1.0,
default_size=480,
model_name="efficientnetv2-m",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
@keras_export(
"keras.applications.efficientnet_v2.EfficientNetV2L",
"keras.applications.EfficientNetV2L",
)
def EfficientNetV2L(
include_top=True,
weights="imagenet",
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
classifier_activation="softmax",
include_preprocessing=True,
):
return EfficientNetV2(
width_coefficient=1.0,
depth_coefficient=1.0,
default_size=480,
model_name="efficientnetv2-l",
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
input_shape=input_shape,
pooling=pooling,
classes=classes,
classifier_activation=classifier_activation,
include_preprocessing=include_preprocessing,
)
EfficientNetV2B0.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B0")
EfficientNetV2B1.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B1")
EfficientNetV2B2.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B2")
EfficientNetV2B3.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2B3")
EfficientNetV2S.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2S")
EfficientNetV2M.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2M")
EfficientNetV2L.__doc__ = BASE_DOCSTRING.format(name="EfficientNetV2L")
@keras_export("keras.applications.efficientnet_v2.preprocess_input")
def preprocess_input(x, data_format=None):
"""A placeholder method for backward compatibility.
The preprocessing logic has been included in the EfficientNetV2 model
implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and is only kept as a
    placeholder to align the API surface between the old and new versions of
    the model.
Args:
x: A floating point `numpy.array` or a `tf.Tensor`.
data_format: Optional data format of the image tensor/array. `None` means
the global setting `tf.keras.backend.image_data_format()` is used
(unless you changed it, it uses "channels_last").
Defaults to `None`.
Returns:
Unchanged `numpy.array` or `tf.Tensor`.
"""
return x
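# Note: `preprocess_input` is an identity function here; inputs pass through
# unchanged because rescaling/normalization is performed inside the model.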
@keras_export("keras.applications.efficientnet_v2.decode_predictions")
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| tf-keras/tf_keras/applications/efficientnet_v2.py/0 | {
"file_path": "tf-keras/tf_keras/applications/efficientnet_v2.py",
"repo_id": "tf-keras",
"token_count": 21354
} | 182 |