repo (string, len 2–99) | file (string, len 13–225) | code (string, len 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/tests/test_gmm_tf.py
|
import numpy as np
from alibi_detect.models.tensorflow.gmm import gmm_energy, gmm_params
N, K, D = 10, 5, 1
z = np.random.rand(N, D).astype(np.float32)
gamma = np.random.rand(N, K).astype(np.float32)
def test_gmm_params_energy():
phi, mu, cov, L, log_det_cov = gmm_params(z, gamma)
assert phi.numpy().shape[0] == K == log_det_cov.shape[0]
assert mu.numpy().shape == (K, D)
assert cov.numpy().shape == L.numpy().shape == (K, D, D)
    for i in range(cov.numpy().shape[0]):
        assert (np.diag(cov[i].numpy()) >= 0.).all()
        assert (np.diag(L[i].numpy()) >= 0.).all()
sample_energy, cov_diag = gmm_energy(z, phi, mu, cov, L, log_det_cov, return_mean=True)
assert sample_energy.numpy().shape == cov_diag.numpy().shape == ()
sample_energy, _ = gmm_energy(z, phi, mu, cov, L, log_det_cov, return_mean=False)
assert sample_energy.numpy().shape[0] == N
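# Hedged sketch (editor addition, not part of the test suite): the per-sample energies
# returned by `gmm_energy` with `return_mean=False` can be ranked to flag the most
# anomalous instances. The 95th-percentile threshold below is hypothetical.
def example_energy_ranking():  # pragma: no cover
    phi, mu, cov, L_, log_det_cov = gmm_params(z, gamma)
    energies, _ = gmm_energy(z, phi, mu, cov, L_, log_det_cov, return_mean=False)
    threshold = np.percentile(energies.numpy(), 95)
    return energies.numpy() > threshold  # boolean flags for suspected outliers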
| 893 | 37.869565 | 91 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/tests/test_autoencoder_tf.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from alibi_detect.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM, Seq2Seq, EncoderLSTM, DecoderLSTM
from alibi_detect.models.tensorflow.losses import loss_aegmm, loss_vaegmm
from alibi_detect.models.tensorflow.trainer import trainer
input_dim = 784
latent_dim = 50
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(128, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
ae = AE(encoder_net, decoder_net)
vae = VAE(encoder_net, decoder_net, latent_dim)
tests = [ae, vae]
@pytest.fixture
def tf_v_ae_mnist(request):
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(60000, input_dim)[:1000] # only train on 1000 instances
X = X.astype(np.float32)
X /= 255
# init model, predict with untrained model, train and predict with trained model
model = request.param
X_recon_untrained = model(X).numpy()
model_weights = model.weights[1].numpy().copy()
model.compile(optimizer='adam', loss='mse')
model.fit(X, X, epochs=5)
X_recon = model(X).numpy()
assert (model_weights != model.weights[1].numpy()).any()
assert np.sum((X - X_recon_untrained)**2) > np.sum((X - X_recon)**2)
@pytest.mark.parametrize('tf_v_ae_mnist', tests, indirect=True)
def test_ae_vae(tf_v_ae_mnist):
pass
n_gmm = 1
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm)
vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, n_gmm, latent_dim)
tests = [(aegmm, loss_aegmm), (vaegmm, loss_vaegmm)]
n_tests = len(tests)
@pytest.fixture
def tf_v_aegmm_mnist(request):
# load and preprocess MNIST data
(X_train, _), (X_test, _) = tf.keras.datasets.mnist.load_data()
X = X_train.reshape(60000, input_dim)[:1000] # only train on 1000 instances
X = X.astype(np.float32)
X /= 255
# init model, predict with untrained model, train and predict with trained model
model, loss_fn = tests[request.param]
X_recon_untrained, z, gamma = model(X)
assert X_recon_untrained.shape == X.shape
assert z.shape[1] == latent_dim + 2
assert gamma.shape[1] == n_gmm
model_weights = model.weights[1].numpy().copy()
trainer(model, loss_fn, X, epochs=5, verbose=False, batch_size=1000)
assert (model_weights != model.weights[1].numpy()).any()
@pytest.mark.parametrize('tf_v_aegmm_mnist', list(range(n_tests)), indirect=True)
def test_aegmm_vaegmm(tf_v_aegmm_mnist):
pass
seq_len = 10
tests_seq2seq = [(DecoderLSTM(latent_dim, 1, None), 1),
(DecoderLSTM(latent_dim, 2, None), 2)]
n_tests = len(tests_seq2seq)
@pytest.fixture
def tf_seq2seq_sine(request):
# create artificial sine time series
X = np.sin(np.linspace(-50, 50, 10000)).astype(np.float32)
# init model
decoder_net_, n_features = tests_seq2seq[request.param]
encoder_net = EncoderLSTM(latent_dim)
threshold_net = tf.keras.Sequential(
[
InputLayer(input_shape=(seq_len, latent_dim)),
Dense(10, activation=tf.nn.relu)
]
)
model = Seq2Seq(encoder_net, decoder_net_, threshold_net, n_features)
# reshape data
shape = (-1, seq_len, n_features)
y = np.roll(X, -1, axis=0).reshape(shape)
X = X.reshape(shape)
# predict with untrained model, train and predict with trained model
X_recon_untrained = model(X)
assert X_recon_untrained.shape == X.shape
model_weights = model.weights[1].numpy().copy()
trainer(model, tf.keras.losses.mse, X, y_train=y, epochs=2, verbose=False, batch_size=64)
X_recon = model(X).numpy()
assert (model_weights != model.weights[1].numpy()).any()
assert np.sum((X - X_recon_untrained)**2) > np.sum((X - X_recon)**2)
@pytest.mark.parametrize('tf_seq2seq_sine', list(range(n_tests)), indirect=True)
def test_seq2seq(tf_seq2seq_sine):
pass
| 4,397 | 31.338235 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/tests/test_losses_tf.py
|
import pytest
import numpy as np
import tensorflow as tf
from alibi_detect.models.tensorflow.losses import elbo, loss_adv_ae, loss_aegmm, loss_vaegmm, loss_distillation
N, K, D, F = 10, 5, 1, 3
x = np.random.rand(N, F).astype(np.float32)
y = np.random.rand(N, F).astype(np.float32)
sim = 1.
cov_diag = tf.ones(x.shape[1])
cov_full = tf.eye(x.shape[1])
def test_elbo():
assert elbo(x, y, cov_full=cov_full) == elbo(x, y, cov_diag=cov_diag) == elbo(x, y, sim=sim)
    assert elbo(x, y) == elbo(x, y, sim=1.)  # Passing no kwargs should lead to an identity covariance matrix
assert elbo(x, y, sim=.05).numpy() > 0
assert elbo(x, x, sim=.05).numpy() < 0
def test_elbo_error():
    # each invalid kwarg combination is checked in its own block, otherwise only the first call would ever run
    with pytest.raises(ValueError):
        elbo(x, y, cov_full=cov_full, cov_diag=cov_diag)
    with pytest.raises(ValueError):
        elbo(x, y, cov_full=cov_full, sim=sim)
    with pytest.raises(ValueError):
        elbo(x, y, cov_diag=cov_diag, sim=sim)
z = np.random.rand(N, D).astype(np.float32)
gamma = np.random.rand(N, K).astype(np.float32)
def test_loss_aegmm():
loss = loss_aegmm(x, y, z, gamma, w_energy=.1, w_cov_diag=.005)
loss_no_cov = loss_aegmm(x, y, z, gamma, w_energy=.1, w_cov_diag=0.)
loss_xx = loss_aegmm(x, x, z, gamma, w_energy=.1, w_cov_diag=0.)
assert loss > loss_no_cov
assert loss_no_cov > loss_xx
def test_loss_vaegmm():
loss = loss_vaegmm(x, y, z, gamma, w_recon=1e-7, w_energy=.1, w_cov_diag=.005)
loss_no_recon = loss_vaegmm(x, y, z, gamma, w_recon=0., w_energy=.1, w_cov_diag=.005)
loss_no_recon_cov = loss_vaegmm(x, y, z, gamma, w_recon=0., w_energy=.1, w_cov_diag=0.)
loss_xx = loss_vaegmm(x, x, z, gamma, w_recon=1e-7, w_energy=.1, w_cov_diag=.005)
assert loss > loss_no_recon
assert loss_no_recon > loss_no_recon_cov
assert loss > loss_xx
inputs = tf.keras.Input(shape=(x.shape[1],))
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
def test_loss_adv_ae():
loss = loss_adv_ae(x, y, model, w_model=1., w_recon=0.)
loss_with_recon = loss_adv_ae(x, y, model, w_model=1., w_recon=1.)
assert loss > 0.
assert loss_with_recon > loss
layers = [tf.keras.layers.InputLayer(input_shape=(x.shape[1],)),
tf.keras.layers.Dense(5, activation=tf.nn.softmax)]
distilled_model = tf.keras.Sequential(layers)
def test_loss_adv_md():
y_true = distilled_model(x).numpy()
loss_kld = loss_distillation(x, y_true, model, loss_type='kld')
loss_xent = loss_distillation(x, y_true, model, loss_type='xent')
assert loss_kld > 0.
assert loss_xent > 0.
| 2,563 | 34.123288 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/models/tensorflow/tests/test_trainer_tf.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy
from alibi_detect.models.tensorflow.trainer import trainer
from alibi_detect.utils.tensorflow.data import TFDataset
N, F = 100, 2
x = np.random.rand(N, F).astype(np.float32)
y = np.concatenate([np.zeros((N, 1)), np.ones((N, 1))], axis=1).astype(np.float32)
inputs = tf.keras.Input(shape=(x.shape[1],))
outputs = tf.keras.layers.Dense(F, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
check_model_weights = model.weights[0].numpy()
def preprocess_fn(x: np.ndarray) -> np.ndarray:
return x
X_train = [x]
y_train = [None, y]
dataset = [partial(TFDataset, batch_size=10), None]
loss_fn_kwargs = [None, {'from_logits': False}]
preprocess = [preprocess_fn, None]
verbose = [False, True]
tests = list(product(X_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose))
n_tests = len(tests)
@pytest.fixture
def trainer_params(request):
x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose = tests[request.param]
return x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose
@pytest.mark.parametrize('trainer_params', list(range(n_tests)), indirect=True)
def test_trainer(trainer_params):
x_train, y_train, dataset, loss_fn_kwargs, preprocess, verbose = trainer_params
if dataset is not None and y_train is not None:
ds = dataset(x_train, y_train)
else:
ds = None
trainer(model, categorical_crossentropy, x_train, y_train=y_train, dataset=ds,
loss_fn_kwargs=loss_fn_kwargs, preprocess_fn=preprocess, epochs=2, verbose=verbose)
assert (model.weights[0].numpy() != check_model_weights).any()
| 1,795 | 34.215686 | 95 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/loading.py
|
import logging
import os
from functools import partial
from importlib import import_module
from pathlib import Path
from typing import Any, Callable, Optional, Union, Type, TYPE_CHECKING
import dill
import numpy as np
import toml
from transformers import AutoTokenizer
from alibi_detect.saving.registry import registry
from alibi_detect.saving._tensorflow import load_detector_legacy, load_embedding_tf, load_kernel_config_tf, \
load_model_tf, load_optimizer_tf, prep_model_and_emb_tf, get_tf_dtype
from alibi_detect.saving._pytorch import load_embedding_pt, load_kernel_config_pt, load_model_pt, \
load_optimizer_pt, prep_model_and_emb_pt, get_pt_dtype
from alibi_detect.saving._keops import load_kernel_config_ke
from alibi_detect.saving._sklearn import load_model_sk
from alibi_detect.saving.validate import validate_config
from alibi_detect.base import Detector, ConfigurableDetector, StatefulDetectorOnline
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch, Framework
from alibi_detect.saving.schemas import supported_models_tf, supported_models_torch
from alibi_detect.utils.missing_optional_dependency import import_optional
get_device = import_optional('alibi_detect.utils.pytorch.misc', names=['get_device'])
if TYPE_CHECKING:
import tensorflow as tf
import torch
STATE_PATH = 'state/' # directory (relative to detector directory) where state is saved (and loaded from)
logger = logging.getLogger(__name__)
# Fields to resolve in resolve_config ("resolve" meaning either load local artefact or resolve @registry, conversion to
# tuple, np.ndarray and np.dtype are dealt with separately).
# Note: For fields consisting of nested dicts, they must be listed in order from deepest to shallowest, so that the
# deepest fields are resolved first. e.g. 'preprocess_fn.src' must be resolved before 'preprocess_fn'.
FIELDS_TO_RESOLVE = [
['preprocess_fn', 'src'],
['preprocess_fn', 'model'],
['preprocess_fn', 'embedding'],
['preprocess_fn', 'tokenizer'],
['preprocess_fn', 'preprocess_batch_fn'],
['preprocess_fn'],
['x_ref'],
['c_ref'],
['model'],
['optimizer'],
['reg_loss_fn'],
['dataset'],
['kernel', 'src'],
['kernel', 'proj'],
['kernel', 'init_sigma_fn'],
['kernel', 'kernel_a', 'src'],
['kernel', 'kernel_a', 'init_sigma_fn'],
['kernel', 'kernel_b', 'src'],
['kernel', 'kernel_b', 'init_sigma_fn'],
['kernel'],
['x_kernel', 'src'],
['x_kernel', 'init_sigma_fn'],
['x_kernel'],
['c_kernel', 'src'],
['c_kernel', 'init_sigma_fn'],
['c_kernel'],
['initial_diffs'],
['tokenizer']
]
# Fields to convert from str to dtype
FIELDS_TO_DTYPE = [
['preprocess_fn', 'dtype']
]
def load_detector(filepath: Union[str, os.PathLike], **kwargs) -> Union[Detector, ConfigurableDetector]:
"""
Load outlier, drift or adversarial detector.
Parameters
----------
filepath
Load directory.
Returns
-------
Loaded outlier or adversarial detector object.
"""
filepath = Path(filepath)
# If reference is a 'config.toml' itself, pass to new load function
if filepath.name == 'config.toml':
return _load_detector_config(filepath)
# Otherwise, if a directory, look for meta.dill, meta.pickle or config.toml inside it
elif filepath.is_dir():
files = [str(f.name) for f in filepath.iterdir() if f.is_file()]
if 'config.toml' in files:
return _load_detector_config(filepath.joinpath('config.toml'))
elif 'meta.dill' in files:
return load_detector_legacy(filepath, '.dill', **kwargs)
elif 'meta.pickle' in files:
return load_detector_legacy(filepath, '.pickle', **kwargs)
else:
            raise ValueError(f'None of meta.dill, meta.pickle or config.toml exist in {filepath}.')
# No other file types are accepted, so if not dir raise error
else:
raise ValueError("load_detector accepts only a filepath to a directory, or a config.toml file.")
# TODO - will eventually become load_detector
def _load_detector_config(filepath: Union[str, os.PathLike]) -> ConfigurableDetector:
"""
Loads a drift detector specified in a detector config dict. Validation is performed with pydantic.
Parameters
----------
filepath
Filepath to the `config.toml` file.
Returns
-------
The instantiated detector.
"""
# Load toml if needed
if isinstance(filepath, (str, os.PathLike)):
config_file = Path(filepath)
config_dir = config_file.parent
cfg = read_config(config_file)
else:
raise ValueError("`filepath` should point to a directory containing a 'config.toml' file.")
# Resolve and validate config
cfg = validate_config(cfg)
logger.info('Validated unresolved config.')
cfg = resolve_config(cfg, config_dir=config_dir)
cfg = validate_config(cfg, resolved=True)
logger.info('Validated resolved config.')
# Init detector from config
logger.info('Instantiating detector.')
detector = _init_detector(cfg)
# Load state if it exists (and detector supports it)
# TODO - this will be removed in follow-up offline state PR, as loading to be moved to __init__ (w/ state_dir kwarg)
if isinstance(detector, StatefulDetectorOnline):
state_dir = config_dir.joinpath(STATE_PATH)
if state_dir.is_dir():
detector.load_state(state_dir)
logger.info('Finished loading detector.')
return detector
def _init_detector(cfg: dict) -> ConfigurableDetector:
"""
Instantiates a detector from a fully resolved config dictionary.
Parameters
----------
cfg
The detector's resolved config dictionary.
Returns
-------
The instantiated detector.
"""
detector_name = cfg.pop('name')
# Instantiate the detector
klass = getattr(import_module('alibi_detect.cd'), detector_name)
detector = klass.from_config(cfg)
logger.info('Instantiated drift detector {}'.format(detector_name))
return detector
def _load_kernel_config(cfg: dict, backend: str = Framework.TENSORFLOW) -> Callable:
"""
Loads a kernel from a kernel config dict.
Parameters
----------
cfg
A kernel config dict. (see pydantic schema's).
backend
The backend.
Returns
-------
The kernel.
"""
if backend == Framework.TENSORFLOW:
kernel = load_kernel_config_tf(cfg)
elif backend == Framework.PYTORCH:
kernel = load_kernel_config_pt(cfg)
else: # backend=='keops'
kernel = load_kernel_config_ke(cfg)
return kernel
def _load_preprocess_config(cfg: dict) -> Optional[Callable]:
"""
This function builds a preprocess_fn from the preprocess dict in a detector config dict. The dict format is
expected to match that generated by serialize_preprocess in alibi_detect.utils.saving (also see pydantic schema).
The model, tokenizer and preprocess_batch_fn are expected to be already resolved.
Parameters
----------
cfg
A preprocess_fn config dict. (see pydantic schemas).
Returns
-------
The preprocess_fn function.
"""
preprocess_fn = cfg.pop('src')
if callable(preprocess_fn):
if preprocess_fn.__name__ == 'preprocess_drift':
# If preprocess_drift function, kwargs is preprocess cfg minus 'src' and 'kwargs'
cfg.pop('kwargs')
kwargs = cfg.copy()
# Final processing of model (and/or embedding)
model = kwargs['model']
emb = kwargs.pop('embedding') # embedding passed to preprocess_drift as `model` therefore remove
# Backend specifics
if has_tensorflow and isinstance(model, supported_models_tf):
model = prep_model_and_emb_tf(model, emb)
elif has_pytorch and isinstance(model, supported_models_torch):
model = prep_model_and_emb_pt(model, emb)
elif model is None:
model = emb
if model is None:
raise ValueError("A 'model' and/or `embedding` must be specified when "
"preprocess_fn='preprocess_drift'")
kwargs.update({'model': model})
            # Set `device` if a PyTorch model, otherwise remove from kwargs
if isinstance(model, supported_models_torch):
device = get_device(cfg['device'])
model = model.to(device).eval()
kwargs.update({'device': device})
kwargs.update({'model': model})
else:
kwargs.pop('device')
else:
kwargs = cfg['kwargs'] # If generic callable, kwargs is cfg['kwargs']
else:
logger.warning('Unable to process preprocess_fn. No preprocessing function is defined.')
return None
if kwargs == {}:
return preprocess_fn
else:
return partial(preprocess_fn, **kwargs)
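# Hedged sketch (editor addition): a minimal resolved preprocess config for a generic
# callable. `my_scaler` and its `factor` kwarg are hypothetical; because `src` is not
# `preprocess_drift`, only the 'kwargs' entry is forwarded to it.
def _example_preprocess_config():  # pragma: no cover
    def my_scaler(x, factor=1.0):
        return x * factor
    cfg = {'src': my_scaler, 'kwargs': {'factor': 0.5}}
    return _load_preprocess_config(cfg)  # -> functools.partial(my_scaler, factor=0.5)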
def _load_model_config(cfg: dict) -> Callable:
"""
Loads supported models from a model config dict.
Parameters
----------
cfg
Model config dict. (see pydantic model schemas).
Returns
-------
The loaded model.
"""
# Load model
flavour = cfg['flavour']
src = cfg['src']
custom_obj = cfg['custom_objects']
layer = cfg['layer']
src = Path(src)
if not src.is_dir():
raise FileNotFoundError("The `src` field is not a recognised directory. It should be a directory containing "
"a compatible model.")
if flavour == Framework.TENSORFLOW:
model = load_model_tf(src, custom_objects=custom_obj, layer=layer)
elif flavour == Framework.PYTORCH:
model = load_model_pt(src, layer=layer)
elif flavour == Framework.SKLEARN:
model = load_model_sk(src)
return model
def _load_embedding_config(cfg: dict) -> Callable: # TODO: Could type return more tightly
"""
Load a pre-trained text embedding from an embedding config dict.
Parameters
----------
cfg
An embedding config dict. (see the pydantic schemas).
Returns
-------
The loaded embedding.
"""
src = cfg['src']
layers = cfg['layers']
typ = cfg['type']
flavour = cfg['flavour']
if flavour == Framework.TENSORFLOW:
emb = load_embedding_tf(src, embedding_type=typ, layers=layers)
else:
emb = load_embedding_pt(src, embedding_type=typ, layers=layers)
return emb
def _load_tokenizer_config(cfg: dict) -> AutoTokenizer:
"""
Loads a text tokenizer from a tokenizer config dict.
Parameters
----------
cfg
A tokenizer config dict. (see the pydantic schemas).
Returns
-------
The loaded tokenizer.
"""
src = cfg['src']
kwargs = cfg['kwargs']
src = Path(src)
tokenizer = AutoTokenizer.from_pretrained(src, **kwargs)
return tokenizer
def _load_optimizer_config(cfg: dict, backend: str) \
-> Union['tf.keras.optimizers.Optimizer', Type['tf.keras.optimizers.Optimizer'],
Type['torch.optim.Optimizer']]:
"""
    Loads an optimizer from an optimizer config dict.
Parameters
----------
cfg
The optimizer config dict.
backend
The backend.
Returns
-------
The loaded optimizer.
"""
if backend == Framework.TENSORFLOW:
return load_optimizer_tf(cfg)
else:
return load_optimizer_pt(cfg)
def _get_nested_value(dic: dict, keys: list) -> Any:
"""
Get a value from a nested dictionary.
Parameters
----------
dic
The dictionary.
keys
List of keys to "walk" to nested value.
For example, to extract the value `dic['key1']['key2']['key3']`, set `keys = ['key1', 'key2', 'key3']`.
Returns
-------
The nested value specified by `keys`.
"""
for key in keys:
try:
dic = dic[key]
except (TypeError, KeyError):
return None
return dic
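# Hedged sketch (editor addition): walking a nested dict with a key list, as done for
# the entries of FIELDS_TO_RESOLVE; missing keys simply yield None.
def _example_get_nested_value():  # pragma: no cover
    dic = {'preprocess_fn': {'src': 'preprocess.dill'}}
    assert _get_nested_value(dic, ['preprocess_fn', 'src']) == 'preprocess.dill'
    assert _get_nested_value(dic, ['preprocess_fn', 'model']) is None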
def _set_nested_value(dic: dict, keys: list, value: Any):
"""
Set a value in a nested dictionary.
Parameters
----------
dic
The dictionary.
keys
List of keys to "walk" to nested value.
For example, to set the value `dic['key1']['key2']['key3']`, set `keys = ['key1', 'key2', 'key3']`.
value
The value to set.
"""
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
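# Hedged sketch (editor addition): setting a nested value; intermediate dicts are
# created on the fly via `setdefault`.
def _example_set_nested_value():  # pragma: no cover
    dic = {}
    _set_nested_value(dic, ['preprocess_fn', 'model'], 'resolved_model')
    assert dic == {'preprocess_fn': {'model': 'resolved_model'}}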
def _set_dtypes(cfg: dict):
"""
Converts str's in the config dictionary to dtypes e.g. 'np.float32' is converted to np.float32.
Parameters
----------
cfg
The config dictionary.
"""
# TODO - we could explore a custom pydantic generic type for this (similar to how we handle NDArray)
for key in FIELDS_TO_DTYPE:
val = _get_nested_value(cfg, key)
if val is not None:
lib, dtype, *_ = val.split('.')
# val[0] = np if val[0] == 'np' else tf if val[0] == 'tf' else torch if val[0] == 'torch' else None
# TODO - add above back in once optional deps are handled properly
            if lib not in ('np', 'tf', 'torch'):
                raise ValueError("`dtype` must be in format np.<dtype>, tf.<dtype> or torch.<dtype>.")
{
'tf': lambda: _set_nested_value(cfg, key, get_tf_dtype(dtype)),
'torch': lambda: _set_nested_value(cfg, key, get_pt_dtype(dtype)),
'np': lambda: _set_nested_value(cfg, key, getattr(np, dtype)),
}[lib]()
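# Hedged sketch (editor addition): the string 'np.float32' in FIELDS_TO_DTYPE locations
# is converted in place to the actual numpy dtype.
def _example_set_dtypes():  # pragma: no cover
    cfg = {'preprocess_fn': {'dtype': 'np.float32'}}
    _set_dtypes(cfg)
    assert cfg['preprocess_fn']['dtype'] is np.float32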
def read_config(filepath: Union[os.PathLike, str]) -> dict:
"""
This function reads a detector toml config file and returns a dict specifying the detector.
Parameters
----------
filepath
The filepath to the config.toml file.
Returns
-------
Parsed toml dictionary.
"""
filepath = Path(filepath)
cfg = dict(toml.load(filepath)) # toml.load types return as MutableMapping, force to dict
logger.info('Loaded config file from {}'.format(str(filepath)))
    # This is necessary as there is no None/null in the toml spec, and missing values are instead set to the
    # defaults defined in the pydantic models. However, we sometimes need to explicitly specify None.
cfg = _replace(cfg, "None", None)
return cfg
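# Hedged sketch (editor addition): reading back a minimal, hypothetical config.toml.
# Since toml has no null value, the string "None" is mapped to a Python None.
def _example_read_config(tmp_dir: str = '.'):  # pragma: no cover
    path = Path(tmp_dir).joinpath('config.toml')
    path.write_text('name = "MMDDrift"\nx_ref = "x_ref.npy"\nkernel = "None"\n')
    cfg = read_config(path)
    assert cfg['kernel'] is None
    return cfg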
def resolve_config(cfg: dict, config_dir: Optional[Path]) -> dict:
"""
Resolves artefacts in a config dict. For example x_ref='x_ref.npy' is resolved by loading the np.ndarray from
the .npy file. For a list of fields that are resolved, see
https://docs.seldon.io/projects/alibi-detect/en/stable/overview/config_file.html.
Parameters
----------
cfg
The unresolved config dict.
config_dir
Filepath to directory the `config.toml` is located in. Only required if different from the
runtime directory, and artefacts are specified with filepaths relative to the config.toml file.
Returns
-------
The resolved config dict.
"""
# Convert selected str's to required dtype's (all other type coercion is performed by pydantic)
_set_dtypes(cfg)
# Before main resolution, update filepaths relative to config file
if config_dir is not None:
_prepend_cfg_filepaths(cfg, config_dir)
# Resolve filepaths (load files) and resolve function/object registries
for key in FIELDS_TO_RESOLVE:
logger.info('Resolving config field: {}.'.format(key))
src = _get_nested_value(cfg, key)
obj = None
# Resolve string references to registered objects and filepaths
if isinstance(src, str):
# Resolve registry references
if src.startswith('@'):
src = src[1:]
if src in registry.get_all():
obj = registry.get(src)
else:
raise ValueError(
f"Can't find {src} in the custom function registry, It may be misspelled or missing "
"if you have incorrect optional dependencies installed. Make sure the loading environment"
" is the same as the saving environment. For more information, check the Installation "
"documentation at "
"https://docs.seldon.io/projects/alibi-detect/en/stable/overview/getting_started.html."
)
logger.info('Successfully resolved registry entry {}'.format(src))
# Resolve dill or numpy file references
elif Path(src).is_file():
if Path(src).suffix == '.dill':
obj = dill.load(open(src, 'rb'))
if Path(src).suffix == '.npy':
obj = np.load(src)
# Resolve artefact dicts
elif isinstance(src, dict):
backend = cfg.get('backend', Framework.TENSORFLOW)
if key[-1] in ('model', 'proj'):
obj = _load_model_config(src)
elif key[-1] == 'embedding':
obj = _load_embedding_config(src)
elif key[-1] == 'tokenizer':
obj = _load_tokenizer_config(src)
elif key[-1] == 'optimizer':
obj = _load_optimizer_config(src, backend)
elif key[-1] == 'preprocess_fn':
obj = _load_preprocess_config(src)
elif key[-1] in ('kernel', 'x_kernel', 'c_kernel'):
obj = _load_kernel_config(src, backend)
# Put the resolved function into the cfg dict
if obj is not None:
_set_nested_value(cfg, key, obj)
return cfg
def _replace(cfg: dict, orig: Optional[str], new: Optional[str]) -> dict:
"""
Recursively traverse a nested dictionary and replace values.
Parameters
----------
cfg
The dictionary.
orig
Original value to search.
new
Value to replace original with.
Returns
-------
The updated dictionary.
"""
for k, v in cfg.items():
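        # `v == orig` can return a non-bool (e.g. an np.ndarray when v is an array); only replace
        # when the comparison evaluates to a plain Python bool that is True.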
if isinstance(v == orig, bool) and v == orig:
cfg[k] = new
elif isinstance(v, dict):
_replace(v, orig, new)
return cfg
def _prepend_cfg_filepaths(cfg: dict, prepend_dir: Path):
"""
Recursively traverse through a nested dictionary and prepend a directory to any filepaths.
Parameters
----------
cfg
The dictionary.
prepend_dir
The filepath to prepend to any filepaths in the dictionary.
Returns
-------
The updated config dictionary.
"""
for k, v in cfg.items():
if isinstance(v, str):
v = prepend_dir.joinpath(Path(v))
if v.is_file() or v.is_dir(): # Update if prepending config_dir made config value a real filepath
cfg[k] = str(v)
elif isinstance(v, dict):
_prepend_cfg_filepaths(v, prepend_dir)
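# Hedged sketch (editor addition): only string values that become existing paths once the
# config directory is prepended are rewritten; registry references ('@...') and other strings
# are left untouched. 'detector_dir' is a hypothetical directory.
def _example_prepend_cfg_filepaths():  # pragma: no cover
    cfg = {'x_ref': 'x_ref.npy', 'kernel': {'src': '@utils.tensorflow.kernels.GaussianRBF'}}
    _prepend_cfg_filepaths(cfg, Path('detector_dir'))
    return cfg  # cfg['x_ref'] is rewritten only if 'detector_dir/x_ref.npy' exists on disk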
| 18,954 | 31.624785 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/schemas.py
|
"""
Pydantic models used by :func:`~alibi_detect.utils.validate.validate_config` to validate configuration dictionaries.
The `resolved` kwarg of :func:`~alibi_detect.utils.validate.validate_config` determines whether the *unresolved* or
*resolved* pydantic models are used:
- The *unresolved* models expect any artefacts specified within it to not yet have been resolved.
The artefacts are still string references to local filepaths or registries (e.g. `x_ref = 'x_ref.npy'`).
- The *resolved* models expect all artefacts to have been resolved into runtime objects. For example, `x_ref`
should have been resolved into an `np.ndarray`.
.. note::
For detector pydantic models, the fields match the corresponding detector's args/kwargs. Refer to the
detector's api docs for a full description of each arg/kwarg.
"""
from typing import Callable, Dict, List, Optional, Type, Union, Any
import numpy as np
from pydantic import BaseModel, validator
from alibi_detect.utils.frameworks import Framework
from alibi_detect.utils._types import (Literal, supported_models_all, supported_models_tf,
supported_models_sklearn, supported_models_torch, supported_optimizers_tf,
supported_optimizers_torch, supported_optimizers_all)
from alibi_detect.saving.validators import NDArray, validate_framework, coerce_int2list, coerce_2_tensor
class SupportedModel:
"""
Pydantic custom type to check the model is one of the supported types (conditional on what optional deps
are installed).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate_model
@classmethod
def validate_model(cls, model: Any, values: dict) -> Any:
backend = values['backend']
err_msg = f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model, "\
f"or {backend} is not installed. Model: {model}"
if backend == Framework.TENSORFLOW and not isinstance(model, supported_models_tf):
raise TypeError(err_msg)
elif backend == Framework.PYTORCH and not isinstance(model, supported_models_torch):
raise TypeError(err_msg)
elif backend == Framework.SKLEARN and not isinstance(model, supported_models_sklearn):
raise TypeError(f"`backend={backend}` but the `model` doesn't appear to be a {backend} supported model.")
elif isinstance(model, supported_models_all): # If model supported and no `backend` incompatibility
return model
else: # Catch any other unexpected issues
raise TypeError('The model is not recognised as a supported type.')
class SupportedOptimizer:
"""
Pydantic custom type to check the optimizer is one of the supported types (conditional on what optional deps
are installed).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate_optimizer
@classmethod
def validate_optimizer(cls, optimizer: Any, values: dict) -> Any:
backend = values['backend']
err_msg = f"`backend={backend}` but the `optimizer` doesn't appear to be a {backend} supported optimizer, "\
f"or {backend} is not installed. Optimizer: {optimizer}"
if backend == Framework.TENSORFLOW and not isinstance(optimizer, supported_optimizers_tf):
raise TypeError(err_msg)
elif backend == Framework.PYTORCH and not isinstance(optimizer, supported_optimizers_torch):
raise TypeError(err_msg)
elif isinstance(optimizer, supported_optimizers_all): # If optimizer supported and no `backend` incompatibility
return optimizer
else: # Catch any other unexpected issues
            raise TypeError('The optimizer is not recognised as a supported type.')
# TODO - We could add validator to check `model` and `embedding` type when chained together. Leave this until refactor
# of preprocess_drift.
# Custom BaseModel so that we can set default config
class CustomBaseModel(BaseModel):
"""
Base pydantic model schema. The default pydantic settings are set here.
"""
class Config:
arbitrary_types_allowed = True # since we have np.ndarray's etc
extra = 'forbid' # Forbid extra fields so that we catch misspelled fields
# Custom BaseModel with additional kwarg's allowed
class CustomBaseModelWithKwargs(BaseModel):
"""
Base pydantic model schema. The default pydantic settings are set here.
"""
class Config:
arbitrary_types_allowed = True # since we have np.ndarray's etc
extra = 'allow' # Allow extra fields
class MetaData(CustomBaseModel):
version: str
version_warning: bool = False
class DetectorConfig(CustomBaseModel):
"""
Base detector config schema. Only fields universal across all detectors are defined here.
"""
name: str
"Name of the detector e.g. `MMDDrift`."
meta: Optional[MetaData] = None
"Config metadata. Should not be edited."
# Note: Although not all detectors have a backend, we define in base class as `backend` also determines
# whether tf or torch models used for preprocess_fn.
# backend validation (only applied if the detector config has a `backend` field
_validate_backend = validator('backend', allow_reuse=True, pre=False, check_fields=False)(validate_framework)
class ModelConfig(CustomBaseModel):
"""
Unresolved schema for (ML) models. Note that the model "backend" e.g. 'tensorflow', 'pytorch', 'sklearn', is set
by `backend` in :class:`DetectorConfig`.
Examples
--------
A TensorFlow classifier model stored in the `model/` directory, with the softmax layer extracted:
.. code-block :: toml
[model]
flavour = "tensorflow"
src = "model/"
layer = -1
"""
flavour: Literal['tensorflow', 'pytorch', 'sklearn']
"""
Whether the model is a `tensorflow`, `pytorch` or `sklearn` model. XGBoost models following the scikit-learn API
are also included under `sklearn`.
"""
src: str
"""
Filepath to directory storing the model (relative to the `config.toml` file, or absolute). At present,
TensorFlow models must be stored in
`H5 format <https://www.tensorflow.org/guide/keras/save_and_serialize#keras_h5_format>`_.
"""
custom_objects: Optional[dict] = None
"""
Dictionary of custom objects. Passed to the tensorflow
`load_model <https://www.tensorflow.org/api_docs/python/tf/keras/models/load_model>`_ function. This can be
used to pass custom registered functions and classes to a model.
"""
layer: Optional[int] = None
"""
Optional index of hidden layer to extract. If not `None`, a
:class:`~alibi_detect.cd.tensorflow.preprocess.HiddenOutput` or
:class:`~alibi_detect.cd.pytorch.preprocess.HiddenOutput` model is returned (dependent on `flavour`).
Only applies to 'tensorflow' and 'pytorch' models.
"""
# Validators
_validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
class EmbeddingConfig(CustomBaseModel):
"""
Unresolved schema for text embedding models. Currently, only pre-trained
`HuggingFace transformer <https://github.com/huggingface/transformers>`_ models are supported.
Examples
--------
Using the hidden states at the output of each layer of a TensorFlow
`BERT base <https://huggingface.co/bert-base-cased>`_ model as text embeddings:
.. code-block :: toml
[embedding]
flavour = "tensorflow"
src = "bert-base-cased"
type = "hidden_state"
layers = [-1, -2, -3, -4, -5, -6, -7, -8]
"""
flavour: Literal['tensorflow', 'pytorch'] = 'tensorflow'
"""
Whether the embedding model is a `tensorflow` or `pytorch` model.
"""
type: Literal['pooler_output', 'last_hidden_state', 'hidden_state', 'hidden_state_cls']
"""
The type of embedding to be loaded. See `embedding_type` in
:class:`~alibi_detect.models.tensorflow.embedding.TransformerEmbedding`.
"""
layers: Optional[List[int]] = None
"List specifying the hidden layers to be used to extract the embedding."
# TODO - add check conditional on embedding type (see docstring in above)
src: str
"""
Model name e.g. `"bert-base-cased"`, or a filepath to directory storing the model to extract embeddings from
(relative to the `config.toml` file, or absolute).
"""
# Validators
_validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
class TokenizerConfig(CustomBaseModel):
"""
Unresolved schema for text tokenizers. Currently, only pre-trained
`HuggingFace tokenizer <https://github.com/huggingface/tokenizers>`_ models are supported.
Examples
--------
`BERT base <https://huggingface.co/bert-base-cased>`_ tokenizer with additional keyword arguments passed to the
HuggingFace :meth:`~transformers.AutoTokenizer.from_pretrained` method:
.. code-block :: toml
[tokenizer]
src = "bert-base-cased"
[tokenizer.kwargs]
use_fast = false
force_download = true
"""
src: str
"""
Model name e.g. `"bert-base-cased"`, or a filepath to directory storing the tokenizer model (relative to the
    `config.toml` file, or absolute). Passed to :meth:`transformers.AutoTokenizer.from_pretrained`.
"""
kwargs: Optional[dict] = {}
"Dictionary of keyword arguments to pass to :meth:`transformers.AutoTokenizer.from_pretrained`."
class PreprocessConfig(CustomBaseModel):
"""
Unresolved schema for drift detector preprocess functions, to be passed to a detector's `preprocess_fn` kwarg.
Once loaded, the function is wrapped in a :func:`~functools.partial`, to be evaluated within the detector.
If `src` specifies a generic Python function, the dictionary specified by `kwargs` is passed to it. Otherwise,
if `src` specifies :func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift`
(`src='@cd.tensorflow.preprocess.preprocess_drift'`), all fields (except `kwargs`) are passed to it.
Examples
--------
Preprocessor with a `model`, text `embedding` and `tokenizer` passed to
:func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift`:
.. code-block :: toml
[preprocess_fn]
src = "@cd.tensorflow.preprocess.preprocess_drift"
batch_size = 32
max_len = 100
tokenizer.src = "tokenizer/" # TokenizerConfig
[preprocess_fn.model]
# ModelConfig
src = "model/"
[preprocess_fn.embedding]
# EmbeddingConfig
src = "embedding/"
type = "hidden_state"
layers = [-1, -2, -3, -4, -5, -6, -7, -8]
A serialized Python function with keyword arguments passed to it:
.. code-block :: toml
[preprocess_fn]
src = 'myfunction.dill'
kwargs = {'kwarg1'=0.7, 'kwarg2'=true}
"""
src: str = "@cd.tensorflow.preprocess.preprocess_drift"
"""
The preprocessing function. A string referencing a filepath to a serialized function in `dill` format, or an
object registry reference.
"""
# Below kwargs are only passed if src == @preprocess_drift
model: Optional[Union[str, ModelConfig]] = None
"""
Model used for preprocessing. Either an object registry reference, or a
:class:`~alibi_detect.utils.schemas.ModelConfig`.
"""
# TODO - make model required field when src is preprocess_drift
embedding: Optional[Union[str, EmbeddingConfig]] = None
"""
A text embedding model. Either a string referencing a HuggingFace transformer model name, an object registry
reference, or a :class:`~alibi_detect.utils.schemas.EmbeddingConfig`. If `model=None`, the `embedding` is passed to
:func:`~alibi_detect.cd.tensorflow.preprocess.preprocess_drift` as `model`. Otherwise, the `model` is chained to
the output of the `embedding` as an additional preprocessing step.
"""
tokenizer: Optional[Union[str, TokenizerConfig]] = None
"""
Optional tokenizer for text drift. Either a string referencing a HuggingFace tokenizer model name, or a
:class:`~alibi_detect.utils.schemas.TokenizerConfig`.
"""
device: Optional[Literal['cpu', 'cuda']] = None
"""
Device type used. The default `None` tries to use the GPU and falls back on CPU if needed. Only relevant if
`src='@cd.torch.preprocess.preprocess_drift'`
"""
preprocess_batch_fn: Optional[str] = None
"""
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be processed
by the `model`.
"""
max_len: Optional[int] = None
"Optional max token length for text drift."
batch_size: Optional[int] = int(1e10)
"Batch size used during prediction."
dtype: str = 'np.float32'
"Model output type, e.g. `'tf.float32'`"
# Additional kwargs
kwargs: dict = {}
"""
Dictionary of keyword arguments to be passed to the function specified by `src`. Only used if `src` specifies a
generic Python function.
"""
class KernelConfig(CustomBaseModelWithKwargs):
"""
Unresolved schema for kernels, to be passed to a detector's `kernel` kwarg.
If `src` specifies a :class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel, the `sigma`, `trainable` and
`init_sigma_fn` fields are passed to it. Otherwise, all fields except `src` are passed as kwargs.
Examples
--------
A :class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel, with three different bandwidths:
.. code-block :: toml
[kernel]
src = "@alibi_detect.utils.tensorflow.GaussianRBF"
trainable = false
sigma = [0.1, 0.2, 0.3]
A serialized kernel with keyword arguments passed:
.. code-block :: toml
[kernel]
src = "mykernel.dill"
sigma = 0.42
custom_setting = "xyz"
"""
src: str
"A string referencing a filepath to a serialized kernel in `.dill` format, or an object registry reference."
# Below kwargs are only passed if kernel == @GaussianRBF
flavour: Literal['tensorflow', 'pytorch', 'keops']
"""
    Whether the kernel is a `tensorflow`, `pytorch` or `keops` kernel.
"""
sigma: Optional[Union[float, List[float]]] = None
"""
Bandwidth used for the kernel. Needn’t be specified if being inferred or trained. Can pass multiple values to eval
kernel with and then average.
"""
trainable: bool = False
"Whether or not to track gradients w.r.t. sigma to allow it to be trained."
init_sigma_fn: Optional[str] = None
"""
Function used to compute the bandwidth `sigma`. Used when `sigma` is to be inferred. The function's signature
should match :py:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`. If `None`, it is set to
:func:`~alibi_detect.utils.tensorflow.kernels.sigma_median`.
"""
# Validators
_validate_flavour = validator('flavour', allow_reuse=True, pre=False)(validate_framework)
_coerce_sigma2tensor = validator('sigma', allow_reuse=True, pre=False)(coerce_2_tensor)
class DeepKernelConfig(CustomBaseModel):
"""
Unresolved schema for :class:`~alibi_detect.utils.tensorflow.kernels.DeepKernel`'s.
Examples
--------
A :class:`~alibi_detect.utils.tensorflow.DeepKernel`, with a trainable
:class:`~alibi_detect.utils.tensorflow.GaussianRBF` kernel applied to the projected inputs and a custom
serialized kernel applied to the raw inputs:
.. code-block :: toml
[kernel]
eps = 0.01
[kernel.kernel_a]
src = "@utils.tensorflow.kernels.GaussianRBF"
trainable = true
[kernel.kernel_b]
src = "custom_kernel.dill"
sigma = [ 1.2,]
trainable = false
[kernel.proj]
src = "model/"
"""
proj: Union[str, ModelConfig]
"""
The projection to be applied to the inputs before applying `kernel_a`. This should be a Tensorflow or PyTorch
model, specified as an object registry reference, or a :class:`~alibi_detect.utils.schemas.ModelConfig`.
"""
kernel_a: Union[str, KernelConfig] = "@utils.tensorflow.kernels.GaussianRBF"
"""
The kernel to apply to the projected inputs. Defaults to a
:class:`~alibi_detect.utils.tensorflow.kernels.GaussianRBF` with trainable bandwidth.
"""
kernel_b: Optional[Union[str, KernelConfig]] = "@utils.tensorflow.kernels.GaussianRBF"
"""
The kernel to apply to the raw inputs. Defaults to a :class:`~alibi_detect.utils.tensorflow.kernels.GaussianRBF`
with trainable bandwidth. Set to `None` in order to use only the deep component (i.e. `eps=0`).
"""
eps: Union[float, str] = 'trainable'
"""
The proportion (in [0,1]) of weight to assign to the kernel applied to raw inputs. This can be either specified or
    set to `'trainable'`. Only relevant if `kernel_b` is not `None`.
"""
class OptimizerConfig(CustomBaseModelWithKwargs):
"""
Unresolved schema for optimizers. The `optimizer` dictionary has two possible formats:
1. A configuration dictionary compatible with
`tf.keras.optimizers.deserialize <https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/deserialize>`_.
For `backend='tensorflow'` only.
2. A dictionary containing only `class_name`, where this is a string referencing the optimizer name e.g.
`optimizer.class_name = 'Adam'`. In this case, the tensorflow or pytorch optimizer class of the same name is
loaded. For `backend='tensorflow'` and `backend='pytorch'`.
Examples
--------
A TensorFlow Adam optimizer:
.. code-block :: toml
[optimizer]
class_name = "Adam"
[optimizer.config]
name = "Adam"
learning_rate = 0.001
decay = 0.0
A PyTorch Adam optimizer:
.. code-block :: toml
[optimizer]
class_name = "Adam"
"""
class_name: str
config: Optional[Dict[str, Any]] = None
class DriftDetectorConfig(DetectorConfig):
"""
Unresolved base schema for drift detectors.
"""
# args/kwargs shared by all drift detectors
x_ref: str
"Data used as reference distribution. Should be a string referencing a NumPy `.npy` file."
preprocess_fn: Optional[Union[str, PreprocessConfig]] = None
"""
Function to preprocess the data before computing the data drift metrics. A string referencing a serialized function
in `.dill` format, an object registry reference, or a :class:`~alibi_detect.utils.schemas.PreprocessConfig`.
"""
input_shape: Optional[tuple] = None
"Optionally pass the shape of the input data. Used when saving detectors."
data_type: Optional[str] = None
"Specify data type added to the metadata. E.g. `‘tabular’`or `‘image’`."
x_ref_preprocessed: bool = False
"""
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only the test
data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference data will also be
preprocessed.
"""
class DriftDetectorConfigResolved(DetectorConfig):
"""
Resolved base schema for drift detectors.
"""
# args/kwargs shared by all drift detectors
x_ref: Union[np.ndarray, list]
"Data used as reference distribution."
preprocess_fn: Optional[Callable] = None
"Function to preprocess the data before computing the data drift metrics."
input_shape: Optional[tuple] = None
"Optionally pass the shape of the input data. Used when saving detectors."
data_type: Optional[str] = None
"Specify data type added to the metadata. E.g. `‘tabular’` or `‘image’`."
x_ref_preprocessed: bool = False
"""
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only the test
data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference data will also be
preprocessed.
"""
class KSDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`KSDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/ksdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.KSDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
class KSDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`KSDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/ksdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.KSDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True # Note: Duplication needed to avoid mypy error (unless we allow reassignment)
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
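# Hedged sketch (editor addition): a minimal, hypothetical unresolved KSDrift config validated
# against the schema above via validate_config (imported locally to avoid a circular import at
# module load time); see loading.py in this package for how this is called in practice.
def _example_validate_ksdrift_config():  # pragma: no cover
    from alibi_detect.saving.validate import validate_config
    cfg = {'name': 'KSDrift', 'x_ref': 'x_ref.npy', 'p_val': 0.05}
    return validate_config(cfg)  # raises a pydantic ValidationError if fields are invalid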
class ChiSquareDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`ChiSquareDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/chisquaredrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ChiSquareDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
categories_per_feature: Dict[int, Union[int, List[int]]] = None
n_features: Optional[int] = None
class ChiSquareDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`ChiSquareDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/chisquaredrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ChiSquareDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: str = 'bonferroni'
categories_per_feature: Dict[int, Union[int, List[int]]] = None
n_features: Optional[int] = None
class TabularDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`TabularDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/tabulardrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.TabularDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
categories_per_feature: Dict[int, Optional[Union[int, List[int]]]] = None
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
class TabularDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`TabularDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/tabulardrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.TabularDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
categories_per_feature: Dict[int, Optional[Union[int, List[int]]]] = None
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
class CVMDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`CVMDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/cvmdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.CVMDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
n_features: Optional[int] = None
class CVMDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`CVMDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/cvmdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.CVMDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: str = 'bonferroni'
n_features: Optional[int] = None
class FETDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`FETDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/fetdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.FETDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
class FETDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`FETDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/fetdrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.FETDrift` documentation for a description of each field.
"""
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
correction: Literal['bonferroni', 'fdr'] = 'bonferroni'
alternative: Literal['two-sided', 'greater', 'less'] = 'two-sided'
n_features: Optional[int] = None
class MMDDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`MMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/mmddrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
kernel: Optional[Union[str, KernelConfig]] = None
sigma: Optional[NDArray[np.float32]] = None
configure_kernel_from_x_ref: bool = True
n_permutations: int = 100
batch_size_permutations: int = 1000000
device: Optional[Literal['cpu', 'cuda']] = None
class MMDDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`MMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/mmddrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.MMDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
kernel: Optional[Callable] = None
sigma: Optional[NDArray[np.float32]] = None
configure_kernel_from_x_ref: bool = True
n_permutations: int = 100
batch_size_permutations: int = 1000000
device: Optional[Literal['cpu', 'cuda']] = None
class LSDDDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`LSDDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/lsdddrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
sigma: Optional[NDArray[np.float32]] = None
n_permutations: int = 100
n_kernel_centers: Optional[int] = None
lambda_rd_max: float = 0.2
device: Optional[Literal['cpu', 'cuda']] = None
class LSDDDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`LSDDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/lsdddrift.html>`_ detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LSDDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
sigma: Optional[NDArray[np.float32]] = None
n_permutations: int = 100
n_kernel_centers: Optional[int] = None
lambda_rd_max: float = 0.2
device: Optional[Literal['cpu', 'cuda']] = None
class ClassifierDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`ClassifierDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/classifierdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
model: Union[str, ModelConfig]
preds_type: Literal['probs', 'logits'] = 'probs'
binarize_preds: bool = False
reg_loss_fn: Optional[str] = None
train_size: Optional[float] = .75
n_folds: Optional[int] = None
retrain_from_scratch: bool = True
seed: int = 0
optimizer: Optional[Union[str, OptimizerConfig]] = None
learning_rate: float = 1e-3
batch_size: int = 32
preprocess_batch_fn: Optional[str] = None
epochs: int = 3
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[str] = None
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[str] = None # TODO: placeholder, will need to be updated for pytorch implementation
use_calibration: bool = False
calibration_kwargs: Optional[dict] = None
use_oob: bool = False
class ClassifierDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`ClassifierDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/classifierdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ClassifierDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'sklearn'] = 'tensorflow'
p_val: float = .05
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
model: Optional[SupportedModel] = None
preds_type: Literal['probs', 'logits'] = 'probs'
binarize_preds: bool = False
reg_loss_fn: Optional[Callable] = None
train_size: Optional[float] = .75
n_folds: Optional[int] = None
retrain_from_scratch: bool = True
seed: int = 0
optimizer: Optional[SupportedOptimizer] = None
learning_rate: float = 1e-3
batch_size: int = 32
preprocess_batch_fn: Optional[Callable] = None
epochs: int = 3
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[Callable] = None
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[Callable] = None # TODO: placeholder, will need to be updated for pytorch implementation
use_calibration: bool = False
calibration_kwargs: Optional[dict] = None
use_oob: bool = False
class SpotTheDiffDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`SpotTheDiffDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/spotthediffdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
binarize_preds: bool = False
train_size: Optional[float] = .75
n_folds: Optional[int] = None
retrain_from_scratch: bool = True
seed: int = 0
optimizer: Optional[Union[str, OptimizerConfig]] = None
learning_rate: float = 1e-3
batch_size: int = 32
preprocess_batch_fn: Optional[str] = None
epochs: int = 3
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[str] = None
kernel: Optional[Union[str, KernelConfig]] = None
n_diffs: int = 1
initial_diffs: Optional[str] = None
l1_reg: float = 0.01
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[str] = None # TODO: placeholder, will need to be updated for pytorch implementation
class SpotTheDiffDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`SpotTheDiffDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/spotthediffdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.SpotTheDiffDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
binarize_preds: bool = False
train_size: Optional[float] = .75
n_folds: Optional[int] = None
retrain_from_scratch: bool = True
seed: int = 0
optimizer: Optional[SupportedOptimizer] = None
learning_rate: float = 1e-3
batch_size: int = 32
preprocess_batch_fn: Optional[Callable] = None
epochs: int = 3
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[Callable] = None
kernel: Optional[Callable] = None
n_diffs: int = 1
initial_diffs: Optional[np.ndarray] = None
l1_reg: float = 0.01
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[Callable] = None # TODO: placeholder, will need to be updated for pytorch implementation
class LearnedKernelDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`LearnedKernelDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/learnedkerneldrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
p_val: float = .05
kernel: Union[str, DeepKernelConfig]
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
n_permutations: int = 100
batch_size_permutations: int = 1000000
var_reg: float = 1e-5
reg_loss_fn: Optional[str] = None
train_size: Optional[float] = .75
retrain_from_scratch: bool = True
optimizer: Optional[Union[str, OptimizerConfig]] = None
learning_rate: float = 1e-3
batch_size: int = 32
batch_size_predict: int = 1000000
preprocess_batch_fn: Optional[str] = None
epochs: int = 3
num_workers: int = 0
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[str] = None
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[str] = None # TODO: placeholder, will need to be updated for pytorch implementation
class LearnedKernelDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`LearnedKernelDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/learnedkerneldrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LearnedKernelDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch', 'keops'] = 'tensorflow'
p_val: float = .05
kernel: Optional[Callable] = None
preprocess_at_init: bool = True
update_x_ref: Optional[Dict[str, int]] = None
n_permutations: int = 100
batch_size_permutations: int = 1000000
var_reg: float = 1e-5
reg_loss_fn: Optional[Callable] = None
train_size: Optional[float] = .75
retrain_from_scratch: bool = True
optimizer: Optional[SupportedOptimizer] = None
learning_rate: float = 1e-3
batch_size: int = 32
batch_size_predict: int = 1000000
preprocess_batch_fn: Optional[Callable] = None
epochs: int = 3
num_workers: int = 0
verbose: int = 0
train_kwargs: Optional[dict] = None
dataset: Optional[Callable] = None
device: Optional[Literal['cpu', 'cuda']] = None
dataloader: Optional[Callable] = None # TODO: placeholder, will need to be updated for pytorch implementation
class ContextMMDDriftConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`ContextMMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/contextmmddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ContextMMDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
c_ref: str
preprocess_at_init: bool = True
update_ref: Optional[Dict[str, int]] = None
x_kernel: Optional[Union[str, KernelConfig]] = None
c_kernel: Optional[Union[str, KernelConfig]] = None
n_permutations: int = 100
prop_c_held: float = 0.25
n_folds: int = 5
batch_size: Optional[int] = 256
verbose: bool = False
device: Optional[Literal['cpu', 'cuda']] = None
class ContextMMDDriftConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`ContextMMDDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/contextmmddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ContextMMDDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
p_val: float = .05
c_ref: np.ndarray
preprocess_at_init: bool = True
update_ref: Optional[Dict[str, int]] = None
x_kernel: Optional[Callable] = None
c_kernel: Optional[Callable] = None
n_permutations: int = 100
prop_c_held: float = 0.25
n_folds: int = 5
batch_size: Optional[int] = 256
verbose: bool = False
device: Optional[Literal['cpu', 'cuda']] = None
class MMDDriftOnlineConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`MMDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinemmddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
ert: float
window_size: int
kernel: Optional[Union[str, KernelConfig]] = None
sigma: Optional[np.ndarray] = None
n_bootstraps: int = 1000
device: Optional[Literal['cpu', 'cuda']] = None
verbose: bool = True
class MMDDriftOnlineConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`MMDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinemmddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.MMDDriftOnline` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
ert: float
window_size: int
kernel: Optional[Callable] = None
sigma: Optional[np.ndarray] = None
n_bootstraps: int = 1000
device: Optional[Literal['cpu', 'cuda']] = None
verbose: bool = True
class LSDDDriftOnlineConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`LSDDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinelsdddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
ert: float
window_size: int
sigma: Optional[np.ndarray] = None
n_bootstraps: int = 1000
n_kernel_centers: Optional[int] = None
lambda_rd_max: float = 0.2
device: Optional[Literal['cpu', 'cuda']] = None
verbose: bool = True
class LSDDDriftOnlineConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`LSDDDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinelsdddrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.LSDDDriftOnline` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
ert: float
window_size: int
sigma: Optional[np.ndarray] = None
n_bootstraps: int = 1000
n_kernel_centers: Optional[int] = None
lambda_rd_max: float = 0.2
device: Optional[Literal['cpu', 'cuda']] = None
verbose: bool = True
class CVMDriftOnlineConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`CVMDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinecvmdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.CVMDriftOnline` documentation for a description of each field.
"""
ert: float
window_sizes: List[int]
n_bootstraps: int = 10000
batch_size: int = 64
n_features: Optional[int] = None
verbose: bool = True
# validators
_coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class CVMDriftOnlineConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`CVMDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinecvmdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.CVMDriftOnline` documentation for a description of each field.
"""
ert: float
window_sizes: List[int]
n_bootstraps: int = 10000
batch_size: int = 64
n_features: Optional[int] = None
verbose: bool = True
# validators
_coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class FETDriftOnlineConfig(DriftDetectorConfig):
"""
Unresolved schema for the
`FETDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinefetdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.FETDriftOnline` documentation for a description of each field.
"""
ert: float
window_sizes: List[int]
n_bootstraps: int = 10000
t_max: Optional[int] = None
alternative: Literal['greater', 'less'] = 'greater'
lam: float = 0.99
n_features: Optional[int] = None
verbose: bool = True
# validators
_coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
class FETDriftOnlineConfigResolved(DriftDetectorConfigResolved):
"""
Resolved schema for the
`FETDriftOnline <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/onlinefetdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.FETDriftOnline` documentation for a description of each field.
"""
ert: float
window_sizes: List[int]
n_bootstraps: int = 10000
t_max: Optional[int] = None
alternative: Literal['greater', 'less'] = 'greater'
lam: float = 0.99
n_features: Optional[int] = None
verbose: bool = True
# validators
_coerce_int2list = validator('window_sizes', allow_reuse=True, pre=True)(coerce_int2list)
# The uncertainty detectors don't inherit from DriftDetectorConfig since their kwargs are a little different from the
# other drift detectors (e.g. no preprocess_fn). Subject to change in the future.
class ClassifierUncertaintyDriftConfig(DetectorConfig):
"""
Unresolved schema for the
`ClassifierUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
x_ref: str
model: Union[str, ModelConfig]
p_val: float = .05
x_ref_preprocessed: bool = False
update_x_ref: Optional[Dict[str, int]] = None
preds_type: Literal['probs', 'logits'] = 'probs'
uncertainty_type: Literal['entropy', 'margin'] = 'entropy'
margin_width: float = 0.1
batch_size: int = 32
preprocess_batch_fn: Optional[str] = None
device: Optional[str] = None
tokenizer: Optional[Union[str, TokenizerConfig]] = None
max_len: Optional[int] = None
input_shape: Optional[tuple] = None
data_type: Optional[str] = None
class ClassifierUncertaintyDriftConfigResolved(DetectorConfig):
"""
Resolved schema for the
`ClassifierUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.ClassifierUncertaintyDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
x_ref: Union[np.ndarray, list]
model: Optional[SupportedModel] = None
p_val: float = .05
x_ref_preprocessed: bool = False
update_x_ref: Optional[Dict[str, int]] = None
preds_type: Literal['probs', 'logits'] = 'probs'
uncertainty_type: Literal['entropy', 'margin'] = 'entropy'
margin_width: float = 0.1
batch_size: int = 32
preprocess_batch_fn: Optional[Callable] = None
device: Optional[str] = None
tokenizer: Optional[Union[str, Callable]] = None
max_len: Optional[int] = None
input_shape: Optional[tuple] = None
data_type: Optional[str] = None
class RegressorUncertaintyDriftConfig(DetectorConfig):
"""
Unresolved schema for the
`RegressorUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
x_ref: str
model: Union[str, ModelConfig]
p_val: float = .05
x_ref_preprocessed: bool = False
update_x_ref: Optional[Dict[str, int]] = None
uncertainty_type: Literal['mc_dropout', 'ensemble'] = 'mc_dropout'
n_evals: int = 25
batch_size: int = 32
preprocess_batch_fn: Optional[str] = None
device: Optional[str] = None
tokenizer: Optional[Union[str, TokenizerConfig]] = None
max_len: Optional[int] = None
input_shape: Optional[tuple] = None
data_type: Optional[str] = None
class RegressorUncertaintyDriftConfigResolved(DetectorConfig):
"""
Resolved schema for the
`RegressorUncertaintyDrift <https://docs.seldon.io/projects/alibi-detect/en/stable/cd/methods/modeluncdrift.html>`_
detector.
Except for the `name` and `meta` fields, the fields match the detector's args and kwargs. Refer to the
:class:`~alibi_detect.cd.RegressorUncertaintyDrift` documentation for a description of each field.
"""
backend: Literal['tensorflow', 'pytorch'] = 'tensorflow'
x_ref: Union[np.ndarray, list]
model: Optional[SupportedModel] = None
p_val: float = .05
x_ref_preprocessed: bool = False
update_x_ref: Optional[Dict[str, int]] = None
uncertainty_type: Literal['mc_dropout', 'ensemble'] = 'mc_dropout'
n_evals: int = 25
batch_size: int = 32
preprocess_batch_fn: Optional[Callable] = None
device: Optional[str] = None
tokenizer: Optional[Callable] = None
max_len: Optional[int] = None
input_shape: Optional[tuple] = None
data_type: Optional[str] = None
# Unresolved schema dictionary (used in alibi_detect.utils.loading)
DETECTOR_CONFIGS: Dict[str, Type[DetectorConfig]] = {
'KSDrift': KSDriftConfig,
'ChiSquareDrift': ChiSquareDriftConfig,
'TabularDrift': TabularDriftConfig,
'CVMDrift': CVMDriftConfig,
'FETDrift': FETDriftConfig,
'MMDDrift': MMDDriftConfig,
'LSDDDrift': LSDDDriftConfig,
'ClassifierDrift': ClassifierDriftConfig,
'SpotTheDiffDrift': SpotTheDiffDriftConfig,
'LearnedKernelDrift': LearnedKernelDriftConfig,
'ContextMMDDrift': ContextMMDDriftConfig,
'MMDDriftOnline': MMDDriftOnlineConfig,
'LSDDDriftOnline': LSDDDriftOnlineConfig,
'CVMDriftOnline': CVMDriftOnlineConfig,
'FETDriftOnline': FETDriftOnlineConfig,
'ClassifierUncertaintyDrift': ClassifierUncertaintyDriftConfig,
'RegressorUncertaintyDrift': RegressorUncertaintyDriftConfig,
}
# Resolved schema dictionary (used in alibi_detect.utils.loading)
DETECTOR_CONFIGS_RESOLVED: Dict[str, Type[DetectorConfig]] = {
'KSDrift': KSDriftConfigResolved,
'ChiSquareDrift': ChiSquareDriftConfigResolved,
'TabularDrift': TabularDriftConfigResolved,
'CVMDrift': CVMDriftConfigResolved,
'FETDrift': FETDriftConfigResolved,
'MMDDrift': MMDDriftConfigResolved,
'LSDDDrift': LSDDDriftConfigResolved,
'ClassifierDrift': ClassifierDriftConfigResolved,
'SpotTheDiffDrift': SpotTheDiffDriftConfigResolved,
'LearnedKernelDrift': LearnedKernelDriftConfigResolved,
'ContextMMDDrift': ContextMMDDriftConfigResolved,
'MMDDriftOnline': MMDDriftOnlineConfigResolved,
'LSDDDriftOnline': LSDDDriftOnlineConfigResolved,
'CVMDriftOnline': CVMDriftOnlineConfigResolved,
'FETDriftOnline': FETDriftOnlineConfigResolved,
'ClassifierUncertaintyDrift': ClassifierUncertaintyDriftConfigResolved,
'RegressorUncertaintyDrift': RegressorUncertaintyDriftConfigResolved,
}
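# Illustrative sketch (not part of the module above; the field values are assumptions for
# demonstration): the two dictionaries map a detector's `name` to its pydantic schema,
# which is how `validate_config` in alibi_detect.saving.validate checks a config dict, e.g.
#
#     cfg = {'name': 'MMDDrift', 'x_ref': 'x_ref.npy', 'p_val': 0.05}
#     schema = DETECTOR_CONFIGS[cfg['name']]   # unresolved schema (str/filepath fields)
#     validated = schema(**cfg).dict()         # validated dict with defaults filled in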
| 52,612 | 39.193277 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/validators.py
|
import sys
from typing import Any, Generic, Optional, Type, TypeVar, Union, List
import numpy as np
from numpy.lib import NumpyVersion
from pydantic.fields import ModelField
from alibi_detect.utils.frameworks import has_tensorflow, has_pytorch, has_keops, Framework
if has_tensorflow:
import tensorflow as tf
if has_pytorch:
import torch
def coerce_int2list(value: int) -> List[int]:
"""Validator to coerce int to list (pydantic doesn't do this by default)."""
if not isinstance(value, list):
return [value]
else:
return value
# Framework validator (validates `flavour` and `backend` fields)
def validate_framework(framework: str, field: ModelField) -> str:
if (framework == Framework.TENSORFLOW and has_tensorflow) or (framework == Framework.PYTORCH and has_pytorch) or \
(framework == Framework.KEOPS and has_keops):
return framework
elif framework == Framework.SKLEARN: # sklearn is a core dep
return framework
else:
raise ImportError(f"`{field.name} = '{framework}'` not possible since {framework} is not installed.")
# NumPy NDArray pydantic validator type
# The code below is adapted from https://github.com/cheind/pydantic-numpy.
T = TypeVar("T", bound=np.generic)
if NumpyVersion(np.__version__) < "1.22.0" or sys.version_info < (3, 9):
class NDArray(Generic[T], np.ndarray):
"""
A Generic pydantic model used to coerce values to np.ndarray.
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, val: Any, field: ModelField) -> np.ndarray:
return _coerce_2_ndarray(cls, val, field)
else:
class NDArray(Generic[T], np.ndarray[Any, T]): # type: ignore[no-redef, type-var]
"""
A Generic pydantic model used to coerce values to np.ndarray.
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, val: Any, field: ModelField) -> Optional[np.ndarray]:
return _coerce_2_ndarray(cls, val, field)
def _coerce_2_ndarray(cls: Type, val: Any, field: ModelField) -> np.ndarray:
if field.sub_fields is not None:
dtype_field = field.sub_fields[0]
return np.asarray(val, dtype=dtype_field.type_)
else:
return np.asarray(val)
def coerce_2_tensor(value: Union[float, List[float]], values: dict):
if value is None:
return value
framework = values.get('backend') or values.get('flavour')
if framework is None:
raise ValueError('`coerce_2_tensor` failed since no framework was identified.')
elif framework == Framework.TENSORFLOW and has_tensorflow:
return tf.convert_to_tensor(value)
elif (framework == Framework.PYTORCH and has_pytorch) or (framework == Framework.KEOPS and has_keops):
return torch.tensor(value)
else:
# Error should not be raised since `flavour` should have already been validated.
raise ImportError(f'Cannot coerce to {framework} Tensor since {framework} is not installed.')
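# Illustrative sketch (not used by this module; `_ArrayModel` is a hypothetical name):
# `NDArray` can be used as a pydantic field type so that list-like values are coerced
# to numpy arrays of the requested dtype, e.g.
#
#     from pydantic import BaseModel
#
#     class _ArrayModel(BaseModel):
#         arr: NDArray[np.float32]
#
#     _ArrayModel(arr=[1.0, 2.0]).arr  # -> np.ndarray with dtype float32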
| 3,113 | 35.635294 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/registry.py
|
"""
This registry allows Python objects to be registered and accessed by their string reference later on. The primary usage
is to register objects so that they can be specified in a `config.toml` file. A number of Alibi Detect functions are
also pre-registered in the registry for convenience. See the
`Registering artefacts <https://docs.seldon.io/projects/alibi-detect/en/stable/overview/config_files.html#registering-artefacts>`_ # noqa: E501
documentation.
Examples
--------
Registering a simple function using the `@registry.register` decorator, and immediately fetching it:
.. code-block :: python
import numpy as np
from alibi_detect.saving import registry
# Register a simple function
@registry.register('my_function.v1')
def my_function(x: np.ndarray) -> np.ndarray:
"A custom function to normalise input data."
return (x - x.mean()) / x.std()
# Get function from registry
fetched_function = registry.get('my_function.v1')
Instead of using a decorator, objects can also be registered by directly using the `registry.register()` function:
.. code-block :: python
from alibi_detect.saving import registry
my_object = ...
registry.register("my_object.v1", func=my_object)
"""
import catalogue
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops
if has_tensorflow:
from alibi_detect.cd.tensorflow import \
preprocess_drift as preprocess_drift_tf
from alibi_detect.utils.tensorflow.data import TFDataset as TFDataset_tf
from alibi_detect.utils.tensorflow.kernels import \
GaussianRBF as GaussianRBF_tf, sigma_median as sigma_median_tf
from alibi_detect.cd.tensorflow.context_aware import _sigma_median_diag as _sigma_median_diag_tf
if has_pytorch:
from alibi_detect.cd.pytorch import \
preprocess_drift as preprocess_drift_torch
from alibi_detect.utils.pytorch.kernels import \
GaussianRBF as GaussianRBF_torch, sigma_median as sigma_median_torch
from alibi_detect.cd.pytorch.context_aware import _sigma_median_diag as _sigma_median_diag_torch
if has_keops:
from alibi_detect.utils.keops.kernels import \
GaussianRBF as GaussianRBF_keops, sigma_mean as sigma_mean_keops
# Create registry
registry = catalogue.create("alibi_detect", "registry")
# Register alibi-detect classes/functions
if has_tensorflow:
registry.register('utils.tensorflow.kernels.GaussianRBF', func=GaussianRBF_tf)
registry.register('utils.tensorflow.kernels.sigma_median', func=sigma_median_tf)
registry.register('cd.tensorflow.context_aware._sigma_median_diag', func=_sigma_median_diag_tf)
registry.register('cd.tensorflow.preprocess.preprocess_drift', func=preprocess_drift_tf)
registry.register('utils.tensorflow.data.TFDataset', func=TFDataset_tf)
if has_pytorch:
registry.register('utils.pytorch.kernels.GaussianRBF', func=GaussianRBF_torch)
registry.register('utils.pytorch.kernels.sigma_median', func=sigma_median_torch)
registry.register('cd.pytorch.context_aware._sigma_median_diag', func=_sigma_median_diag_torch)
registry.register('cd.pytorch.preprocess.preprocess_drift', func=preprocess_drift_torch)
if has_keops:
registry.register('utils.keops.kernels.GaussianRBF', func=GaussianRBF_keops)
registry.register('utils.keops.kernels.sigma_mean', func=sigma_mean_keops)
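# Illustrative sketch (the registry key below is an assumption): once registered, an
# object can be referenced from a `config.toml` file by prefixing its key with '@', e.g.
#
#     [preprocess_fn]
#     src = "@my_function.v1"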
| 3,373 | 41.708861 | 144 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/validate.py
|
import warnings
from alibi_detect.saving.schemas import (
DETECTOR_CONFIGS, DETECTOR_CONFIGS_RESOLVED)
from alibi_detect.version import __version__
def validate_config(cfg: dict, resolved: bool = False) -> dict:
"""
Validates a detector config dict by passing the dict to the detector's pydantic model schema.
Parameters
----------
cfg
The detector config dict.
resolved
Whether the config is resolved or not. For example, if resolved=True, `x_ref` is expected to be a
np.ndarray, whereas if resolved=False, `x_ref` is expected to be a str.
Returns
-------
The validated config dict, with missing fields set to their default values.
"""
# Get detector name and meta
if 'name' in cfg:
detector_name = cfg['name']
else:
raise ValueError('`name` missing from config.toml.')
# Validate detector specific config
if detector_name in DETECTOR_CONFIGS.keys():
if resolved:
cfg = DETECTOR_CONFIGS_RESOLVED[detector_name](**cfg).dict()
else:
cfg = DETECTOR_CONFIGS[detector_name](**cfg).dict()
else:
raise ValueError(f'Loading the {detector_name} detector from a config.toml is not yet supported.')
# Get meta data
meta = cfg.get('meta')
meta = {} if meta is None else meta # Needed because pydantic sets meta=None if it is missing from the config
version_warning = meta.get('version_warning', False)
version = meta.get('version', None)
# Raise warning if config file already contains a version_warning
if version_warning:
warnings.warn('The config file appears to have been generated from a detector which may have been '
'loaded with a version mismatch. This may lead to breaking code or invalid results.')
# check version
if version is not None and version != __version__:
warnings.warn(f'Config is from version {version} but current version is '
f'{__version__}. This may lead to breaking code or invalid results.')
cfg['meta'].update({'version_warning': True})
return cfg
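# Illustrative usage sketch (the filepath below is an assumption for demonstration):
#
#     from alibi_detect.saving import read_config
#     cfg = read_config('my_detector/config.toml')  # unresolved config dict
#     cfg = validate_config(cfg)                    # validated, with defaults filled in
#
# A resolved config (e.g. with `x_ref` already loaded as an np.ndarray) would instead be
# validated with `validate_config(cfg, resolved=True)`.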
| 2,144 | 36.631579 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/saving.py
|
import logging
import os
import shutil
import warnings
from functools import partial
from pathlib import Path
from typing import Callable, Optional, Tuple, Union, Any, Dict, TYPE_CHECKING
import dill
import numpy as np
import toml
from transformers import PreTrainedTokenizerBase
from alibi_detect.saving._typing import VALID_DETECTORS
from alibi_detect.saving.loading import _replace, validate_config, STATE_PATH
from alibi_detect.saving.registry import registry
from alibi_detect.utils._types import supported_models_all, supported_models_tf, supported_models_torch, \
supported_models_sklearn
from alibi_detect.base import Detector, ConfigurableDetector, StatefulDetectorOnline
from alibi_detect.saving._tensorflow import save_detector_legacy, save_model_config_tf, save_optimizer_config_tf
from alibi_detect.saving._pytorch import save_model_config_pt
from alibi_detect.saving._sklearn import save_model_config_sk
if TYPE_CHECKING:
import tensorflow as tf
# do not extend pickle dispatch table so as not to change pickle behaviour
dill.extend(use_dill=False)
logger = logging.getLogger(__name__)
X_REF_FILENAME = 'x_ref.npy'
C_REF_FILENAME = 'c_ref.npy'
def save_detector(
detector: Union[Detector, ConfigurableDetector],
filepath: Union[str, os.PathLike],
legacy: bool = False,
) -> None:
"""
Save outlier, drift or adversarial detector.
Parameters
----------
detector
Detector object.
filepath
Save directory.
legacy
Whether to save in the legacy .dill format instead of via a config.toml file. Default is `False`.
This option will be removed in a future version.
"""
if legacy:
warnings.warn('The `legacy` option will be removed in a future version.', DeprecationWarning)
# TODO: Replace .__args__ w/ typing.get_args() once Python 3.7 dropped (and remove type ignore below)
detector_name = detector.__class__.__name__
if detector_name not in VALID_DETECTORS:
raise NotImplementedError(f'{detector_name} is not supported by `save_detector`.')
# Saving is wrapped in a try, with cleanup in except, to prevent a half-saved detector remaining upon error.
filepath = Path(filepath)
# Get a list of all pre-existing files in `filepath` (so we know what not to clean up if an error occurs)
orig_files = set(filepath.iterdir()) if filepath.is_dir() else set()
try:
# Create directory if it doesn't exist
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# If a drift detector, wrap drift detector save method
if isinstance(detector, ConfigurableDetector) and not legacy:
_save_detector_config(detector, filepath)
# Otherwise, save via the previous meta and state_dict approach
else:
save_detector_legacy(detector, filepath)
except Exception as error:
# Clean up any files written during the failed save (the pre-existing `orig_files` are kept)
_cleanup_filepath(orig_files, filepath)
raise RuntimeError(f'Saving failed. The save directory {filepath} has been cleaned.') from error
logger.info('finished saving.')
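# Illustrative usage sketch (assumes `cd` is a fitted config-enabled detector, e.g. a drift
# detector from alibi_detect.cd; the directory name is an assumption):
#
#     from alibi_detect.saving import save_detector, load_detector
#     save_detector(cd, './my_detector')   # writes config.toml plus serialized artefacts
#     cd = load_detector('./my_detector')  # restores the detector from the directory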
def _cleanup_filepath(orig_files: set, filepath: Path):
"""
Cleans up the `filepath` directory in the event of a saving failure.
Parameters
----------
orig_files
Set of original files (not to delete).
filepath
The directory to clean up.
"""
# Find new files
new_files = set(filepath.iterdir())
files_to_rm = new_files - orig_files
# Delete new files
for file in files_to_rm:
if file.is_dir():
shutil.rmtree(file)
elif file.is_file():
file.unlink()
# Delete filepath directory if it is now empty
if filepath is not None:
if not any(filepath.iterdir()):
filepath.rmdir()
# TODO - eventually this will become save_detector (once outlier and adversarial updated to save via config.toml)
def _save_detector_config(detector: ConfigurableDetector,
filepath: Union[str, os.PathLike]):
"""
Save a drift detector. The detector is saved as a `config.toml` file. Artefacts such as
`preprocess_fn`, models, embeddings, tokenizers etc are serialized, and their filepaths are
added to the config file.
The detector can be loaded again by passing the resulting config file or filepath to `load_detector`.
Parameters
----------
detector
The detector to save.
filepath
File path to save serialized artefacts to.
"""
# detector name
detector_name = detector.__class__.__name__
# Process file paths
filepath = Path(filepath)
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# Get the detector config (with artefacts still within it)
if hasattr(detector, 'get_config'):
cfg = detector.get_config() # TODO - remove once all detectors have get_config
cfg = validate_config(cfg, resolved=True)
else:
raise NotImplementedError(f'{detector_name} does not yet support config.toml based saving.')
# Save state if an online detector and online state exists (self.t > 0)
if isinstance(detector, StatefulDetectorOnline):
if detector.t > 0:
detector.save_state(filepath.joinpath(STATE_PATH))
# Save x_ref
save_path = filepath.joinpath(X_REF_FILENAME)
np.save(str(save_path), cfg['x_ref'])
cfg.update({'x_ref': X_REF_FILENAME})
# Save c_ref
c_ref = cfg.get('c_ref')
if c_ref is not None:
save_path = filepath.joinpath(C_REF_FILENAME)
np.save(str(save_path), cfg['c_ref'])
cfg.update({'c_ref': C_REF_FILENAME})
# Save preprocess_fn
preprocess_fn = cfg.get('preprocess_fn')
if preprocess_fn is not None:
logger.info('Saving the preprocess_fn function.')
preprocess_cfg = _save_preprocess_config(preprocess_fn, cfg['input_shape'], filepath)
cfg['preprocess_fn'] = preprocess_cfg
# Serialize kernels
for kernel_str in ('kernel', 'x_kernel', 'c_kernel'):
kernel = cfg.get(kernel_str)
if kernel is not None:
cfg[kernel_str] = _save_kernel_config(kernel, filepath, Path(kernel_str))
if 'proj' in cfg[kernel_str]: # serialise proj from DeepKernel - do here as need input_shape
cfg[kernel_str]['proj'], _ = _save_model_config(cfg[kernel_str]['proj'], base_path=filepath,
input_shape=cfg['input_shape'])
# ClassifierDrift and SpotTheDiffDrift specific artefacts.
# Serialize detector model
model = cfg.get('model')
if model is not None:
model_cfg, _ = _save_model_config(model, base_path=filepath, input_shape=cfg['input_shape'])
cfg['model'] = model_cfg
# Serialize optimizer
optimizer = cfg.get('optimizer')
if optimizer is not None:
cfg['optimizer'] = _save_optimizer_config(optimizer)
# Serialize dataset
dataset = cfg.get('dataset')
if dataset is not None:
dataset_cfg, dataset_kwargs = _serialize_object(dataset, filepath, Path('dataset'))
cfg.update({'dataset': dataset_cfg})
if len(dataset_kwargs) != 0:
cfg['dataset']['kwargs'] = dataset_kwargs
# Serialize reg_loss_fn
reg_loss_fn = cfg.get('reg_loss_fn')
if reg_loss_fn is not None:
reg_loss_fn_cfg, _ = _serialize_object(reg_loss_fn, filepath, Path('reg_loss_fn'))
cfg['reg_loss_fn'] = reg_loss_fn_cfg
# Save initial_diffs
initial_diffs = cfg.get('initial_diffs')
if initial_diffs is not None:
save_path = filepath.joinpath('initial_diffs.npy')
np.save(str(save_path), initial_diffs)
cfg.update({'initial_diffs': 'initial_diffs.npy'})
# Save config
write_config(cfg, filepath)
def write_config(cfg: dict, filepath: Union[str, os.PathLike]):
"""
Save an unresolved detector config dict to a TOML file.
Parameters
----------
cfg
Unresolved detector config dict.
filepath
Filepath to directory to save 'config.toml' file in.
"""
# Create directory if it doesn't exist
filepath = Path(filepath)
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# Convert pathlib.Path's to str's
cfg = _path2str(cfg)
# Validate config before final tweaks
validate_config(cfg) # Must validate here as replacing None w/ str will break validation
# Replace None with "None", and dicts with integer keys with str keys
# TODO: Subject to change depending on toml library updates
cfg = _replace(cfg, None, "None") # Note: None replaced with "None" as None/null not valid TOML
cfg = _int2str_keys(cfg)
# Write to TOML file
logger.info('Writing config to {}'.format(filepath.joinpath('config.toml')))
with open(filepath.joinpath('config.toml'), 'w') as f:
toml.dump(cfg, f, encoder=toml.TomlNumpyEncoder()) # type: ignore[misc]
def _save_preprocess_config(preprocess_fn: Callable,
input_shape: Optional[tuple],
filepath: Path) -> dict:
"""
Serializes a drift detector's preprocess_fn. Artefacts are saved to disk, and a config dict containing filepaths
to the saved artefacts is returned.
Parameters
----------
preprocess_fn
The preprocess function to be serialized.
input_shape
Input shape for a model (if a model exists).
filepath
Directory to save serialized artefacts to.
Returns
-------
The config dictionary, containing references to the serialized artefacts. The format of this dict matches that \
of the `preprocess` field in the drift detector specification.
"""
preprocess_cfg: Dict[str, Any] = {}
local_path = Path('preprocess_fn')
# Serialize function
func, func_kwargs = _serialize_object(preprocess_fn, filepath, local_path.joinpath('function'))
preprocess_cfg.update({'src': func})
# Process partial function kwargs (if they exist)
kwargs = {}
for k, v in func_kwargs.items():
# Model/embedding
if isinstance(v, supported_models_all):
cfg_model, cfg_embed = _save_model_config(v, filepath, input_shape, local_path)
kwargs.update({k: cfg_model})
if cfg_embed is not None:
kwargs.update({'embedding': cfg_embed})
# Tokenizer
elif isinstance(v, PreTrainedTokenizerBase):
cfg_token = _save_tokenizer_config(v, filepath, local_path)
kwargs.update({k: cfg_token})
# torch device
elif v.__class__.__name__ == 'device': # avoiding torch import in case not installed
kwargs.update({k: v.type})
# Arbitrary function
elif callable(v):
src, _ = _serialize_object(v, filepath, local_path.joinpath(k))
kwargs.update({k: src})
# Put remaining kwargs directly into cfg
else:
kwargs.update({k: v})
if 'preprocess_drift' in func:
preprocess_cfg.update(kwargs)
else:
preprocess_cfg.update({'kwargs': kwargs})
return preprocess_cfg
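# For reference, an illustrative (not exhaustive) `preprocess_fn` config produced by the
# function above might look as follows, assuming a registered `preprocess_drift` function
# with a model kwarg (the exact keys depend on the detector and artefacts involved):
#
#     {'src': '@cd.tensorflow.preprocess.preprocess_drift',
#      'model': {'src': 'preprocess_fn/model'},
#      'batch_size': 32}
#
# Registered callables are referenced by an '@' registry key; other kwargs are inlined for
# `preprocess_drift` or nested under a 'kwargs' entry otherwise.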
def _serialize_object(obj: Callable, base_path: Path,
local_path: Path = Path('.')) -> Tuple[str, dict]:
"""
Serializes a Python object. If the object is in the object registry, its registry string is returned. If not,
the object is pickled with dill. If the object is wrapped in a functools.partial, its kwargs are also returned.
Parameters
----------
obj
The object to serialize.
base_path
Base directory to save in.
local_path
A local (relative) filepath to append to base_path.
Returns
-------
Tuple containing a string referencing the save filepath and a dict of kwargs.
"""
# If a functools.partial, unpack the underlying function and kwargs
if isinstance(obj, partial):
kwargs = obj.keywords
obj = obj.func
else:
kwargs = {}
# If object has been registered, save registry string
keys = [k for k, v in registry.get_all().items() if obj == v]
registry_str = keys[0] if len(keys) == 1 else None
if registry_str is not None: # alibi-detect registered object
src = '@' + registry_str
# Otherwise, save as dill
else:
# create folder to save object in
filepath = base_path.joinpath(local_path)
if not filepath.parent.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath.parent))
filepath.parent.mkdir(parents=True, exist_ok=True)
logger.info('Saving object to {}.'.format(filepath.with_suffix('.dill')))
with open(filepath.with_suffix('.dill'), 'wb') as f:
dill.dump(obj, f)
src = str(local_path.with_suffix('.dill'))
return src, kwargs
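# Illustrative examples of the values returned above (function names and paths are
# assumptions): a callable registered in the alibi-detect registry, such as the TensorFlow
# `preprocess_drift` function, serializes to ('@cd.tensorflow.preprocess.preprocess_drift', {}),
# whereas an unregistered callable is pickled with dill and referenced by a relative
# filepath, e.g. ('reg_loss_fn.dill', {}).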
def _path2str(cfg: dict, absolute: bool = False) -> dict:
"""
Private function to traverse a config dict and convert pathlib Paths to strings.
Parameters
----------
cfg
The config dict.
absolute
Whether to convert to absolute filepaths.
Returns
-------
The converted config dict.
"""
for k, v in cfg.items():
if isinstance(v, dict):
_path2str(v, absolute)
elif isinstance(v, Path):
if absolute:
v = v.resolve()
cfg.update({k: str(v.as_posix())})
return cfg
def _int2str_keys(dikt: dict) -> dict:
"""
Private function to traverse a dict and convert any dicts with int keys to dicts with str keys (e.g. the
`categories_per_feature` kwarg for `TabularDrift`).
Parameters
----------
dikt
The dictionary.
Returns
-------
The converted dictionary.
"""
dikt_copy = dikt.copy()
for k, v in dikt.items():
if isinstance(k, int):
dikt_copy[str(k)] = dikt[k]
dikt_copy.pop(k)
if isinstance(v, dict):
dikt_copy[k] = _int2str_keys(v)
return dikt_copy
def _save_model_config(model: Any,
base_path: Path,
input_shape: Optional[tuple] = None,
path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
"""
Save a model to a config dictionary. When a model has a text embedding model contained within it,
this is extracted and saved separately.
Parameters
----------
model
The model to save.
base_path
Base filepath to save to.
input_shape
The input dimensions of the model (after the optional embedding has been applied).
path
A local (relative) filepath to append to base_path.
Returns
-------
A tuple containing the model and embedding config dicts.
"""
if isinstance(model, supported_models_tf):
return save_model_config_tf(model, base_path, input_shape, path)
elif isinstance(model, supported_models_torch):
return save_model_config_pt(model, base_path, path)
elif isinstance(model, supported_models_sklearn):
return save_model_config_sk(model, base_path, path), None
else:
raise NotImplementedError("Support for saving the given model is not yet implemented")
def _save_tokenizer_config(tokenizer: PreTrainedTokenizerBase,
base_path: Path,
path: Path = Path('.')) -> dict:
"""
Saves HuggingFace tokenizers.
Parameters
----------
tokenizer
The tokenizer.
base_path
Base filepath to save to.
path
A local (relative) filepath to append to base_path.
Returns
-------
The tokenizer config dict.
"""
# create folder to save tokenizer in
filepath = base_path.joinpath(path).joinpath('tokenizer')
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
cfg_token = {}
logger.info('Saving tokenizer to {}.'.format(filepath))
tokenizer.save_pretrained(filepath)
cfg_token.update({'src': path.joinpath('tokenizer')})
return cfg_token
def _save_kernel_config(kernel: Callable,
base_path: Path,
local_path: Path = Path('.')) -> dict:
"""Function to save kernel.
If the kernel is stored in the artefact registry, the registry key (and kwargs) are written
to config. If the kernel is a generic callable, it is pickled.
Parameters
----------
kernel
The kernel to save.
base_path
Base directory to save in.
local_path
A local (relative) filepath to append to base_path.
Returns
-------
The kernel config dictionary.
"""
# if a DeepKernel
if hasattr(kernel, 'proj'):
if hasattr(kernel, 'get_config'):
cfg_kernel = kernel.get_config()
else:
raise AttributeError("The detector's `kernel` must have a .get_config() method for it to be saved.")
# Serialize the kernels (if needed)
kernel_a = cfg_kernel.get('kernel_a')
kernel_b = cfg_kernel.get('kernel_b')
if not isinstance(kernel_a, str):
cfg_kernel['kernel_a'] = _save_kernel_config(cfg_kernel['kernel_a'], base_path, Path('kernel_a'))
if not isinstance(kernel_b, str) and kernel_b is not None:
cfg_kernel['kernel_b'] = _save_kernel_config(cfg_kernel['kernel_b'], base_path, Path('kernel_b'))
# If any other kernel, serialize the class to disk and get config
else:
if isinstance(kernel, type): # if still a class
kernel_class = kernel
cfg_kernel = {}
else: # if an object
kernel_class = kernel.__class__
if hasattr(kernel, 'get_config'):
cfg_kernel = kernel.get_config()
cfg_kernel['init_sigma_fn'], _ = _serialize_object(cfg_kernel['init_sigma_fn'], base_path,
local_path.joinpath('init_sigma_fn'))
else:
raise AttributeError("The detector's `kernel` must have a .get_config() method for it to be saved.")
# Serialize the kernel class
cfg_kernel['src'], _ = _serialize_object(kernel_class, base_path, local_path.joinpath('kernel'))
return cfg_kernel
def _save_optimizer_config(optimizer: Union['tf.keras.optimizers.Optimizer', type]) -> dict:
"""
Function to save tensorflow or pytorch optimizers.
Parameters
----------
optimizer
The optimizer to save.
Returns
-------
Optimizer config dict.
"""
if isinstance(optimizer, type):
return {'class_name': optimizer.__name__}
else:
return save_optimizer_config_tf(optimizer)
| 19,169 | 34.369004 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/__init__.py
|
from alibi_detect.saving.validate import validate_config
from alibi_detect.saving.loading import load_detector, read_config, resolve_config
from alibi_detect.saving.registry import registry
from alibi_detect.saving.saving import save_detector, write_config
__all__ = [
"save_detector",
"write_config",
"load_detector",
"read_config",
"resolve_config",
"validate_config",
"registry"
]
| 413 | 26.6 | 82 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_typing.py
|
"""Typing constructs for saving and loading functionality
List of detectors that are valid for saving and loading either via the legacy methods or the new config driven
functionality"""
VALID_DETECTORS = [
'AdversarialAE',
'ChiSquareDrift',
'ClassifierDrift',
'IForest',
'KSDrift',
'LLR',
'Mahalanobis',
'MMDDrift',
'LSDDDrift',
'ModelDistillation',
'OutlierAE',
'OutlierAEGMM',
'OutlierProphet',
'OutlierSeq2Seq',
'OutlierVAE',
'OutlierVAEGMM',
'SpectralResidual',
'TabularDrift',
'CVMDrift',
'FETDrift',
'SpotTheDiffDrift',
'ClassifierUncertaintyDrift',
'RegressorUncertaintyDrift',
'LearnedKernelDrift',
'ContextMMDDrift',
'MMDDriftTF', # TODO - remove when legacy loading removed
'ClassifierDriftTF', # TODO - remove when legacy loading removed
'MMDDriftOnline',
'LSDDDriftOnline',
'CVMDriftOnline',
'FETDriftOnline'
]
| 951 | 23.410256 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_sklearn/loading.py
|
import os
from pathlib import Path
from typing import Union
import joblib
from sklearn.base import BaseEstimator
def load_model(filepath: Union[str, os.PathLike],
) -> BaseEstimator:
"""
Load scikit-learn (or xgboost) model. Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`.
This includes xgboost models following the scikit-learn API
(see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).
Parameters
----------
filepath
Saved model directory.
Returns
-------
Loaded model.
"""
model_dir = Path(filepath)
return joblib.load(model_dir.joinpath('model.joblib'))
| 706 | 25.185185 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_sklearn/saving.py
|
import logging
import os
from pathlib import Path
from typing import Union
import joblib
from sklearn.base import BaseEstimator
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: BaseEstimator,
base_path: Path,
local_path: Path = Path('.')) -> dict:
"""
Save a scikit-learn (or xgboost) model to a config dictionary.
Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`. This includes xgboost models
following the scikit-learn API
(see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).
Parameters
----------
model
The model to save.
base_path
Base filepath to save to (the location of the `config.toml` file).
local_path
A local (relative) filepath to append to base_path.
Returns
-------
The model config dict.
"""
filepath = base_path.joinpath(local_path)
save_model(model, filepath=filepath, save_dir='model')
cfg_model = {
'flavour': Framework.SKLEARN.value,
'src': local_path.joinpath('model')
}
return cfg_model
def save_model(model: BaseEstimator,
filepath: Union[str, os.PathLike],
save_dir: Union[str, os.PathLike] = 'model') -> None:
"""
Save scikit-learn (and xgboost) models. Models are assumed to be a subclass of :class:`~sklearn.base.BaseEstimator`.
This includes xgboost models following the scikit-learn API
(see https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn).
Parameters
----------
model
The scikit-learn (or xgboost) model to save.
filepath
Save directory.
save_dir
Name of folder to save to within the filepath directory.
"""
# create folder to save model in
model_path = Path(filepath).joinpath(save_dir)
if not model_path.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_path))
model_path.mkdir(parents=True, exist_ok=True)
# save model
model_path = model_path.joinpath('model.joblib')
joblib.dump(model, model_path)
| 2,235 | 30.942857 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_sklearn/__init__.py
|
from alibi_detect.saving._sklearn.saving import save_model_config as save_model_config_sk
from alibi_detect.saving._sklearn.loading import load_model as load_model_sk
__all__ = [
"save_model_config_sk",
"load_model_sk"
]
| 230 | 27.875 | 89 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_sklearn/tests/test_saving_sk.py
|
from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import classifier_model, xgb_classifier_model
from alibi_detect.saving.loading import _load_model_config
from alibi_detect.saving.saving import _path2str, _save_model_config
from alibi_detect.saving.schemas import ModelConfig
backend = param_fixture("backend", ['sklearn'])
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [classifier_model, xgb_classifier_model])
def test_save_model_sk(data, model, tmp_path):
"""
Unit test for _save_model_config and _load_model_config with scikit-learn and xgboost model.
"""
# Save model
filepath = tmp_path
cfg_model, _ = _save_model_config(model, base_path=filepath)
cfg_model = _path2str(cfg_model)
cfg_model = ModelConfig(**cfg_model).dict()
assert tmp_path.joinpath('model').is_dir()
assert tmp_path.joinpath('model/model.joblib').is_file()
# Adjust config
cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here
# Load model
model_load = _load_model_config(cfg_model)
assert isinstance(model_load, type(model))
| 1,295 | 38.272727 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/conftest.py
|
import pytest
@pytest.fixture
def seed(pytestconfig):
"""
Returns the random seed set by pytest-randomly.
"""
return pytestconfig.getoption("randomly_seed")
| 175 | 16.6 | 51 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/datasets.py
|
import numpy as np
import pytest
from alibi_testing.data import get_movie_sentiment_data
from pytest_cases import parametrize
from requests import RequestException
# Note: If any of below cases become large, see https://smarie.github.io/python-pytest-cases/#c-caching-cases
FLOAT = np.float32
INT = np.int32
# Group dataset "cases" by type of data i.e. continuous, binary, categorical, mixed
class ContinuousData:
# Note: we could parametrize cases here (and/or pass them fixtures).
# See https://smarie.github.io/python-pytest-cases/#case-generators
@staticmethod
@parametrize(data_shape=[(50, 4)])
def data_synthetic_nd(data_shape):
n_samples, input_dim = data_shape
X_ref = np.random.default_rng(0).standard_normal(size=data_shape, dtype=FLOAT) * 0.5
X_h0 = np.random.default_rng(1).standard_normal(size=data_shape, dtype=FLOAT) * 0.5
return X_ref, X_h0
# @staticmethod
# def data_synthetic_1d(): # TODO - add if we decide to support 1D data
# n_samples = 50
# X_ref = np.random.rand(n_samples)
# X_h0 = np.random.rand(n_samples)
# return X_ref, X_h0
class CategoricalData:
@staticmethod
@parametrize(data_shape=[(50, 4)])
def data_synthetic_nd(data_shape):
n_samples, input_dim = data_shape
X_ref = np.random.default_rng(0).choice(a=[0, 1, 2], size=(n_samples, input_dim), p=[0.5, 0.3, 0.2]).astype(INT)
X_h0 = np.random.default_rng(1).choice(a=[0, 1, 2], size=(n_samples, input_dim), p=[0.5, 0.3, 0.2]).astype(INT)
return X_ref, X_h0
class MixedData:
@staticmethod
@parametrize(data_shape=[(50, 4)])
def data_synthetic_nd(data_shape):
n_samples, input_dim = data_shape
X_ref = np.random.default_rng(0).standard_normal(size=data_shape, dtype=FLOAT) * 0.5
X_ref[:, :2] = np.random.default_rng(0).choice(a=[0, 1, 2], size=(n_samples, 2), p=[0.5, 0.3, 0.2]).astype(INT)
X_h0 = np.random.default_rng(1).standard_normal(size=data_shape, dtype=FLOAT) * 0.5
X_h0[:, :2] = np.random.default_rng(1).choice(a=[0, 1, 2], size=(n_samples, 2), p=[0.5, 0.3, 0.2]).astype(INT)
return X_ref, X_h0
class BinData:
@staticmethod
@parametrize(data_shape=[(50, 2)])
def data_synthetic_nd(data_shape):
n_samples, input_dim = data_shape
X_ref = np.random.default_rng(0).choice([0, 1], (n_samples, input_dim), p=[0.6, 0.4]).astype(INT)
X_h0 = np.random.default_rng(0).choice([0, 1], (n_samples, input_dim), p=[0.6, 0.4]).astype(INT)
return X_ref, X_h0
class TextData:
@staticmethod
def movie_sentiment_data():
try:
return get_movie_sentiment_data()
except RequestException:
pytest.skip('Movie sentiment dataset URL down')
| 2,799 | 37.888889 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/models.py
|
from functools import partial
from importlib import import_module
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from requests.exceptions import HTTPError
import pytest
from pytest_cases import fixture, parametrize
from transformers import AutoTokenizer
from alibi_detect.cd.pytorch import UAE as UAE_pt
from alibi_detect.cd.pytorch import preprocess_drift as preprocess_drift_pt
from alibi_detect.cd.tensorflow import UAE as UAE_tf
from alibi_detect.cd.tensorflow import preprocess_drift as preprocess_drift_tf
from alibi_detect.utils.pytorch.kernels import GaussianRBF as GaussianRBF_pt
from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernel_pt
from alibi_detect.utils.tensorflow.kernels import GaussianRBF as GaussianRBF_tf
from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf
from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt
from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf
from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt
from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf
from alibi_detect.utils.frameworks import has_keops
if has_keops: # pykeops only installed in Linux CI
from alibi_detect.utils.keops.kernels import GaussianRBF as GaussianRBF_ke
from alibi_detect.utils.keops.kernels import DeepKernel as DeepKernel_ke
LATENT_DIM = 2 # Must be less than input_dim set in ./datasets.py
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
@fixture
def encoder_model(backend, current_cases):
"""
An untrained encoder of given input dimension and backend (this is a "custom" model, NOT an Alibi Detect UAE).
"""
_, _, data_params = current_cases["data"]
_, input_dim = data_params['data_shape']
if backend == 'tensorflow':
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(input_dim,)),
tf.keras.layers.Dense(5, activation=tf.nn.relu),
tf.keras.layers.Dense(LATENT_DIM, activation=None)
]
)
elif backend in ('pytorch', 'keops'):
model = nn.Sequential(nn.Linear(input_dim, 5),
nn.ReLU(),
nn.Linear(5, LATENT_DIM))
else:
pytest.skip('`encoder_model` only implemented for tensorflow and pytorch.')
return model
@fixture
def encoder_dropout_model(backend, current_cases):
"""
An untrained encoder with dropout, of given input dimension and backend.
TODO: consolidate this model (and encoder_model above) with models like that in test_model_uncertainty.py
"""
_, _, data_params = current_cases["data"]
_, input_dim = data_params['data_shape']
if backend == 'tensorflow':
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(input_dim,)),
tf.keras.layers.Dense(5, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.0), # 0.0 to ensure determinism
tf.keras.layers.Dense(LATENT_DIM, activation=None)
]
)
elif backend in ('pytorch', 'keops'):
model = nn.Sequential(nn.Linear(input_dim, 5),
nn.ReLU(),
nn.Dropout(0.0), # 0.0 to ensure determinism
nn.Linear(5, LATENT_DIM))
else:
pytest.skip('`encoder_dropout_model` only implemented for tensorflow and pytorch.')
return model
@fixture
def preprocess_uae(encoder_model):
"""
Preprocess function with Untrained Autoencoder.
"""
if isinstance(encoder_model, tf.keras.Model):
preprocess_fn = partial(preprocess_drift_tf, model=encoder_model)
else:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
preprocess_fn = partial(preprocess_drift_pt, model=encoder_model, device=device)
return preprocess_fn
@fixture
def kernel(request, backend):
"""
Gaussian RBF kernel for given backend. Settings are parametrised in the test function.
"""
kernel = request.param
if isinstance(kernel, dict): # dict of kwargs
kernel_cfg = kernel.copy()
sigma = kernel_cfg.pop('sigma', None)
if backend == 'tensorflow':
if sigma is not None and not isinstance(sigma, tf.Tensor):
sigma = tf.convert_to_tensor(sigma)
kernel = GaussianRBF_tf(sigma=sigma, **kernel_cfg)
elif backend == 'pytorch':
if sigma is not None and not isinstance(sigma, torch.Tensor):
sigma = torch.tensor(sigma)
kernel = GaussianRBF_pt(sigma=sigma, **kernel_cfg)
elif backend == 'keops':
if sigma is not None and not isinstance(sigma, torch.Tensor):
sigma = torch.tensor(sigma)
kernel = GaussianRBF_ke(sigma=sigma, **kernel_cfg)
else:
pytest.skip('`kernel` only implemented for tensorflow, pytorch and keops.')
return kernel
@fixture
def optimizer(request, backend):
"""
Optimizer for given backend. Optimizer is expected to be passed via `request` as a string, e.g. "Adam".
For tensorflow, the optimizer is an instantiated `tf.keras.optimizers.Optimizer` object. For pytorch,
the optimizer is a `torch.optim.Optimizer` class (NOT instantiated).
"""
optimizer = request.param # Get parametrized setting
if backend not in ('tensorflow', 'pytorch', 'keops'):
pytest.skip('`optimizer` only implemented for tensorflow, pytorch and keops.')
if isinstance(optimizer, str):
module = 'tensorflow.keras.optimizers' if backend == 'tensorflow' else 'torch.optim'
try:
optimizer = getattr(import_module(module), optimizer)
except AttributeError:
raise ValueError(f"{optimizer} is not a recognised optimizer in {module}.")
return optimizer
@fixture
def deep_kernel(request, backend, encoder_model):
"""
Deep kernel, built using the `encoder_model` fixture for the projection, and using the kernel_a and eps
parametrised in the test function.
"""
# Get DeepKernel options
kernel_a = request.param.get('kernel_a', 'rbf')
kernel_b = request.param.get('kernel_b', 'rbf')
eps = request.param.get('eps', 'trainable')
# Proj model (backend managed in encoder_model fixture)
proj = encoder_model
# Build DeepKernel
if backend == 'tensorflow':
kernel_a = GaussianRBF_tf(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
kernel_b = GaussianRBF_tf(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
deep_kernel = DeepKernel_tf(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
elif backend == 'pytorch':
kernel_a = GaussianRBF_pt(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
kernel_b = GaussianRBF_pt(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
deep_kernel = DeepKernel_pt(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
elif backend == 'keops':
kernel_a = GaussianRBF_ke(**kernel_a) if isinstance(kernel_a, dict) else kernel_a
kernel_b = GaussianRBF_ke(**kernel_b) if isinstance(kernel_b, dict) else kernel_b
deep_kernel = DeepKernel_ke(proj, kernel_a=kernel_a, kernel_b=kernel_b, eps=eps)
else:
pytest.skip('`deep_kernel` only implemented for tensorflow and pytorch.')
return deep_kernel
@fixture
def classifier_model(backend, current_cases):
"""
Classification model with given input dimension and backend.
"""
_, _, data_params = current_cases["data"]
_, input_dim = data_params['data_shape']
if backend == 'tensorflow':
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(input_dim,)),
tf.keras.layers.Dense(2, activation=tf.nn.softmax),
]
)
elif backend in ('pytorch', 'keops'):
model = nn.Sequential(nn.Linear(input_dim, 2),
nn.Softmax(1))
elif backend == 'sklearn':
model = RandomForestClassifier()
else:
pytest.skip('`classifier_model` only implemented for tensorflow, pytorch, keops and sklearn.')
return model
@fixture
def xgb_classifier_model():
model = XGBClassifier()
return model
@fixture(unpack_into=('tokenizer, embedding, max_len, enc_dim'))
@parametrize('model_name, max_len', [('bert-base-cased', 100)])
@parametrize('uae', [True, False])
def nlp_embedding_and_tokenizer(model_name, max_len, uae, backend):
"""
A fixture to build nlp embedding and tokenizer models based on the HuggingFace pre-trained models.
"""
backend = 'tf' if backend == 'tensorflow' else 'pt'
# Load tokenizer
try:
tokenizer = AutoTokenizer.from_pretrained(model_name)
except (OSError, HTTPError):
pytest.skip(f"Problem downloading {model_name} from huggingface.co")
X = 'A dummy string' # this will be padded to max_len
tokens = tokenizer(list(X[:5]), pad_to_max_length=True,
max_length=max_len, return_tensors=backend)
# Load embedding model
emb_type = 'hidden_state'
n_layers = 8
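    # layers = [-1, -2, ..., -8]: the embedding is built from the last n_layers hidden states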
layers = [-_ for _ in range(1, n_layers + 1)]
enc_dim = 32
if backend == 'tf':
try:
embedding = TransformerEmbedding_tf(model_name, emb_type, layers)
except (OSError, HTTPError):
pytest.skip(f"Problem downloading {model_name} from huggingface.co")
if uae:
x_emb = embedding(tokens)
shape = (x_emb.shape[1],)
embedding = UAE_tf(input_layer=embedding, shape=shape, enc_dim=enc_dim)
elif backend == 'pt':
try:
embedding = TransformerEmbedding_pt(model_name, emb_type, layers)
except (OSError, HTTPError):
pytest.skip(f"Problem downloading {model_name} from huggingface.co")
if uae:
x_emb = embedding(tokens)
shape = (x_emb.shape[1],)
embedding = UAE_pt(input_layer=embedding, shape=shape, enc_dim=enc_dim)
return tokenizer, embedding, max_len, enc_dim
def preprocess_simple(x: np.ndarray):
"""
Simple function to test serialization of generic Python function within preprocess_fn.
"""
return x*2.0
@fixture
def preprocess_simple_with_kwargs():
"""
Simple function to test serialization of generic Python function with kwargs, within preprocess_fn.
"""
return partial(preprocess_simple, kwarg1=42, kwarg2=True)
@fixture
def preprocess_nlp(embedding, tokenizer, max_len, backend):
"""
    Preprocess function using the NLP embedding model (a transformer embedding, optionally wrapped in an
    Untrained AutoEncoder).
"""
if backend == 'tensorflow':
preprocess_fn = partial(preprocess_drift_tf, model=embedding, tokenizer=tokenizer,
max_len=max_len, preprocess_batch_fn=preprocess_simple)
elif backend in ('pytorch', 'keops'):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
preprocess_fn = partial(preprocess_drift_pt, model=embedding, tokenizer=tokenizer, max_len=max_len,
preprocess_batch_fn=preprocess_simple, device=device)
else:
pytest.skip('`preprocess_nlp` only implemented for tensorflow, pytorch and keops.')
return preprocess_fn
@fixture
def preprocess_hiddenoutput(classifier_model, current_cases, backend):
"""
Preprocess function to extract the softmax layer of a classifier (with the HiddenOutput utility function).
"""
_, _, data_params = current_cases["data"]
_, input_dim = data_params['data_shape']
if backend == 'tensorflow':
model = HiddenOutput_tf(classifier_model, layer=-1, input_shape=(None, input_dim))
preprocess_fn = partial(preprocess_drift_tf, model=model)
elif backend in ('pytorch', 'keops'):
model = HiddenOutput_pt(classifier_model, layer=-1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
preprocess_fn = partial(preprocess_drift_pt, model=model, device=device)
else:
pytest.skip('`preprocess_hiddenoutput` only implemented for tensorflow, pytorch and keops.')
return preprocess_fn
| 12,456 | 39.313916 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/test_saving.py
|
# type: ignore
"""
Tests for saving/loading of detectors via config.toml files.
Internal functions such as save_kernel/load_kernel_config etc are also tested.
"""
from functools import partial
import os
from pathlib import Path
from typing import Callable
import sklearn.base
import toml
import dill
import numpy as np
import pytest
import scipy
import tensorflow as tf
import torch
import torch.nn as nn
from .datasets import BinData, CategoricalData, ContinuousData, MixedData, TextData
from .models import (encoder_model, preprocess_uae, preprocess_hiddenoutput, preprocess_simple, # noqa: F401
preprocess_simple_with_kwargs,
preprocess_nlp, LATENT_DIM, classifier_model, kernel, deep_kernel, nlp_embedding_and_tokenizer,
embedding, tokenizer, max_len, enc_dim, encoder_dropout_model, optimizer)
from alibi_detect.utils._random import fixed_seed
from packaging import version
from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from sklearn.model_selection import StratifiedKFold
from alibi_detect.cd import (ChiSquareDrift, ClassifierUncertaintyDrift, RegressorUncertaintyDrift,
ClassifierDrift, FETDrift, KSDrift, LearnedKernelDrift, LSDDDrift, MMDDrift,
SpotTheDiffDrift, TabularDrift, ContextMMDDrift, MMDDriftOnline, LSDDDriftOnline,
CVMDriftOnline, FETDriftOnline)
from alibi_detect.models.pytorch import TransformerEmbedding as TransformerEmbedding_pt
from alibi_detect.models.tensorflow import TransformerEmbedding as TransformerEmbedding_tf
from alibi_detect.saving import (load_detector, read_config, registry,
resolve_config, save_detector, write_config)
from alibi_detect.saving.loading import (_get_nested_value, _replace,
_set_dtypes, _set_nested_value, _prepend_cfg_filepaths)
from alibi_detect.saving.saving import _serialize_object
from alibi_detect.saving.saving import (_path2str, _int2str_keys, _save_kernel_config, _save_model_config,
_save_preprocess_config)
from alibi_detect.saving.schemas import DeepKernelConfig, KernelConfig, ModelConfig, PreprocessConfig
from alibi_detect.utils.pytorch.kernels import DeepKernel as DeepKernel_pt
from alibi_detect.utils.tensorflow.kernels import DeepKernel as DeepKernel_tf
from alibi_detect.utils.frameworks import has_keops
if has_keops: # pykeops only installed in Linux CI
from pykeops.torch import LazyTensor
from alibi_detect.utils.keops.kernels import DeepKernel as DeepKernel_ke
if version.parse(scipy.__version__) >= version.parse('1.7.0'):
from alibi_detect.cd import CVMDrift
# TODO: We currently parametrize encoder_model etc (in models.py) with backend, so the same flavour of
# preprocessing is used as the detector backend. In the future we could decouple this in tests.
backends = ['tensorflow', 'pytorch', 'sklearn']
if has_keops: # pykeops only installed in Linux CI
backends.append('keops')
backend = param_fixture("backend", backends)
P_VAL = 0.05
ERT = 10
N_PERMUTATIONS = 10
N_BOOTSTRAPS = 100
WINDOW_SIZE = 5
REGISTERED_OBJECTS = registry.get_all()
# Define a detector config dict
MMD_CFG = {
'name': 'MMDDrift',
'x_ref': np.array([[-0.30074928], [1.50240758], [0.43135768], [2.11295779], [0.79684913]]),
'p_val': 0.05,
'n_permutations': 150,
'data_type': 'tabular'
}
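# Minimal, unresolved config: in `test_load_simple_config` below, `x_ref` is saved to 'x_ref.npy' and replaced
# by that filename before the config is written to disk.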
CFGS = [MMD_CFG]
# TODO - future: Some of the fixtures can/should be moved elsewhere (i.e. if they can be recycled for use elsewhere)
@parametrize('cfg', CFGS)
def test_load_simple_config(cfg, tmp_path):
"""
Test that a bare-bones `config.toml` without a [meta] field can be loaded by `load_detector`.
"""
save_dir = tmp_path
x_ref_path = str(save_dir.joinpath('x_ref.npy'))
cfg_path = save_dir.joinpath('config.toml')
# Save x_ref in config.toml
x_ref = cfg['x_ref']
np.save(x_ref_path, x_ref)
cfg['x_ref'] = 'x_ref.npy'
# Save config.toml then load it
with open(cfg_path, 'w') as f:
toml.dump(cfg, f)
cd = load_detector(cfg_path)
assert cd.__class__.__name__ == cfg['name']
    # Get config and compare to original (original cfg not fully spec'd so only compare items that are present)
cfg_new = cd.get_config()
for k, v in cfg.items():
if k == 'x_ref':
assert v == 'x_ref.npy'
else:
assert v == cfg_new[k]
@parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_ksdrift(data, preprocess_fn, tmp_path):
"""
Test KSDrift on continuous datasets, with UAE and classifier_model softmax output as preprocess_fn's. Only this
detector is tested with preprocessing strategies, as other detectors should see the same preprocess_fn output.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
cd = KSDrift(X_ref,
p_val=P_VAL,
preprocess_fn=preprocess_fn,
preprocess_at_init=True,
)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
# Assert
np.testing.assert_array_equal(preprocess_fn(X_ref), cd_load.x_ref)
assert cd_load.n_features == LATENT_DIM
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.preprocess_fn, Callable)
assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
np.testing.assert_array_equal(cd.predict(X_h0)['data']['p_val'],
cd_load.predict(X_h0)['data']['p_val'])
@pytest.mark.skipif(backend == 'sklearn', reason="Don't test with sklearn preprocessing.")
@parametrize('preprocess_fn', [preprocess_nlp])
@parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_')
def test_save_ksdrift_nlp(data, preprocess_fn, enc_dim, tmp_path): # noqa: F811
"""
    Test KSDrift on text data, with a transformer embedding (with and without UAE) as preprocess_fn. Only this
    detector is tested with embedding and embedding+uae, as other detectors should see the same preprocessed data.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data['X_train'][:5], data['X_test'][:5]
cd = KSDrift(X_ref,
p_val=P_VAL,
preprocess_fn=preprocess_fn,
preprocess_at_init=True,
input_shape=(768,), # hardcoded to bert-base-cased for now
)
save_detector(cd, tmp_path, legacy=False)
cd_load = load_detector(tmp_path)
# Assert
np.testing.assert_array_equal(preprocess_fn(X_ref), cd_load.x_ref)
if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
assert cd_load.n_features == 768 # hardcoded to bert-base-cased for now
else:
assert cd_load.n_features == enc_dim # encoder dim
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.preprocess_fn, Callable)
assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
np.testing.assert_array_equal(cd.predict(X_h0)['data']['p_val'],
cd_load.predict(X_h0)['data']['p_val'])
@pytest.mark.skipif(version.parse(scipy.__version__) < version.parse('1.7.0'),
reason="Requires scipy version >= 1.7.0")
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_cvmdrift(data, preprocess_uae, tmp_path):
"""
Test CVMDrift on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
cd = CVMDrift(X_ref,
p_val=P_VAL,
preprocess_fn=preprocess_uae,
preprocess_at_init=True,
)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
# Assert
np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load.x_ref)
assert cd_load.n_features == LATENT_DIM
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.preprocess_fn, Callable)
assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
np.testing.assert_array_equal(cd.predict(X_h0)['data']['p_val'],
cd_load.predict(X_h0)['data']['p_val'])
@parametrize('kernel', [
None, # Use default kernel
{'sigma': 0.5, 'trainable': False}, # pass kernel as object
], indirect=True
)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_mmddrift(data, kernel, preprocess_uae, backend, tmp_path, seed): # noqa: F811
"""
Test MMDDrift on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch', 'keops'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
kwargs = {
'p_val': P_VAL,
'backend': backend,
'preprocess_fn': preprocess_uae,
'n_permutations': N_PERMUTATIONS,
'preprocess_at_init': True,
'kernel': kernel,
'configure_kernel_from_x_ref': False,
'sigma': np.array([0.5], dtype=np.float32)
}
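    # `configure_kernel_from_x_ref=False` together with an explicit `sigma` fixes the kernel bandwidth instead
    # of inferring it from `x_ref` (checked below via the `infer_sigma` attribute).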
if backend in ('pytorch', 'keops'):
kwargs['device'] = 'cuda' if torch.cuda.is_available() else 'cpu'
with fixed_seed(seed):
cd = MMDDrift(X_ref, **kwargs)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# assertions
np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load._detector.x_ref)
assert not cd_load._detector.infer_sigma
assert cd_load._detector.n_permutations == N_PERMUTATIONS
assert cd_load._detector.p_val == P_VAL
assert isinstance(cd_load._detector.preprocess_fn, Callable)
assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
assert preds['data']['p_val'] == preds_load['data']['p_val']
# @parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize('preprocess_at_init', [True, False])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_lsdddrift(data, preprocess_at_init, backend, tmp_path, seed):
"""
Test LSDDDrift on continuous datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
preprocess_fn = preprocess_simple
    # TODO - TensorFlow based preprocessors currently cause non-deterministic behaviour with LSDD permutations.
    # Replace preprocess_simple with parametrized preprocess_fn's once the above issue is resolved.
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed): # Init and predict with a fixed random state
cd = LSDDDrift(X_ref,
p_val=P_VAL,
backend=backend,
preprocess_fn=preprocess_fn,
preprocess_at_init=preprocess_at_init,
n_permutations=N_PERMUTATIONS
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed): # Again, load and predict with fixed random state
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# assertions
if preprocess_at_init:
np.testing.assert_array_almost_equal(cd_load.get_config()['x_ref'], preprocess_fn(X_ref), 5)
else:
np.testing.assert_array_almost_equal(cd_load.get_config()['x_ref'], X_ref, 5)
np.testing.assert_array_almost_equal(cd._detector.x_ref, cd_load._detector.x_ref, 5)
assert cd_load._detector.n_permutations == N_PERMUTATIONS
assert cd_load._detector.p_val == P_VAL
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_fetdrift(data, tmp_path):
"""
Test FETDrift on binary datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = FETDrift(X_ref,
p_val=P_VAL,
alternative='less',
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert not cd_load.x_ref_preprocessed
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert cd_load.alternative == 'less'
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=CategoricalData, prefix='data_')
def test_save_chisquaredrift(data, tmp_path):
"""
Test ChiSquareDrift on categorical datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = ChiSquareDrift(X_ref,
p_val=P_VAL,
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.x_ref_categories, dict)
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
assert cd_load.x_ref_categories == cd.x_ref_categories
@parametrize_with_cases("data", cases=MixedData, prefix='data_')
def test_save_tabulardrift(data, tmp_path):
"""
Test TabularDrift on mixed datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Detector save/load
X_ref, X_h0 = data
input_dim = X_ref.shape[1]
cd = TabularDrift(X_ref,
p_val=P_VAL,
categories_per_feature={0: None},
)
preds = cd.predict(X_h0)
save_detector(cd, tmp_path)
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0)
# Assert
np.testing.assert_array_equal(X_ref, cd_load.x_ref)
assert cd_load.n_features == input_dim
assert cd_load.p_val == P_VAL
assert isinstance(cd_load.x_ref_categories, dict)
assert cd_load.x_ref_categories == cd.x_ref_categories
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('optimizer', [None, "Adam"], indirect=True)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_classifierdrift(data, optimizer, classifier_model, backend, tmp_path, seed): # noqa: F811
"""
Test ClassifierDrift on continuous datasets.
"""
if backend not in ('tensorflow', 'pytorch', 'sklearn'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = ClassifierDrift(X_ref,
model=classifier_model,
p_val=P_VAL,
optimizer=optimizer,
n_folds=5,
backend=backend,
train_size=None)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(X_ref, cd_load._detector.x_ref)
assert isinstance(cd_load._detector.skf, StratifiedKFold)
assert cd_load._detector.p_val == P_VAL
if backend != 'sklearn':
assert isinstance(cd_load._detector.train_kwargs, dict)
if backend == 'tensorflow':
assert isinstance(cd_load._detector.model, tf.keras.Model)
elif backend == 'pytorch':
assert isinstance(cd_load._detector.model, nn.Module)
elif backend == 'sklearn':
assert isinstance(cd_load._detector.model, sklearn.base.BaseEstimator)
# TODO - detector still not deterministic, investigate in future
# assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
# assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_spotthediff(data, classifier_model, backend, tmp_path, seed): # noqa: F811
"""
Test SpotTheDiffDrift on continuous datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = SpotTheDiffDrift(X_ref,
p_val=P_VAL,
n_folds=5,
train_size=None,
backend=backend)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(X_ref, cd_load._detector._detector.x_ref)
assert isinstance(cd_load._detector._detector.skf, StratifiedKFold)
assert cd_load._detector._detector.p_val == P_VAL
assert isinstance(cd_load._detector._detector.train_kwargs, dict)
if backend == 'tensorflow':
assert isinstance(cd_load._detector._detector.model, tf.keras.Model)
elif backend == 'pytorch':
assert isinstance(cd_load._detector._detector.model, nn.Module)
# TODO - detector still not deterministic, investigate in future
# assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
# assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('deep_kernel', [
{'kernel_a': 'rbf', 'eps': 0.01} # Default for kernel_a
], indirect=True
)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_learnedkernel(data, deep_kernel, backend, tmp_path, seed): # noqa: F811
"""
Test LearnedKernelDrift on continuous datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch', 'keops'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = LearnedKernelDrift(X_ref,
deep_kernel,
p_val=P_VAL,
backend=backend,
train_size=0.7,
num_workers=0)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(X_ref, cd_load._detector.x_ref)
assert not cd_load._detector.x_ref_preprocessed
assert cd_load._detector.p_val == P_VAL
assert isinstance(cd_load._detector.train_kwargs, dict)
if backend == 'tensorflow':
assert isinstance(cd_load._detector.kernel, DeepKernel_tf)
elif backend == 'pytorch':
assert isinstance(cd_load._detector.kernel, DeepKernel_pt)
else: # backend == keops
assert isinstance(cd_load._detector.kernel, DeepKernel_ke)
# TODO: Not yet deterministic
# assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
# assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('kernel', [
None, # Default kernel
{'sigma': 0.5, 'trainable': False}, # pass kernels as GaussianRBF objects, with default sigma_median fn
], indirect=True
)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_contextmmddrift(data, kernel, backend, tmp_path, seed): # noqa: F811
"""
Test ContextMMDDrift on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
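    # Context variables: use the (shifted) first feature column as a simple 1-D context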
C_ref, C_h0 = (X_ref[:, 0] + 1).reshape(-1, 1), (X_h0[:, 0] + 1).reshape(-1, 1)
with fixed_seed(seed):
cd = ContextMMDDrift(X_ref,
C_ref,
p_val=P_VAL,
backend=backend,
preprocess_fn=preprocess_simple,
n_permutations=N_PERMUTATIONS,
preprocess_at_init=True,
x_kernel=kernel,
c_kernel=kernel
)
preds = cd.predict(X_h0, C_h0)
save_detector(cd, tmp_path)
# Load and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0, C_h0)
# assertions
np.testing.assert_array_equal(preprocess_simple(X_ref), cd_load._detector.x_ref)
np.testing.assert_array_equal(C_ref, cd_load._detector.c_ref)
assert cd_load._detector.n_permutations == N_PERMUTATIONS
assert cd_load._detector.p_val == P_VAL
assert isinstance(cd_load._detector.preprocess_fn, Callable)
assert cd_load._detector.preprocess_fn.__name__ == 'preprocess_simple'
assert cd._detector.x_kernel.sigma == cd_load._detector.x_kernel.sigma
assert cd._detector.c_kernel.sigma == cd_load._detector.c_kernel.sigma
assert cd._detector.x_kernel.init_sigma_fn == cd_load._detector.x_kernel.init_sigma_fn
assert cd._detector.c_kernel.init_sigma_fn == cd_load._detector.c_kernel.init_sigma_fn
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_classifieruncertaintydrift(data, classifier_model, backend, tmp_path, seed): # noqa: F811
""" Test ClassifierDrift on continuous datasets."""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = ClassifierUncertaintyDrift(X_ref,
model=classifier_model,
p_val=P_VAL,
backend=backend,
preds_type='probs',
uncertainty_type='entropy')
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(cd._detector.preprocess_fn(X_ref), cd_load._detector.x_ref)
assert cd_load._detector.p_val == P_VAL
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
@parametrize('regressor', [encoder_dropout_model])
def test_save_regressoruncertaintydrift(data, regressor, backend, tmp_path, seed):
""" Test RegressorDrift on continuous datasets."""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and predict
X_ref, X_h0 = data
with fixed_seed(seed):
cd = RegressorUncertaintyDrift(X_ref,
model=regressor,
p_val=P_VAL,
backend=backend,
uncertainty_type='mc_dropout'
)
preds = cd.predict(X_h0) # noqa: F841
save_detector(cd, tmp_path)
# Load detector and make another prediction
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
preds_load = cd_load.predict(X_h0) # noqa: F841
# Assert
np.testing.assert_array_equal(cd._detector.preprocess_fn(X_ref), cd_load._detector.x_ref)
assert cd_load._detector.p_val == P_VAL
assert preds['data']['distance'] == pytest.approx(preds_load['data']['distance'], abs=1e-6)
assert preds['data']['p_val'] == pytest.approx(preds_load['data']['p_val'], abs=1e-6)
@parametrize('kernel', [
None, # Use default kernel
{'sigma': 0.5, 'trainable': False}, # pass kernel as object
], indirect=True
)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinemmddrift(data, kernel, preprocess_uae, backend, tmp_path, seed): # noqa: F811
"""
Test MMDDriftOnline on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = MMDDriftOnline(X_ref,
ert=ERT,
backend=backend,
preprocess_fn=preprocess_uae,
n_bootstraps=N_BOOTSTRAPS,
kernel=kernel,
window_size=WINDOW_SIZE
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
            pred = cd_load.predict(x_t)
if i >= WINDOW_SIZE:
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_equal(preprocess_uae(X_ref), cd_load._detector.x_ref)
assert cd_load._detector.n_bootstraps == N_BOOTSTRAPS
assert cd_load._detector.ert == ERT
assert isinstance(cd_load._detector.preprocess_fn, Callable)
assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinelsdddrift(data, preprocess_uae, backend, tmp_path, seed):
"""
Test LSDDDriftOnline on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = LSDDDriftOnline(X_ref,
ert=ERT,
backend=backend,
preprocess_fn=preprocess_uae,
n_bootstraps=N_BOOTSTRAPS,
window_size=WINDOW_SIZE
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
            pred = cd_load.predict(x_t)
if i >= WINDOW_SIZE:
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_almost_equal(preprocess_uae(X_ref), cd_load.get_config()['x_ref'], 5)
assert cd_load._detector.n_bootstraps == N_BOOTSTRAPS
assert cd_load._detector.ert == ERT
assert isinstance(cd_load._detector.preprocess_fn, Callable)
assert cd_load._detector.preprocess_fn.func.__name__ == 'preprocess_drift'
assert cd._detector.kernel.sigma == cd_load._detector.kernel.sigma
assert cd._detector.kernel.init_sigma_fn == cd_load._detector.kernel.init_sigma_fn
np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_onlinecvmdrift(data, preprocess_uae, tmp_path, seed):
"""
Test CVMDriftOnline on continuous datasets, with UAE as preprocess_fn.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = CVMDriftOnline(X_ref,
ert=ERT,
preprocess_fn=preprocess_uae,
n_bootstraps=N_BOOTSTRAPS,
window_sizes=[WINDOW_SIZE]
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
            pred = cd_load.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_almost_equal(preprocess_uae(X_ref), cd_load.get_config()['x_ref'], 5)
assert cd_load.n_bootstraps == N_BOOTSTRAPS
assert cd_load.ert == ERT
assert isinstance(cd_load.preprocess_fn, Callable)
assert cd_load.preprocess_fn.func.__name__ == 'preprocess_drift'
np.testing.assert_array_equal(stats, stats_load)
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_onlinefetdrift(data, tmp_path, seed):
"""
Test FETDriftOnline on binary datasets.
Detector is saved and then loaded, with assertions checking that the reinstantiated detector is equivalent.
"""
# Init detector and make predictions
X_ref, X_h0 = data
with fixed_seed(seed):
cd = FETDriftOnline(X_ref,
ert=ERT,
n_bootstraps=N_BOOTSTRAPS,
window_sizes=[WINDOW_SIZE]
)
stats = []
for i, x_t in enumerate(X_h0):
pred = cd.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats.append(pred['data']['test_stat'])
save_detector(cd, tmp_path)
# Load and make predictions
with fixed_seed(seed):
cd_load = load_detector(tmp_path)
stats_load = []
for i, x_t in enumerate(X_h0):
            pred = cd_load.predict(x_t)
if i >= WINDOW_SIZE: # test stats garbage until at least one window full
stats_load.append(pred['data']['test_stat'])
# assertions
np.testing.assert_array_equal(X_ref, cd_load.get_config()['x_ref'])
assert cd_load.n_bootstraps == N_BOOTSTRAPS
assert cd_load.ert == ERT
np.testing.assert_array_almost_equal(stats, stats_load, 4)
@parametrize("detector", [MMDDriftOnline, LSDDDriftOnline])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_multivariate_online_state(detector, data, backend, seed, tmp_path):
"""
Test the saving (and loading) of multivariate online detectors' state via `save_detector`.
"""
# Skip if backend not `tensorflow` or `pytorch`
if backend not in ('tensorflow', 'pytorch'):
pytest.skip("Detector doesn't have this backend")
# Init detector and make prediction to update state
X_ref, X_h0 = data
with fixed_seed(seed):
dd = detector(X_ref, ert=100, window_size=10, backend=backend)
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd._detector.state_dir == tmp_path.joinpath('state')
# Load
with fixed_seed(seed):
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
if detector == LSDDDriftOnline: # Often a small (~1e-6) difference in LSDD test stats post-load # TODO - why?
np.testing.assert_array_almost_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5], 5)
else:
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize("detector", [CVMDriftOnline])
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_cvm_online_state(detector, data, tmp_path):
"""
Test the saving (and loading) of the CVM online detector's state via `save_detector`.
"""
# Init detector and make prediction to update state
X_ref, X_h0 = data
dd = detector(X_ref, ert=100, window_sizes=[10])
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd.state_dir == tmp_path.joinpath('state')
# Load
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize("detector", [FETDriftOnline])
@parametrize_with_cases("data", cases=BinData, prefix='data_')
def test_save_fet_online_state(detector, data, tmp_path):
"""
Test the saving (and loading) of the FET online detector's state via `save_detector`.
"""
# Init detector and make prediction to update state
X_ref, X_h0 = data
dd = detector(X_ref, ert=100, window_sizes=[10])
# Run for 10 time-steps
test_stats = []
for t, x_t in enumerate(X_h0[:10]):
if t == 5:
# Save detector (with state)
save_detector(dd, tmp_path)
test_stats.append(dd.predict(x_t)['data']['test_stat'])
# Check state file created
assert dd.state_dir == tmp_path.joinpath('state')
# Load
dd_new = load_detector(tmp_path)
# Check attributes and compare predictions at t=5
assert dd_new.t == 5
np.testing.assert_array_equal(dd_new.predict(X_h0[5])['data']['test_stat'], test_stats[5])
# Check that error raised if no state file inside `state/` dir
for child in tmp_path.joinpath('state').glob('*'):
if child.is_file():
child.unlink()
with pytest.raises(FileNotFoundError):
load_detector(tmp_path)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_save_online_state_t0(data, tmp_path):
"""
Test that state is not saved when t=0.
"""
# Init detector
X_ref, X_h0 = data
dd = CVMDriftOnline(X_ref, ert=100, window_sizes=[10])
# Check state NOT saved when t=0
state_dir = tmp_path.joinpath('state')
save_detector(dd, tmp_path)
assert not state_dir.is_dir()
# Check state IS saved when t>0
dd.predict(X_h0[0])
save_detector(dd, tmp_path)
assert state_dir.is_dir()
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd)
def test_load_absolute(data, tmp_path):
"""
Test that load_detector() works with absolute paths in config.
"""
# Init detector and save
X_ref, X_h0 = data
cd = KSDrift(X_ref, p_val=P_VAL)
save_detector(cd, tmp_path)
# Write a new cfg file elsewhere, with x_ref reference inside it an absolute path to original x_ref location
cfg = read_config(tmp_path.joinpath('config.toml'))
x_ref_path = tmp_path.joinpath(Path(cfg['x_ref'])).resolve() # Absolute path for x_ref
cfg['x_ref'] = x_ref_path.as_posix() # we always write paths to config.toml as Posix not Windows paths
new_cfg_dir = tmp_path.joinpath('new_config_dir')
new_cfg_dir.mkdir()
write_config(cfg, new_cfg_dir)
# Reload
cd_new = load_detector(new_cfg_dir)
# Assertions
np.testing.assert_array_equal(cd.x_ref, cd_new.x_ref)
@parametrize_with_cases("data", cases=ContinuousData, prefix='data_')
def test_version_warning(data, tmp_path):
"""
Test that a version mismatch warning is raised if a detector is loaded from a config generated with a
different alibi_detect version, then saved, then loaded again (warning is still expected on final load).
This is only tested on one detector since the functionality lies outside of the actual detector classes.
"""
X_ref, X_h0 = data
cd = KSDrift(X_ref, p_val=P_VAL)
# First save (just to create a config)
save_detector(cd, tmp_path)
# Emulate version mismatch
cfg = read_config(tmp_path.joinpath('config.toml'))
cfg['meta']['version'] = '0.1.x'
_ = write_config(cfg, tmp_path)
# Reload and save again
cd = load_detector(tmp_path)
save_detector(cd, tmp_path)
# Check saved config contains a "version_warning"
cfg = read_config(tmp_path.joinpath('config.toml'))
assert cfg['meta']['version_warning']
# Final load (we expect a warning to be raised here)
with pytest.warns(Warning): # error will be raised if a warning IS NOT raised
cd_new = load_detector(tmp_path)
assert cd_new.meta.get('version_warning', False)
@parametrize('kernel', [
{'sigma': 0.5, 'trainable': False, 'init_sigma_fn': None},
{'sigma': [0.5, 0.8], 'trainable': False, 'init_sigma_fn': None},
{'sigma': None, 'trainable': True, 'init_sigma_fn': None},
], indirect=True
)
def test_save_kernel(kernel, backend, tmp_path): # noqa: F811
"""
Unit test for _save/_load_kernel_config, when kernel is a GaussianRBF kernel.
Kernels are saved and then loaded, with assertions to check equivalence.
"""
# Save kernel to config
filepath = tmp_path
filename = Path('mykernel')
cfg_kernel = _save_kernel_config(kernel, filepath, filename)
cfg_kernel = KernelConfig(**cfg_kernel).dict() # Pass through validator to test, and coerce sigma to Tensor
if kernel.__class__.__name__ == 'GaussianRBF':
assert cfg_kernel['src'] == '@utils.' + backend + '.kernels.GaussianRBF'
else:
assert Path(cfg_kernel['src']).suffix == '.dill'
assert cfg_kernel['trainable'] == kernel.trainable
if not kernel.trainable and cfg_kernel['sigma'] is not None:
np.testing.assert_array_almost_equal(cfg_kernel['sigma'], kernel.sigma, 6)
# Resolve and load config (_load_kernel_config is called within resolve_config)
cfg = {'kernel': cfg_kernel, 'backend': backend}
_prepend_cfg_filepaths(cfg, tmp_path)
kernel_loaded = resolve_config(cfg, tmp_path)['kernel']
# Call kernels
if backend == 'tensorflow':
X = tf.random.normal((10, 1), dtype=tf.float32)
elif backend == 'pytorch':
X = torch.randn((10, 1), dtype=torch.float32)
else: # backend == 'keops'
X = torch.randn((10, 1), dtype=torch.float32)
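        # keops kernels operate on LazyTensor inputs, hence the extra leading dim and the wrapping below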
X = LazyTensor(X[None, :])
kernel(X, X)
kernel_loaded(X, X)
# Final checks
assert type(kernel_loaded) == type(kernel)
if backend == 'tensorflow':
np.testing.assert_array_almost_equal(np.array(kernel_loaded.sigma), np.array(kernel.sigma), 5)
else:
np.testing.assert_array_almost_equal(kernel_loaded.sigma.detach().numpy(), kernel.sigma.detach().numpy(), 5)
assert kernel_loaded.trainable == kernel.trainable
assert kernel_loaded.init_sigma_fn == kernel.init_sigma_fn
# `data` passed below as needed in encoder_model, which is used in deep_kernel
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd)
@parametrize('deep_kernel', [
{'kernel_a': 'rbf', 'kernel_b': 'rbf', 'eps': 'trainable'}, # Default for kernel_a and kernel_b, trainable eps
{'kernel_a': {'trainable': True}, 'kernel_b': 'rbf', 'eps': 0.01}, # Explicit kernel_a, fixed eps
], indirect=True
)
def test_save_deepkernel(data, deep_kernel, backend, tmp_path): # noqa: F811
"""
Unit test for _save/_load_kernel_config, when kernel is a DeepKernel kernel.
Kernels are saved and then loaded, with assertions to check equivalence.
"""
# Get data dim
if backend == 'tensorflow':
X = tf.random.normal((10, 1), dtype=tf.float32)
elif backend == 'pytorch':
X = torch.randn((10, 1), dtype=torch.float32)
else: # backend == 'keops'
X = torch.randn((10, 1), dtype=torch.float32)
X = LazyTensor(X[None, :])
# X, _ = data
input_shape = (X.shape[1],)
# Save kernel to config
filepath = tmp_path
filename = 'mykernel'
cfg_kernel = _save_kernel_config(deep_kernel, filepath, filename)
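    # The projection model is saved separately; its `src` entry becomes a local reference ('model'), as asserted below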
cfg_kernel['proj'], _ = _save_model_config(cfg_kernel['proj'], base_path=filepath, input_shape=input_shape)
cfg_kernel = _path2str(cfg_kernel)
cfg_kernel['proj'] = ModelConfig(**cfg_kernel['proj']).dict() # Pass thru ModelConfig to set `layers` etc
cfg_kernel = DeepKernelConfig(**cfg_kernel).dict() # pydantic validation
assert cfg_kernel['proj']['src'] == 'model'
assert cfg_kernel['proj']['custom_objects'] is None
assert cfg_kernel['proj']['layer'] is None
# Resolve and load config
cfg = {'kernel': cfg_kernel, 'backend': backend}
kernel_loaded = resolve_config(cfg, tmp_path)['kernel'] # implicitly calls _load_kernel_config
# Call kernels
deep_kernel.kernel_a(X, X)
deep_kernel.kernel_b(X, X)
kernel_loaded.kernel_a(X, X)
kernel_loaded.kernel_b(X, X)
# Final checks
assert isinstance(kernel_loaded.proj, (torch.nn.Module, tf.keras.Model))
if backend == 'tensorflow':
assert pytest.approx(deep_kernel.eps.numpy(), abs=1e-4) == kernel_loaded.eps.numpy()
else:
assert pytest.approx(deep_kernel.eps.detach().numpy(), abs=1e-4) == kernel_loaded.eps.detach().numpy()
assert kernel_loaded.kernel_a.sigma == deep_kernel.kernel_a.sigma
assert kernel_loaded.kernel_b.sigma == deep_kernel.kernel_b.sigma
@parametrize('preprocess_fn', [preprocess_uae, preprocess_hiddenoutput])
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
def test_save_preprocess_drift(data, preprocess_fn, tmp_path, backend):
"""
Test saving/loading of the inbuilt `preprocess_drift` preprocessing functions when containing a `model`, with the
`model` either being a simple tf/torch model, or a `HiddenOutput` class.
"""
registry_str = 'tensorflow' if backend == 'tensorflow' else 'pytorch'
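    # (the keops backend re-uses the pytorch `preprocess_drift` registry entry)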
# Save preprocess_fn to config
filepath = tmp_path
X_ref, X_h0 = data
input_shape = (X_ref.shape[1],)
cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=input_shape, filepath=filepath)
cfg_preprocess = _path2str(cfg_preprocess)
cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict() # pydantic validation
assert cfg_preprocess['src'] == '@cd.' + registry_str + '.preprocess.preprocess_drift'
assert cfg_preprocess['model']['src'] == 'preprocess_fn/model'
# TODO - check layer details here once implemented
# Resolve and load preprocess config
cfg = {'preprocess_fn': cfg_preprocess, 'backend': backend}
preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn'] # tests _load_preprocess_config implicitly
if backend == 'tensorflow':
assert preprocess_fn_load.func.__name__ == 'preprocess_drift'
assert isinstance(preprocess_fn_load.keywords['model'], tf.keras.Model)
else: # pytorch and keops backend
assert preprocess_fn_load.func.__name__ == 'preprocess_drift'
assert isinstance(preprocess_fn_load.keywords['model'], nn.Module)
@parametrize('preprocess_fn', [preprocess_simple, preprocess_simple_with_kwargs])
def test_save_preprocess_custom(preprocess_fn, tmp_path):
"""
Test saving/loading of custom preprocessing functions, without and with kwargs.
"""
# Save preprocess_fn to config
filepath = tmp_path
cfg_preprocess = _save_preprocess_config(preprocess_fn, input_shape=None, filepath=filepath)
cfg_preprocess = _path2str(cfg_preprocess)
cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict() # pydantic validation
assert tmp_path.joinpath(cfg_preprocess['src']).is_file()
assert cfg_preprocess['src'] == os.path.join('preprocess_fn', 'function.dill')
if isinstance(preprocess_fn, partial): # kwargs expected
assert cfg_preprocess['kwargs'] == preprocess_fn.keywords
else: # no kwargs expected
assert cfg_preprocess['kwargs'] == {}
# Resolve and load preprocess config
cfg = {'preprocess_fn': cfg_preprocess}
preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn'] # tests _load_preprocess_config implicitly
if isinstance(preprocess_fn, partial):
assert preprocess_fn_load.func == preprocess_fn.func
assert preprocess_fn_load.keywords == preprocess_fn.keywords
else:
assert preprocess_fn_load == preprocess_fn
@parametrize('preprocess_fn', [preprocess_nlp])
@parametrize_with_cases("data", cases=TextData.movie_sentiment_data, prefix='data_')
def test_save_preprocess_nlp(data, preprocess_fn, tmp_path, backend):
"""
Test saving/loading of the inbuilt `preprocess_drift` preprocessing functions when containing a `model`, text
`tokenizer` and text `embedding` model.
"""
registry_str = 'tensorflow' if backend == 'tensorflow' else 'pytorch'
# Save preprocess_fn to config
filepath = tmp_path
cfg_preprocess = _save_preprocess_config(preprocess_fn,
input_shape=(768,), # hardcoded to bert-base-cased for now
filepath=filepath)
cfg_preprocess = _path2str(cfg_preprocess)
cfg_preprocess = PreprocessConfig(**cfg_preprocess).dict() # pydantic validation
assert cfg_preprocess['src'] == '@cd.' + registry_str + '.preprocess.preprocess_drift'
assert cfg_preprocess['embedding']['src'] == 'preprocess_fn/embedding'
assert cfg_preprocess['tokenizer']['src'] == 'preprocess_fn/tokenizer'
assert tmp_path.joinpath(cfg_preprocess['preprocess_batch_fn']).is_file()
assert cfg_preprocess['preprocess_batch_fn'] == os.path.join('preprocess_fn', 'preprocess_batch_fn.dill')
if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
assert cfg_preprocess['model'] is None
else:
assert cfg_preprocess['model']['src'] == 'preprocess_fn/model'
# Resolve and load preprocess config
cfg = {'preprocess_fn': cfg_preprocess, 'backend': backend}
preprocess_fn_load = resolve_config(cfg, tmp_path)['preprocess_fn'] # tests _load_preprocess_config implicitly
assert isinstance(preprocess_fn_load.keywords['tokenizer'], type(preprocess_fn.keywords['tokenizer']))
assert isinstance(preprocess_fn_load.keywords['model'], type(preprocess_fn.keywords['model']))
if isinstance(preprocess_fn.keywords['model'], (TransformerEmbedding_tf, TransformerEmbedding_pt)):
emb = preprocess_fn.keywords['model']
emb_load = preprocess_fn_load.keywords['model']
else:
if backend == 'tensorflow':
emb = preprocess_fn.keywords['model'].encoder.layers[0]
emb_load = preprocess_fn_load.keywords['model'].encoder.layers[0]
else: # pytorch and keops backends
emb = list(preprocess_fn.keywords['model'].encoder.children())[0]
emb_load = list(preprocess_fn_load.keywords['model'].encoder.children())[0]
assert isinstance(emb_load.model, type(emb.model))
assert emb_load.emb_type == emb.emb_type
assert emb_load.hs_emb.keywords['layers'] == emb.hs_emb.keywords['layers']
def test_nested_value():
"""
Unit test for _get_nested_value and _set_nested_value.
"""
dict1 = {'dict2': {'dict3': {}}}
_set_nested_value(dict1, ['dict2', 'dict3', 'a string'], 'hello')
_set_nested_value(dict1, ['a float'], 42.0)
_set_nested_value(dict1, ['dict2', 'a list'], [1, 2, 3])
assert _get_nested_value(dict1, ['dict2', 'dict3', 'a string']) == dict1['dict2']['dict3']['a string']
assert _get_nested_value(dict1, ['a float']) == dict1['a float']
assert _get_nested_value(dict1, ['dict2', 'a list']) == dict1['dict2']['a list']
def test_replace():
"""
A unit test for _replace.
"""
dict1 = {
'key1': 'key1',
'key7': None,
'dict2': {
'key2': 'key2',
'key4': None,
'dict3': {
'key5': 'key5',
'key6': None
}
}
}
new_dict = _replace(dict1, None, 'None')
assert new_dict['key7'] == 'None'
assert new_dict['dict2']['key4'] == 'None'
assert new_dict['dict2']['dict3']['key6'] == 'None'
assert new_dict['key1'] == dict1['key1']
def test_path2str(tmp_path):
"""
A unit test for _path2str.
"""
cfg = {
'dict': {'a path': tmp_path}
}
cfg_rel = _path2str(cfg)
rel_path = cfg_rel['dict']['a path']
assert isinstance(rel_path, str)
assert rel_path == str(tmp_path.as_posix())
cfg_abs = _path2str(cfg, absolute=True)
abs_path = cfg_abs['dict']['a path']
assert isinstance(abs_path, str)
assert abs_path == str(tmp_path.resolve().as_posix())
def test_int2str_keys():
"""
A unit test for _int2str_keys
"""
cfg = {
'dict': {'0': 'A', '1': 3, 2: 'C'},
3: 'D',
'4': 'E'
}
cfg_fixed = _int2str_keys(cfg)
    # Check all int keys changed to str
assert cfg['dict'].pop(2) == cfg_fixed['dict'].pop('2')
assert cfg.pop(3) == cfg_fixed.pop('3')
# Check remaining items untouched
assert cfg == cfg_fixed
assert cfg
def generic_function(x: float, add: float = 0.0, invert: bool = True):
if invert:
return 1/x + add
else:
return x + add
@parametrize('function', [generic_function])
def test_serialize_function_partial(function, tmp_path):
"""
Unit tests for _serialize_function, with a functools.partial function.
"""
partial_func = partial(function, invert=False, add=1.0)
src, kwargs = _serialize_object(partial_func, base_path=tmp_path, local_path=Path('function'))
filepath = tmp_path.joinpath('function.dill')
assert filepath.is_file()
with open(filepath, 'rb') as f:
partial_func_load = dill.load(f)
x = 2.0
assert partial_func_load(x, **kwargs) == partial_func(x)
def test_serialize_function_registry(tmp_path):
"""
Unit tests for _serialize_function, with a registered function.
"""
registry_ref = 'cd.tensorflow.preprocess.preprocess_drift'
function = registry.get(registry_ref)
src, kwargs = _serialize_object(function, base_path=tmp_path, local_path=Path('function'))
assert kwargs == {}
assert src == '@' + registry_ref
def test_registry_get():
"""
    Unit test for alibi_detect.utils.registry.get(). This will make more sense once we have a more automated
    process for pre-registering alibi-detect objects, as we will then be able to compare against the list of
    objects we wish to register.
"""
for k, v in REGISTERED_OBJECTS.items():
obj = registry.get(k)
assert type(obj) == type(v)
def test_set_dtypes(backend):
"""
Unit test to test _set_dtypes.
"""
if backend == 'tensorflow':
dtype = 'tf.float32'
elif backend == 'pytorch':
dtype = 'torch.float32'
else:
pytest.skip('Only test set_dtypes for tensorflow and pytorch.')
cfg = {
'preprocess_fn': {
'dtype': dtype
}
}
_set_dtypes(cfg)
dtype_resolved = cfg['preprocess_fn']['dtype']
if backend == 'tensorflow':
assert dtype_resolved == tf.float32
elif backend == 'pytorch':
assert dtype_resolved == torch.float32
def test_cleanup(tmp_path):
"""
Test that the filepath given to save_detector is deleted in the event of an error whilst saving.
Also check that the error is caught and raised.
"""
# Detector save/load
X_ref = np.random.normal(size=(5, 1))
cd = KSDrift(X_ref)
# Add a garbage preprocess_fn to cause an error
cd.preprocess_fn = cd.x_ref
# Save, catch and check error
with pytest.raises(RuntimeError):
save_detector(cd, tmp_path)
# Check `filepath` is deleted
assert not tmp_path.is_dir()
| 56,177 | 40.065789 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/tests/test_validate.py
|
import numpy as np
import pytest
from pydantic import ValidationError
from alibi_detect.saving import validate_config
from alibi_detect.saving.schemas import KernelConfig
from alibi_detect.saving.saving import X_REF_FILENAME
from alibi_detect.version import __version__
from copy import deepcopy
import tensorflow as tf
import torch
# Define a detector config dict
mmd_cfg = {
'meta': {
'version': __version__,
},
'name': 'MMDDrift',
'x_ref': np.array([[-0.30074928], [1.50240758], [0.43135768], [2.11295779], [0.79684913]]),
'p_val': 0.05,
}
# Define a detector config dict without meta (as simple as it gets!)
mmd_cfg_nometa = deepcopy(mmd_cfg)
mmd_cfg_nometa.pop('meta')
@pytest.mark.parametrize('cfg', [mmd_cfg])
def test_validate_config(cfg):
# Original cfg
# Check original cfg doesn't raise errors
cfg_full = validate_config(cfg, resolved=True)
# Check cfg is returned with correct metadata
meta = cfg_full.get('meta') # pop as don't want to compare meta to cfg in next bit
assert meta['version'] == __version__
assert not meta.pop('version_warning') # pop this one to remove from next check
# Check remaining values of items in cfg unchanged
for k, v in cfg.items():
        assert np.all((v == cfg_full[k]))  # use np.all to deal with x_ref comparison
# Check original cfg doesn't raise errors in the unresolved case
cfg_unres = cfg.copy()
cfg_unres['x_ref'] = X_REF_FILENAME
_ = validate_config(cfg_unres)
assert not cfg.get('meta').get('version_warning')
# Check warning raised and warning field added if version different
cfg_err = cfg.copy()
cfg_err['meta']['version'] = '0.1.x'
with pytest.warns(Warning): # error will be raised if a warning IS NOT raised
cfg_err = validate_config(cfg_err, resolved=True)
assert cfg_err.get('meta').get('version_warning')
# Check ValueError raised if name unrecognised
cfg_err = cfg.copy()
cfg_err['name'] = 'MMDDriftWrong'
with pytest.raises(ValueError):
cfg_err = validate_config(cfg_err, resolved=True)
assert not cfg_err.get('meta').get('version_warning')
# Check ValidationError raised if unrecognised field or type wrong
cfg_err = cfg.copy()
cfg_err['p_val'] = [cfg['p_val']] # p_val should be float not list
with pytest.raises(ValidationError):
cfg_err = validate_config(cfg_err, resolved=True)
assert not cfg_err.get('meta').get('version_warning')
cfg_err = cfg.copy()
cfg_err['wrong_var'] = 42.0
with pytest.raises(ValidationError):
cfg_err = validate_config(cfg_err, resolved=True)
assert not cfg_err.get('meta').get('version_warning')
@pytest.mark.parametrize('cfg', [mmd_cfg_nometa])
def test_validate_config_wo_meta(cfg):
# Check a config w/o a meta dict can be validated
_ = validate_config(cfg, resolved=True)
# Check the unresolved case
cfg_unres = cfg.copy()
cfg_unres['x_ref'] = X_REF_FILENAME
_ = validate_config(cfg_unres)
@pytest.mark.parametrize('sigma', [
0.5,
[0.5, 1.0],
None
])
@pytest.mark.parametrize('flavour', ['tensorflow', 'pytorch'])
def test_validate_kernel_and_coerce_2_tensor(flavour, sigma):
"""
Pass a kernel config through the KernelConfig pydantic model. This implicitly
tests the coerce_2_tensor validator.
"""
# Define a kernel config
kernel_cfg = {
'src': f'@utils.{flavour}.kernels.GaussianRBF',
'flavour': flavour,
'sigma': sigma
}
# Pass through validation and check results
kernel_cfg_val = KernelConfig(**kernel_cfg).dict()
assert kernel_cfg_val['src'] == kernel_cfg['src']
assert kernel_cfg_val['flavour'] == flavour
if sigma is None:
assert kernel_cfg_val['sigma'] is None
else:
if flavour == 'tensorflow':
assert isinstance(kernel_cfg_val['sigma'], tf.Tensor)
else:
assert isinstance(kernel_cfg_val['sigma'], torch.Tensor)
| 3,998 | 32.889831 | 95 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_pytorch/loading.py
|
import logging
import os
from importlib import import_module
from pathlib import Path
from typing import Callable, Optional, Union, Type
import dill
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import UAE, HiddenOutput
from alibi_detect.cd.pytorch.preprocess import _Encoder
from alibi_detect.models.pytorch import TransformerEmbedding
from alibi_detect.utils.pytorch.kernels import DeepKernel
logger = logging.getLogger(__name__)
def load_model(filepath: Union[str, os.PathLike],
layer: Optional[int] = None,
) -> nn.Module:
"""
Load PyTorch model.
Parameters
----------
filepath
Saved model filepath.
layer
Optional index of a hidden layer to extract. If not `None`, a
:py:class:`~alibi_detect.cd.pytorch.HiddenOutput` model is returned.
Returns
-------
Loaded model.
"""
filepath = Path(filepath).joinpath('model.pt')
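    # `dill` is used as the pickle module so that objects standard pickle cannot handle (e.g. lambdas) load correctly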
model = torch.load(filepath, pickle_module=dill)
# Optionally extract hidden layer
if isinstance(layer, int):
model = HiddenOutput(model, layer=layer)
return model
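# Illustrative usage of load_model (a sketch only; the directory path is hypothetical and assumes
# a model was previously saved as `my_detector/model/model.pt` via save_model below):
#     model = load_model('my_detector/model')             # full nn.Module
#     hidden = load_model('my_detector/model', layer=-1)  # wrapped in a HiddenOutput model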
def prep_model_and_emb(model: nn.Module, emb: Optional[TransformerEmbedding]) -> nn.Module:
"""
Function to perform final preprocessing of model (and/or embedding) before it is passed to preprocess_drift.
Parameters
----------
model
A compatible model.
emb
An optional text embedding model.
Returns
-------
    The final model, ready to be passed to preprocess_drift.
"""
# Process model (and embedding)
    model = model.encoder if isinstance(model, UAE) else model  # This is to avoid nesting UAEs if model is already a UAE
if emb is not None:
model = _Encoder(emb, mlp=model)
model = UAE(encoder_net=model)
return model
def load_kernel_config(cfg: dict) -> Callable:
"""
Loads a kernel from a kernel config dict.
Parameters
----------
cfg
        A kernel config dict (see pydantic schemas).
Returns
-------
The kernel.
"""
if 'src' in cfg: # Standard kernel config
kernel = cfg.pop('src')
if hasattr(kernel, 'from_config'):
kernel = kernel.from_config(cfg)
elif 'proj' in cfg: # DeepKernel config
# Kernel a
kernel_a = cfg['kernel_a']
kernel_b = cfg['kernel_b']
if kernel_a != 'rbf':
cfg['kernel_a'] = load_kernel_config(kernel_a)
if kernel_b != 'rbf':
cfg['kernel_b'] = load_kernel_config(kernel_b)
# Assemble deep kernel
kernel = DeepKernel.from_config(cfg)
else:
raise ValueError('Unable to process kernel. The kernel config dict must either be a `KernelConfig` with a '
                         '`src` field, or a `DeepKernelConfig` with a `proj` field.')
return kernel
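# Minimal sketch of the two kinds of config handled by load_kernel_config (field values are
# illustrative assumptions rather than taken from a real detector config):
#     load_kernel_config({'src': GaussianRBF, 'sigma': torch.tensor(0.5)})                # standard kernel
#     load_kernel_config({'proj': proj_net, 'kernel_a': 'rbf', 'kernel_b': 'rbf'})        # DeepKernel config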
def load_optimizer(cfg: dict) -> Type[torch.optim.Optimizer]:
"""
Imports a PyTorch torch.optim.Optimizer class from an optimizer config dict.
Parameters
----------
cfg
The optimizer config dict.
Returns
-------
The loaded optimizer class.
"""
class_name = cfg.get('class_name')
try:
return getattr(import_module('torch.optim'), class_name)
except AttributeError:
raise ValueError(f"{class_name} is not a recognised optimizer in `torch.optim`.")
def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:
"""
Load a pre-trained PyTorch text embedding from a directory.
    See the :py:class:`~alibi_detect.models.pytorch.TransformerEmbedding` documentation for a
full description of the `embedding_type` and `layers` kwargs.
Parameters
----------
src
Name of or path to the model.
embedding_type
Type of embedding to extract. Needs to be one of pooler_output,
last_hidden_state, hidden_state or hidden_state_cls.
layers
A list with int's referring to the hidden layers used to extract the embedding.
Returns
-------
The loaded embedding.
"""
emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)
return emb
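# Illustrative example (the model name, embedding type and layers are hypothetical choices):
#     emb = load_embedding('bert-base-cased', embedding_type='hidden_state', layers=[-1])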
| 4,175 | 27.8 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_pytorch/conversions.py
|
import torch
def get_pt_dtype(dtype_str: str):
"""Returns pytorch datatype specified by string."""
return getattr(torch, dtype_str)
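# Illustrative example: get_pt_dtype('float32') returns torch.float32.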
| 143 | 17 | 55 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_pytorch/saving.py
|
import os
import logging
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Tuple, Union
import dill # dispatch table setting not done here as done in top-level saving.py file
import torch
import torch.nn as nn
from alibi_detect.cd.pytorch import UAE, HiddenOutput
from alibi_detect.models.pytorch import TransformerEmbedding
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: Callable,
base_path: Path,
local_path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
"""
Save a PyTorch model to a config dictionary. When a model has a text embedding model contained within it,
this is extracted and saved separately.
Parameters
----------
model
The model to save.
base_path
Base filepath to save to (the location of the `config.toml` file).
local_path
A local (relative) filepath to append to base_path.
Returns
-------
A tuple containing the model and embedding config dicts.
"""
cfg_model: Optional[Dict[str, Any]] = None
cfg_embed: Optional[Dict[str, Any]] = None
if isinstance(model, UAE):
layers = list(model.encoder.children())
if isinstance(layers[0], TransformerEmbedding): # if UAE contains embedding and encoder
# embedding
embed = layers[0]
cfg_embed = save_embedding_config(embed, base_path, local_path.joinpath('embedding'))
# preprocessing encoder
model = layers[1]
else: # If UAE is simply an encoder
model = model.encoder
elif isinstance(model, TransformerEmbedding):
cfg_embed = save_embedding_config(model, base_path, local_path.joinpath('embedding'))
model = None
elif isinstance(model, HiddenOutput):
model = model.model
    elif isinstance(model, nn.Module):  # Checked last since TransformerEmbedding and UAE are also nn.Module subclasses
model = model
else:
raise ValueError('Model not recognised, cannot save.')
if model is not None:
filepath = base_path.joinpath(local_path)
save_model(model, filepath=filepath)
cfg_model = {
'flavour': Framework.PYTORCH.value,
'src': local_path.joinpath('model')
}
return cfg_model, cfg_embed
def save_model(model: nn.Module,
filepath: Union[str, os.PathLike],
save_dir: Union[str, os.PathLike] = 'model') -> None:
"""
Save PyTorch model.
Parameters
----------
model
The PyTorch model to save.
filepath
Save directory.
save_dir
Name of folder to save to within the filepath directory.
"""
# create folder to save model in
model_path = Path(filepath).joinpath(save_dir)
if not model_path.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_path))
model_path.mkdir(parents=True, exist_ok=True)
# save model
model_path = model_path.joinpath('model.pt')
if isinstance(model, nn.Module):
torch.save(model, model_path, pickle_module=dill)
else:
raise ValueError('The extracted model to save is not a `nn.Module`. Cannot save.')
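# Illustrative usage (the path is hypothetical): save_model(my_model, 'my_detector') creates
# 'my_detector/model/' if needed and writes the model to 'my_detector/model/model.pt'.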
def save_embedding_config(embed: TransformerEmbedding,
base_path: Path,
local_path: Path = Path('.')) -> dict:
"""
Save embeddings for text drift models.
Parameters
----------
embed
Embedding model.
base_path
Base filepath to save to (the location of the `config.toml` file).
local_path
A local (relative) filepath to append to base_path.
"""
# create folder to save model in
filepath = base_path.joinpath(local_path)
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# Populate config dict
cfg_embed: Dict[str, Any] = {}
cfg_embed.update({'type': embed.emb_type})
cfg_embed.update({'layers': embed.hs_emb.keywords['layers']})
cfg_embed.update({'src': local_path})
cfg_embed.update({'flavour': Framework.PYTORCH.value})
# Save embedding model
logger.info('Saving embedding model to {}.'.format(filepath))
embed.model.save_pretrained(filepath)
return cfg_embed
| 4,430 | 32.568182 | 109 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
load_kernel_config_pt, load_embedding_pt, load_model_pt, load_optimizer_pt, \
prep_model_and_emb_pt = import_optional(
'alibi_detect.saving._pytorch.loading',
names=['load_kernel_config',
'load_embedding',
'load_model',
'load_optimizer',
'prep_model_and_emb'])
save_model_config_pt = import_optional(
'alibi_detect.saving._pytorch.saving',
names=['save_model_config']
)
get_pt_dtype = import_optional(
'alibi_detect.saving._pytorch.conversions',
names=['get_pt_dtype']
)
__all__ = [
"load_kernel_config_pt",
"load_embedding_pt",
"load_model_pt",
"load_optimizer_pt",
"prep_model_and_emb_pt",
"save_model_config_pt",
"get_pt_dtype"
]
| 836 | 26 | 77 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_pytorch/tests/test_saving_pt.py
|
from pytest_cases import param_fixture, parametrize, parametrize_with_cases
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import encoder_model
from alibi_detect.cd.pytorch import HiddenOutput as HiddenOutput_pt
from alibi_detect.saving.loading import _load_model_config, _load_optimizer_config
from alibi_detect.saving.saving import _path2str, _save_model_config
from alibi_detect.saving.schemas import ModelConfig
backend = param_fixture("backend", ['pytorch'])
# Note: The full save/load functionality of optimizers (inc. validation) is tested in test_save_classifierdrift.
def test_load_optimizer(backend):
"""
Test _load_optimizer_config with a pytorch optimizer, when the `torch.optim.Optimizer` class name is specified.
For pytorch, we expect a `torch.optim` class to be returned.
"""
class_name = 'Adam'
cfg_opt = {'class_name': class_name}
optimizer = _load_optimizer_config(cfg_opt, backend=backend)
assert optimizer.__name__ == class_name
assert isinstance(optimizer, type)
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [encoder_model])
@parametrize('layer', [None, -1])
def test_save_model_pt(data, model, layer, tmp_path):
"""
Unit test for _save_model_config and _load_model_config with pytorch model.
"""
# Save model
filepath = tmp_path
input_shape = (data[0].shape[1],)
cfg_model, _ = _save_model_config(model, base_path=filepath, input_shape=input_shape)
cfg_model = _path2str(cfg_model)
cfg_model = ModelConfig(**cfg_model).dict()
assert tmp_path.joinpath('model').is_dir()
assert tmp_path.joinpath('model/model.pt').is_file()
# Adjust config
cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here
if layer is not None:
cfg_model['layer'] = layer
# Load model
model_load = _load_model_config(cfg_model)
if layer is None:
assert isinstance(model_load, type(model))
else:
assert isinstance(model_load, HiddenOutput_pt)
| 2,131 | 38.481481 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_keops/loading.py
|
from typing import Callable
from alibi_detect.utils.keops.kernels import DeepKernel
def load_kernel_config(cfg: dict) -> Callable:
"""
Loads a kernel from a kernel config dict.
Parameters
----------
cfg
        A kernel config dict (see pydantic schemas).
Returns
-------
The kernel.
"""
if 'src' in cfg: # Standard kernel config
kernel = cfg.pop('src')
if hasattr(kernel, 'from_config'):
kernel = kernel.from_config(cfg)
elif 'proj' in cfg: # DeepKernel config
# Kernel a
kernel_a = cfg['kernel_a']
kernel_b = cfg['kernel_b']
if kernel_a != 'rbf':
cfg['kernel_a'] = load_kernel_config(kernel_a)
if kernel_b != 'rbf':
cfg['kernel_b'] = load_kernel_config(kernel_b)
# Assemble deep kernel
kernel = DeepKernel.from_config(cfg)
else:
raise ValueError('Unable to process kernel. The kernel config dict must either be a `KernelConfig` with a '
                         '`src` field, or a `DeepKernelConfig` with a `proj` field.')
return kernel
| 1,118 | 28.447368 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_keops/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
load_kernel_config_ke = import_optional(
'alibi_detect.saving._keops.loading',
names=['load_kernel_config'])
__all__ = [
"load_kernel_config_ke",
]
| 245 | 23.6 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_tensorflow/loading.py
|
import logging
import os
from importlib import import_module
import warnings
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union, Type
import dill
import tensorflow as tf
from tensorflow_probability.python.distributions.distribution import \
Distribution
from transformers import AutoTokenizer
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.ad.adversarialae import DenseHidden
from alibi_detect.cd import (ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift)
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
from alibi_detect.cd.tensorflow.preprocess import _Encoder
from alibi_detect.models.tensorflow import PixelCNN, TransformerEmbedding
from alibi_detect.models.tensorflow.autoencoder import (AE, AEGMM, VAE, VAEGMM,
DecoderLSTM,
EncoderLSTM, Seq2Seq)
from alibi_detect.od import (LLR, IForest, Mahalanobis, OutlierAE,
OutlierAEGMM, OutlierProphet, OutlierSeq2Seq,
OutlierVAE, OutlierVAEGMM, SpectralResidual)
from alibi_detect.od.llr import build_model
from alibi_detect.utils.tensorflow.kernels import DeepKernel
from alibi_detect.utils.frameworks import Framework
# Below imports are used for legacy loading, and will be removed (or moved to utils/loading.py) in the future
from alibi_detect.version import __version__
from alibi_detect.base import Detector
from alibi_detect.saving._typing import VALID_DETECTORS
logger = logging.getLogger(__name__)
def load_model(filepath: Union[str, os.PathLike],
filename: str = 'model',
custom_objects: dict = None,
layer: Optional[int] = None,
) -> tf.keras.Model:
"""
Load TensorFlow model.
Parameters
----------
filepath
Saved model directory.
filename
Name of saved model within the filepath directory.
custom_objects
Optional custom objects when loading the TensorFlow model.
layer
Optional index of a hidden layer to extract. If not `None`, a
:py:class:`~alibi_detect.cd.tensorflow.HiddenOutput` model is returned.
Returns
-------
Loaded model.
"""
# TODO - update this to accept tf format - later PR.
model_dir = Path(filepath)
model_name = filename + '.h5'
# Check if model exists
if model_name not in [f.name for f in model_dir.glob('[!.]*.h5')]:
raise FileNotFoundError(f'{model_name} not found in {model_dir.resolve()}.')
model = tf.keras.models.load_model(model_dir.joinpath(model_name), custom_objects=custom_objects)
# Optionally extract hidden layer
if isinstance(layer, int):
model = HiddenOutput(model, layer=layer)
return model
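# Illustrative usage of load_model (paths are hypothetical; assumes the model was previously saved
# as `my_detector/model/model.h5`):
#     clf = load_model('my_detector/model')              # loads model.h5
#     feats = load_model('my_detector/model', layer=-1)  # returned as a HiddenOutput model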
def prep_model_and_emb(model: Callable, emb: Optional[TransformerEmbedding]) -> Callable:
"""
Function to perform final preprocessing of model (and/or embedding) before it is passed to preprocess_drift.
Parameters
----------
model
A compatible model.
emb
An optional text embedding model.
Returns
-------
    The final model, ready to be passed to preprocess_drift.
"""
# Process model (and embedding)
    model = model.encoder if isinstance(model, UAE) else model  # This is to avoid nesting UAEs if model is already a UAE
if emb is not None:
model = _Encoder(emb, mlp=model)
model = UAE(encoder_net=model)
return model
def load_kernel_config(cfg: dict) -> Callable:
"""
Loads a kernel from a kernel config dict.
Parameters
----------
cfg
        A kernel config dict (see pydantic schemas).
Returns
-------
The kernel.
"""
if 'src' in cfg: # Standard kernel config
kernel = cfg.pop('src')
if hasattr(kernel, 'from_config'):
kernel = kernel.from_config(cfg)
elif 'proj' in cfg: # DeepKernel config
# Kernel a
kernel_a = cfg['kernel_a']
kernel_b = cfg['kernel_b']
if kernel_a != 'rbf':
cfg['kernel_a'] = load_kernel_config(kernel_a)
if kernel_b != 'rbf':
cfg['kernel_b'] = load_kernel_config(kernel_b)
# Assemble deep kernel
kernel = DeepKernel.from_config(cfg)
else:
raise ValueError('Unable to process kernel. The kernel config dict must either be a `KernelConfig` with a '
                         '`src` field, or a `DeepKernelConfig` with a `proj` field.')
return kernel
def load_optimizer(cfg: dict) -> Union[Type[tf.keras.optimizers.Optimizer], tf.keras.optimizers.Optimizer]:
"""
    Loads a TensorFlow optimizer from an optimizer config dict.
Parameters
----------
cfg
The optimizer config dict.
Returns
-------
The loaded optimizer, either as an instantiated object (if `cfg` is a tensorflow optimizer config dict), otherwise \
as an uninstantiated class.
"""
class_name = cfg.get('class_name')
tf_config = cfg.get('config')
if tf_config is not None: # cfg is a tensorflow config dict
return tf.keras.optimizers.deserialize(cfg)
else:
try:
return getattr(import_module('tensorflow.keras.optimizers'), class_name)
except AttributeError:
raise ValueError(f"{class_name} is not a recognised optimizer in `tensorflow.keras.optimizers`.")
def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:
"""
Load a pre-trained tensorflow text embedding from a directory.
    See the :py:class:`~alibi_detect.models.tensorflow.TransformerEmbedding` documentation for a
full description of the `embedding_type` and `layers` kwargs.
Parameters
----------
src
Name of or path to the model.
embedding_type
Type of embedding to extract. Needs to be one of pooler_output,
last_hidden_state, hidden_state or hidden_state_cls.
layers
A list with int's referring to the hidden layers used to extract the embedding.
Returns
-------
The loaded embedding.
"""
emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)
return emb
#######################################################################################################
# TODO: Everything below here is legacy loading code, and will be removed in the future
#######################################################################################################
def load_detector_legacy(filepath: Union[str, os.PathLike], suffix: str, **kwargs) -> Detector:
"""
Legacy function to load outlier, drift or adversarial detectors stored dill or pickle files.
Warning
-------
This function will be removed in a future version.
Parameters
----------
filepath
Load directory.
suffix
File suffix for meta and state files. Either `'.dill'` or `'.pickle'`.
Returns
-------
Loaded outlier or adversarial detector object.
"""
warnings.warn('Loading of meta.dill and meta.pickle files will be removed in a future version.', DeprecationWarning)
if kwargs:
k = list(kwargs.keys())
else:
k = []
# check if path exists
filepath = Path(filepath)
if not filepath.is_dir():
raise FileNotFoundError(f'{filepath} does not exist.')
# load metadata
meta_dict = dill.load(open(filepath.joinpath('meta' + suffix), 'rb'))
# check version
try:
if meta_dict['version'] != __version__:
warnings.warn(f'Trying to load detector from version {meta_dict["version"]} when using version '
f'{__version__}. This may lead to breaking code or invalid results.')
except KeyError:
        warnings.warn('Trying to load detector from an older version. '
'This may lead to breaking code or invalid results.')
if 'backend' in list(meta_dict.keys()) and meta_dict['backend'] == Framework.PYTORCH:
raise NotImplementedError('Detectors with PyTorch backend are not yet supported.')
detector_name = meta_dict['name']
if detector_name not in [detector for detector in VALID_DETECTORS]:
raise NotImplementedError(f'{detector_name} is not supported by `load_detector`.')
# load outlier detector specific parameters
state_dict = dill.load(open(filepath.joinpath(detector_name + suffix), 'rb'))
# Update the drift detector preprocess kwargs if state_dict is from an old alibi-detect version (<v0.10).
# See https://github.com/SeldonIO/alibi-detect/pull/732
if 'kwargs' in state_dict and 'other' in state_dict: # A drift detector if both of these exist
if 'x_ref_preprocessed' not in state_dict['kwargs']: # if already exists then must have been saved w/ >=v0.10
# Set x_ref_preprocessed to True
state_dict['kwargs']['x_ref_preprocessed'] = True
# Move `preprocess_x_ref` from `other` to `kwargs`
state_dict['kwargs']['preprocess_x_ref'] = state_dict['other']['preprocess_x_ref']
# initialize detector
model_dir = filepath.joinpath('model')
detector: Optional[Detector] = None # to avoid mypy errors
if detector_name == 'OutlierAE':
ae = load_tf_ae(filepath)
detector = init_od_ae(state_dict, ae)
elif detector_name == 'OutlierVAE':
vae = load_tf_vae(filepath, state_dict)
detector = init_od_vae(state_dict, vae)
elif detector_name == 'Mahalanobis':
detector = init_od_mahalanobis(state_dict) # type: ignore[assignment]
elif detector_name == 'IForest':
detector = init_od_iforest(state_dict) # type: ignore[assignment]
elif detector_name == 'OutlierAEGMM':
aegmm = load_tf_aegmm(filepath, state_dict)
detector = init_od_aegmm(state_dict, aegmm)
elif detector_name == 'OutlierVAEGMM':
vaegmm = load_tf_vaegmm(filepath, state_dict)
detector = init_od_vaegmm(state_dict, vaegmm)
elif detector_name == 'AdversarialAE':
ae = load_tf_ae(filepath)
custom_objects = kwargs['custom_objects'] if 'custom_objects' in k else None
model = load_model(model_dir, custom_objects=custom_objects)
model_hl = load_tf_hl(filepath, model, state_dict)
detector = init_ad_ae(state_dict, ae, model, model_hl)
elif detector_name == 'ModelDistillation':
md = load_model(model_dir, filename='distilled_model')
custom_objects = kwargs['custom_objects'] if 'custom_objects' in k else None
model = load_model(model_dir, custom_objects=custom_objects)
detector = init_ad_md(state_dict, md, model)
elif detector_name == 'OutlierProphet':
detector = init_od_prophet(state_dict)
elif detector_name == 'SpectralResidual':
detector = init_od_sr(state_dict) # type: ignore[assignment]
elif detector_name == 'OutlierSeq2Seq':
seq2seq = load_tf_s2s(filepath, state_dict)
detector = init_od_s2s(state_dict, seq2seq)
elif detector_name in ['ChiSquareDrift', 'ClassifierDriftTF', 'KSDrift', 'MMDDriftTF', 'TabularDrift']:
emb, tokenizer = None, None
if state_dict['other']['load_text_embedding']:
emb, tokenizer = load_text_embed(filepath)
try: # legacy load_model behaviour was to return None if not found. Now it raises error, hence need try-except.
model = load_model(model_dir, filename='encoder')
except FileNotFoundError:
logger.warning('No model found in {}, setting `model` to `None`.'.format(model_dir))
model = None
if detector_name == 'KSDrift':
load_fn = init_cd_ksdrift
elif detector_name == 'MMDDriftTF':
load_fn = init_cd_mmddrift # type: ignore[assignment]
elif detector_name == 'ChiSquareDrift':
load_fn = init_cd_chisquaredrift # type: ignore[assignment]
elif detector_name == 'TabularDrift':
load_fn = init_cd_tabulardrift # type: ignore[assignment]
elif detector_name == 'ClassifierDriftTF':
# Don't need try-except here since model is not optional for ClassifierDrift
clf_drift = load_model(model_dir, filename='clf_drift')
load_fn = partial(init_cd_classifierdrift, clf_drift) # type: ignore[assignment]
else:
raise NotImplementedError
detector = load_fn(state_dict, model, emb, tokenizer, **kwargs) # type: ignore[assignment]
elif detector_name == 'LLR':
models = load_tf_llr(filepath, **kwargs)
detector = init_od_llr(state_dict, models)
else:
raise NotImplementedError
# TODO - add tests back in!
detector.meta = meta_dict
logger.info('Finished loading detector.')
return detector
def load_tf_hl(filepath: Union[str, os.PathLike], model: tf.keras.Model, state_dict: dict) -> List[tf.keras.Model]:
"""
Load hidden layer models for AdversarialAE.
Parameters
----------
filepath
Saved model directory.
model
tf.keras classification model.
state_dict
Dictionary containing the detector's parameters.
Returns
-------
List with loaded tf.keras models.
"""
model_dir = Path(filepath).joinpath('model')
hidden_layer_kld = state_dict['hidden_layer_kld']
if not hidden_layer_kld:
return []
model_hl = []
for i, (hidden_layer, output_dim) in enumerate(hidden_layer_kld.items()):
m = DenseHidden(model, hidden_layer, output_dim)
m.load_weights(model_dir.joinpath('model_hl_' + str(i) + '.ckpt'))
model_hl.append(m)
return model_hl
def load_tf_ae(filepath: Union[str, os.PathLike]) -> tf.keras.Model:
"""
Load AE.
Parameters
----------
filepath
Saved model directory.
Returns
-------
Loaded AE.
"""
model_dir = Path(filepath).joinpath('model')
if not [f.name for f in model_dir.glob('[!.]*.h5')]:
logger.warning('No encoder, decoder or ae found in {}.'.format(model_dir))
return None
encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
ae = AE(encoder_net, decoder_net)
ae.load_weights(model_dir.joinpath('ae.ckpt'))
return ae
def load_tf_vae(filepath: Union[str, os.PathLike],
state_dict: Dict) -> tf.keras.Model:
"""
Load VAE.
Parameters
----------
filepath
Saved model directory.
state_dict
Dictionary containing the latent dimension and beta parameters.
Returns
-------
Loaded VAE.
"""
model_dir = Path(filepath).joinpath('model')
if not [f.name for f in model_dir.glob('[!.]*.h5')]:
logger.warning('No encoder, decoder or vae found in {}.'.format(model_dir))
return None
encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
vae = VAE(encoder_net, decoder_net, state_dict['latent_dim'], beta=state_dict['beta'])
vae.load_weights(model_dir.joinpath('vae.ckpt'))
return vae
def load_tf_aegmm(filepath: Union[str, os.PathLike],
state_dict: Dict) -> tf.keras.Model:
"""
Load AEGMM.
Parameters
----------
filepath
Saved model directory.
state_dict
Dictionary containing the `n_gmm` and `recon_features` parameters.
Returns
-------
Loaded AEGMM.
"""
model_dir = Path(filepath).joinpath('model')
if not [f.name for f in model_dir.glob('[!.]*.h5')]:
logger.warning('No encoder, decoder, gmm density net or aegmm found in {}.'.format(model_dir))
return None
encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
gmm_density_net = tf.keras.models.load_model(model_dir.joinpath('gmm_density_net.h5'))
aegmm = AEGMM(encoder_net, decoder_net, gmm_density_net, state_dict['n_gmm'], state_dict['recon_features'])
aegmm.load_weights(model_dir.joinpath('aegmm.ckpt'))
return aegmm
def load_tf_vaegmm(filepath: Union[str, os.PathLike],
state_dict: Dict) -> tf.keras.Model:
"""
Load VAEGMM.
Parameters
----------
filepath
Saved model directory.
state_dict
Dictionary containing the `n_gmm`, `latent_dim` and `recon_features` parameters.
Returns
-------
Loaded VAEGMM.
"""
model_dir = Path(filepath).joinpath('model')
if not [f.name for f in model_dir.glob('[!.]*.h5')]:
logger.warning('No encoder, decoder, gmm density net or vaegmm found in {}.'.format(model_dir))
return None
encoder_net = tf.keras.models.load_model(model_dir.joinpath('encoder_net.h5'))
decoder_net = tf.keras.models.load_model(model_dir.joinpath('decoder_net.h5'))
gmm_density_net = tf.keras.models.load_model(model_dir.joinpath('gmm_density_net.h5'))
vaegmm = VAEGMM(encoder_net, decoder_net, gmm_density_net, state_dict['n_gmm'],
state_dict['latent_dim'], state_dict['recon_features'], state_dict['beta'])
vaegmm.load_weights(model_dir.joinpath('vaegmm.ckpt'))
return vaegmm
def load_tf_s2s(filepath: Union[str, os.PathLike],
state_dict: Dict) -> tf.keras.Model:
"""
Load seq2seq TensorFlow model.
Parameters
----------
filepath
Saved model directory.
state_dict
Dictionary containing the `latent_dim`, `shape`, `output_activation` and `beta` parameters.
Returns
-------
Loaded seq2seq model.
"""
model_dir = Path(filepath).joinpath('model')
if not [f.name for f in model_dir.glob('[!.]*.h5')]:
logger.warning('No seq2seq or threshold estimation net found in {}.'.format(model_dir))
return None
# load threshold estimator net, initialize encoder and decoder and load seq2seq weights
threshold_net = tf.keras.models.load_model(model_dir.joinpath('threshold_net.h5'), compile=False)
latent_dim = state_dict['latent_dim']
n_features = state_dict['shape'][-1]
encoder_net = EncoderLSTM(latent_dim)
decoder_net = DecoderLSTM(latent_dim, n_features, state_dict['output_activation'])
seq2seq = Seq2Seq(encoder_net, decoder_net, threshold_net, n_features, beta=state_dict['beta'])
seq2seq.load_weights(model_dir.joinpath('seq2seq.ckpt'))
return seq2seq
def load_tf_llr(filepath: Union[str, os.PathLike], dist_s: Union[Distribution, PixelCNN] = None,
dist_b: Union[Distribution, PixelCNN] = None, input_shape: tuple = None):
"""
Load LLR TensorFlow models or distributions.
Parameters
----------
filepath
Saved model directory.
dist_s
TensorFlow distribution for semantic model.
dist_b
TensorFlow distribution for background model.
input_shape
Input shape of the model.
Returns
-------
    Tuple containing the semantic and background distributions and models.
"""
model_dir = Path(filepath).joinpath('model')
h5files = [f.name for f in model_dir.glob('[!.]*.h5')]
if 'model_s.h5' in h5files and 'model_b.h5' in h5files:
model_s, dist_s = build_model(dist_s, input_shape, str(model_dir.joinpath('model_s.h5').resolve()))
model_b, dist_b = build_model(dist_b, input_shape, str(model_dir.joinpath('model_b.h5').resolve()))
return dist_s, dist_b, model_s, model_b
else:
dist_s = tf.keras.models.load_model(model_dir.joinpath('model.h5'), compile=False)
if 'model_background.h5' in h5files:
dist_b = tf.keras.models.load_model(model_dir.joinpath('model_background.h5'), compile=False)
else:
dist_b = None
return dist_s, dist_b, None, None
def init_od_ae(state_dict: Dict,
ae: tf.keras.Model) -> OutlierAE:
"""
    Initialize OutlierAE.
Parameters
----------
state_dict
Dictionary containing the parameter values.
ae
Loaded AE.
Returns
-------
Initialized OutlierAE instance.
"""
od = OutlierAE(threshold=state_dict['threshold'], ae=ae)
return od
def init_od_vae(state_dict: Dict,
vae: tf.keras.Model) -> OutlierVAE:
"""
Initialize OutlierVAE.
Parameters
----------
state_dict
Dictionary containing the parameter values.
vae
Loaded VAE.
Returns
-------
Initialized OutlierVAE instance.
"""
od = OutlierVAE(threshold=state_dict['threshold'],
score_type=state_dict['score_type'],
vae=vae,
samples=state_dict['samples'])
return od
def init_ad_ae(state_dict: Dict,
ae: tf.keras.Model,
model: tf.keras.Model,
model_hl: List[tf.keras.Model]) -> AdversarialAE:
"""
Initialize AdversarialAE.
Parameters
----------
state_dict
Dictionary containing the parameter values.
ae
        Loaded AE.
model
Loaded classification model.
model_hl
List of tf.keras models.
Returns
-------
Initialized AdversarialAE instance.
"""
ad = AdversarialAE(threshold=state_dict['threshold'],
ae=ae,
model=model,
model_hl=model_hl,
w_model_hl=state_dict['w_model_hl'],
temperature=state_dict['temperature'])
return ad
def init_ad_md(state_dict: Dict,
distilled_model: tf.keras.Model,
model: tf.keras.Model) -> ModelDistillation:
"""
Initialize ModelDistillation.
Parameters
----------
state_dict
Dictionary containing the parameter values.
distilled_model
Loaded distilled model.
model
Loaded classification model.
Returns
-------
Initialized ModelDistillation instance.
"""
ad = ModelDistillation(threshold=state_dict['threshold'],
distilled_model=distilled_model,
model=model,
temperature=state_dict['temperature'],
loss_type=state_dict['loss_type'])
return ad
def init_od_aegmm(state_dict: Dict,
aegmm: tf.keras.Model) -> OutlierAEGMM:
"""
Initialize OutlierAEGMM.
Parameters
----------
state_dict
Dictionary containing the parameter values.
aegmm
Loaded AEGMM.
Returns
-------
Initialized OutlierAEGMM instance.
"""
od = OutlierAEGMM(threshold=state_dict['threshold'],
aegmm=aegmm)
od.phi = state_dict['phi']
od.mu = state_dict['mu']
od.cov = state_dict['cov']
od.L = state_dict['L']
od.log_det_cov = state_dict['log_det_cov']
if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
logger.warning('Loaded AEGMM detector has not been fit.')
return od
def init_od_vaegmm(state_dict: Dict,
vaegmm: tf.keras.Model) -> OutlierVAEGMM:
"""
Initialize OutlierVAEGMM.
Parameters
----------
state_dict
Dictionary containing the parameter values.
vaegmm
Loaded VAEGMM.
Returns
-------
Initialized OutlierVAEGMM instance.
"""
od = OutlierVAEGMM(threshold=state_dict['threshold'],
vaegmm=vaegmm,
samples=state_dict['samples'])
od.phi = state_dict['phi']
od.mu = state_dict['mu']
od.cov = state_dict['cov']
od.L = state_dict['L']
od.log_det_cov = state_dict['log_det_cov']
if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
logger.warning('Loaded VAEGMM detector has not been fit.')
return od
def init_od_s2s(state_dict: Dict,
seq2seq: tf.keras.Model) -> OutlierSeq2Seq:
"""
Initialize OutlierSeq2Seq.
Parameters
----------
state_dict
Dictionary containing the parameter values.
seq2seq
Loaded seq2seq model.
Returns
-------
Initialized OutlierSeq2Seq instance.
"""
seq_len, n_features = state_dict['shape'][1:]
od = OutlierSeq2Seq(n_features,
seq_len,
threshold=state_dict['threshold'],
seq2seq=seq2seq,
latent_dim=state_dict['latent_dim'],
output_activation=state_dict['output_activation'])
return od
def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \
-> Tuple[TransformerEmbedding, Callable]:
"""Legacy function to load text embedding."""
model_dir = Path(filepath).joinpath(load_dir)
tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))
args = dill.load(open(model_dir.joinpath('embedding.dill'), 'rb'))
emb = TransformerEmbedding(
str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']
)
return emb, tokenizer
def init_preprocess(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> Tuple[Optional[Callable], Optional[dict]]:
"""Return preprocessing function and kwargs."""
if kwargs: # override defaults
keys = list(kwargs.keys())
preprocess_fn = kwargs['preprocess_fn'] if 'preprocess_fn' in keys else None
preprocess_kwargs = kwargs['preprocess_kwargs'] if 'preprocess_kwargs' in keys else None
return preprocess_fn, preprocess_kwargs
elif model is not None and callable(state_dict['preprocess_fn']) \
and isinstance(state_dict['preprocess_kwargs'], dict):
preprocess_fn = state_dict['preprocess_fn']
preprocess_kwargs = state_dict['preprocess_kwargs']
else:
return None, None
keys = list(preprocess_kwargs.keys())
if 'model' not in keys:
raise ValueError('No model found for the preprocessing step.')
if preprocess_kwargs['model'] == 'UAE':
if emb is not None:
model = _Encoder(emb, mlp=model)
preprocess_kwargs['tokenizer'] = tokenizer
preprocess_kwargs['model'] = UAE(encoder_net=model)
else: # incl. preprocess_kwargs['model'] == 'HiddenOutput'
preprocess_kwargs['model'] = model
return preprocess_fn, preprocess_kwargs
def init_cd_classifierdrift(clf_drift: tf.keras.Model, state_dict: Dict, model: Optional[tf.keras.Model],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> ClassifierDrift:
"""
Initialize ClassifierDrift detector.
Parameters
----------
clf_drift
Model used for drift classification.
state_dict
Dictionary containing the parameter values.
model
Optional preprocessing model.
emb
Optional text embedding model.
tokenizer
Optional tokenizer for text drift.
kwargs
Kwargs optionally containing preprocess_fn and preprocess_kwargs.
Returns
-------
Initialized ClassifierDrift instance.
"""
preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
state_dict['kwargs'].update({'preprocess_fn': partial(preprocess_fn, **preprocess_kwargs)})
state_dict['kwargs']['train_kwargs']['optimizer'] = \
tf.keras.optimizers.get(state_dict['kwargs']['train_kwargs']['optimizer'])
args = list(state_dict['args'].values()) + [clf_drift]
cd = ClassifierDrift(*args, **state_dict['kwargs'])
attrs = state_dict['other']
cd._detector.n = attrs['n']
cd._detector.skf = attrs['skf']
return cd
def init_cd_chisquaredrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> ChiSquareDrift:
"""
Initialize ChiSquareDrift detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
model
Optional preprocessing model.
emb
Optional text embedding model.
tokenizer
Optional tokenizer for text drift.
kwargs
Kwargs optionally containing preprocess_fn and preprocess_kwargs.
Returns
-------
Initialized ChiSquareDrift instance.
"""
preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
state_dict['kwargs'].update({'preprocess_fn': partial(preprocess_fn, **preprocess_kwargs)})
cd = ChiSquareDrift(*list(state_dict['args'].values()), **state_dict['kwargs'])
attrs = state_dict['other']
cd.n = attrs['n']
return cd
def init_cd_tabulardrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> TabularDrift:
"""
Initialize TabularDrift detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
model
Optional preprocessing model.
emb
Optional text embedding model.
tokenizer
Optional tokenizer for text drift.
kwargs
Kwargs optionally containing preprocess_fn and preprocess_kwargs.
Returns
-------
Initialized TabularDrift instance.
"""
preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
state_dict['kwargs'].update({'preprocess_fn': partial(preprocess_fn, **preprocess_kwargs)})
cd = TabularDrift(*list(state_dict['args'].values()), **state_dict['kwargs'])
attrs = state_dict['other']
cd.n = attrs['n']
return cd
def init_cd_ksdrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> KSDrift:
"""
Initialize KSDrift detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
model
Optional preprocessing model.
emb
Optional text embedding model.
tokenizer
Optional tokenizer for text drift.
kwargs
Kwargs optionally containing preprocess_fn and preprocess_kwargs.
Returns
-------
Initialized KSDrift instance.
"""
preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
state_dict['kwargs'].update({'preprocess_fn': partial(preprocess_fn, **preprocess_kwargs)})
cd = KSDrift(*list(state_dict['args'].values()), **state_dict['kwargs'])
attrs = state_dict['other']
cd.n = attrs['n']
return cd
def init_cd_mmddrift(state_dict: Dict, model: Optional[Union[tf.keras.Model, tf.keras.Sequential]],
emb: Optional[TransformerEmbedding], tokenizer: Optional[Callable], **kwargs) \
-> MMDDrift:
"""
Initialize MMDDrift detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
model
Optional preprocessing model.
emb
Optional text embedding model.
tokenizer
Optional tokenizer for text drift.
kwargs
Kwargs optionally containing preprocess_fn and preprocess_kwargs.
Returns
-------
Initialized MMDDrift instance.
"""
preprocess_fn, preprocess_kwargs = init_preprocess(state_dict['other'], model, emb, tokenizer, **kwargs)
if callable(preprocess_fn) and isinstance(preprocess_kwargs, dict):
state_dict['kwargs'].update({'preprocess_fn': partial(preprocess_fn, **preprocess_kwargs)})
cd = MMDDrift(*list(state_dict['args'].values()), **state_dict['kwargs'])
attrs = state_dict['other']
cd._detector.n = attrs['n']
return cd
def init_od_mahalanobis(state_dict: Dict) -> Mahalanobis:
"""
Initialize Mahalanobis.
Parameters
----------
state_dict
Dictionary containing the parameter values.
Returns
-------
Initialized Mahalanobis instance.
"""
od = Mahalanobis(threshold=state_dict['threshold'],
n_components=state_dict['n_components'],
std_clip=state_dict['std_clip'],
start_clip=state_dict['start_clip'],
max_n=state_dict['max_n'],
cat_vars=state_dict['cat_vars'],
ohe=state_dict['ohe'])
od.d_abs = state_dict['d_abs']
od.clip = state_dict['clip']
od.mean = state_dict['mean']
od.C = state_dict['C']
od.n = state_dict['n']
return od
def init_od_iforest(state_dict: Dict) -> IForest:
"""
Initialize isolation forest.
Parameters
----------
state_dict
Dictionary containing the parameter values.
Returns
-------
Initialized IForest instance.
"""
od = IForest(threshold=state_dict['threshold'])
od.isolationforest = state_dict['isolationforest']
return od
def init_od_prophet(state_dict: Dict) -> OutlierProphet:
"""
Initialize OutlierProphet.
Parameters
----------
state_dict
Dictionary containing the parameter values.
Returns
-------
Initialized OutlierProphet instance.
"""
od = OutlierProphet(cap=state_dict['cap'])
od.model = state_dict['model']
return od
def init_od_sr(state_dict: Dict) -> SpectralResidual:
"""
Initialize spectral residual detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
Returns
-------
Initialized SpectralResidual instance.
"""
od = SpectralResidual(threshold=state_dict['threshold'],
window_amp=state_dict['window_amp'],
window_local=state_dict['window_local'],
n_est_points=state_dict['n_est_points'],
n_grad_points=state_dict['n_grad_points'])
return od
def init_od_llr(state_dict: Dict, models: tuple) -> LLR:
"""
Initialize LLR detector.
Parameters
----------
state_dict
Dictionary containing the parameter values.
models
Tuple containing the model and background model.
Returns
-------
Initialized LLR instance.
"""
od = LLR(threshold=state_dict['threshold'],
model=models[0],
model_background=models[1],
log_prob=state_dict['log_prob'],
sequential=state_dict['sequential'])
if models[2] is not None and models[3] is not None:
od.model_s = models[2]
od.model_b = models[3]
return od
| 35,509 | 33.34236 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_tensorflow/conversions.py
|
import tensorflow as tf
def get_tf_dtype(dtype_str: str):
"""Returns tensorflow datatype specified by string."""
return getattr(tf, dtype_str)
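# Illustrative example: get_tf_dtype('float32') returns tf.float32.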
| 154 | 18.375 | 58 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_tensorflow/saving.py
|
import logging
import os
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import dill # dispatch table setting not done here as done in top-level saving.py file
import tensorflow as tf
from tensorflow.keras.layers import Input, InputLayer
# Below imports are used for legacy saving, and will be removed (or moved to utils/loading.py) in the future
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import (ChiSquareDrift, ClassifierDrift, KSDrift,
MMDDrift, TabularDrift)
from alibi_detect.cd.tensorflow import UAE, HiddenOutput
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.models.tensorflow import TransformerEmbedding
from alibi_detect.od import (LLR, IForest, Mahalanobis, OutlierAE,
OutlierAEGMM, OutlierProphet, OutlierSeq2Seq,
OutlierVAE, OutlierVAEGMM, SpectralResidual)
from alibi_detect.utils._types import Literal
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
from alibi_detect.utils.missing_optional_dependency import MissingDependency
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def save_model_config(model: Callable,
base_path: Path,
input_shape: Optional[tuple],
local_path: Path = Path('.')) -> Tuple[dict, Optional[dict]]:
"""
Save a TensorFlow model to a config dictionary. When a model has a text embedding model contained within it,
this is extracted and saved separately.
Parameters
----------
model
The model to save.
base_path
Base filepath to save to (the location of the `config.toml` file).
input_shape
The input dimensions of the model (after the optional embedding has been applied).
local_path
A local (relative) filepath to append to base_path.
Returns
-------
A tuple containing the model and embedding config dicts.
"""
cfg_model: Optional[Dict[str, Any]] = None
cfg_embed: Optional[Dict[str, Any]] = None
if isinstance(model, UAE):
if isinstance(model.encoder.layers[0], TransformerEmbedding): # if UAE contains embedding and encoder
if input_shape is None:
raise ValueError('Cannot save combined embedding and model when `input_shape` is None.')
# embedding
embed = model.encoder.layers[0]
cfg_embed = save_embedding_config(embed, base_path, local_path.joinpath('embedding'))
# preprocessing encoder
inputs = Input(shape=input_shape, dtype=tf.int64)
model.encoder.call(inputs)
shape_enc = (model.encoder.layers[0].output.shape[-1],)
layers = [InputLayer(input_shape=shape_enc)] + model.encoder.layers[1:]
model = tf.keras.Sequential(layers)
_ = model(tf.zeros((1,) + shape_enc))
else: # If UAE is simply an encoder
model = model.encoder
elif isinstance(model, TransformerEmbedding):
cfg_embed = save_embedding_config(model, base_path, local_path.joinpath('embedding'))
model = None
elif isinstance(model, HiddenOutput):
model = model.model
    elif isinstance(model, tf.keras.Model):  # Checked last since TransformerEmbedding and UAE are also tf.keras.Model subclasses
model = model
else:
raise ValueError('Model not recognised, cannot save.')
if model is not None:
filepath = base_path.joinpath(local_path)
save_model(model, filepath=filepath.joinpath('model'))
cfg_model = {
'flavour': Framework.TENSORFLOW.value,
'src': local_path.joinpath('model')
}
return cfg_model, cfg_embed
def save_model(model: tf.keras.Model,
filepath: Union[str, os.PathLike],
filename: str = 'model',
save_format: Literal['tf', 'h5'] = 'h5') -> None: # TODO - change to tf, later PR
"""
Save TensorFlow model.
Parameters
----------
model
The tf.keras.Model to save.
filepath
Save directory.
filename
Name of file to save to within the filepath directory.
save_format
The format to save to. 'tf' to save to the newer SavedModel format, 'h5' to save to the lighter-weight
legacy hdf5 format.
"""
# create folder to save model in
model_path = Path(filepath)
if not model_path.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_path))
model_path.mkdir(parents=True, exist_ok=True)
# save model
model_path = model_path.joinpath(filename + '.h5') if save_format == 'h5' else model_path
if isinstance(model, tf.keras.Model):
model.save(model_path, save_format=save_format)
else:
raise ValueError('The extracted model to save is not a `tf.keras.Model`. Cannot save.')
def save_embedding_config(embed: TransformerEmbedding,
base_path: Path,
local_path: Path = Path('.')) -> dict:
"""
Save embeddings for text drift models.
Parameters
----------
embed
Embedding model.
base_path
Base filepath to save to (the location of the `config.toml` file).
local_path
A local (relative) filepath to append to base_path.
"""
# create folder to save model in
filepath = base_path.joinpath(local_path)
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# Populate config dict
cfg_embed: Dict[str, Any] = {}
cfg_embed.update({'type': embed.emb_type})
cfg_embed.update({'layers': embed.hs_emb.keywords['layers']})
cfg_embed.update({'src': local_path})
cfg_embed.update({'flavour': Framework.TENSORFLOW.value})
# Save embedding model
logger.info('Saving embedding model to {}.'.format(filepath))
embed.model.save_pretrained(filepath)
return cfg_embed
def save_optimizer_config(optimizer: Union[tf.keras.optimizers.Optimizer, tf.keras.optimizers.legacy.Optimizer]):
"""
Parameters
----------
optimizer
The tensorflow optimizer to serialize.
Returns
-------
The tensorflow optimizer's config dictionary.
"""
return tf.keras.optimizers.serialize(optimizer)
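# Illustrative round trip (a sketch; the learning rate is an arbitrary example):
#     cfg = save_optimizer_config(tf.keras.optimizers.Adam(learning_rate=1e-3))
#     opt = tf.keras.optimizers.deserialize(cfg)  # equivalent optimizer instance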
#######################################################################################################
# TODO: Everything below here is legacy saving code, and will be removed in the future
#######################################################################################################
def save_embedding_legacy(embed: TransformerEmbedding,
embed_args: dict,
filepath: Path) -> None:
"""
Save embeddings for text drift models.
Parameters
----------
embed
Embedding model.
embed_args
Arguments for TransformerEmbedding module.
filepath
The save directory.
"""
# create folder to save model in
if not filepath.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(filepath))
filepath.mkdir(parents=True, exist_ok=True)
# Save embedding model
logger.info('Saving embedding model to {}.'.format(filepath.joinpath('embedding.dill')))
embed.save_pretrained(filepath)
with open(filepath.joinpath('embedding.dill'), 'wb') as f:
dill.dump(embed_args, f)
def save_detector_legacy(detector, filepath):
detector_name = detector.meta['name']
# save metadata
logger.info('Saving metadata and detector to {}'.format(filepath))
with open(filepath.joinpath('meta.dill'), 'wb') as f:
dill.dump(detector.meta, f)
# save detector specific parameters
if isinstance(detector, OutlierAE):
state_dict = state_ae(detector)
elif isinstance(detector, OutlierVAE):
state_dict = state_vae(detector)
elif isinstance(detector, Mahalanobis):
state_dict = state_mahalanobis(detector)
elif isinstance(detector, IForest):
state_dict = state_iforest(detector)
elif isinstance(detector, ChiSquareDrift):
state_dict, model, embed, embed_args, tokenizer = state_chisquaredrift(detector)
elif isinstance(detector, ClassifierDrift):
state_dict, clf_drift, model, embed, embed_args, tokenizer = state_classifierdrift(detector)
elif isinstance(detector, TabularDrift):
state_dict, model, embed, embed_args, tokenizer = state_tabulardrift(detector)
elif isinstance(detector, KSDrift):
state_dict, model, embed, embed_args, tokenizer = state_ksdrift(detector)
elif isinstance(detector, MMDDrift):
state_dict, model, embed, embed_args, tokenizer = state_mmddrift(detector)
elif isinstance(detector, OutlierAEGMM):
state_dict = state_aegmm(detector)
elif isinstance(detector, OutlierVAEGMM):
state_dict = state_vaegmm(detector)
elif isinstance(detector, AdversarialAE):
state_dict = state_adv_ae(detector)
elif isinstance(detector, ModelDistillation):
state_dict = state_adv_md(detector)
elif not isinstance(OutlierProphet, MissingDependency) and isinstance(detector, OutlierProphet):
state_dict = state_prophet(detector)
elif isinstance(detector, SpectralResidual):
state_dict = state_sr(detector)
elif isinstance(detector, OutlierSeq2Seq):
state_dict = state_s2s(detector)
elif isinstance(detector, LLR):
state_dict = state_llr(detector)
else:
raise NotImplementedError('The %s detector does not have a legacy save method.' % detector_name)
with open(filepath.joinpath(detector_name + '.dill'), 'wb') as f:
dill.dump(state_dict, f)
# save detector specific TensorFlow models
model_dir = filepath.joinpath('model')
if isinstance(detector, OutlierAE):
save_tf_ae(detector, filepath)
elif isinstance(detector, OutlierVAE):
save_tf_vae(detector, filepath)
elif isinstance(detector, (ChiSquareDrift, ClassifierDrift, KSDrift, MMDDrift, TabularDrift)):
if model is not None:
save_model(model, model_dir, filename='encoder')
if embed is not None:
save_embedding_legacy(embed, embed_args, filepath)
if tokenizer is not None:
tokenizer.save_pretrained(filepath.joinpath('model'))
if detector_name == 'ClassifierDriftTF':
save_model(clf_drift, model_dir, filename='clf_drift')
elif isinstance(detector, OutlierAEGMM):
save_tf_aegmm(detector, filepath)
elif isinstance(detector, OutlierVAEGMM):
save_tf_vaegmm(detector, filepath)
elif isinstance(detector, AdversarialAE):
save_tf_ae(detector, filepath)
save_model(detector.model, model_dir)
save_tf_hl(detector.model_hl, filepath)
elif isinstance(detector, ModelDistillation):
save_model(detector.distilled_model, model_dir, filename='distilled_model')
save_model(detector.model, model_dir, filename='model')
elif isinstance(detector, OutlierSeq2Seq):
save_tf_s2s(detector, filepath)
elif isinstance(detector, LLR):
save_tf_llr(detector, filepath)
def preprocess_step_drift(cd: Union[ChiSquareDrift, ClassifierDriftTF, KSDrift, MMDDriftTF, TabularDrift]) \
-> Tuple[
Optional[Callable], Dict, Optional[tf.keras.Model],
Optional[TransformerEmbedding], Dict, Optional[Callable], bool
]:
# note: need to be able to dill tokenizers other than transformers
preprocess_fn, preprocess_kwargs = None, {}
model, embed, embed_args, tokenizer, load_emb = None, None, {}, None, False
if isinstance(cd.preprocess_fn, partial):
preprocess_fn = cd.preprocess_fn.func
for k, v in cd.preprocess_fn.keywords.items():
if isinstance(v, UAE):
if isinstance(v.encoder.layers[0], TransformerEmbedding): # text drift
# embedding
embed = v.encoder.layers[0].model
embed_args = dict(
embedding_type=v.encoder.layers[0].emb_type,
layers=v.encoder.layers[0].hs_emb.keywords['layers']
)
load_emb = True
# preprocessing encoder
inputs = Input(shape=cd.input_shape, dtype=tf.int64)
v.encoder.call(inputs)
shape_enc = (v.encoder.layers[0].output.shape[-1],)
layers = [InputLayer(input_shape=shape_enc)] + v.encoder.layers[1:]
model = tf.keras.Sequential(layers)
_ = model(tf.zeros((1,) + shape_enc))
else:
model = v.encoder
preprocess_kwargs['model'] = 'UAE'
elif isinstance(v, HiddenOutput):
model = v.model
preprocess_kwargs['model'] = 'HiddenOutput'
elif isinstance(v, tf.keras.Model):
model = v
preprocess_kwargs['model'] = 'custom'
elif hasattr(v, '__module__'):
if 'transformers' in v.__module__: # transformers tokenizer
tokenizer = v
preprocess_kwargs[k] = v.__module__
else:
preprocess_kwargs[k] = v
elif callable(cd.preprocess_fn):
preprocess_fn = cd.preprocess_fn
return preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb
def state_chisquaredrift(cd: ChiSquareDrift) -> Tuple[
Dict, Optional[tf.keras.Model],
Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
"""
Chi-Squared drift detector parameters to save.
Parameters
----------
cd
Drift detection object.
"""
preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
preprocess_step_drift(cd)
state_dict = {
'args':
{
'x_ref': cd.x_ref
},
'kwargs':
{
'p_val': cd.p_val,
'categories_per_feature': cd.x_ref_categories,
'x_ref_preprocessed': True,
'preprocess_at_init': cd.preprocess_at_init,
'update_x_ref': cd.update_x_ref,
'correction': cd.correction,
'n_features': cd.n_features,
'input_shape': cd.input_shape,
},
'other':
{
'n': cd.n,
'load_text_embedding': load_emb,
'preprocess_fn': preprocess_fn,
'preprocess_kwargs': preprocess_kwargs
}
}
return state_dict, model, embed, embed_args, tokenizer
def state_classifierdrift(cd: ClassifierDrift) -> Tuple[
Dict, tf.keras.Model,
Optional[tf.keras.Model],
Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
"""
Classifier-based drift detector parameters to save.
Parameters
----------
cd
Drift detection object.
"""
preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
preprocess_step_drift(cd._detector)
cd._detector.train_kwargs['optimizer'] = tf.keras.optimizers.serialize(cd._detector.train_kwargs['optimizer'])
state_dict = {
'args':
{
'x_ref': cd._detector.x_ref,
},
'kwargs':
{
'p_val': cd._detector.p_val,
'x_ref_preprocessed': True,
'preprocess_at_init': cd._detector.preprocess_at_init,
'update_x_ref': cd._detector.update_x_ref,
'preds_type': cd._detector.preds_type,
'binarize_preds': cd._detector.binarize_preds,
'train_size': cd._detector.train_size,
'train_kwargs': cd._detector.train_kwargs,
},
'other':
{
'n': cd._detector.n,
'skf': cd._detector.skf,
'load_text_embedding': load_emb,
'preprocess_fn': preprocess_fn,
'preprocess_kwargs': preprocess_kwargs
}
}
return state_dict, cd._detector.model, model, embed, embed_args, tokenizer
def state_tabulardrift(cd: TabularDrift) -> Tuple[
Dict, Optional[tf.keras.Model],
Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
"""
Tabular drift detector parameters to save.
Parameters
----------
cd
Drift detection object.
"""
preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
preprocess_step_drift(cd)
state_dict = {
'args':
{
'x_ref': cd.x_ref
},
'kwargs':
{
'p_val': cd.p_val,
'categories_per_feature': cd.x_ref_categories,
'x_ref_preprocessed': True,
'preprocess_at_init': cd.preprocess_at_init,
'update_x_ref': cd.update_x_ref,
'correction': cd.correction,
'alternative': cd.alternative,
'n_features': cd.n_features,
'input_shape': cd.input_shape,
},
'other':
{
'n': cd.n,
'load_text_embedding': load_emb,
'preprocess_fn': preprocess_fn,
'preprocess_kwargs': preprocess_kwargs
}
}
return state_dict, model, embed, embed_args, tokenizer
def state_ksdrift(cd: KSDrift) -> Tuple[
Dict, Optional[tf.keras.Model],
Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
"""
K-S drift detector parameters to save.
Parameters
----------
cd
Drift detection object.
"""
preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
preprocess_step_drift(cd)
state_dict = {
'args':
{
'x_ref': cd.x_ref
},
'kwargs':
{
'p_val': cd.p_val,
'x_ref_preprocessed': True,
'preprocess_at_init': cd.preprocess_at_init,
'update_x_ref': cd.update_x_ref,
'correction': cd.correction,
'alternative': cd.alternative,
'n_features': cd.n_features,
'input_shape': cd.input_shape,
},
'other':
{
'n': cd.n,
'load_text_embedding': load_emb,
'preprocess_fn': preprocess_fn,
'preprocess_kwargs': preprocess_kwargs
}
}
return state_dict, model, embed, embed_args, tokenizer
def state_mmddrift(cd: MMDDrift) -> Tuple[
Dict, Optional[tf.keras.Model],
Optional[TransformerEmbedding], Optional[Dict], Optional[Callable]
]:
"""
MMD drift detector parameters to save.
Note: only GaussianRBF kernel supported.
Parameters
----------
cd
Drift detection object.
"""
preprocess_fn, preprocess_kwargs, model, embed, embed_args, tokenizer, load_emb = \
preprocess_step_drift(cd._detector)
if not isinstance(cd._detector.kernel, GaussianRBF):
logger.warning('Currently only the default GaussianRBF kernel is supported.')
sigma = cd._detector.kernel.sigma.numpy() if not cd._detector.infer_sigma else None
state_dict = {
'args':
{
'x_ref': cd._detector.x_ref,
},
'kwargs':
{
'p_val': cd._detector.p_val,
'x_ref_preprocessed': True,
'preprocess_at_init': cd._detector.preprocess_at_init,
'update_x_ref': cd._detector.update_x_ref,
'sigma': sigma,
'configure_kernel_from_x_ref': not cd._detector.infer_sigma,
'n_permutations': cd._detector.n_permutations,
'input_shape': cd._detector.input_shape,
},
'other':
{
'n': cd._detector.n,
'load_text_embedding': load_emb,
'preprocess_fn': preprocess_fn,
'preprocess_kwargs': preprocess_kwargs
}
}
return state_dict, model, embed, embed_args, tokenizer
def state_iforest(od: IForest) -> Dict:
"""
Isolation forest parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'isolationforest': od.isolationforest}
return state_dict
def state_mahalanobis(od: Mahalanobis) -> Dict:
"""
Mahalanobis parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'n_components': od.n_components,
'std_clip': od.std_clip,
'start_clip': od.start_clip,
'max_n': od.max_n,
'cat_vars': od.cat_vars,
'ohe': od.ohe,
'd_abs': od.d_abs,
'clip': od.clip,
'mean': od.mean,
'C': od.C,
'n': od.n}
return state_dict
def state_ae(od: OutlierAE) -> Dict:
"""
OutlierAE parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold}
return state_dict
def state_vae(od: OutlierVAE) -> Dict:
"""
OutlierVAE parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'score_type': od.score_type,
'samples': od.samples,
'latent_dim': od.vae.latent_dim,
'beta': od.vae.beta}
return state_dict
def state_aegmm(od: OutlierAEGMM) -> Dict:
"""
OutlierAEGMM parameters to save.
Parameters
----------
od
Outlier detector object.
"""
if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
logger.warning('Saving AEGMM detector that has not been fit.')
state_dict = {'threshold': od.threshold,
'n_gmm': od.aegmm.n_gmm,
'recon_features': od.aegmm.recon_features,
'phi': od.phi,
'mu': od.mu,
'cov': od.cov,
'L': od.L,
'log_det_cov': od.log_det_cov}
return state_dict
def state_vaegmm(od: OutlierVAEGMM) -> Dict:
"""
OutlierVAEGMM parameters to save.
Parameters
----------
od
Outlier detector object.
"""
if not all(tf.is_tensor(_) for _ in [od.phi, od.mu, od.cov, od.L, od.log_det_cov]):
logger.warning('Saving VAEGMM detector that has not been fit.')
state_dict = {'threshold': od.threshold,
'samples': od.samples,
'n_gmm': od.vaegmm.n_gmm,
'latent_dim': od.vaegmm.latent_dim,
'beta': od.vaegmm.beta,
'recon_features': od.vaegmm.recon_features,
'phi': od.phi,
'mu': od.mu,
'cov': od.cov,
'L': od.L,
'log_det_cov': od.log_det_cov}
return state_dict
def state_adv_ae(ad: AdversarialAE) -> Dict:
"""
AdversarialAE parameters to save.
Parameters
----------
ad
Adversarial detector object.
"""
state_dict = {'threshold': ad.threshold,
'w_model_hl': ad.w_model_hl,
'temperature': ad.temperature,
'hidden_layer_kld': ad.hidden_layer_kld}
return state_dict
def state_adv_md(md: ModelDistillation) -> Dict:
"""
ModelDistillation parameters to save.
Parameters
----------
md
ModelDistillation detector object.
"""
state_dict = {'threshold': md.threshold,
'temperature': md.temperature,
'loss_type': md.loss_type}
return state_dict
def state_prophet(od: OutlierProphet) -> Dict:
"""
OutlierProphet parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'model': od.model,
'cap': od.cap}
return state_dict
def state_sr(od: SpectralResidual) -> Dict:
"""
Spectral residual parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'window_amp': od.window_amp,
'window_local': od.window_local,
'n_est_points': od.n_est_points,
'n_grad_points': od.n_grad_points}
return state_dict
def state_s2s(od: OutlierSeq2Seq) -> Dict:
"""
OutlierSeq2Seq parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {'threshold': od.threshold,
'beta': od.seq2seq.beta,
'shape': od.shape,
'latent_dim': od.latent_dim,
'output_activation': od.output_activation}
return state_dict
def state_llr(od: LLR) -> Dict:
"""
LLR parameters to save.
Parameters
----------
od
Outlier detector object.
"""
state_dict = {
'threshold': od.threshold,
'has_log_prob': od.has_log_prob,
'sequential': od.sequential,
'log_prob': od.log_prob
}
return state_dict
def save_tf_ae(detector: Union[OutlierAE, AdversarialAE],
filepath: Union[str, os.PathLike]) -> None:
"""
    Save TensorFlow components of OutlierAE or AdversarialAE.
Parameters
----------
detector
Outlier or adversarial detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# save encoder, decoder and vae weights
if isinstance(detector.ae.encoder.encoder_net, tf.keras.Sequential):
detector.ae.encoder.encoder_net.save(model_dir.joinpath('encoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
if isinstance(detector.ae.decoder.decoder_net, tf.keras.Sequential):
detector.ae.decoder.decoder_net.save(model_dir.joinpath('decoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
if isinstance(detector.ae, tf.keras.Model):
detector.ae.save_weights(model_dir.joinpath('ae.ckpt'))
else:
logger.warning('No `tf.keras.Model` ae detected. No ae saved.')
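# Illustrative directory layout produced by save_tf_ae(detector, filepath), assuming Sequential
# encoder/decoder nets and a tf.keras.Model autoencoder:
#   <filepath>/model/encoder_net.h5
#   <filepath>/model/decoder_net.h5
#   <filepath>/model/ae.ckpt*   (TensorFlow checkpoint files holding the full AE weights)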
def save_tf_vae(detector: OutlierVAE,
filepath: Union[str, os.PathLike]) -> None:
"""
Save TensorFlow components of OutlierVAE.
Parameters
----------
detector
Outlier detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# save encoder, decoder and vae weights
if isinstance(detector.vae.encoder.encoder_net, tf.keras.Sequential):
detector.vae.encoder.encoder_net.save(model_dir.joinpath('encoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
if isinstance(detector.vae.decoder.decoder_net, tf.keras.Sequential):
detector.vae.decoder.decoder_net.save(model_dir.joinpath('decoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
if isinstance(detector.vae, tf.keras.Model):
detector.vae.save_weights(model_dir.joinpath('vae.ckpt'))
else:
logger.warning('No `tf.keras.Model` vae detected. No vae saved.')
def save_tf_llr(detector: LLR, filepath: Union[str, os.PathLike]) -> None:
"""
Save LLR TensorFlow models or distributions.
Parameters
----------
detector
Outlier detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# Save LLR model
if hasattr(detector, 'model_s') and hasattr(detector, 'model_b'):
detector.model_s.save_weights(model_dir.joinpath('model_s.h5'))
detector.model_b.save_weights(model_dir.joinpath('model_b.h5'))
else:
detector.dist_s.save(model_dir.joinpath('model.h5'))
if detector.dist_b is not None:
detector.dist_b.save(model_dir.joinpath('model_background.h5'))
def save_tf_hl(models: List[tf.keras.Model],
filepath: Union[str, os.PathLike]) -> None:
"""
Save TensorFlow model weights.
Parameters
----------
models
List with tf.keras models.
filepath
Save directory.
"""
if isinstance(models, list):
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# Save model
for i, m in enumerate(models):
model_path = model_dir.joinpath('model_hl_' + str(i) + '.ckpt')
m.save_weights(model_path)
def save_tf_aegmm(od: OutlierAEGMM,
filepath: Union[str, os.PathLike]) -> None:
"""
Save TensorFlow components of OutlierAEGMM.
Parameters
----------
od
Outlier detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# save encoder, decoder, gmm density model and aegmm weights
if isinstance(od.aegmm.encoder, tf.keras.Sequential):
od.aegmm.encoder.save(model_dir.joinpath('encoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
if isinstance(od.aegmm.decoder, tf.keras.Sequential):
od.aegmm.decoder.save(model_dir.joinpath('decoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
if isinstance(od.aegmm.gmm_density, tf.keras.Sequential):
od.aegmm.gmm_density.save(model_dir.joinpath('gmm_density_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` GMM density net detected. No GMM density net saved.')
if isinstance(od.aegmm, tf.keras.Model):
od.aegmm.save_weights(model_dir.joinpath('aegmm.ckpt'))
else:
logger.warning('No `tf.keras.Model` AEGMM detected. No AEGMM saved.')
def save_tf_vaegmm(od: OutlierVAEGMM,
filepath: Union[str, os.PathLike]) -> None:
"""
Save TensorFlow components of OutlierVAEGMM.
Parameters
----------
od
Outlier detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# save encoder, decoder, gmm density model and vaegmm weights
if isinstance(od.vaegmm.encoder.encoder_net, tf.keras.Sequential):
od.vaegmm.encoder.encoder_net.save(model_dir.joinpath('encoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` encoder detected. No encoder saved.')
if isinstance(od.vaegmm.decoder, tf.keras.Sequential):
od.vaegmm.decoder.save(model_dir.joinpath('decoder_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` decoder detected. No decoder saved.')
if isinstance(od.vaegmm.gmm_density, tf.keras.Sequential):
od.vaegmm.gmm_density.save(model_dir.joinpath('gmm_density_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` GMM density net detected. No GMM density net saved.')
if isinstance(od.vaegmm, tf.keras.Model):
od.vaegmm.save_weights(model_dir.joinpath('vaegmm.ckpt'))
else:
logger.warning('No `tf.keras.Model` VAEGMM detected. No VAEGMM saved.')
def save_tf_s2s(od: OutlierSeq2Seq,
filepath: Union[str, os.PathLike]) -> None:
"""
Save TensorFlow components of OutlierSeq2Seq.
Parameters
----------
od
Outlier detector object.
filepath
Save directory.
"""
# create folder to save model in
model_dir = Path(filepath).joinpath('model')
if not model_dir.is_dir():
logger.warning('Directory {} does not exist and is now created.'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
# save seq2seq model weights and threshold estimation network
if isinstance(od.seq2seq.threshold_net, tf.keras.Sequential):
od.seq2seq.threshold_net.save(model_dir.joinpath('threshold_net.h5'))
else:
logger.warning('No `tf.keras.Sequential` threshold estimation net detected. No threshold net saved.')
if isinstance(od.seq2seq, tf.keras.Model):
od.seq2seq.save_weights(model_dir.joinpath('seq2seq.ckpt'))
else:
logger.warning('No `tf.keras.Model` Seq2Seq detected. No Seq2Seq model saved.')
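# Minimal sketch of how the state_* and save_tf_* helpers above fit together for a single detector
# (illustrative only; orchestration is normally handled by the legacy save_detector_legacy routine):
#
#   od = OutlierVAE(...)          # a fitted detector
#   state_dict = state_vae(od)    # picklable hyperparameters/threshold
#   save_tf_vae(od, filepath)     # TensorFlow weights written under <filepath>/model/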
| 34,567 | 34.237513 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_tensorflow/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
load_detector_legacy, load_kernel_config_tf, load_embedding_tf, load_model_tf, load_optimizer_tf, \
prep_model_and_emb_tf = import_optional(
'alibi_detect.saving._tensorflow.loading',
names=['load_detector_legacy',
'load_kernel_config',
'load_embedding',
'load_model',
'load_optimizer',
'prep_model_and_emb'])
save_detector_legacy, save_model_config_tf, save_optimizer_config_tf = import_optional(
'alibi_detect.saving._tensorflow.saving',
names=['save_detector_legacy', 'save_model_config', 'save_optimizer_config']
)
get_tf_dtype = import_optional(
'alibi_detect.saving._tensorflow.conversions',
names=['get_tf_dtype']
)
__all__ = [
"load_detector_legacy",
"load_kernel_config_tf",
"load_embedding_tf",
"load_model_tf",
"load_optimizer_tf",
"prep_model_and_emb_tf",
"save_detector_legacy",
"save_model_config_tf",
"save_optimizer_config_tf",
"get_tf_dtype"
]
| 1,091 | 30.2 | 99 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/saving/_tensorflow/tests/test_saving_tf.py
|
from pytest_cases import param_fixture, parametrize, parametrize_with_cases
import pytest
from alibi_detect.saving.tests.datasets import ContinuousData
from alibi_detect.saving.tests.models import encoder_model
from alibi_detect.cd.tensorflow import HiddenOutput as HiddenOutput_tf
from alibi_detect.saving.loading import _load_model_config, _load_optimizer_config
from alibi_detect.saving.saving import _path2str, _save_model_config, _save_optimizer_config
from alibi_detect.saving.schemas import ModelConfig, SupportedOptimizer
import tensorflow as tf
import numpy as np
from packaging import version
backend = param_fixture("backend", ['tensorflow'])
# Note: The full save/load functionality of optimizers (inc. validation) is tested in test_save_classifierdrift.
@pytest.mark.skipif(version.parse(tf.__version__) < version.parse('2.11.0'),
reason="Skipping since tensorflow < 2.11.0")
@parametrize('legacy', [True, False])
def test_load_optimizer_object_tf2pt11(legacy, backend):
"""
Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow>=2.11.
    Here we test that "new" and legacy optimizers can be saved/loaded. We expect the returned optimizer to be an
instantiated `tf.keras.optimizers.Optimizer` object. Also test that the loaded optimizer can be saved.
"""
class_name = 'Adam'
class_str = class_name if legacy else 'Custom>' + class_name # Note: see discussion in #739 re 'Custom>'
learning_rate = np.float32(0.01) # Set as float32 since this is what _save_optimizer_config returns
epsilon = np.float32(1e-7)
amsgrad = False
# Load
cfg_opt = {
'class_name': class_str,
'config': {
'name': class_name,
'learning_rate': learning_rate,
'epsilon': epsilon,
'amsgrad': amsgrad
}
}
optimizer = _load_optimizer_config(cfg_opt, backend=backend)
# Check optimizer
SupportedOptimizer.validate_optimizer(optimizer, {'backend': 'tensorflow'})
if legacy:
assert isinstance(optimizer, tf.keras.optimizers.legacy.Optimizer)
else:
assert isinstance(optimizer, tf.keras.optimizers.Optimizer)
assert type(optimizer).__name__ == class_name
assert optimizer.learning_rate == learning_rate
assert optimizer.epsilon == epsilon
assert optimizer.amsgrad == amsgrad
# Save
cfg_saved = _save_optimizer_config(optimizer)
# Compare to original config
for key, value in cfg_opt['config'].items():
assert value == cfg_saved['config'][key]
@pytest.mark.skipif(version.parse(tf.__version__) >= version.parse('2.11.0'),
reason="Skipping since tensorflow >= 2.11.0")
def test_load_optimizer_object_tf_old(backend):
"""
Test the _load_optimizer_config with a tensorflow optimizer config. Only run if tensorflow<2.11.
We expect the returned optimizer to be an instantiated `tf.keras.optimizers.Optimizer` object.
Also test that the loaded optimizer can be saved.
"""
class_name = 'Adam'
learning_rate = np.float32(0.01) # Set as float32 since this is what _save_optimizer_config returns
epsilon = np.float32(1e-7)
amsgrad = False
# Load
cfg_opt = {
'class_name': class_name,
'config': {
'name': class_name,
'learning_rate': learning_rate,
'epsilon': epsilon,
'amsgrad': amsgrad
}
}
optimizer = _load_optimizer_config(cfg_opt, backend=backend)
# Check optimizer
SupportedOptimizer.validate_optimizer(optimizer, {'backend': 'tensorflow'})
assert isinstance(optimizer, tf.keras.optimizers.Optimizer)
assert type(optimizer).__name__ == class_name
assert optimizer.learning_rate == learning_rate
assert optimizer.epsilon == epsilon
assert optimizer.amsgrad == amsgrad
# Save
cfg_saved = _save_optimizer_config(optimizer)
# Compare to original config
for key, value in cfg_opt['config'].items():
assert value == cfg_saved['config'][key]
def test_load_optimizer_type(backend):
"""
Test the _load_optimizer_config with just the `class_name` specified. In this case we expect a
`tf.keras.optimizers.Optimizer` class to be returned.
"""
class_name = 'Adam'
cfg_opt = {'class_name': class_name}
optimizer = _load_optimizer_config(cfg_opt, backend=backend)
assert isinstance(optimizer, type)
assert optimizer.__name__ == class_name
@parametrize_with_cases("data", cases=ContinuousData.data_synthetic_nd, prefix='data_')
@parametrize('model', [encoder_model])
@parametrize('layer', [None, -1])
def test_save_model_tf(data, model, layer, tmp_path):
"""
Unit test for _save_model_config and _load_model_config with tensorflow model.
"""
# Save model
filepath = tmp_path
input_shape = (data[0].shape[1],)
cfg_model, _ = _save_model_config(model, base_path=filepath, input_shape=input_shape)
cfg_model = _path2str(cfg_model)
cfg_model = ModelConfig(**cfg_model).dict()
assert tmp_path.joinpath('model').is_dir()
assert tmp_path.joinpath('model/model.h5').is_file()
# Adjust config
cfg_model['src'] = tmp_path.joinpath('model') # Need to manually set to absolute path here
if layer is not None:
cfg_model['layer'] = layer
# Load model
model_load = _load_model_config(cfg_model)
if layer is None:
assert isinstance(model_load, type(model))
else:
assert isinstance(model_load, HiddenOutput_tf)
| 5,554 | 37.846154 | 112 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/tests/test_datasets.py
|
import numpy as np
import pandas as pd
import pytest
from requests import RequestException
from urllib.error import URLError
from alibi_detect.datasets import fetch_kdd, fetch_ecg, corruption_types_cifar10c, fetch_cifar10c, \
fetch_attack, fetch_nab, get_list_nab
from alibi_detect.utils.data import Bunch
# KDD cup dataset
target_list = ['dos', 'r2l', 'u2r', 'probe']
keep_cols_list = ['srv_count', 'serror_rate', 'srv_serror_rate',
'rerror_rate', 'srv_rerror_rate', 'same_srv_rate',
'diff_srv_rate', 'srv_diff_host_rate', 'dst_host_count',
'dst_host_srv_count', 'dst_host_same_srv_rate',
'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',
'dst_host_srv_diff_host_rate', 'dst_host_serror_rate',
'dst_host_srv_serror_rate', 'dst_host_rerror_rate',
'dst_host_srv_rerror_rate']
@pytest.mark.parametrize('return_X_y', [True, False])
def test_fetch_kdd(return_X_y):
target = np.random.choice(target_list, 2, replace=False)
keep_cols = np.random.choice(keep_cols_list, 5, replace=False)
try:
data = fetch_kdd(target=target, keep_cols=keep_cols, percent10=True, return_X_y=return_X_y)
except URLError:
pytest.skip('KDD dataset URL down')
if return_X_y:
assert isinstance(data, tuple)
assert isinstance(data[0], np.ndarray) and isinstance(data[1], np.ndarray)
else:
assert isinstance(data, Bunch)
assert isinstance(data.data, np.ndarray) and isinstance(data.target, np.ndarray)
assert list(data.feature_names) == list(keep_cols)
# ECG dataset
@pytest.mark.parametrize('return_X_y', [True, False])
def test_fetch_ecg(return_X_y):
try:
data = fetch_ecg(return_X_y=return_X_y)
except RequestException:
pytest.skip('ECG dataset URL down')
if return_X_y:
assert isinstance(data, tuple)
assert isinstance(data[0][0], np.ndarray) and isinstance(data[0][1], np.ndarray) and \
isinstance(data[1][0], np.ndarray) and isinstance(data[1][1], np.ndarray)
else:
assert isinstance(data, Bunch)
assert isinstance(data.data_train, np.ndarray) and isinstance(data.data_test, np.ndarray) and \
isinstance(data.target_train, np.ndarray) and isinstance(data.target_test, np.ndarray)
# CIFAR-10-C dataset
try:
corruption_list = corruption_types_cifar10c()
except RequestException:
corruption_list = None
@pytest.mark.skipif(corruption_list is None, reason="CIFAR-10-C dataset URL is down")
def test_types_cifar10c():
print(corruption_list)
assert len(corruption_list) == 19
@pytest.mark.skipif(corruption_list is None, reason="CIFAR-10-C dataset URL is down")
@pytest.mark.parametrize('return_X_y', [True, False])
def test_fetch_cifar10c(return_X_y):
corruption = list(np.random.choice(corruption_list, 5, replace=False))
try:
data = fetch_cifar10c(corruption=corruption, severity=2, return_X_y=return_X_y)
except RequestException:
pytest.skip('CIFAR-10-C dataset URL down')
if return_X_y:
assert isinstance(data, tuple)
assert isinstance(data[0], np.ndarray) and isinstance(data[1], np.ndarray)
else:
assert isinstance(data, Bunch)
assert isinstance(data.data, np.ndarray) and isinstance(data.target, np.ndarray)
# Attack datasets
datasets = ['cifar10']
models = ['resnet56']
attacks = ['cw', 'slide']
@pytest.mark.parametrize('return_X_y', [True, False])
def test_fetch_attack(return_X_y):
dataset = list(np.random.choice(datasets, 1))[0]
model = list(np.random.choice(models, 1))[0]
attack = list(np.random.choice(attacks, 1))[0]
try:
data = fetch_attack(dataset=dataset, model=model, attack=attack, return_X_y=return_X_y)
except RequestException:
pytest.skip('Attack dataset URL down for dataset %s, model %s, and attack %s' % (dataset, model, attack))
if return_X_y:
assert isinstance(data, tuple)
assert isinstance(data[0][0], np.ndarray) and isinstance(data[0][1], np.ndarray) and \
isinstance(data[1][0], np.ndarray) and isinstance(data[1][1], np.ndarray)
else:
assert isinstance(data, Bunch)
assert isinstance(data.data_train, np.ndarray) and isinstance(data.data_test, np.ndarray) and \
isinstance(data.target_train, np.ndarray) and isinstance(data.target_test, np.ndarray)
# assert data.meta['attack_type'] == attack
# NAB dataset
files = get_list_nab()
def test_list_nab():
assert len(files) == 58
@pytest.mark.parametrize('return_X_y', [True, False])
def test_fetch_nab(return_X_y):
idx = np.random.choice(len(files))
try:
data = fetch_nab(files[idx], return_X_y=return_X_y)
except RequestException:
pytest.skip('NAB dataset URL down')
if return_X_y:
assert isinstance(data, tuple)
assert isinstance(data[0], pd.DataFrame) and isinstance(data[1], pd.DataFrame)
else:
assert isinstance(data, Bunch)
assert isinstance(data.data, pd.DataFrame) and isinstance(data.target, pd.DataFrame)
# Genome dataset
# TODO - Genome dataset is large compared to others - do we want to include in regular CI?
| 5,278 | 37.816176 | 113 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/tests/test_dep_management.py
|
"""
Test optional dependencies.
These tests import all the named objects from the public API of alibi-detect and test that they throw the correct errors
if the relevant optional dependencies are not installed. If these tests fail, it is likely that:
1. The optional dependency relation hasn't been added to the test script. In this case, this test assumes that the
functionality should work for the default alibi-detect install. If this is not the case the exported object name
should be added to the dependency_map in the relevant test.
2. The relevant export in the public API hasn't been imported using `import_optional` from
`alibi_detect.utils.missing_optional_dependency`.
Notes
-----
1. These tests will be skipped in the normal test suite. To run them correctly, use tox.
2. If you need to configure a new optional dependency you will need to update the setup.cfg file and add a testenv
environment.
3. Backend functionality may be unique to specific detectors/functions and so there may be multiple such modules
that need to be tested separately.
"""
from types import ModuleType
from collections import defaultdict
def check_correct_dependencies(
module: ModuleType,
dependencies: defaultdict,
opt_dep: str):
"""Checks that imported modules that depend on optional dependencies throw correct errors on use.
Parameters
----------
module
The module to check. Each of the public objects within this module will be checked.
dependencies
A dictionary mapping the name of the object to the list of optional dependencies that it depends on. If a name
        is not in the dictionary, the named object is assumed to be independent of optional dependencies. Therefore, it
should pass for the default alibi-detect install.
opt_dep
The name of the optional dependency that is being tested.
"""
lib_obj = [obj for obj in dir(module) if not obj.startswith('_')]
for item_name in lib_obj:
item = getattr(module, item_name)
if not isinstance(item, ModuleType):
pass_contexts = dependencies[item_name]
try:
item.test # noqa
except AttributeError:
assert opt_dep in pass_contexts or 'default' in pass_contexts or opt_dep == 'all', \
(f'{item_name} was imported instead of an instance of MissingDependency. '
                     f'Are you sure {item} is dependent on {opt_dep}?')
except ImportError:
assert opt_dep not in pass_contexts and 'default' not in pass_contexts and opt_dep != 'all', \
(f'{item_name} has been imported as an instance of MissingDependency. '
f'Are you sure the dependency buckets, {pass_contexts} are correct?')
def test_cd_dependencies(opt_dep):
"""Tests that the cd module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in []:
dependency_map[dependency] = relations
from alibi_detect import cd
check_correct_dependencies(cd, dependency_map, opt_dep)
def test_cd_torch_dependencies(opt_dep):
"""Tests that the cd module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("HiddenOutput", ['torch', 'keops']),
("UAE", ['torch', 'keops']),
("preprocess_drift", ['torch', 'keops'])
]:
dependency_map[dependency] = relations
from alibi_detect.cd import pytorch as cd_pytorch
check_correct_dependencies(cd_pytorch, dependency_map, opt_dep)
def test_cd_tensorflow_dependencies(opt_dep):
"""Tests that the cd module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("HiddenOutput", ['tensorflow']),
("UAE", ['tensorflow']),
("preprocess_drift", ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect.cd import tensorflow as tensorflow_cd
check_correct_dependencies(tensorflow_cd, dependency_map, opt_dep)
def test_ad_dependencies(opt_dep):
"""Tests that the ad module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('AdversarialAE', ['tensorflow']),
('ModelDistillation', ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect import ad
check_correct_dependencies(ad, dependency_map, opt_dep)
def test_od_dependencies(opt_dep):
"""Tests that the od module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('LLR', ['tensorflow']),
('OutlierVAE', ['tensorflow']),
('OutlierVAEGMM', ['tensorflow']),
('OutlierAE', ['tensorflow']),
('OutlierAEGMM', ['tensorflow']),
('OutlierSeq2Seq', ['tensorflow']),
("OutlierProphet", ['prophet']),
('PValNormalizer', ['torch', 'keops']),
('ShiftAndScaleNormalizer', ['torch', 'keops']),
('TopKAggregator', ['torch', 'keops']),
('AverageAggregator', ['torch', 'keops']),
('MaxAggregator', ['torch', 'keops']),
('MinAggregator', ['torch', 'keops']),
]:
dependency_map[dependency] = relations
from alibi_detect import od
check_correct_dependencies(od, dependency_map, opt_dep)
def test_od_backend_dependencies(opt_dep):
"""Tests that the od module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('Ensembler', ['torch', 'keops']),
('KNNTorch', ['torch', 'keops']),
('MahalanobisTorch', ['torch', 'keops']),
('KernelPCATorch', ['torch', 'keops']),
('LinearPCATorch', ['torch', 'keops']),
('GMMTorch', ['torch', 'keops']),
('LOFTorch', ['torch', 'keops']),
('SgdSVMTorch', ['torch', 'keops']),
('BgdSVMTorch', ['torch', 'keops']),
]:
dependency_map[dependency] = relations
from alibi_detect.od import pytorch as od_pt_backend
check_correct_dependencies(od_pt_backend, dependency_map, opt_dep)
def test_tensorflow_model_dependencies(opt_dep):
"""Tests that the tensorflow models module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("AE", ['tensorflow']),
("AEGMM", ['tensorflow']),
("Seq2Seq", ['tensorflow']),
("VAE", ['tensorflow']),
("VAEGMM", ['tensorflow']),
("resnet", ['tensorflow']),
("PixelCNN", ['tensorflow']),
("TransformerEmbedding", ['tensorflow']),
("trainer", ['tensorflow']),
("eucl_cosim_features", ['tensorflow']),
("elbo", ['tensorflow']),
("loss_vaegmm", ['tensorflow']),
("loss_aegmm", ['tensorflow']),
("loss_adv_ae", ['tensorflow']),
("loss_distillation", ['tensorflow']),
("scale_by_instance", ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect.models import tensorflow as tf_models
check_correct_dependencies(tf_models, dependency_map, opt_dep)
def test_torch_model_dependencies(opt_dep):
"""Tests that the torch models module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("TransformerEmbedding", ['torch', 'keops']),
("trainer", ['torch', 'keops']),
]:
dependency_map[dependency] = relations
from alibi_detect.models import pytorch as torch_models
check_correct_dependencies(torch_models, dependency_map, opt_dep)
def test_dataset_dependencies(opt_dep):
"""Tests that the datasets module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in []:
dependency_map[dependency] = relations
from alibi_detect import datasets
check_correct_dependencies(datasets, dependency_map, opt_dep)
def test_fetching_utils_dependencies(opt_dep):
"""Tests that the fetching utils module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('fetch_detector', ['tensorflow']),
('fetch_tf_model', ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect.utils import fetching
check_correct_dependencies(fetching, dependency_map, opt_dep)
def test_saving_tf_dependencies(opt_dep):
"""Tests that the alibi_detect.saving._tensorflow module correctly protects against uninstalled optional
dependencies.
"""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('Detector', ['tensorflow']),
('load_detector_legacy', ['tensorflow']),
('load_embedding_tf', ['tensorflow']),
('load_kernel_config_tf', ['tensorflow']),
('load_model_tf', ['tensorflow']),
('load_optimizer_tf', ['tensorflow']),
('prep_model_and_emb_tf', ['tensorflow']),
('save_detector_legacy', ['tensorflow']),
('save_model_config_tf', ['tensorflow']),
('save_optimizer_config_tf', ['tensorflow']),
('get_tf_dtype', ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect.saving import _tensorflow as tf_saving
check_correct_dependencies(tf_saving, dependency_map, opt_dep)
def test_saving_torch_dependencies(opt_dep):
"""Tests that the alibi_detect.saving._pytorch module correctly protects against uninstalled optional
dependencies.
"""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
('load_embedding_pt', ['torch', 'keops']),
('load_kernel_config_pt', ['torch', 'keops']),
('load_model_pt', ['torch', 'keops']),
('load_optimizer_pt', ['torch', 'keops']),
('prep_model_and_emb_pt', ['torch', 'keops']),
('save_model_config_pt', ['torch', 'keops']),
('get_pt_dtype', ['torch', 'keops'])
]:
dependency_map[dependency] = relations
from alibi_detect.saving import _pytorch as pt_saving
check_correct_dependencies(pt_saving, dependency_map, opt_dep)
def test_saving_dependencies(opt_dep):
"""Tests that the alibi_detect.saving module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in []:
dependency_map[dependency] = relations
from alibi_detect import saving
check_correct_dependencies(saving, dependency_map, opt_dep)
def test_tensorflow_utils_dependencies(opt_dep):
"""Tests that the saving utils module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("batch_compute_kernel_matrix", ['tensorflow']),
("mmd2", ['tensorflow']),
("mmd2_from_kernel_matrix", ['tensorflow']),
("relative_euclidean_distance", ['tensorflow']),
("squared_pairwise_distance", ['tensorflow']),
("GaussianRBF", ['tensorflow']),
("DeepKernel", ['tensorflow']),
("permed_lsdds", ['tensorflow']),
("predict_batch", ['tensorflow']),
("predict_batch_transformer", ['tensorflow']),
("quantile", ['tensorflow']),
("subset_matrix", ['tensorflow']),
("zero_diag", ['tensorflow']),
("mutate_categorical", ['tensorflow']),
("TFDataset", ['tensorflow'])
]:
dependency_map[dependency] = relations
from alibi_detect.utils import tensorflow as tensorflow_utils
check_correct_dependencies(tensorflow_utils, dependency_map, opt_dep)
def test_torch_utils_dependencies(opt_dep):
"""Tests that the pytorch utils module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("batch_compute_kernel_matrix", ['torch', 'keops']),
("mmd2", ['torch', 'keops']),
("mmd2_from_kernel_matrix", ['torch', 'keops']),
("squared_pairwise_distance", ['torch', 'keops']),
("GaussianRBF", ['torch', 'keops']),
("DeepKernel", ['torch', 'keops']),
("permed_lsdds", ['torch', 'keops']),
("predict_batch", ['torch', 'keops']),
("predict_batch_transformer", ['torch', 'keops']),
("quantile", ['torch', 'keops']),
("zero_diag", ['torch', 'keops']),
("TorchDataset", ['torch', 'keops']),
("get_device", ['torch', 'keops']),
("_save_state_dict", ['torch', 'keops']),
("_load_state_dict", ['torch', 'keops']),
]:
dependency_map[dependency] = relations
from alibi_detect.utils import pytorch as pytorch_utils
check_correct_dependencies(pytorch_utils, dependency_map, opt_dep)
def test_keops_utils_dependencies(opt_dep):
"""Tests that the keops utils module correctly protects against uninstalled optional dependencies."""
dependency_map = defaultdict(lambda: ['default'])
for dependency, relations in [
("GaussianRBF", ['keops']),
("DeepKernel", ['keops']),
]:
dependency_map[dependency] = relations
from alibi_detect.utils import keops as keops_utils
check_correct_dependencies(keops_utils, dependency_map, opt_dep)
| 14,221 | 43.72327 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/tests/conftest.py
|
import pytest
def pytest_addoption(parser):
parser.addoption("--opt-dep", action="store")
@pytest.fixture(scope='session')
def opt_dep(request):
"""Optional dependency fixture.
    Tests that use this fixture must be run with the --opt-dep option via terminal. If not, they will be skipped. This fixture
is used in CI to indicate the optional dependencies installed in the tox environments the tests are run in. See
setup.cfg and .github/workflows/ci.yml for more details.
"""
opt_dep_value = request.config.option.opt_dep
if opt_dep_value is None:
pytest.skip()
return opt_dep_value
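# Example (illustrative): run the optional-dependency tests with the fixture enabled via
#   pytest alibi_detect/tests/test_dep_management.py --opt-dep tensorflow
# Without the --opt-dep option the fixture calls pytest.skip(), as in a regular test run.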
| 621 | 31.736842 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/chisquare.py
|
import numpy as np
from scipy.stats import chi2_contingency
from typing import Callable, Dict, List, Optional, Tuple, Union
from alibi_detect.cd.base import BaseUnivariateDrift
from alibi_detect.utils.warnings import deprecated_alias
class ChiSquareDrift(BaseUnivariateDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
categories_per_feature: Optional[Dict[int, int]] = None,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Chi-Squared data drift detector with Bonferroni or False Discovery Rate (FDR)
correction for multivariate data.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the Chi-Squared test for each feature. If the FDR correction method
is used, this corresponds to the acceptable q-value.
categories_per_feature
Optional dictionary with as keys the feature column index and as values the number of possible
categorical values for that feature or a list with the possible values. If you know how many
categories are present for a given feature you could pass this in the `categories_per_feature` dict
in the Dict[int, int] format, e.g. {0: 3, 3: 2}. If you pass N categories this will assume the
possible values for the feature are [0, ..., N-1]. You can also explicitly pass the possible categories
in the Dict[int, List[int]] format, e.g. {0: [0, 1, 2], 3: [0, 55]}. Note that the categories can be
arbitrary int values. If it is not specified, `categories_per_feature` is inferred from `x_ref`.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
Typically a dimensionality reduction technique.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
n_features
Number of features used in the Chi-Squared test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
# construct categories from the user-specified dict
if isinstance(categories_per_feature, dict):
vals = list(categories_per_feature.values())
int_types = (int, np.int16, np.int32, np.int64)
if all(isinstance(v, int_types) for v in vals):
# categories_per_feature = Dict[int, int]
categories_per_feature = {f: list(np.arange(v)) # type: ignore
for f, v in categories_per_feature.items()}
elif not all(isinstance(val, list) for val in vals) and \
all(isinstance(v, int_types) for val in vals for v in val): # type: ignore
raise ValueError('categories_per_feature needs to be None or one of '
'Dict[int, int], Dict[int, List[int]]')
else: # infer number of possible categories for each feature from reference data
x_flat = self.x_ref.reshape(self.x_ref.shape[0], -1)
categories_per_feature = {f: list(np.unique(x_flat[:, f]))
for f in range(self.n_features)}
self.x_ref_categories = categories_per_feature
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute Chi-Squared test statistic and p-values per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with.
x
Batch of instances.
Returns
-------
Feature level p-values and Chi-Squared statistics.
"""
x_ref = x_ref.reshape(x_ref.shape[0], -1)
x = x.reshape(x.shape[0], -1)
# apply counts on union of categories per variable in both the reference and test data
x_categories = {f: list(np.unique(x[:, f])) for f in range(self.n_features)}
all_categories = {f: list(set().union(self.x_ref_categories[f], x_categories[f])) # type: ignore
for f in range(self.n_features)}
x_ref_count = self._get_counts(x_ref, all_categories)
x_count = self._get_counts(x, all_categories)
p_val = np.zeros(self.n_features, dtype=np.float32)
dist = np.zeros_like(p_val)
for f in range(self.n_features): # apply Chi-Squared test
contingency_table = np.vstack((x_ref_count[f], x_count[f]))
dist[f], p_val[f], _, _ = chi2_contingency(contingency_table)
return p_val, dist
def _get_counts(self, x: np.ndarray, categories: Dict[int, List[int]]) -> Dict[int, List[int]]:
"""
Utility method for getting the counts of categories for each categorical variable.
"""
return {f: [(x[:, f] == v).sum() for v in vals] for f, vals in categories.items()}
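# Minimal usage sketch (illustrative, reusing the example values from the constructor docstring):
#
#   cd = ChiSquareDrift(x_ref, p_val=.05, categories_per_feature={0: 3, 3: 2})
#   preds = cd.predict(x)
#   preds['data']['is_drift']   # 0/1 decision after the multivariate (Bonferroni/FDR) correction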
| 7,025 | 49.913043 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/base.py
|
import logging
from abc import abstractmethod
from typing import Callable, Dict, List, Optional, Tuple, Union, Any
import numpy as np
from alibi_detect.base import BaseDetector, concept_drift_dict, DriftConfigMixin
from alibi_detect.cd.utils import get_input_shape, update_reference
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow
from alibi_detect.utils.statstest import fdr
from scipy.stats import binom_test, ks_2samp
from sklearn.model_selection import StratifiedKFold
if has_pytorch:
import torch
if has_tensorflow:
import tensorflow as tf
logger = logging.getLogger(__name__)
class BaseClassifierDrift(BaseDetector):
model: Union['tf.keras.Model', 'torch.nn.Module']
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
        Base class for the classifier-based drift detector. A classifier is trained to discriminate reference
        instances from test instances; its out-of-fold performance is then compared to the expectation under
        the no-change null (via a K-S test on the probabilities or a binomial test on binarized predictions).
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs probabilities or logits
binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows all the reference and test data to be leveraged
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
if isinstance(train_size, float) and isinstance(n_folds, int):
logger.warning('Both `n_folds` and `train_size` specified. By default `n_folds` is used.')
if n_folds is not None and n_folds > 1 and not retrain_from_scratch:
raise ValueError("If using multiple folds the model must be retrained from scratch for each fold.")
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.update_x_ref = update_x_ref
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
# define whether soft preds and optionally the stratified k-fold split
self.preds_type = preds_type
self.binarize_preds = binarize_preds
if isinstance(n_folds, int):
self.train_size = None
self.skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=seed)
else:
self.train_size, self.skf = train_size, None
self.retrain_from_scratch = retrain_from_scratch
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta['online'] = False
self.meta['data_type'] = data_type
self.meta['detector_type'] = 'drift'
        self.meta['params'] = {'binarize_preds': binarize_preds, 'preds_type': preds_type}
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
else:
x_ref = self.x_ref
return x_ref, x
else:
return self.x_ref, x
def get_splits(
self,
x_ref: Union[np.ndarray, list],
x: Union[np.ndarray, list],
return_splits: bool = True
) -> Union[Tuple[Union[np.ndarray, list], np.ndarray],
Tuple[Union[np.ndarray, list], np.ndarray, Optional[List[Tuple[np.ndarray, np.ndarray]]]]]:
"""
Split reference and test data in train and test folds used by the classifier.
Parameters
----------
x_ref
Data used as reference distribution.
x
Batch of instances.
return_splits
Whether to return the splits.
Returns
-------
Combined reference and test instances with labels and optionally a list with tuples of \
train and test indices for optionally different folds.
"""
# create dataset and labels
y = np.concatenate([np.zeros(len(x_ref)), np.ones(len(x))], axis=0).astype(np.int64) # Fix #411
if isinstance(x_ref, np.ndarray) and isinstance(x, np.ndarray):
x = np.concatenate([x_ref, x], axis=0)
else: # add 2 lists
x = x_ref + x
if not return_splits:
return x, y
# random shuffle if stratified folds are not used
n_tot = len(x)
if self.skf is None:
idx_shuffle = np.random.choice(np.arange(n_tot), size=n_tot, replace=False)
n_tr = int(n_tot * self.train_size)
idx_tr, idx_te = idx_shuffle[:n_tr], idx_shuffle[n_tr:]
splits = [(idx_tr, idx_te)]
else: # use stratified folds
splits = self.skf.split(np.zeros(n_tot), y)
return x, y, splits
def test_probs(
self, y_oof: np.ndarray, probs_oof: np.ndarray, n_ref: int, n_cur: int
) -> Tuple[float, float]:
"""
Perform a statistical test of the probabilities predicted by the model against
what we'd expect under the no-change null.
Parameters
----------
y_oof
Out of fold targets (0 ref, 1 cur)
probs_oof
Probabilities predicted by the model
n_ref
Size of reference window used in training model
n_cur
Size of current window used in training model
Returns
-------
p-value and notion of performance of classifier relative to expectation under null
"""
probs_oof = probs_oof[:, 1] # [1-p, p]
if self.binarize_preds:
baseline_accuracy = max(n_ref, n_cur) / (n_ref + n_cur) # exp under null
n_oof = y_oof.shape[0]
n_correct = (y_oof == probs_oof.round()).sum()
p_val = binom_test(n_correct, n_oof, baseline_accuracy, alternative='greater')
accuracy = n_correct / n_oof
# relative error reduction, in [0,1]
# e.g. (90% acc -> 99% acc) = 0.9, (50% acc -> 59% acc) = 0.18
dist = 1 - (1 - accuracy) / (1 - baseline_accuracy)
dist = max(0, dist) # below 0 = no evidence for drift
else:
probs_ref = probs_oof[y_oof == 0]
probs_cur = probs_oof[y_oof == 1]
dist, p_val = ks_2samp(probs_ref, probs_cur, alternative='greater')
return p_val, dist
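    # Worked example for the binarized case above: with a balanced reference/test split the baseline
    # accuracy is 0.5, so an out-of-fold accuracy of 0.75 gives dist = 1 - (1 - 0.75) / (1 - 0.5) = 0.5.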
@abstractmethod
def score(self, x: Union[np.ndarray, list]) \
-> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
pass
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
return_distance: bool = True, return_probs: bool = True, return_model: bool = True) \
-> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data). The reference and test instances of the associated
probabilities are also returned.
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, performance of the classifier \
relative to its expectation under the no-change null, the out-of-fold classifier model \
prediction probabilities on the reference and test data as well as the associated reference \
and test instances of the out-of-fold predictions, and the trained model.
"""
# compute drift scores
p_val, dist, probs_ref, probs_test, x_ref_oof, x_test_oof = self.score(x)
drift_pred = int(p_val < self.p_val)
# update reference dataset
if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
# TODO: TBD: can `x` ever be a `list` after pre-processing? update_references and downstream functions
# don't support list inputs and without the type: ignore[arg-type] mypy complains
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref) # type: ignore[arg-type]
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_val
cd['data']['threshold'] = self.p_val
if return_distance:
cd['data']['distance'] = dist
if return_probs:
cd['data']['probs_ref'] = probs_ref
cd['data']['probs_test'] = probs_test
cd['data']['x_ref_oof'] = x_ref_oof
cd['data']['x_test_oof'] = x_test_oof
if return_model:
cd['data']['model'] = self.model
return cd
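# Illustrative sketch of consuming the dictionary returned by BaseClassifierDrift.predict
# (keys as populated above; values depend on the concrete classifier-based detector):
#
#   preds = cd.predict(x, return_probs=True, return_model=False)
#   preds['data']['is_drift']    # 0/1 decision at threshold p_val
#   preds['data']['p_val']       # p-value of the K-S or binomial test
#   preds['data']['probs_ref']   # out-of-fold classifier probabilities on the reference data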
class BaseLearnedKernelDrift(BaseDetector):
kernel: Union['tf.keras.Model', 'torch.nn.Module']
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
n_permutations: int = 100,
train_size: Optional[float] = .75,
retrain_from_scratch: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Base class for the learned kernel-based drift detector.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
n_permutations
The number of permutations to use in the permutation test once the MMD has been computed.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
retrain_from_scratch
Whether the kernel should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.update_x_ref = update_x_ref
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_permutations = n_permutations
self.train_size = train_size
self.retrain_from_scratch = retrain_from_scratch
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta['detector_type'] = 'drift'
self.meta['data_type'] = data_type
self.meta['online'] = False
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
else:
x_ref = self.x_ref
return x_ref, x
else:
return self.x_ref, x
def get_splits(self, x_ref: Union[np.ndarray, list], x: Union[np.ndarray, list]) \
-> Tuple[Tuple[Union[np.ndarray, list], Union[np.ndarray, list]],
Tuple[Union[np.ndarray, list], Union[np.ndarray, list]]]:
"""
Split reference and test data into two splits -- one of which to learn test locations
and parameters and one to use for tests.
Parameters
----------
x_ref
Data used as reference distribution.
x
Batch of instances.
Returns
-------
Tuple containing split train data and tuple containing split test data.
"""
n_ref, n_cur = len(x_ref), len(x)
perm_ref, perm_cur = np.random.permutation(n_ref), np.random.permutation(n_cur)
idx_ref_tr, idx_ref_te = perm_ref[:int(n_ref * self.train_size)], perm_ref[int(n_ref * self.train_size):]
idx_cur_tr, idx_cur_te = perm_cur[:int(n_cur * self.train_size)], perm_cur[int(n_cur * self.train_size):]
if isinstance(x_ref, np.ndarray):
x_ref_tr, x_ref_te = x_ref[idx_ref_tr], x_ref[idx_ref_te]
x_cur_tr, x_cur_te = x[idx_cur_tr], x[idx_cur_te]
elif isinstance(x, list):
x_ref_tr, x_ref_te = [x_ref[_] for _ in idx_ref_tr], [x_ref[_] for _ in idx_ref_te]
x_cur_tr, x_cur_te = [x[_] for _ in idx_cur_tr], [x[_] for _ in idx_cur_te]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
return (x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te)
@abstractmethod
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
pass
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
return_distance: bool = True, return_kernel: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the MMD metric between the new batch and reference data.
return_kernel
Whether to return the updated kernel trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the detector's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold, MMD metric and \
trained kernel.
"""
# compute drift scores
p_val, dist, distance_threshold = self.score(x)
drift_pred = int(p_val < self.p_val)
# update reference dataset
if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref) # type: ignore[arg-type]
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_val
cd['data']['threshold'] = self.p_val
if return_distance:
cd['data']['distance'] = dist
cd['data']['distance_threshold'] = distance_threshold
if return_kernel:
cd['data']['kernel'] = self.kernel
return cd
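# --- Editor's note: illustrative sketch for exposition only; not part of the alibi-detect API. ---
# The learned-kernel detector above turns its MMD estimate into a p-value with a permutation test: reference and
# test instances are pooled, repeatedly shuffled and re-split, and the statistic is recomputed on each split. The
# p-value is the fraction of permuted statistics at least as large as the observed one. A minimal NumPy sketch,
# where the hypothetical `statistic_fn` stands in for e.g. an MMD^2 estimate:
def _permutation_pvalue_sketch(x_ref: np.ndarray, x: np.ndarray, statistic_fn: Callable,
                               n_permutations: int = 100) -> float:
    observed = statistic_fn(x_ref, x)
    pooled = np.concatenate([x_ref, x], axis=0)
    n_ref = len(x_ref)
    permuted_stats = []
    for _ in range(n_permutations):
        idx = np.random.permutation(len(pooled))
        permuted_stats.append(statistic_fn(pooled[idx[:n_ref]], pooled[idx[n_ref:]]))
    # proportion of permuted statistics at least as extreme as the observed one
    return float((np.array(permuted_stats) >= observed).mean())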
class BaseMMDDrift(BaseDetector):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) base data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
sigma
Optionally set the Gaussian RBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
self.infer_sigma = configure_kernel_from_x_ref
if configure_kernel_from_x_ref and isinstance(sigma, np.ndarray):
self.infer_sigma = False
logger.warning('`sigma` is specified for the kernel and `configure_kernel_from_x_ref` '
'is set to True. `sigma` argument takes priority over '
'`configure_kernel_from_x_ref` (set to False).')
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.update_x_ref = update_x_ref
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_permutations = n_permutations # nb of iterations through permutation test
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
# TODO: TBD: similar to above, can x be a list here? x_ref is also revealed to be Any,
                # can we tighten the type up (e.g. by typing Callable with stricter inputs/outputs)?
else:
x_ref = self.x_ref
return x_ref, x # type: ignore[return-value]
else:
return self.x_ref, x # type: ignore[return-value]
@abstractmethod
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
pass
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the MMD metric between the new batch and reference data.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and MMD metric.
"""
# compute drift scores
p_val, dist, distance_threshold = self.score(x)
drift_pred = int(p_val < self.p_val)
# update reference dataset
if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref) # type: ignore[arg-type]
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_val
cd['data']['threshold'] = self.p_val
if return_distance:
cd['data']['distance'] = dist
cd['data']['distance_threshold'] = distance_threshold
return cd
class BaseLSDDDrift(BaseDetector):
# TODO: TBD: this is only created when _configure_normalization is called from backend-specific classes,
# is declaring it here the right thing to do?
_normalize: Callable
_unnormalize: Callable
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_permutations: int = 100,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Least-squares Density Difference (LSDD) base data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_permutations
Number of permutations used in the permutation test.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 1/20th of the reference data.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.sigma = sigma
self.update_x_ref = update_x_ref
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_permutations = n_permutations # nb of iterations through permutation test
self.n_kernel_centers = n_kernel_centers or max(self.n // 20, 1)
self.lambda_rd_max = lambda_rd_max
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
else:
x_ref = self.x_ref
return x_ref, x # type: ignore[return-value]
else:
return self.x_ref, x # type: ignore[return-value]
@abstractmethod
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
pass
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the LSDD metric between the new batch and reference data.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and LSDD metric.
"""
# compute drift scores
p_val, dist, distance_threshold = self.score(x)
drift_pred = int(p_val < self.p_val)
# update reference dataset
if isinstance(self.update_x_ref, dict):
if self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
x = self._normalize(x)
elif self.preprocess_fn is None:
x = self._normalize(x)
else:
pass
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref) # type: ignore[arg-type]
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_val
cd['data']['threshold'] = self.p_val
if return_distance:
cd['data']['distance'] = dist
cd['data']['distance_threshold'] = distance_threshold
return cd
class BaseUnivariateDrift(BaseDetector, DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Generic drift detector component which serves as a base class for methods using
univariate tests. If n_features > 1, a multivariate correction is applied such that
the false positive rate is upper bounded by the specified p-value, with equality in
the case of independent features.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the statistical test for each feature. If the FDR correction method
is used, this corresponds to the acceptable q-value.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
Typically a dimensionality reduction technique.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
n_features
Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
input_shape
Shape of input data. Needs to be provided for text data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.update_x_ref = update_x_ref
self.preprocess_fn = preprocess_fn
self.correction = correction
self.n = len(x_ref)
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# compute number of features for the univariate tests
if isinstance(n_features, int):
self.n_features = n_features
elif not isinstance(preprocess_fn, Callable) or preprocess_at_init or x_ref_preprocessed:
# infer features from preprocessed reference data
self.n_features = self.x_ref.reshape(self.x_ref.shape[0], -1).shape[-1]
else: # infer number of features after applying preprocessing step
x = self.preprocess_fn(x_ref[0:1])
self.n_features = x.reshape(x.shape[0], -1).shape[-1]
if correction not in ['bonferroni', 'fdr'] and self.n_features > 1:
raise ValueError('Only `bonferroni` and `fdr` are acceptable for multivariate correction.')
# set metadata
self.meta['online'] = False # offline refers to fitting the CDF for K-S
self.meta['data_type'] = data_type
self.meta['detector_type'] = 'drift'
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
else:
x_ref = self.x_ref
return x_ref, x # type: ignore[return-value]
else:
return self.x_ref, x # type: ignore[return-value]
@abstractmethod
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
pass
def score(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the feature-wise drift score which is the p-value of the
statistical test and the test statistic.
Parameters
----------
x
Batch of instances.
Returns
-------
Feature level p-values and test statistics.
"""
x_ref, x = self.preprocess(x)
score, dist = self.feature_score(x_ref, x) # feature-wise univariate test
return score, dist
def predict(self, x: Union[np.ndarray, list], drift_type: str = 'batch',
return_p_val: bool = True, return_distance: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[np.ndarray, int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
drift_type
Predict drift at the 'feature' or 'batch' level. For 'batch', the test statistics for
each feature are aggregated using the Bonferroni or False Discovery Rate correction (if n_features>1).
return_p_val
Whether to return feature level p-values.
return_distance
Whether to return the test statistic between the features of the new batch and reference data.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the feature level p-values, threshold after \
multivariate correction if needed and test statistics.
"""
# compute drift scores
p_vals, dist = self.score(x)
# TODO: return both feature-level and batch-level drift predictions by default
# values below p-value threshold are drift
if drift_type == 'feature':
drift_pred = (p_vals < self.p_val).astype(int)
elif drift_type == 'batch' and self.correction == 'bonferroni':
threshold = self.p_val / self.n_features
drift_pred = int((p_vals < threshold).any()) # type: ignore[assignment]
elif drift_type == 'batch' and self.correction == 'fdr':
drift_pred, threshold = fdr(p_vals, q_val=self.p_val) # type: ignore[assignment]
else:
raise ValueError('`drift_type` needs to be either `feature` or `batch`.')
# update reference dataset
if isinstance(self.update_x_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_x_ref) # type: ignore[arg-type]
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_vals
cd['data']['threshold'] = self.p_val if drift_type == 'feature' else threshold
if return_distance:
cd['data']['distance'] = dist
return cd
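# --- Editor's note: illustrative sketch for exposition only; not part of the alibi-detect API. ---
# A quick Monte-Carlo check of the claim in the docstring above: with the Bonferroni-corrected threshold
# p_val / n_features, the batch-level false positive rate is upper bounded by p_val, with near-equality
# when the feature-wise p-values are independent (and uniform under the no-drift hypothesis).
def _bonferroni_fpr_sketch(n_features: int = 10, p_val: float = .05, n_trials: int = 10000) -> float:
    p_vals = np.random.uniform(size=(n_trials, n_features))  # feature-level p-values under no drift
    flagged = (p_vals < p_val / n_features).any(axis=1)      # Bonferroni-corrected batch-level decision
    return float(flagged.mean())                             # empirical FPR, expected to be <= p_val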
class BaseContextMMDDrift(BaseDetector):
lams: Optional[Tuple[Any, Any]] = None
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = None,
c_kernel: Callable = None,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False,
) -> None:
"""
Maximum Mean Discrepancy (MMD) based context aware drift detector.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether or not to print progress during configuration.
"""
super().__init__()
if p_val is None:
logger.warning('No p-value set for the drift threshold. Need to set it to detect data drift.')
# x_ref preprocessing
self.preprocess_at_init = preprocess_at_init
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if self.preprocess_at_init and not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.p_val = p_val
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_permutations = n_permutations # nb of iterations through permutation test
self.x_kernel = x_kernel
self.c_kernel = c_kernel
if len(c_ref) == self.n:
self.c_ref = c_ref
else:
raise ValueError('x_ref and c_ref should contain the same number of instances.')
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# Regularisation parameter tuning settings
if n_folds > 1:
self.n_folds = n_folds
else:
raise ValueError('The `n_folds` parameter must be > 1.')
self.lams = None
        # Update ref attribute. Disallow reservoir sampling.
self.update_ref = update_ref
if update_ref is not None:
if 'reservoir_sampling' in update_ref.keys():
raise ValueError("The BaseContextMMDDrift detector doesn't currently support the `reservoir_sampling` "
"option in `update_ref`.")
# Other attributes
self.prop_c_held = prop_c_held
self.batch_size = batch_size
self.verbose = verbose
# set metadata
self.meta.update({'detector_type': 'drift', 'online': False, 'data_type': data_type})
def preprocess(self, x: Union[np.ndarray, list]) -> Tuple[np.ndarray, np.ndarray]:
"""
Data preprocessing before computing the drift scores.
Parameters
----------
x
Batch of instances.
Returns
-------
Preprocessed reference data and new instances.
"""
if self.preprocess_fn is not None:
x = self.preprocess_fn(x)
if not self.preprocess_at_init and not self.x_ref_preprocessed:
x_ref = self.preprocess_fn(self.x_ref)
else:
x_ref = self.x_ref
return x_ref, x # type: ignore[return-value]
else:
return self.x_ref, x # type: ignore[return-value]
@abstractmethod
def score(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
pass
def predict(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray,
return_p_val: bool = True, return_distance: bool = True, return_coupling: bool = False) \
-> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data, given the provided context.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the conditional MMD test statistic between the new batch and reference data.
return_coupling
Whether to return the coupling matrices.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold, conditional MMD test \
statistic and coupling matrices.
"""
# compute drift scores
p_val, dist, distance_threshold, coupling = self.score(x, c)
drift_pred = int(p_val < self.p_val)
# update reference dataset
if isinstance(self.update_ref, dict) and self.preprocess_fn is not None and self.preprocess_at_init:
x = self.preprocess_fn(x)
self.x_ref = update_reference(self.x_ref, x, self.n, self.update_ref) # type: ignore[arg-type]
self.c_ref = update_reference(self.c_ref, c, self.n, self.update_ref)
# used for reservoir sampling
self.n += len(x)
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
if return_p_val:
cd['data']['p_val'] = p_val
cd['data']['threshold'] = self.p_val
if return_distance:
cd['data']['distance'] = dist
cd['data']['distance_threshold'] = distance_threshold
if return_coupling:
cd['data']['coupling_xx'] = coupling[0]
cd['data']['coupling_yy'] = coupling[1]
cd['data']['coupling_xy'] = coupling[2]
return cd
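# --- Editor's note: usage sketch for exposition only; not part of this module. ---
# A minimal end-to-end illustration of the context-aware detector built on the base class above. It assumes the
# public `ContextMMDDrift` wrapper and a TensorFlow (or PyTorch) install; shapes and values are placeholders.
if __name__ == '__main__':
    from alibi_detect.cd import ContextMMDDrift
    x_ref = np.random.randn(500, 2)    # reference data
    c_ref = np.random.randn(500, 1)    # contexts paired with the reference data
    x, c = np.random.randn(200, 2), np.random.randn(200, 1)  # test batch and its contexts
    cd = ContextMMDDrift(x_ref, c_ref, backend='tensorflow', p_val=.05)
    preds = cd.predict(x, c, return_distance=True)
    print(preds['data']['is_drift'], preds['data']['p_val'])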
| 52,690 | 41.94295 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/learned_kernel.py
|
import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from torch.utils.data import DataLoader
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
if has_tensorflow:
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
class LearnedKernelDrift(DriftConfigMixin):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
kernel: Callable,
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
var_reg: float = 1e-5,
reg_loss_fn: Callable = (lambda kernel: 0),
train_size: Optional[float] = .75,
retrain_from_scratch: bool = True,
optimizer: Optional[Callable] = None,
learning_rate: float = 1e-3,
batch_size: int = 32,
batch_size_predict: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Optional[Callable] = None,
dataloader: Optional[Callable] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
estimate of the test power. The kernel is trained on a split of the reference and test instances
and then the MMD is evaluated on held out instances and a permutation test is performed.
For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
(https://arxiv.org/abs/2002.09116)
Parameters
----------
x_ref
Data used as reference distribution.
kernel
Trainable PyTorch or TensorFlow module that returns a similarity between two instances.
backend
Backend used by the kernel and training loop.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before applying the kernel.
n_permutations
The number of permutations to use in the permutation test once the MMD has been computed.
batch_size_permutations
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
Only relevant for 'keops' backend.
var_reg
Constant added to the estimated variance of the MMD for stability.
reg_loss_fn
The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
The drift is detected on `1 - train_size`.
retrain_from_scratch
Whether the kernel should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
optimizer
Optimizer used during training of the kernel.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the kernel.
batch_size_predict
Batch size used for the trained drift detector predictions.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the kernel.
epochs
Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
num_workers
Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
is disabled. Setting `num_workers>0` may be unreliable on Windows.
verbose
Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training. Relevant for 'pytorch' and 'keops' backends.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH],
Framework.KEOPS: [Framework.KEOPS]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref'], kwargs['kernel']]
pop_kwargs = ['self', 'x_ref', 'kernel', 'backend', '__class__']
if kwargs['optimizer'] is None:
pop_kwargs += ['optimizer']
[kwargs.pop(k, None) for k in pop_kwargs]
if backend == Framework.TENSORFLOW:
pop_kwargs = ['device', 'dataloader', 'batch_size_permutations', 'num_workers']
[kwargs.pop(k, None) for k in pop_kwargs]
if dataset is None:
kwargs.update({'dataset': TFDataset})
detector = LearnedKernelDriftTF
else:
if dataset is None:
kwargs.update({'dataset': TorchDataset})
if dataloader is None:
kwargs.update({'dataloader': DataLoader})
if backend == Framework.PYTORCH:
pop_kwargs = ['batch_size_permutations']
[kwargs.pop(k, None) for k in pop_kwargs]
detector = LearnedKernelDriftTorch
else:
detector = LearnedKernelDriftKeops
self._detector = detector(*args, **kwargs)
self.meta = self._detector.meta
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
return_distance: bool = True, return_kernel: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the MMD metric between the new batch and reference data.
return_kernel
Whether to return the updated kernel trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the detector's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold, MMD metric and \
trained kernel.
"""
return self._detector.predict(x, return_p_val, return_distance, return_kernel)
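# --- Editor's note: usage sketch for exposition only; not part of this module. ---
# A minimal illustration of the detector above with the PyTorch backend. It assumes torch and
# `alibi_detect.utils.pytorch.DeepKernel` are available; projection network, shapes and values are placeholders.
if __name__ == '__main__':
    import torch.nn as nn
    from alibi_detect.utils.pytorch import DeepKernel
    x_ref = np.random.randn(500, 32).astype(np.float32)  # reference data
    x = np.random.randn(200, 32).astype(np.float32)      # test batch
    proj = nn.Sequential(nn.Linear(32, 16), nn.ReLU(), nn.Linear(16, 8))  # projection inside the deep kernel
    kernel = DeepKernel(proj, eps=0.01)  # trainable kernel returning a similarity between two instances
    cd = LearnedKernelDrift(x_ref, kernel, backend='pytorch', p_val=.05, epochs=2, batch_size=32)
    preds = cd.predict(x)
    print(preds['data']['is_drift'], preds['data']['p_val'])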
| 9,166 | 45.532995 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/_domain_clf.py
|
from abc import ABC, abstractmethod
from typing import Callable
import numpy as np
from sklearn.svm import SVC
from sklearn.calibration import CalibratedClassifierCV
class _DomainClf(ABC):
"""
    Base class for domain classifiers used in :py:class:`~alibi_detect.cd.ContextMMDDrift`. The `SVCDomainClf` is
currently hardcoded into the detector. Therefore, for now, these classes (and the domain_clf submodule) are
kept private. This is subject to change in the future.
    The classifiers should be fit on conditioning variables `x` and their domain `y` (`0` for reference, `1` for
    test). They should predict propensity scores (the probability of being test instances) as output.
Classifiers should possess a calibrate method to calibrate the propensity scores.
"""
@abstractmethod
def __init__(self, *args, **kwargs: dict):
raise NotImplementedError()
@abstractmethod
def fit(self, x: np.ndarray, y: np.ndarray):
raise NotImplementedError()
@abstractmethod
def calibrate(self, x: np.ndarray, y: np.ndarray):
raise NotImplementedError()
@abstractmethod
def predict(self, x: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class _SVCDomainClf(_DomainClf):
def __init__(self,
kernel: Callable,
cal_method: str = 'sigmoid',
clf_kwargs: dict = None):
"""
A domain classifier using the scikit-learn Support Vector Classifier
(:py:class:`~sklearn.svm.SVC`). An SVC is fitted on all the
data, with scores (that optimise hinge loss) mapped onto probabilities using logistic regression.
Parameters
----------
kernel
Kernel used to pre-compute the kernel matrix from data matrices.
cal_method
The method to be used to calibrate the detector. This should be a method accepted by the scikit-learn
:py:class:`~sklearn.calibration.CalibratedClassifierCV` class.
clf_kwargs
A dictionary of keyword arguments to be passed to the :py:class:`~sklearn.svm.SVC` classifier.
"""
self.kernel = kernel
self.cal_method = cal_method
clf_kwargs = clf_kwargs or {}
self.clf = SVC(kernel=self.kernel, **clf_kwargs)
def fit(self, x: np.ndarray, y: np.ndarray):
"""
Method to fit the classifier.
Parameters
----------
x
Array containing conditioning variables for each instance.
y
Boolean array marking the domain each instance belongs to (`0` for reference, `1` for test).
"""
clf = self.clf
clf.fit(x, y)
self.clf = clf
def calibrate(self, x: np.ndarray, y: np.ndarray):
"""
Method to calibrate the classifier's predicted probabilities.
Parameters
----------
x
Array containing conditioning variables for each instance.
y
Boolean array marking the domain each instance belongs to (`0` for reference, `1` for test).
"""
clf = CalibratedClassifierCV(self.clf, method=self.cal_method, cv='prefit')
clf.fit(x, y)
self.clf = clf
def predict(self, x: np.ndarray) -> np.ndarray:
"""
The classifier's predict method.
Parameters
----------
x
Array containing conditioning variables for each instance.
Returns
-------
Propensity scores (the probability of being test instances).
"""
return self.clf.predict_proba(x)[:, 1]
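# --- Editor's note: usage sketch for exposition only; not part of this module. ---
# A minimal illustration of the propensity-score idea above: fit the domain classifier on pooled reference/test
# conditioning variables with domain labels, calibrate it, then read off the probability of each instance
# belonging to the test domain. sklearn's `rbf_kernel` serves as the callable kernel; values are placeholders.
if __name__ == '__main__':
    from sklearn.metrics.pairwise import rbf_kernel
    c_ref = np.random.randn(200, 3)          # conditioning variables for the reference domain (label 0)
    c_test = np.random.randn(100, 3) + 0.5   # conditioning variables for the test domain (label 1)
    c = np.concatenate([c_ref, c_test])
    y = np.concatenate([np.zeros(len(c_ref)), np.ones(len(c_test))]).astype(int)
    clf = _SVCDomainClf(kernel=rbf_kernel)
    clf.fit(c, y)
    clf.calibrate(c, y)  # in practice, calibration should use data held out from fitting
    print(clf.predict(c)[:5])  # propensity scores: probability of belonging to the test domain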
| 3,611 | 33.730769 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/mmd.py
|
import logging
import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, has_keops, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
if has_keops and has_pytorch:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
logger = logging.getLogger(__name__)
class MMDDrift(DriftConfigMixin):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = None,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
backend
Backend used for the MMD implementation.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
batch_size_permutations
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
Only relevant for 'keops' backend.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH],
Framework.KEOPS: [Framework.KEOPS]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref']]
pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
if backend == Framework.TENSORFLOW:
pop_kwargs += ['device', 'batch_size_permutations']
detector = MMDDriftTF
elif backend == Framework.PYTORCH:
pop_kwargs += ['batch_size_permutations']
detector = MMDDriftTorch
else:
detector = MMDDriftKeops
[kwargs.pop(k, None) for k in pop_kwargs]
if kernel is None:
if backend == Framework.TENSORFLOW:
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
elif backend == Framework.PYTORCH:
from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore
else:
from alibi_detect.utils.keops.kernels import GaussianRBF # type: ignore
kwargs.update({'kernel': GaussianRBF})
self._detector = detector(*args, **kwargs)
self.meta = self._detector.meta
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
-> Dict[Dict[str, str], Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the MMD metric between the new batch and reference data.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and MMD metric.
"""
return self._detector.predict(x, return_p_val, return_distance)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
return self._detector.score(x)
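# --- Editor's note: usage sketch for exposition only; not part of this module. ---
# A minimal illustration of the detector above (assumes a TensorFlow install; shapes and values are placeholders).
if __name__ == '__main__':
    x_ref = np.random.randn(500, 10).astype(np.float32)      # reference data
    x = (np.random.randn(200, 10) + 1.).astype(np.float32)   # shifted test batch
    cd = MMDDrift(x_ref, backend='tensorflow', p_val=.05, n_permutations=100)
    preds = cd.predict(x, return_p_val=True, return_distance=True)
    print(preds['data']['is_drift'], preds['data']['p_val'], preds['data']['distance'])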
| 6,795 | 41.475 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/cvm_online.py
|
import numpy as np
from typing import Any, Callable, List, Optional, Union
from alibi_detect.base import DriftConfigMixin
from alibi_detect.cd.base_online import BaseUniDriftOnline
from alibi_detect.utils.misc import quantile
import numba as nb
from tqdm import tqdm
import warnings
class CVMDriftOnline(BaseUniDriftOnline, DriftConfigMixin):
online_state_keys = ('t', 'test_stats', 'drift_preds', 'xs', 'ids_ref_wins', 'ids_wins_ref', 'ids_wins_wins')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_sizes: List[int],
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
n_bootstraps: int = 10000,
batch_size: int = 64,
n_features: Optional[int] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online Cramer-von Mises (CVM) data drift detector using preconfigured thresholds, which tests for
        any change in the distribution of continuous univariate data. This detector is an adaptation of that
proposed by :cite:t:`Ross2012a`.
For multivariate data, the detector makes a correction similar to the Bonferroni correction used for
the offline detector. Given :math:`d` features, the detector configures thresholds by
targeting the :math:`1-\\beta` quantile of test statistics over the simulated streams, where
:math:`\\beta = 1 - (1-(1/ERT))^{(1/d)}`. For the univariate case, this simplifies to
:math:`\\beta = 1/ERT`. At prediction time, drift is flagged if the test statistic of any feature stream
        exceeds the thresholds.
Note
----
In the multivariate case, for the ERT to be accurately targeted the feature streams must be independent.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the univariate detectors, the ERT is defined
            as the expected run-time after the smallest window is full, i.e. the run-time from t=min(window_sizes).
window_sizes
            Window sizes for the sliding test-windows used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
batch_size
The maximum number of bootstrap simulations to compute in each batch when configuring thresholds.
A smaller batch size reduces memory requirements, but can result in a longer configuration run time.
n_features
Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_sizes=window_sizes,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
n_features=n_features,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
self.batch_size = n_bootstraps if batch_size is None else batch_size
# Configure thresholds and initialise detector
self._initialise_state()
self._configure_thresholds()
self._configure_ref()
def _configure_ref(self) -> None:
"""
Configure the reference data.
"""
ids_ref_ref = self.x_ref[None, :, :] >= self.x_ref[:, None, :]
self.ref_cdf_ref = np.sum(ids_ref_ref, axis=0) / self.n
def _configure_thresholds(self) -> None:
"""
Private method to simulate trajectories of the Cramer-von Mises statistic for the desired reference set
size and window sizes under the null distribution, where both the reference set and deployment stream
follow the same distribution. It then uses these simulated trajectories to estimate thresholds.
As the test statistics are rank based and independent of the underlying distribution, we may use any
continuous distribution -- we use Gaussian.
        The thresholds should stop changing after t = (2 * max_window_size - 1) and therefore we need only simulate
trajectories and estimate thresholds up to this point.
"""
if self.verbose:
print("Using %d bootstrap simulations to configure thresholds..." % self.n_bootstraps)
# Assuming independent features, calibrate to beta = 1 - (1-FPR)^(1/n_features)
beta = 1 - (1 - self.fpr) ** (1 / self.n_features)
# Compute test statistic at each t_max number of t's, for each of the n_bootstrap number of streams
# Only need to simulate streams for a single feature here.
t_max = 2 * self.max_ws - 1
stats = self._simulate_streams(t_max)
# At each t for each stream, find max stats. over window sizes
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
max_stats = np.nanmax(stats, -1)
# Now loop through each t and find threshold (at each t) that satisfies eqn. (2) in Ross et al.
thresholds = np.full((t_max, 1), np.nan)
for t in range(np.min(self.window_sizes)-1, t_max):
# Compute (1-beta) quantile of max_stats at a given t, over all streams
threshold = quantile(max_stats[:, t], 1 - beta)
# Remove streams for which a change point has already been detected
max_stats = max_stats[max_stats[:, t] <= threshold]
thresholds[t, 0] = threshold
self.thresholds = thresholds
def _simulate_streams(self, t_max: int) -> np.ndarray:
"""
Private method to simulate streams. _ids_to_stats is a decorated function that is vectorised
        over the parallel streams. It is not sufficient to simply write a normal vectorised numpy implementation,
        as this can lead to OOM errors (when trying to store (n+t_max) x (n+t_max) x n_bootstraps float matrices).
        However, we do store the boolean matrix of this size as it is faster to compute this way (and 64x smaller).
To further reduce memory requirements, _ids_to_stats can be called for batches of streams, so that
the ids array is of shape batch_size x (n+t_max) x (n+t_max).
"""
n_windows = len(self.window_sizes)
stats = np.zeros((self.n_bootstraps, t_max, n_windows))
n_batches = int(np.ceil(self.n_bootstraps / self.batch_size))
idxs = np.array_split(np.arange(self.n_bootstraps), n_batches)
batches = enumerate(tqdm(idxs, "Computing thresholds over %d batches" % n_batches)) if self.verbose \
else enumerate(idxs)
for b, idx in batches:
xs = np.random.randn(len(idx), self.n + t_max)
ids = xs[:, None, :] >= xs[:, :, None]
stats[idx, :, :] = _ids_to_stats(ids[:, :self.n, :], ids[:, self.n:, :], np.asarray(self.window_sizes))
# Remove stats prior to windows being full
for k, ws in enumerate(self.window_sizes):
stats[:, :ws-1, k] = np.nan
return stats
def _update_state(self, x_t: np.ndarray):
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
if self.t == 1:
# Initialise stream
self.xs = x_t
self.ids_ref_wins = (x_t >= self.x_ref)[:, None, :]
self.ids_wins_ref = (x_t <= self.x_ref)[None, :, :]
self.ids_wins_wins = np.full((1, 1, self.n_features), 1)
else:
# Update stream
self.xs = np.concatenate([self.xs, x_t])
self.ids_ref_wins = np.concatenate(
[self.ids_ref_wins[:, -(self.max_ws - 1):, :], (x_t >= self.x_ref)[:, None, :]], 1
)
self.ids_wins_ref = np.concatenate(
[self.ids_wins_ref[-(self.max_ws - 1):, :, :], (x_t <= self.x_ref)[None, :, :]], 0
)
self.ids_wins_wins = np.concatenate(
[self.ids_wins_wins[-(self.max_ws - 1):, -(self.max_ws - 1):, :],
(x_t >= self.xs[-self.max_ws:-1, :])[:, None, :]], 1
)
self.ids_wins_wins = np.concatenate(
[self.ids_wins_wins, (x_t <= self.xs[-self.max_ws:, :])[None, :, :]], 0
)
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`).
"""
super()._initialise_state()
self.ids_ref_wins = np.array([])
self.ids_wins_ref = np.array([])
self.ids_wins_wins = np.array([])
def score(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
"""
Compute the test-statistic (CVM) between the reference window(s) and test window.
If a given test-window is not yet full then a test-statistic of np.nan is returned for that window.
Parameters
----------
x_t
A single instance.
Returns
-------
Estimated CVM test statistics between reference window and test window(s).
"""
x_t = super()._preprocess_xt(x_t)
self._update_state(x_t)
stats = np.zeros((len(self.window_sizes), self.n_features), dtype=np.float32)
for k, ws in enumerate(self.window_sizes):
if self.t >= ws:
ref_cdf_win = np.sum(self.ids_ref_wins[:, -ws:], axis=0) / self.n
win_cdf_ref = np.sum(self.ids_wins_ref[-ws:], axis=0) / ws
win_cdf_win = np.sum(self.ids_wins_wins[-ws:, -ws:], axis=0) / ws
ref_cdf_diffs = self.ref_cdf_ref - win_cdf_ref
win_cdf_diffs = ref_cdf_win - win_cdf_win
sum_diffs_2 = np.sum(ref_cdf_diffs * ref_cdf_diffs, axis=0) \
+ np.sum(win_cdf_diffs * win_cdf_diffs, axis=0)
stats[k, :] = _normalise_stats(sum_diffs_2, self.n, ws)
else:
stats[k, :] = np.nan
return stats
def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
"""
        Private method to compare test stats to thresholds. The max stats over all windows are computed for each
        feature. Drift is flagged if `max_stats` for any feature exceeds the single shared threshold.
Parameters
----------
test_stats
Array of test statistics with shape (n_windows, n_features)
thresholds
Array of thresholds with shape (t_max, 1).
Returns
-------
An int equal to 1 if drift, 0 otherwise.
"""
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
max_stats = np.nanmax(test_stats, axis=0)
drift_pred = int((max_stats > thresholds).any())
return drift_pred
@nb.njit(parallel=False, cache=True)
def _normalise_stats(stats: np.ndarray, n: int, ws: int) -> np.ndarray:
"""
See Eqns 3 & 14 of https://www.projecteuclid.org/euclid.aoms/1177704477.
"""
mu = 1 / 6 + 1 / (6 * (n + ws))
var_num = (n + ws + 1) * (4 * n * ws * (n + ws) - 3 * (n * n + ws * ws) - 2 * n * ws)
var_denom = 45 * (n + ws) * (n + ws) * 4 * n * ws
prod = n * ws / ((n + ws) * (n + ws))
return (stats * prod - mu) / np.sqrt(var_num / var_denom)
@nb.njit(parallel=True, cache=True)
def _ids_to_stats(
ids_ref_all: np.ndarray,
ids_stream_all: np.ndarray,
window_sizes: np.ndarray
) -> np.ndarray:
n_bootstraps = ids_ref_all.shape[0]
n = ids_ref_all.shape[1]
t_max = ids_stream_all.shape[1]
n_all = ids_ref_all.shape[-1]
n_windows = window_sizes.shape[0]
stats = np.zeros((n_bootstraps, t_max, n_windows))
for b in nb.prange(n_bootstraps):
ref_cdf_all = np.sum(ids_ref_all[b], axis=0) / n
cumsums = np.zeros((t_max+1, n_all))
for i in range(n_all):
cumsums[1:, i] = np.cumsum(ids_stream_all[b, :, i])
for k in range(n_windows):
ws = window_sizes[k]
win_cdf_ref = (cumsums[ws:, :n] - cumsums[:-ws, :n]) / ws
cdf_diffs_on_ref = np.empty_like(win_cdf_ref)
for j in range(win_cdf_ref.shape[0]): # Need to loop through as can't broadcast in njit parallel
cdf_diffs_on_ref[j, :] = ref_cdf_all[:n] - win_cdf_ref[j, :]
stats[b, (ws-1):, k] = np.sum(cdf_diffs_on_ref * cdf_diffs_on_ref, axis=-1)
for t in range(ws-1, t_max):
win_cdf_win = (cumsums[t + 1, n + t - ws:n + t] -
cumsums[t + 1 - ws, n + t - ws:n + t]) / ws
cdf_diffs_on_win = ref_cdf_all[n + t - ws:n + t] - win_cdf_win
stats[b, t, k] += np.sum(cdf_diffs_on_win * cdf_diffs_on_win)
stats[b, :, k] = _normalise_stats(stats[b, :, k], n, ws)
return stats
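# --- Editor's note: usage sketch for exposition only; not part of this module. ---
# A minimal illustration of the online detector above: thresholds are configured from the reference data at
# instantiation, after which instances are scored one at a time. Shapes and values are placeholders.
if __name__ == '__main__':
    x_ref = np.random.randn(1000, 2)                 # reference data
    cd = CVMDriftOnline(x_ref, ert=200, window_sizes=[20, 40], n_bootstraps=2500)
    stream = np.random.randn(300, 2) + 0.75          # stream drawn from a shifted distribution
    for t, x_t in enumerate(stream):
        pred = cd.predict(x_t)
        if pred['data']['is_drift']:
            print('Drift detected at t=%d' % t)
            break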
| 14,378 | 44.792994 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/fet.py
|
import numpy as np
from typing import Callable, Dict, Tuple, Optional, Union
from alibi_detect.cd.base import BaseUnivariateDrift
from scipy.stats import fisher_exact
class FETDrift(BaseUnivariateDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
alternative: str = 'greater',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Fisher exact test (FET) data drift detector, which tests for a change in the mean of binary univariate data.
For multivariate data, a separate FET test is applied to each feature, and the obtained p-values are
aggregated via the Bonferroni or False Discovery Rate (FDR) corrections.
Parameters
----------
x_ref
Data used as reference distribution. Data must consist of either [True, False]'s, or [0, 1]'s.
p_val
p-value used for significance of the FET test. If the FDR correction method
is used, this corresponds to the acceptable q-value.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
alternative
            Defines the alternative hypothesis. Options are 'greater', 'less' or 'two-sided'. These correspond to
an increase, decrease, or any change in the mean of the Bernoulli data.
n_features
Number of features used in the FET test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
if alternative.lower() not in ['greater', 'less', 'two-sided']:
raise ValueError("`alternative` must be either 'greater', 'less' or 'two-sided'.")
self.alternative = alternative.lower()
# Check data is only [False, True] or [0, 1]
values = set(np.unique(x_ref))
if not set(values).issubset(['0', '1', True, False]):
raise ValueError("The `x_ref` data must consist of only (0,1)'s or (False,True)'s for the "
"FETDrift detector.")
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs Fisher exact test(s), computing the p-value per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with. Data must consist of either [True, False]'s, or [0, 1]'s.
x
Batch of instances. Data must consist of either [True, False]'s, or [0, 1]'s.
Returns
-------
Feature level p-values and odds ratios.
"""
x = x.reshape(x.shape[0], -1).astype(dtype=np.int64)
x_ref = x_ref.reshape(x_ref.shape[0], -1).astype(dtype=np.int64)
# Check data is only [False, True] or [0, 1]
values = set(np.unique(x))
if not set(values).issubset(['0', '1', True, False]):
raise ValueError("The `x` data must consist of only [0,1]'s or [False,True]'s for the FETDrift detector.")
# Perform FET for each feature
n_ref = x_ref.shape[0]
n = x.shape[0]
sum_ref = np.sum(x_ref, axis=0)
sum_test = np.sum(x, axis=0)
p_val = np.empty(self.n_features)
odds_ratio = np.empty_like(p_val)
for f in range(self.n_features):
table = np.array([[sum_test[f], sum_ref[f]], [n - sum_test[f], n_ref - sum_ref[f]]])
odds_ratio[f], p_val[f] = fisher_exact(table, alternative=self.alternative)
return p_val, odds_ratio
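# --- Illustrative sketch (editor addition, not part of the original module) ---
# Minimal FETDrift usage on binary data, assuming the standard `predict` interface
# inherited from BaseUnivariateDrift. The Bernoulli rates below are hypothetical.
if __name__ == "__main__":
    x_ref_demo = np.random.binomial(1, 0.3, size=(500, 3))   # reference data, mean ~0.3
    x_test_demo = np.random.binomial(1, 0.5, size=(200, 3))  # test data with an increased mean
    fet_demo = FETDrift(x_ref_demo, p_val=0.05, alternative='greater')
    print(fet_demo.predict(x_test_demo)['data']['is_drift'])  # 1 if drift is flagged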
| 5,594 | 45.239669 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/base_online.py
|
import logging
import warnings
from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from alibi_detect.base import BaseDetector, concept_drift_dict
from alibi_detect.cd.utils import get_input_shape
from alibi_detect.utils.state import StateMixin
from alibi_detect.utils._types import Literal
if TYPE_CHECKING:
import torch
import tensorflow as tf
logger = logging.getLogger(__name__)
class BaseMultiDriftOnline(BaseDetector, StateMixin):
t: int = 0
thresholds: np.ndarray
backend: Literal['pytorch', 'tensorflow']
online_state_keys: Tuple[str, ...]
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
n_bootstraps: int = 1000,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
Base class for multivariate online drift detectors.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ert.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if ert is None:
logger.warning('No expected run-time set for the drift threshold. Need to set it to detect data drift.')
self.ert = ert
self.fpr = 1 / ert
self.window_size = window_size
# x_ref preprocessing
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Other attributes
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_bootstraps = n_bootstraps # nb of samples used to estimate thresholds
self.verbose = verbose
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta['detector_type'] = 'drift'
self.meta['data_type'] = data_type
self.meta['online'] = True
@abstractmethod
def _configure_thresholds(self):
pass
@abstractmethod
def _configure_ref_subset(self):
pass
@abstractmethod
def _update_state(self, x_t: Union[np.ndarray, 'tf.Tensor', 'torch.Tensor']):
pass
def _preprocess_xt(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
"""
Private method to preprocess a single test instance ready for _update_state.
Parameters
----------
x_t
A single test instance to be preprocessed.
Returns
-------
The preprocessed test instance `x_t`.
"""
# preprocess if necessary
if self.preprocess_fn is not None:
x_t = x_t[None, :] if isinstance(x_t, np.ndarray) else [x_t]
x_t = self.preprocess_fn(x_t)[0]
return x_t[None, :]
def get_threshold(self, t: int) -> float:
"""
Return the threshold for timestep `t`.
Parameters
----------
t
The timestep to return a threshold for.
Returns
-------
The threshold at timestep `t`.
"""
return self.thresholds[t] if t < self.window_size else self.thresholds[-1]
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`).
If a subclassed detector has additional online state, an additional `_initialise_state` should be defined,
with a call to `super()._initialise_state()` included (see `LSDDDriftOnlineTorch._initialise_state()` for
an example).
"""
self.t = 0 # corresponds to a test set of ref data
self.test_stats = np.array([])
self.drift_preds = np.array([])
def reset(self) -> None:
"""
Deprecated reset method. This method will be repurposed or removed in the future. To reset the detector to
its initial state (`t=0`) use :meth:`reset_state`.
"""
self.reset_state()
warnings.warn('This method is deprecated and will be removed/repurposed in the future. To reset the detector '
'to its initial state use `reset_state`.', DeprecationWarning)
def reset_state(self) -> None:
"""
Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
"""
self._initialise_state()
def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True,
                ) -> Dict[str, Dict[str, Union[str, int, float]]]:
"""
Predict whether the most recent window of data has drifted from the reference data.
Parameters
----------
x_t
A single instance to be added to the test-window.
return_test_stat
Whether to return the test statistic and threshold.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
"""
# Compute test stat and check for drift
test_stat = self.score(x_t)
threshold = self.get_threshold(self.t)
drift_pred = int(test_stat > threshold)
self.test_stats = np.concatenate([self.test_stats, np.array([test_stat])])
self.drift_preds = np.concatenate([self.drift_preds, np.array([drift_pred])])
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
cd['data']['time'] = self.t
cd['data']['ert'] = self.ert
if return_test_stat:
cd['data']['test_stat'] = test_stat
cd['data']['threshold'] = threshold
return cd
class BaseUniDriftOnline(BaseDetector, StateMixin):
t: int = 0
thresholds: np.ndarray
online_state_keys: Tuple[str, ...]
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_sizes: List[int],
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
n_bootstraps: int = 1000,
n_features: Optional[int] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
Base class for univariate online drift detectors. If n_features > 1, a multivariate correction is
used to aggregate p-values during threshold configuration, thus allowing the requested expected run
time (ERT) to be targeted. The multivariate correction assumes independence between the features.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the univariate detectors, the ERT is defined
as the expected run-time after the smallest window is full i.e. the run-time from t=min(windows_sizes)-1.
window_sizes
The sizes of the sliding test-windows used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ert.
n_features
Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
if ert is None:
logger.warning('No expected run-time set for the drift threshold. Need to set it to detect data drift.')
self.ert = ert
self.fpr = 1 / ert
# Window sizes
self.window_sizes = window_sizes
self.max_ws = np.max(self.window_sizes)
self.min_ws = np.min(self.window_sizes)
# x_ref preprocessing
self.x_ref_preprocessed = x_ref_preprocessed
if preprocess_fn is not None and not isinstance(preprocess_fn, Callable): # type: ignore[arg-type]
raise ValueError("`preprocess_fn` is not a valid Callable.")
if not self.x_ref_preprocessed and preprocess_fn is not None:
self.x_ref = preprocess_fn(x_ref)
else:
self.x_ref = x_ref
# Check the (optionally preprocessed) x_ref data is a 2D ndarray
self.x_ref = self._check_x(self.x_ref, x_ref=True)
# Other attributes
self.preprocess_fn = preprocess_fn
self.n = len(x_ref)
self.n_bootstraps = n_bootstraps # nb of samples used to estimate thresholds
self.verbose = verbose
# compute number of features for the univariate tests
if isinstance(n_features, int):
self.n_features = n_features
elif not isinstance(preprocess_fn, Callable) or x_ref_preprocessed:
# infer features from preprocessed reference data
self.n_features = self.x_ref.reshape(self.x_ref.shape[0], -1).shape[-1]
else: # infer number of features after applying preprocessing step
x = self.preprocess_fn(x_ref[0:1])
self.n_features = x.reshape(x.shape[0], -1).shape[-1]
# store input shape for save and load functionality
self.input_shape = get_input_shape(input_shape, x_ref)
# set metadata
self.meta['detector_type'] = 'drift'
self.meta['data_type'] = data_type
self.meta['online'] = True
@abstractmethod
def _configure_thresholds(self):
pass
@abstractmethod
def _configure_ref(self):
pass
@abstractmethod
def _update_state(self, x_t: np.ndarray):
pass
def _check_x(self, x: Any, x_ref: bool = False) -> np.ndarray:
"""
Check the type and shape of the data `x`, and coerces it to the correct shape if possible.
Parameters
----------
x
The data to be checked.
x_ref
Whether `x` is a batch of reference data instances (if `True`), or a single test data instance (if `False`).
Returns
-------
The checked data, coerced to be a np.ndarray of the correct shape.
"""
# Check the type of x
if isinstance(x, np.ndarray):
pass
elif isinstance(x, (int, float)):
x = np.array([x])
else:
raise TypeError("Detectors expect data to be 2D np.ndarray's. If data is passed as another type, a "
"`preprocess_fn` should be given to convert this data to 2D np.ndarray's.")
# Check the shape of x
if x_ref:
x = x.reshape(x.shape[0], -1)
else:
x = x.reshape(1, -1)
if x.shape[1] != self.x_ref.shape[1]:
raise ValueError("Dimensions do not match. `x` has shape (%d,%d) and `x_ref` has shape (%d,%d)."
% (x.shape[0], x.shape[1], self.x_ref.shape[0], self.x_ref.shape[1]))
return x
def _preprocess_xt(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
"""
Private method to preprocess a single test instance ready for _update_state.
Parameters
----------
x_t
A single test instance to be preprocessed.
Returns
-------
The preprocessed test instance `x_t`.
"""
# preprocess if necessary
if self.preprocess_fn is not None:
x_t = x_t[None, :] if isinstance(x_t, np.ndarray) else [x_t]
x_t = self.preprocess_fn(x_t)[0]
# Now check the final data is a 2D ndarray
x_t = self._check_x(x_t)
return x_t
def get_threshold(self, t: int) -> np.ndarray:
"""
Return the threshold for timestep `t`.
Parameters
----------
t
The timestep to return a threshold for.
Returns
-------
The threshold at timestep `t`.
"""
return self.thresholds[t] if t < len(self.thresholds) else self.thresholds[-1]
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`).
If a subclassed detector has additional online state, an additional `_initialise_state` should be defined,
with a call to `super()._initialise_state()` included (see `CVMDriftOnlineTorch._initialise_state()` for
an example).
"""
self.t = 0
self.xs = np.array([])
self.test_stats = np.empty([0, len(self.window_sizes), self.n_features])
self.drift_preds = np.array([])
@abstractmethod
def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
pass
def reset(self) -> None:
"""
Deprecated reset method. This method will be repurposed or removed in the future. To reset the detector to
its initial state (`t=0`) use :meth:`reset_state`.
"""
self.reset_state()
warnings.warn('This method is deprecated and will be removed/repurposed in the future. To reset the detector '
'to its initial state use `reset_state`.', DeprecationWarning)
def reset_state(self) -> None:
"""
Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
"""
self._initialise_state()
def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True,
                ) -> Dict[str, Dict[str, Union[str, int, float]]]:
"""
Predict whether the most recent window(s) of data have drifted from the reference data.
Parameters
----------
x_t
A single instance to be added to the test-window(s).
return_test_stat
Whether to return the test statistic and threshold.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
"""
# Compute test stat and check for drift
test_stats = self.score(x_t)
        thresholds = self.get_threshold(self.t - 1)  # Note t-1 here, as we wish to use the unconditional thresholds
drift_pred = self._check_drift(test_stats, thresholds)
# Update results attributes
self.test_stats = np.concatenate([self.test_stats, test_stats[None, :, :]])
self.drift_preds = np.concatenate([self.drift_preds, np.array([drift_pred])])
# populate drift dict
cd = concept_drift_dict()
cd['meta'] = self.meta
cd['data']['is_drift'] = drift_pred
cd['data']['time'] = self.t
cd['data']['ert'] = self.ert
if return_test_stat:
cd['data']['test_stat'] = test_stats
cd['data']['threshold'] = thresholds
return cd
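# --- Illustrative sketch (editor addition, not part of the original module) ---
# The base classes above are abstract; the streaming pattern they define is used with a
# concrete subclass as below. CVMDriftOnline is assumed to be importable from
# alibi_detect.cd, and the data, ERT, window sizes and bootstrap count are hypothetical.
if __name__ == "__main__":
    from alibi_detect.cd import CVMDriftOnline
    x_ref_demo = np.random.normal(0, 1, (1000, 1))
    dd_demo = CVMDriftOnline(x_ref_demo, ert=400, window_sizes=[20, 40], n_bootstraps=5000)
    for x_t in np.random.normal(0.5, 1, (200, 1)):  # stream drawn from a shifted distribution
        pred = dd_demo.predict(x_t)  # instances are fed one at a time
        if pred['data']['is_drift']:
            print(f"Drift detected at t={pred['data']['time']}")
            dd_demo.reset_state()  # restart the stream after a detection
            break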
| 17,889 | 37.308351 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/cvm.py
|
import numpy as np
from typing import Callable, Dict, Tuple, Optional, Union
from alibi_detect.cd.base import BaseUnivariateDrift
try:
from scipy.stats import cramervonmises_2samp
except ImportError:
cramervonmises_2samp = None
class CVMDrift(BaseUnivariateDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Cramer-von Mises (CVM) data drift detector, which tests for any change in the distribution of continuous
univariate data. For multivariate data, a separate CVM test is applied to each feature, and the obtained
p-values are aggregated via the Bonferroni or False Discovery Rate (FDR) corrections.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the CVM test. If the FDR correction method
is used, this corresponds to the acceptable q-value.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
n_features
Number of features used in the CVM test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
if cramervonmises_2samp is None:
raise UserWarning("CVMDrift is only available if scipy version >= 1.7.0 installed.")
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Performs the two-sample Cramer-von Mises test(s), computing the p-value and test statistic per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with.
x
Batch of instances.
Returns
-------
Feature level p-values and CVM statistics.
"""
x = x.reshape(x.shape[0], -1)
x_ref = x_ref.reshape(x_ref.shape[0], -1)
p_val = np.zeros(self.n_features, dtype=np.float32)
dist = np.zeros_like(p_val)
for f in range(self.n_features):
result = cramervonmises_2samp(x_ref[:, f], x[:, f], method='auto')
p_val[f], dist[f] = result.pvalue, result.statistic
return p_val, dist
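# --- Illustrative sketch (editor addition, not part of the original module) ---
# Offline CVMDrift usage on continuous data, assuming the standard `predict` interface
# inherited from BaseUnivariateDrift. The distributions below are hypothetical.
if __name__ == "__main__":
    x_ref_demo = np.random.normal(0, 1, (500, 5))
    x_test_demo = np.random.normal(0.5, 1, (500, 5))  # mean-shifted test data
    cvm_demo = CVMDrift(x_ref_demo, p_val=0.05)
    preds_demo = cvm_demo.predict(x_test_demo)
    print(preds_demo['data']['is_drift'], preds_demo['data']['p_val'])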
| 4,372 | 42.73 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/utils.py
|
import logging
import random
from typing import Callable, Dict, Optional, Tuple, Union
import numpy as np
from alibi_detect.utils.sampling import reservoir_sampling
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
def update_reference(X_ref: np.ndarray,
X: np.ndarray,
n: int,
update_method: Dict[str, int] = None,
) -> np.ndarray:
"""
Update reference dataset for drift detectors.
Parameters
----------
X_ref
Current reference dataset.
X
New data.
n
Count of the total number of instances that have been used so far.
update_method
Dict with as key `reservoir_sampling` or `last` and as value n. `reservoir_sampling` will apply
reservoir sampling with reservoir of size n while `last` will return (at most) the last n instances.
Returns
-------
Updated reference dataset.
"""
if isinstance(update_method, dict):
update_type = list(update_method.keys())[0]
size = update_method[update_type]
if update_type == 'reservoir_sampling':
return reservoir_sampling(X_ref, X, size, n)
elif update_type == 'last':
X_update = np.concatenate([X_ref, X], axis=0)
return X_update[-size:]
else:
raise KeyError('Only `reservoir_sampling` and `last` are valid update options for X_ref.')
else:
return X_ref
def encompass_batching(
model: Callable,
backend: str,
batch_size: int,
device: Optional[str] = None,
preprocess_batch_fn: Optional[Callable] = None,
tokenizer: Optional[Callable] = None,
max_len: Optional[int] = None,
) -> Callable:
"""
Takes a function that must be batch evaluated (on tokenized input) and returns a function
that handles batching (and tokenization).
"""
backend = backend.lower()
kwargs = {'batch_size': batch_size, 'tokenizer': tokenizer, 'max_len': max_len,
'preprocess_batch_fn': preprocess_batch_fn}
if backend == Framework.TENSORFLOW:
from alibi_detect.cd.tensorflow.preprocess import preprocess_drift
elif backend == Framework.PYTORCH:
from alibi_detect.cd.pytorch.preprocess import preprocess_drift # type: ignore[assignment]
kwargs['device'] = device
else:
raise NotImplementedError(f'{backend} not implemented. Use tensorflow or pytorch instead.')
def model_fn(x: Union[np.ndarray, list]) -> np.ndarray:
return preprocess_drift(x, model, **kwargs) # type: ignore[arg-type]
return model_fn
def encompass_shuffling_and_batch_filling(
model_fn: Callable,
batch_size: int
) -> Callable:
"""
    Takes a function that already handles batching and returns a new function that additionally
    performs shuffling and ensures instances are evaluated as part of full batches.
"""
def new_model_fn(x: Union[np.ndarray, list]) -> np.ndarray:
is_np = isinstance(x, np.ndarray)
# shuffle
n_x = len(x)
perm = np.random.permutation(n_x)
x = x[perm] if is_np else [x[i] for i in perm]
# add extras if necessary
final_batch_size = n_x % batch_size
if final_batch_size != 0:
doubles_inds = random.choices([i for i in range(n_x)], k=batch_size - final_batch_size)
if is_np:
x = np.concatenate([x, x[doubles_inds]], axis=0) # type: ignore[call-overload]
else:
x += [x[i] for i in doubles_inds]
# remove any extras and unshuffle
preds = np.asarray(model_fn(x))[:n_x]
preds = preds[np.argsort(perm)]
return preds
return new_model_fn
def get_input_shape(shape: Optional[Tuple], x_ref: Union[np.ndarray, list]) -> Optional[Tuple]:
""" Optionally infer shape from reference data. """
if isinstance(shape, tuple):
return shape
elif hasattr(x_ref, 'shape'):
return x_ref.shape[1:]
else:
logger.warning('Input shape could not be inferred. '
'If alibi_detect.models.tensorflow.embedding.TransformerEmbedding '
'is used as preprocessing step, a saved detector cannot be reinitialized.')
return None
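# --- Illustrative sketch (editor addition, not part of the original module) ---
# `update_reference` with the two supported update methods. The arrays and sizes below
# are hypothetical; `n` is the running count of instances seen so far.
if __name__ == "__main__":
    X_ref_demo = np.arange(10).reshape(10, 1)
    X_new_demo = np.arange(10, 16).reshape(6, 1)
    # keep only the most recent 8 instances
    print(update_reference(X_ref_demo, X_new_demo, n=10, update_method={'last': 8}).ravel())
    # maintain a fixed-size reservoir sample of 12 instances
    print(update_reference(X_ref_demo, X_new_demo, n=10, update_method={'reservoir_sampling': 12}).shape)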
| 4,341 | 34.300813 | 108 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/classifier.py
|
import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, \
BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
from sklearn.base import ClassifierMixin
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
if has_pytorch:
from torch.utils.data import DataLoader
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
if has_tensorflow:
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
class ClassifierDrift(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
model: Union[ClassifierMixin, Callable],
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
reg_loss_fn: Callable = (lambda model: 0),
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: Optional[Callable] = None,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Optional[Callable] = None,
dataloader: Optional[Callable] = None,
input_shape: Optional[tuple] = None,
use_calibration: bool = False,
calibration_kwargs: Optional[dict] = None,
use_oob: bool = False,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
PyTorch, TensorFlow or Sklearn classification model used for drift detection.
backend
Backend used for the training loop implementation. Supported: 'tensorflow' | 'pytorch' | 'sklearn'.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last `n` instances seen by the detector
or via reservoir sampling with size `n`. For the former, the parameter equals `{'last': n}` while
for reservoir sampling `{'reservoir_sampling': n}` is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' (probabilities - for 'tensorflow', 'pytorch', 'sklearn' models),
'logits' (for 'pytorch', 'tensorflow' models), 'scores' (for 'sklearn' models if `decision_function`
is supported).
binarize_preds
Whether to test for discrepancy on soft (e.g. probs/logits/scores) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
reg_loss_fn
The regularisation term `reg_loss_fn(model)` is added to the loss function being optimized.
Only relevant for 'tensorflow` and 'pytorch' backends.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
on all the out-of-fold instances. This allows to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier. Only relevant for 'tensorflow' and 'pytorch' backends.
learning_rate
Learning rate used by optimizer. Only relevant for 'tensorflow' and 'pytorch' backends.
batch_size
Batch size used during training of the classifier. Only relevant for 'tensorflow' and 'pytorch' backends.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model. Only relevant for 'tensorflow' and 'pytorch' backends.
epochs
Number of training epochs for the classifier for each (optional) fold. Only relevant for 'tensorflow'
and 'pytorch' backends.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar. Only relevant for
'tensorflow' and 'pytorch' backends.
train_kwargs
Optional additional kwargs when fitting the classifier. Only relevant for 'tensorflow' and
'pytorch' backends.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
dataset
Dataset object used during training. Only relevant for 'tensorflow' and 'pytorch' backends.
dataloader
Dataloader object used during training. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
use_calibration
Whether to use calibration. Calibration can be used on top of any model.
Only relevant for 'sklearn' backend.
calibration_kwargs
Optional additional kwargs for calibration. Only relevant for 'sklearn' backend.
See https://scikit-learn.org/stable/modules/generated/sklearn.calibration.CalibratedClassifierCV.html
for more details.
use_oob
Whether to use out-of-bag(OOB) predictions. Supported only for `RandomForestClassifier`.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH],
Framework.SKLEARN: [Framework.SKLEARN]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref'], kwargs['model']]
pop_kwargs = ['self', 'x_ref', 'model', 'backend', '__class__']
if kwargs['optimizer'] is None:
pop_kwargs += ['optimizer']
[kwargs.pop(k, None) for k in pop_kwargs]
if backend == Framework.TENSORFLOW:
pop_kwargs = ['device', 'dataloader', 'use_calibration', 'calibration_kwargs', 'use_oob']
[kwargs.pop(k, None) for k in pop_kwargs]
if dataset is None:
kwargs.update({'dataset': TFDataset})
self._detector = ClassifierDriftTF(*args, **kwargs)
elif backend == Framework.PYTORCH:
pop_kwargs = ['use_calibration', 'calibration_kwargs', 'use_oob']
[kwargs.pop(k, None) for k in pop_kwargs]
if dataset is None:
kwargs.update({'dataset': TorchDataset})
if dataloader is None:
kwargs.update({'dataloader': DataLoader})
self._detector = ClassifierDriftTorch(*args, **kwargs)
else:
pop_kwargs = ['reg_loss_fn', 'optimizer', 'learning_rate', 'batch_size', 'preprocess_batch_fn',
'epochs', 'train_kwargs', 'device', 'dataset', 'dataloader', 'verbose']
[kwargs.pop(k, None) for k in pop_kwargs]
self._detector = ClassifierDriftSklearn(*args, **kwargs)
self.meta = self._detector.meta
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
return_distance: bool = True, return_probs: bool = True, return_model: bool = True) \
-> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data).
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries
- ``'meta'`` - has the model's metadata.
- ``'data'`` - contains the drift prediction and optionally the p-value, performance of the classifier \
relative to its expectation under the no-change null, the out-of-fold classifier model \
prediction probabilities on the reference and test data, and the trained model. \
"""
return self._detector.predict(x, return_p_val, return_distance, return_probs, return_model)
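# --- Illustrative sketch (editor addition, not part of the original module) ---
# ClassifierDrift with the sklearn backend, avoiding the need for a deep-learning
# framework. LogisticRegression is assumed to be available via scikit-learn (already a
# dependency of this module); the data below is hypothetical.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression
    x_ref_demo = np.random.normal(0, 1, (500, 10))
    x_test_demo = np.random.normal(0.3, 1, (500, 10))  # mean-shifted test data
    cd_demo = ClassifierDrift(x_ref_demo, LogisticRegression(), backend='sklearn', n_folds=3)
    print(cd_demo.predict(x_test_demo, return_model=False)['data']['is_drift'])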
| 11,053 | 50.175926 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/lsdd_online.py
|
import os
import numpy as np
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
class LSDDDriftOnline(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
backend: str = 'tensorflow',
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        However, we have made modifications such that a desired ERT can be accurately targeted.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
backend
Backend used for the LSDD implementation and configuration.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ert.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 2*window_size.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref'], kwargs['ert'], kwargs['window_size']]
pop_kwargs = ['self', 'x_ref', 'ert', 'window_size', 'backend', '__class__']
[kwargs.pop(k, None) for k in pop_kwargs]
if backend == Framework.TENSORFLOW:
kwargs.pop('device', None)
self._detector = LSDDDriftOnlineTF(*args, **kwargs)
else:
self._detector = LSDDDriftOnlineTorch(*args, **kwargs) # type: ignore
self.meta = self._detector.meta
@property
def t(self):
return self._detector.t
@property
def test_stats(self):
return self._detector.test_stats
@property
def thresholds(self):
return [self._detector.thresholds[min(s, self._detector.window_size-1)] for s in range(self.t)]
def reset_state(self):
"""
Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
"""
self._detector.reset_state()
def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True) \
            -> Dict[str, Dict[str, Union[str, int, float]]]:
"""
Predict whether the most recent window of data has drifted from the reference data.
Parameters
----------
x_t
A single instance to be added to the test-window.
return_test_stat
Whether to return the test statistic (LSDD) and threshold.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
"""
return self._detector.predict(x_t, return_test_stat)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (LSDD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
LSDD estimate between reference window and test window.
"""
return self._detector.score(x_t)
def get_config(self) -> dict: # Needed due to need to unnormalize x_ref
"""
Get the detector's configuration dictionary.
Returns
-------
The detector's configuration dictionary.
"""
cfg = super().get_config()
# Unnormalize x_ref
cfg['x_ref'] = self._detector._unnormalize(cfg['x_ref'])
return cfg
def save_state(self, filepath: Union[str, os.PathLike]):
"""
Save a detector's state to disk in order to generate a checkpoint.
Parameters
----------
filepath
The directory to save state to.
"""
self._detector.save_state(filepath)
def load_state(self, filepath: Union[str, os.PathLike]):
"""
Load the detector's state from disk, in order to restart from a checkpoint previously generated with
:meth:`~save_state`.
Parameters
----------
filepath
The directory to load state from.
"""
self._detector.load_state(filepath)
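# --- Illustrative sketch (editor addition, not part of the original module) ---
# Online LSDD detection on a stream, assuming the TensorFlow backend is installed.
# The data, ERT, window size and number of bootstraps below are hypothetical.
if __name__ == "__main__":
    x_ref_demo = np.random.normal(0, 1, (1000, 3)).astype(np.float32)
    dd_demo = LSDDDriftOnline(x_ref_demo, ert=200, window_size=20,
                              backend='tensorflow', n_bootstraps=2000)
    for x_t in np.random.normal(1.0, 1, (100, 3)).astype(np.float32):  # drifted stream
        if dd_demo.predict(x_t)['data']['is_drift']:
            print(f"Drift detected at t={dd_demo.t}")
            break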
| 7,617 | 38.677083 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/spot_the_diff.py
|
import numpy as np
from typing import Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
from torch.utils.data import DataLoader
if has_tensorflow:
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
from alibi_detect.utils.tensorflow.data import TFDataset
class SpotTheDiffDrift(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = None,
n_diffs: int = 1,
initial_diffs: Optional[np.ndarray] = None,
l1_reg: float = 0.01,
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: Optional[Callable] = None,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Optional[Callable] = None,
dataloader: Optional[Callable] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occurred, the test locations
learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
The test locations are regularised to be close to the average reference instance such that the **difference**
is then interpretable as the transformation required for each feature to make the average instance more/less
like a test instance than a reference instance.
The classifier is trained on a fraction of the combined reference and test data and drift is detected on
the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
backend
Backend used for the training loop implementation.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used to define similarity between instances, defaults to Gaussian RBF
n_diffs
The number of test locations to use, each corresponding to an interpretable difference.
initial_diffs
Array used to initialise the diffs that will be learned. Defaults to Gaussian
for each feature with equal variance to that of reference data.
l1_reg
Strength of l1 regularisation to apply to the differences.
binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
on all the out-of-fold instances. This allows to leverage all the reference and test data
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when fitting the classifier.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref']]
pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
if kwargs['optimizer'] is None:
pop_kwargs += ['optimizer']
[kwargs.pop(k, None) for k in pop_kwargs]
if backend == Framework.TENSORFLOW:
pop_kwargs = ['device', 'dataloader']
[kwargs.pop(k, None) for k in pop_kwargs]
if dataset is None:
kwargs.update({'dataset': TFDataset})
self._detector = SpotTheDiffDriftTF(*args, **kwargs)
else:
if dataset is None:
kwargs.update({'dataset': TorchDataset})
if dataloader is None:
kwargs.update({'dataloader': DataLoader})
self._detector = SpotTheDiffDriftTorch(*args, **kwargs) # type: ignore
self.meta = self._detector.meta
def predict(
self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
return_probs: bool = True, return_model: bool = True
) -> Dict[str, Dict[str, Union[int, str, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data).
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the detector's metadata.
- ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
and optionally the p-value, performance of the classifier relative to its expectation under the \
no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
            data, as well as the associated reference and test instances of the out-of-fold predictions, \
and the trained model.
"""
return self._detector.predict(x, return_p_val, return_distance, return_probs, return_model)
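# --- Illustrative sketch (editor addition, not part of the original module) ---
# SpotTheDiffDrift usage, assuming the TensorFlow backend is installed. The learned
# `diffs` returned in the prediction indicate which features drive the detected drift.
# The data and training settings below are hypothetical.
if __name__ == "__main__":
    x_ref_demo = np.random.normal(0, 1, (500, 4)).astype(np.float32)
    x_test_demo = x_ref_demo + np.array([1., 0., 0., 0.], dtype=np.float32)  # drift in feature 0 only
    std_demo = SpotTheDiffDrift(x_ref_demo, backend='tensorflow', n_diffs=1, epochs=5)
    preds_demo = std_demo.predict(x_test_demo)
    print(preds_demo['data']['is_drift'], preds_demo['data']['diffs'])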
| 9,141 | 47.62766 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tabular.py
|
import numpy as np
from scipy.stats import chi2_contingency, ks_2samp
from typing import Callable, Dict, List, Optional, Tuple, Union
from alibi_detect.cd.base import BaseUnivariateDrift
from alibi_detect.utils.warnings import deprecated_alias
import warnings
class TabularDrift(BaseUnivariateDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
categories_per_feature: Dict[int, Optional[int]] = None,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
alternative: str = 'two-sided',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Mixed-type tabular data drift detector with Bonferroni or False Discovery Rate (FDR)
correction for multivariate data. Kolmogorov-Smirnov (K-S) univariate tests are applied to
continuous numerical data and Chi-Squared (Chi2) univariate tests to categorical data.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the K-S and Chi2 test for each feature.
If the FDR correction method is used, this corresponds to the acceptable q-value.
categories_per_feature
Dictionary with as keys the column indices of the categorical features and optionally as values
the number of possible categorical values for that feature or a list with the possible values.
If you know which features are categorical and simply want to infer the possible values of the
categorical feature from the reference data you can pass a Dict[int, NoneType] such as
{0: None, 3: None} if features 0 and 3 are categorical. If you also know how many categories are
present for a given feature you could pass this in the `categories_per_feature` dict in the
Dict[int, int] format, e.g. *{0: 3, 3: 2}*. If you pass N categories this will assume the possible
values for the feature are [0, ..., N-1]. You can also explicitly pass the possible categories in the
Dict[int, List[int]] format, e.g. {0: [0, 1, 2], 3: [0, 55]}. Note that the categories can be
arbitrary int values.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
Typically a dimensionality reduction technique.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
alternative
Defines the alternative hypothesis for the K-S tests. Options are 'two-sided', 'less' or 'greater'.
n_features
Number of features used in the combined K-S/Chi-Squared tests. No need to pass it if
no preprocessing takes place. In case of a preprocessing step, this can also be inferred
automatically but could be more expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
self.alternative = alternative
# Parse categories_per_feature dict
if isinstance(categories_per_feature, dict):
vals = list(categories_per_feature.values())
int_types = (int, np.int16, np.int32, np.int64)
if all(v is None for v in vals): # categories_per_feature = Dict[int, NoneType]
x_flat = self.x_ref.reshape(self.x_ref.shape[0], -1)
categories_per_feature = {f: list(np.unique(x_flat[:, f])) # type: ignore
for f in categories_per_feature.keys()}
elif all(isinstance(v, int_types) for v in vals):
# categories_per_feature = Dict[int, int]
categories_per_feature = {f: list(np.arange(v)) # type: ignore
for f, v in categories_per_feature.items()}
            elif not all(isinstance(val, list) for val in vals) or \
                    not all(isinstance(v, int_types) for val in vals for v in val):  # type: ignore
raise ValueError('categories_per_feature needs to be None or one of '
'Dict[int, NoneType], Dict[int, int], Dict[int, List[int]]')
self.x_ref_categories = categories_per_feature
self.cat_vars = list(self.x_ref_categories.keys())
# No categories_per_feature dict so assume no categorical features present
else:
self.x_ref_categories, self.cat_vars = {}, []
warnings.warn('No `categories_per_feature` dict provided so all features are assumed to be numerical. '
'`KSDrift` will be applied to all features.')
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute K-S or Chi-Squared test statistics and p-values per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with.
x
Batch of instances.
Returns
-------
Feature level p-values and K-S or Chi-Squared statistics.
"""
x_ref = x_ref.reshape(x_ref.shape[0], -1)
x = x.reshape(x.shape[0], -1)
# apply counts on union of categories per variable in both the reference and test data
if self.cat_vars:
x_categories = {f: list(np.unique(x[:, f])) for f in self.cat_vars}
all_categories = {f: list(set().union(self.x_ref_categories[f], x_categories[f])) # type: ignore
for f in self.cat_vars}
x_ref_count = self._get_counts(x_ref, all_categories)
x_count = self._get_counts(x, all_categories)
p_val = np.zeros(self.n_features, dtype=np.float32)
dist = np.zeros_like(p_val)
for f in range(self.n_features):
if f in self.cat_vars:
contingency_table = np.vstack((x_ref_count[f], x_count[f]))
dist[f], p_val[f], _, _ = chi2_contingency(contingency_table)
else:
dist[f], p_val[f] = ks_2samp(x_ref[:, f], x[:, f], alternative=self.alternative, mode='asymp')
return p_val, dist
def _get_counts(self, x: np.ndarray, categories: Dict[int, List[int]]) -> Dict[int, List[int]]:
"""
Utility method for getting the counts of categories for each categorical variable.
"""
return {f: [(x[:, f] == v).sum() for v in vals] for f, vals in categories.items()}
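# --- Illustrative sketch (editor addition, not part of the original module) ---
# TabularDrift on mixed-type data, with feature 0 declared categorical and its categories
# inferred from the reference data ({0: None}). The data below is hypothetical; a Chi-Squared
# test is applied to feature 0 and K-S tests to the remaining numerical features.
if __name__ == "__main__":
    x_ref_demo = np.concatenate([np.random.randint(0, 3, (500, 1)),      # categorical feature
                                 np.random.normal(0, 1, (500, 2))], axis=1)  # numerical features
    x_test_demo = np.concatenate([np.random.randint(0, 3, (500, 1)),
                                  np.random.normal(0.5, 1, (500, 2))], axis=1)
    td_demo = TabularDrift(x_ref_demo, p_val=0.05, categories_per_feature={0: None})
    print(td_demo.predict(x_test_demo)['data']['is_drift'])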
| 8,308 | 51.923567 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/mmd_online.py
|
import os
import numpy as np
from typing import Any, Callable, Dict, Optional, Union
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
class MMDDriftOnline(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
backend: str = 'tensorflow',
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
kernel: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
        Online Maximum Mean Discrepancy (MMD) data drift detector using preconfigured thresholds.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
backend
Backend used for the MMD implementation and configuration.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref'], kwargs['ert'], kwargs['window_size']]
pop_kwargs = ['self', 'x_ref', 'ert', 'window_size', 'backend', '__class__']
[kwargs.pop(k, None) for k in pop_kwargs]
if kernel is None:
if backend == Framework.TENSORFLOW:
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
else:
from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore
kwargs.update({'kernel': GaussianRBF})
if backend == Framework.TENSORFLOW:
kwargs.pop('device', None)
self._detector = MMDDriftOnlineTF(*args, **kwargs)
else:
self._detector = MMDDriftOnlineTorch(*args, **kwargs) # type: ignore
self.meta = self._detector.meta
@property
def t(self):
return self._detector.t
@property
def test_stats(self):
return self._detector.test_stats
@property
def thresholds(self):
return [self._detector.thresholds[min(s, self._detector.window_size-1)] for s in range(self.t)]
def reset_state(self):
"""
Resets the detector to its initial state (`t=0`). This does not include reconfiguring thresholds.
"""
self._detector.reset_state()
def predict(self, x_t: Union[np.ndarray, Any], return_test_stat: bool = True) \
            -> Dict[str, Dict[str, Union[int, float]]]:
"""
Predict whether the most recent window of data has drifted from the reference data.
Parameters
----------
x_t
A single instance to be added to the test-window.
return_test_stat
Whether to return the test statistic (squared MMD) and threshold.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the test-statistic and threshold.
"""
return self._detector.predict(x_t, return_test_stat)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (squared MMD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
Squared MMD estimate between reference window and test window.
"""
return self._detector.score(x_t)
def save_state(self, filepath: Union[str, os.PathLike]):
"""
Save a detector's state to disk in order to generate a checkpoint.
Parameters
----------
filepath
The directory to save state to.
"""
self._detector.save_state(filepath)
def load_state(self, filepath: Union[str, os.PathLike]):
"""
Load the detector's state from disk, in order to restart from a checkpoint previously generated with
`save_state`.
Parameters
----------
filepath
The directory to load state from.
"""
self._detector.load_state(filepath)
def get_config(self) -> dict: # Needed due to self.x_ref being a torch.Tensor when backend='pytorch'
"""
Get the detector's configuration dictionary.
Returns
-------
The detector's configuration dictionary.
"""
cfg = super().get_config()
if cfg.get('backend') == 'pytorch':
cfg['x_ref'] = cfg['x_ref'].detach().cpu().numpy()
return cfg
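# A minimal usage sketch (guarded so it does not run on import). The Gaussian data is
# hypothetical and a TensorFlow (or PyTorch) backend is assumed to be installed.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.normal(0, 1, size=(500, 3)).astype(np.float32)
    cd = MMDDriftOnline(x_ref, ert=100, window_size=20, backend='tensorflow', n_bootstraps=2000)
    # Stream (drifted) instances one at a time; drift can only be flagged once the test window is full.
    for x_t in rng.normal(1, 1, size=(200, 3)).astype(np.float32):
        pred = cd.predict(x_t, return_test_stat=True)
        if pred['data']['is_drift']:
            print(f"Drift detected at t={cd.t}")
            break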
| 7,411 | 37.604167 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/model_uncertainty.py
|
import logging
import numpy as np
from typing import Callable, Dict, Optional, Union
from functools import partial
from alibi_detect.cd.ks import KSDrift
from alibi_detect.cd.chisquare import ChiSquareDrift
from alibi_detect.cd.preprocess import classifier_uncertainty, regressor_uncertainty
from alibi_detect.cd.utils import encompass_batching, encompass_shuffling_and_batch_filling
from alibi_detect.utils.frameworks import BackendValidator, Framework
from alibi_detect.base import DriftConfigMixin
logger = logging.getLogger(__name__)
class ClassifierUncertaintyDrift(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
model: Callable,
p_val: float = .05,
x_ref_preprocessed: bool = False,
backend: Optional[str] = None,
update_x_ref: Optional[Dict[str, int]] = None,
preds_type: str = 'probs',
uncertainty_type: str = 'entropy',
margin_width: float = 0.1,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
device: Optional[str] = None,
tokenizer: Optional[Callable] = None,
max_len: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
Test for a change in the number of instances falling into regions on which the model is uncertain.
Performs either a K-S test on prediction entropies or Chi-squared test on 0-1 indicators of predictions
falling into a margin of uncertainty (e.g. probs falling into [0.45, 0.55] in binary case).
Parameters
----------
x_ref
Data used as reference distribution. Should be disjoint from the data the model was trained on
for accurate p-values.
model
Classification model outputting class probabilities (or logits)
backend
Backend to use if model requires batch prediction. Options are 'tensorflow' or 'pytorch'.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preds_type
Type of prediction output by the model. Options are 'probs' (in [0,1]) or 'logits' (in [-inf,inf]).
uncertainty_type
Method for determining the model's uncertainty for a given instance. Options are 'entropy' or 'margin'.
margin_width
Width of the margin if uncertainty_type = 'margin'. The model is considered uncertain on an instance
if the highest two class probabilities it assigns to the instance differ by less than margin_width.
batch_size
Batch size used to evaluate model. Only relevant when backend has been specified for batch prediction.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
tokenizer
Optional tokenizer for NLP models.
max_len
Optional max token length for NLP models.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
# Set config
self._set_config(locals())
if backend:
backend = backend.lower()
BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH],
None: []},
construct_name=self.__class__.__name__).verify_backend(backend)
if backend is None:
if device not in [None, 'cpu']:
raise NotImplementedError('Non-pytorch/tensorflow models must run on cpu')
model_fn = model
else:
model_fn = encompass_batching(
model=model,
backend=backend,
batch_size=batch_size,
device=device,
preprocess_batch_fn=preprocess_batch_fn,
tokenizer=tokenizer,
max_len=max_len
)
preprocess_fn = partial(
classifier_uncertainty,
model_fn=model_fn,
preds_type=preds_type,
uncertainty_type=uncertainty_type,
margin_width=margin_width,
)
self._detector: Union[KSDrift, ChiSquareDrift]
if uncertainty_type == 'entropy':
self._detector = KSDrift(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=True,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
input_shape=input_shape,
data_type=data_type
)
elif uncertainty_type == 'margin':
self._detector = ChiSquareDrift(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=True,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
input_shape=input_shape,
data_type=data_type
)
else:
raise NotImplementedError("Only uncertainty types 'entropy' or 'margin' supported.")
self.meta = self._detector.meta
self.meta['name'] = 'ClassifierUncertaintyDrift'
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True) -> Dict[str, Dict[str, Union[np.ndarray, int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return the corresponding test statistic (K-S for 'entropy', Chi2 for 'margin').
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and test statistic.
"""
return self._detector.predict(x, return_p_val=return_p_val, return_distance=return_distance)
class RegressorUncertaintyDrift(DriftConfigMixin):
def __init__(
self,
x_ref: Union[np.ndarray, list],
model: Callable,
p_val: float = .05,
x_ref_preprocessed: bool = False,
backend: Optional[str] = None,
update_x_ref: Optional[Dict[str, int]] = None,
uncertainty_type: str = 'mc_dropout',
n_evals: int = 25,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
device: Optional[str] = None,
tokenizer: Optional[Callable] = None,
max_len: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
Test for a change in the number of instances falling into regions on which the model is uncertain.
        Performs a K-S test on uncertainties estimated from a predictive ensemble given either
explicitly or implicitly as a model with dropout layers.
Parameters
----------
x_ref
Data used as reference distribution. Should be disjoint from the data the model was trained on
for accurate p-values.
model
            Regression model outputting scalar predictions, or a vector of predictions per instance for an ensemble.
backend
Backend to use if model requires batch prediction. Options are 'tensorflow' or 'pytorch'.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
uncertainty_type
Method for determining the model's uncertainty for a given instance. Options are 'mc_dropout' or 'ensemble'.
The former should output a scalar per instance. The latter should output a vector of predictions
per instance.
        n_evals
The number of times to evaluate the model under different dropout configurations. Only relevant when using
the 'mc_dropout' uncertainty type.
batch_size
Batch size used to evaluate model. Only relevant when backend has been specified for batch prediction.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
tokenizer
Optional tokenizer for NLP models.
max_len
Optional max token length for NLP models.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
# Set config
self._set_config(locals())
if backend:
backend = backend.lower()
BackendValidator(backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH],
None: []},
construct_name=self.__class__.__name__).verify_backend(backend)
if backend is None:
model_fn = model
else:
if uncertainty_type == 'mc_dropout':
if backend == Framework.PYTORCH:
from alibi_detect.cd.pytorch.utils import activate_train_mode_for_dropout_layers
model = activate_train_mode_for_dropout_layers(model)
elif backend == Framework.TENSORFLOW:
logger.warning(
"MC dropout being applied to tensorflow model. May not be suitable if model contains"
"non-dropout layers with different train and inference time behaviour"
)
from alibi_detect.cd.tensorflow.utils import activate_train_mode_for_all_layers
model = activate_train_mode_for_all_layers(model)
model_fn = encompass_batching(
model=model,
backend=backend,
batch_size=batch_size,
device=device,
preprocess_batch_fn=preprocess_batch_fn,
tokenizer=tokenizer,
max_len=max_len
)
if uncertainty_type == 'mc_dropout' and backend == Framework.TENSORFLOW:
# To average over possible batchnorm effects as all layers evaluated in training mode.
model_fn = encompass_shuffling_and_batch_filling(model_fn, batch_size=batch_size)
preprocess_fn = partial(
regressor_uncertainty,
model_fn=model_fn,
uncertainty_type=uncertainty_type,
n_evals=n_evals
)
self._detector = KSDrift(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=True,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
input_shape=input_shape,
data_type=data_type
)
self.meta = self._detector.meta
self.meta['name'] = 'RegressorUncertaintyDrift'
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True,
                return_distance: bool = True) -> Dict[str, Dict[str, Union[np.ndarray, int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return the K-S test statistic
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and test statistic.
"""
return self._detector.predict(x, return_p_val=return_p_val, return_distance=return_distance)
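# A minimal usage sketch (guarded so it does not run on import). The linear softmax
# "model" and the Gaussian data are hypothetical; since the model is a plain numpy
# callable returning probabilities, no deep-learning backend is needed (backend=None).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    w = rng.normal(size=(5, 3))
    def model(x: np.ndarray) -> np.ndarray:
        logits = x @ w
        e = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
    x_ref = rng.normal(0, 1, size=(500, 5))
    cd = ClassifierUncertaintyDrift(x_ref, model=model, backend=None,
                                    preds_type='probs', uncertainty_type='entropy')
    preds = cd.predict(rng.normal(1, 1, size=(200, 5)))
    print(preds['data']['is_drift'], preds['data']['p_val'])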
| 14,152 | 43.646688 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/context_aware.py
|
import logging
import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
logger = logging.getLogger(__name__)
class ContextMMDDrift(DriftConfigMixin):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = None,
c_kernel: Callable = None,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False
) -> None:
"""
A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
        Only detects differences between samples that cannot be attributed to differences between associated
sets of contexts. p-values are computed using a conditional permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
backend
Backend used for the MMD implementation.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether to print progress messages.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref'], kwargs['c_ref']]
pop_kwargs = ['self', 'x_ref', 'c_ref', 'backend', '__class__']
[kwargs.pop(k, None) for k in pop_kwargs]
if x_kernel is None or c_kernel is None:
if backend == Framework.TENSORFLOW:
from alibi_detect.utils.tensorflow.kernels import GaussianRBF
else:
from alibi_detect.utils.pytorch.kernels import GaussianRBF # type: ignore[assignment]
if x_kernel is None:
kwargs.update({'x_kernel': GaussianRBF})
if c_kernel is None:
kwargs.update({'c_kernel': GaussianRBF})
if backend == Framework.TENSORFLOW:
kwargs.pop('device', None)
self._detector = ContextMMDDriftTF(*args, **kwargs)
else:
self._detector = ContextMMDDriftTorch(*args, **kwargs)
self.meta = self._detector.meta
def predict(self, x: Union[np.ndarray, list], c: np.ndarray,
return_p_val: bool = True, return_distance: bool = True, return_coupling: bool = False) \
            -> Dict[str, Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data, given the provided context.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the conditional MMD test statistic between the new batch and reference data.
return_coupling
Whether to return the coupling matrices.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold, conditional MMD test \
statistic and coupling matrices.
"""
return self._detector.predict(x, c, return_p_val, return_distance, return_coupling)
def score(self, x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
"""
Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
p-value representing the test statistic's extremity under the null hypothesis.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
Returns
-------
p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test \
statistic threshold above which drift is flagged, and a tuple containing the coupling matrices \
:math:`(W_{ref,ref}, W_{test,test}, W_{ref,test})`.
"""
return self._detector.score(x, c)
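# A minimal usage sketch (guarded so it does not run on import). The context here is a
# single hypothetical covariate that partly explains the data; a TensorFlow (or
# PyTorch) backend is assumed to be installed.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    c_ref = rng.uniform(0, 1, size=(500, 1)).astype(np.float32)
    x_ref = (c_ref + rng.normal(0, .1, size=(500, 1))).astype(np.float32)
    cd = ContextMMDDrift(x_ref, c_ref, backend='tensorflow', n_permutations=100)
    c = rng.uniform(0, 1, size=(200, 1)).astype(np.float32)
    x = (c + rng.normal(0, .1, size=(200, 1))).astype(np.float32)
    preds = cd.predict(x, c)
    print(preds['data']['is_drift'])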
| 7,323 | 42.082353 | 116 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/__init__.py
|
from .chisquare import ChiSquareDrift
from .classifier import ClassifierDrift
from .ks import KSDrift
from .learned_kernel import LearnedKernelDrift
from .lsdd import LSDDDrift
from .lsdd_online import LSDDDriftOnline
from .spot_the_diff import SpotTheDiffDrift
from .mmd import MMDDrift
from .mmd_online import MMDDriftOnline
from .model_uncertainty import ClassifierUncertaintyDrift, RegressorUncertaintyDrift
from .tabular import TabularDrift
from .cvm import CVMDrift
from .cvm_online import CVMDriftOnline
from .fet import FETDrift
from .fet_online import FETDriftOnline
from .context_aware import ContextMMDDrift
__all__ = [
"ChiSquareDrift",
"ClassifierDrift",
"KSDrift",
"LearnedKernelDrift",
"LSDDDrift",
"LSDDDriftOnline",
"MMDDrift",
"MMDDriftOnline",
"TabularDrift",
"ClassifierUncertaintyDrift",
"RegressorUncertaintyDrift",
"SpotTheDiffDrift",
"CVMDrift",
"CVMDriftOnline",
"FETDrift",
"FETDriftOnline",
"ContextMMDDrift"
]
| 1,007 | 26.243243 | 84 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/ks.py
|
import numpy as np
from scipy.stats import ks_2samp
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseUnivariateDrift
from alibi_detect.utils.warnings import deprecated_alias
class KSDrift(BaseUnivariateDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
correction: str = 'bonferroni',
alternative: str = 'two-sided',
n_features: Optional[int] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Kolmogorov-Smirnov (K-S) data drift detector with Bonferroni or False Discovery Rate (FDR)
correction for multivariate data.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for significance of the K-S test for each feature. If the FDR correction method
is used, this corresponds to the acceptable q-value.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
Typically a dimensionality reduction technique.
correction
Correction type for multivariate data. Either 'bonferroni' or 'fdr' (False Discovery Rate).
alternative
Defines the alternative hypothesis. Options are 'two-sided', 'less' or 'greater'.
n_features
Number of features used in the K-S test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
n_features=n_features,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
# Other attributes
self.alternative = alternative
def feature_score(self, x_ref: np.ndarray, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute K-S scores and statistics per feature.
Parameters
----------
x_ref
Reference instances to compare distribution with.
x
Batch of instances.
Returns
-------
Feature level p-values and K-S statistics.
"""
x = x.reshape(x.shape[0], -1)
x_ref = x_ref.reshape(x_ref.shape[0], -1)
p_val = np.zeros(self.n_features, dtype=np.float32)
dist = np.zeros_like(p_val)
for f in range(self.n_features):
# TODO: update to 'exact' when bug fix is released in scipy 1.5
dist[f], p_val[f] = ks_2samp(x_ref[:, f], x[:, f], alternative=self.alternative, mode='asymp')
return p_val, dist
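# A minimal usage sketch (guarded so it does not run on import) on hypothetical
# Gaussian reference and (mean-shifted) test data.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.normal(0, 1, size=(500, 10)).astype(np.float32)
    cd = KSDrift(x_ref, p_val=.05, correction='bonferroni')
    x = rng.normal(.5, 1, size=(500, 10)).astype(np.float32)
    preds = cd.predict(x, drift_type='batch', return_p_val=True)
    print(preds['data']['is_drift'], preds['data']['p_val'])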
| 4,384 | 41.572816 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/lsdd.py
|
import numpy as np
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.utils.frameworks import has_pytorch, has_tensorflow, BackendValidator, Framework
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.base import DriftConfigMixin
if has_pytorch:
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
if has_tensorflow:
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
class LSDDDrift(DriftConfigMixin):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
backend: str = 'tensorflow',
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_permutations: int = 100,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Least-squares density difference (LSDD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
backend
Backend used for the LSDD implementation.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_permutations
Number of permutations used in the permutation test.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 1/20th of the reference data.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__()
# Set config
self._set_config(locals())
backend = backend.lower()
BackendValidator(
backend_options={Framework.TENSORFLOW: [Framework.TENSORFLOW],
Framework.PYTORCH: [Framework.PYTORCH]},
construct_name=self.__class__.__name__
).verify_backend(backend)
kwargs = locals()
args = [kwargs['x_ref']]
pop_kwargs = ['self', 'x_ref', 'backend', '__class__']
[kwargs.pop(k, None) for k in pop_kwargs]
if backend == Framework.TENSORFLOW:
kwargs.pop('device', None)
self._detector = LSDDDriftTF(*args, **kwargs)
else:
self._detector = LSDDDriftTorch(*args, **kwargs)
self.meta = self._detector.meta
def predict(self, x: Union[np.ndarray, list], return_p_val: bool = True, return_distance: bool = True) \
            -> Dict[str, Dict[str, Union[int, float]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the permutation test.
return_distance
Whether to return the LSDD metric between the new batch and reference data.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the model's metadata.
- ``'data'`` contains the drift prediction and optionally the p-value, threshold and LSDD metric.
"""
return self._detector.predict(x, return_p_val, return_distance)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the least-squares density
difference as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the LSDD between the reference and test set, \
and the LSDD threshold above which drift is flagged.
"""
return self._detector.score(x)
def get_config(self) -> dict: # Needed due to need to unnormalize x_ref
"""
Get the detector's configuration dictionary.
Returns
-------
The detector's configuration dictionary.
"""
cfg = super().get_config()
# Unnormalize x_ref
if self._detector.preprocess_at_init or self._detector.preprocess_fn is None \
or self._detector.x_ref_preprocessed:
cfg['x_ref'] = self._detector._unnormalize(cfg['x_ref'])
return cfg
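# A minimal usage sketch (guarded so it does not run on import) on hypothetical
# Gaussian data; a TensorFlow (or PyTorch) backend is assumed to be installed.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.normal(0, 1, size=(500, 5)).astype(np.float32)
    cd = LSDDDrift(x_ref, backend='tensorflow', p_val=.05, n_permutations=100)
    preds = cd.predict(rng.normal(1, 1, size=(200, 5)).astype(np.float32))
    print(preds['data']['is_drift'])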
| 6,658 | 41.96129 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/preprocess.py
|
import numpy as np
from scipy.special import softmax
from scipy.stats import entropy
from typing import Callable, Union
def classifier_uncertainty(
x: Union[np.ndarray, list],
model_fn: Callable,
preds_type: str = 'probs',
uncertainty_type: str = 'entropy',
margin_width: float = 0.1,
) -> np.ndarray:
"""
Evaluate model_fn on x and transform predictions to prediction uncertainties.
Parameters
----------
x
Batch of instances.
model_fn
Function that evaluates a classification model on x in a single call (contains batching logic if necessary).
preds_type
Type of prediction output by the model. Options are 'probs' (in [0,1]) or 'logits' (in [-inf,inf]).
uncertainty_type
Method for determining the model's uncertainty for a given instance. Options are 'entropy' or 'margin'.
margin_width
Width of the margin if uncertainty_type = 'margin'. The model is considered uncertain on an instance
if the highest two class probabilities it assigns to the instance differ by less than margin_width.
Returns
-------
A scalar indication of uncertainty of the model on each instance in x.
"""
preds = model_fn(x)
if preds_type == 'probs':
if np.abs(1 - np.sum(preds, axis=-1)).mean() > 1e-6:
raise ValueError("Probabilities across labels should sum to 1")
probs = preds
elif preds_type == 'logits':
probs = softmax(preds, axis=-1)
else:
raise NotImplementedError("Only prediction types 'probs' and 'logits' supported.")
if uncertainty_type == 'entropy':
uncertainties = entropy(probs, axis=-1)
elif uncertainty_type == 'margin':
top_2_probs = -np.partition(-probs, kth=1, axis=-1)[:, :2]
diff = top_2_probs[:, 0] - top_2_probs[:, 1]
uncertainties = (diff < margin_width).astype(int)
else:
raise NotImplementedError("Only uncertainty types 'entropy' or 'margin' supported")
return uncertainties[:, None] # Detectors expect N x d
def regressor_uncertainty(
x: Union[np.ndarray, list],
model_fn: Callable,
uncertainty_type: str = 'mc_dropout',
n_evals: int = 25,
) -> np.ndarray:
"""
Evaluate model_fn on x and transform predictions to prediction uncertainties.
Parameters
----------
x
Batch of instances.
model_fn
Function that evaluates a regression model on x in a single call (contains batching logic if necessary).
uncertainty_type
Method for determining the model's uncertainty for a given instance. Options are 'mc_dropout' or 'ensemble'.
The former should output a scalar per instance. The latter should output a vector of predictions per instance.
    n_evals
        The number of times to evaluate the model under different dropout configurations. Only relevant when using
the 'mc_dropout' uncertainty type.
Returns
-------
A scalar indication of uncertainty of the model on each instance in x.
"""
if uncertainty_type == 'mc_dropout':
preds = np.concatenate([model_fn(x) for _ in range(n_evals)], axis=-1)
elif uncertainty_type == 'ensemble':
preds = model_fn(x)
else:
raise NotImplementedError("Only 'mc_dropout' and 'ensemble' are supported uncertainty types for regressors.")
uncertainties = np.std(preds, axis=-1)
return uncertainties[:, None] # Detectors expect N x d
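# A minimal sketch (guarded so it does not run on import) of how these helpers turn raw
# predictions into the N x 1 uncertainty scores consumed by the drift detectors. The
# linear softmax model and the random "ensemble" below are hypothetical.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.normal(size=(8, 4))
    w = rng.normal(size=(4, 3))
    def prob_model(x_batch: np.ndarray) -> np.ndarray:
        logits = x_batch @ w
        e = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)
    ent = classifier_uncertainty(x, prob_model, preds_type='probs', uncertainty_type='entropy')
    # 'ensemble' expects a vector of predictions per instance; the std over members is the score
    ens = regressor_uncertainty(x, lambda x_: rng.normal(size=(len(x_), 10)), uncertainty_type='ensemble')
    print(ent.shape, ens.shape)  # both (8, 1)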
| 3,470 | 35.15625 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/fet_online.py
|
from tqdm import tqdm
import numpy as np
from typing import Any, Callable, List, Optional, Union
from alibi_detect.base import DriftConfigMixin
from alibi_detect.cd.base_online import BaseUniDriftOnline
from alibi_detect.utils.misc import quantile
from scipy.stats import hypergeom
import numba as nb
import warnings
class FETDriftOnline(BaseUniDriftOnline, DriftConfigMixin):
online_state_keys = ('t', 'test_stats', 'drift_preds', 'xs')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_sizes: List[int],
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
n_bootstraps: int = 10000,
t_max: Optional[int] = None,
alternative: str = 'greater',
lam: float = 0.99,
n_features: Optional[int] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online Fisher exact test (FET) data drift detector using preconfigured thresholds, which tests for a
change in the mean of binary univariate data. This detector is an adaption of that proposed by
:cite:t:`Ross2012b`.
For multivariate data, the detector makes a correction similar to the Bonferroni correction used for
the offline detector. Given :math:`d` features, the detector configures thresholds by
targeting the :math:`1-\\beta` quantile of test statistics over the simulated streams, where
:math:`\\beta = 1 - (1-(1/ERT))^{(1/d)}`. For the univariate case, this simplifies to
:math:`\\beta = 1/ERT`. At prediction time, drift is flagged if the test statistic of any feature stream
exceed the thresholds.
Note
----
In the multivariate case, for the ERT to be accurately targeted the feature streams must be independent.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the univariate detectors, the ERT is defined
as the expected run-time after the smallest window is full i.e. the run-time from t=min(windows_sizes).
window_sizes
window sizes for the sliding test-windows used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
t_max
Length of the streams to simulate when configuring thresholds. If `None`, this is set to
2 * max(`window_sizes`) - 1.
alternative
Defines the alternative hypothesis. Options are 'greater' or 'less', which correspond to
an increase or decrease in the mean of the Bernoulli stream.
lam
Smoothing coefficient used for exponential moving average.
n_features
Number of features used in the statistical test. No need to pass it if no preprocessing takes place.
In case of a preprocessing step, this can also be inferred automatically but could be more
expensive to compute.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_sizes=window_sizes,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
n_features=n_features,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
# Set config
self._set_config(locals())
self.lam = lam
if alternative.lower() not in ['greater', 'less']:
raise ValueError("`alternative` must be either 'greater' or 'less'.")
self.alternative = alternative.lower()
# Stream length
if t_max is not None:
if t_max < 2 * self.max_ws - 1:
raise ValueError("`t_max` must be >= 2 * max(`window_sizes`) for the FETDriftOnline detector.")
else:
t_max = 2 * self.max_ws - 1
self.t_max = t_max
# Check data is only [False, True] or [0, 1]
values = set(np.unique(self.x_ref))
if not set(values).issubset(['0', '1', True, False]):
raise ValueError("The `x_ref` data must consist of only (0,1)'s or (False,True)'s for the "
"FETDriftOnline detector.")
if len(np.unique(self.x_ref.astype('int'))) == 1:
raise ValueError("The `x_ref` data consists of all 0's or all 1's. Thresholds cannot be configured.")
# Configure thresholds and initialise detector
self._initialise_state()
self._configure_thresholds()
self._configure_ref()
def _configure_ref(self) -> None:
"""
Configure the reference data.
"""
self.sum_ref = np.sum(self.x_ref, axis=0)
def _configure_thresholds(self) -> None:
"""
A function that simulates trajectories of the (smoothed) Fisher exact test statistic for the desired
reference set and window sizes under the null distribution, where both the reference set and deployment
stream follow the same distribution. It then uses these simulated trajectories to estimate thresholds.
The test statistics are smoothed using an exponential moving average to remove their discreteness and
therefore allow more precise quantiles to be targeted.
        In the unsmoothed case the thresholds should stop changing after t=(2*max(window_sizes) - 1) and therefore
we need only simulate trajectories and estimate thresholds up to this point. If heavy smoothing is applied
(i.e. if `lam`<<1), a larger `t_max` may be necessary in order to ensure the thresholds have converged.
"""
if self.verbose:
print("Using %d bootstrap simulations to configure thresholds..." % self.n_bootstraps)
# Assuming independent features, calibrate to beta = 1 - (1-FPR)^(1/n_features)
beta = 1 - (1-self.fpr)**(1/self.n_features)
# Init progress bar
if self.verbose:
if self.n_features > 1:
msg = "Simulating streams for %d window(s) and %d features(s)" \
% (len(self.window_sizes), self.n_features)
else:
msg = "Simulating streams for %d window(s)" % len(self.window_sizes)
pbar = tqdm(total=int(self.n_features*len(self.window_sizes)), desc=msg)
else:
pbar = None
# Compute test statistic at each t_max number of t's, for each stream and each feature
self.permit_probs = np.full((self.t_max, self.n_features), np.nan)
thresholds = np.full((self.t_max, self.n_features), np.nan, dtype=np.float32)
for f in range(self.n_features):
# Compute stats for given feature (for each stream)
stats = self._simulate_streams(self.x_ref[:, f], pbar)
# At each t for each stream, find max stats. over window sizes
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
max_stats = np.nanmax(stats, -1)
# Find threshold (at each t) that satisfies eqn. (2) in Ross et al.
for t in range(np.min(self.window_sizes)-1, self.t_max):
# Compute (1-beta) quantile of max_stats at a given t, over all streams
threshold = np.float32(quantile(max_stats[:, t], 1 - beta, interpolate=False, type=6))
stats_below = max_stats[max_stats[:, t] < threshold]
# Check for stats equal to threshold
prob_of_equal = (max_stats[:, t] <= threshold).mean() - (max_stats[:, t] < threshold).mean()
if prob_of_equal == 0.0:
permit_prob = np.inf
max_stats = stats_below # Remove streams where change point detected
else:
undershoot = 1 - beta - (max_stats[:, t] < threshold).mean()
permit_prob = undershoot / prob_of_equal
stats_equal = max_stats[max_stats[:, t] == threshold]
n_keep_equal = np.random.binomial(len(stats_equal), permit_prob)
# Remove streams where change point detected, but allow permit_prob streams where stats=thresh
max_stats = np.concatenate([stats_below, stats_equal[:n_keep_equal]])
thresholds[t, f] = threshold
self.permit_probs[t, f] = permit_prob
self.thresholds = thresholds
def _simulate_streams(self, x_ref: np.ndarray, pbar: Optional[tqdm]) -> np.ndarray:
"""
Computes test statistic for each stream.
Almost all of the work done here is done in a call to scipy's hypergeom for each window size.
"""
n_windows = len(self.window_sizes)
stats = np.full((self.n_bootstraps, self.t_max, n_windows), np.nan, dtype=np.float32)
p = np.mean(x_ref)
sum_ref = np.sum(x_ref)
x_stream = np.random.choice([False, True], (self.n_bootstraps, self.t_max), p=[1 - p, p])
cumsums_stream = np.cumsum(x_stream, axis=-1)
cumsums_stream = np.concatenate([np.zeros_like(cumsums_stream[..., 0:1]), cumsums_stream], axis=-1)
for k in range(n_windows):
if pbar is not None:
pbar.update(1)
ws = self.window_sizes[k]
cumsums_last_ws = cumsums_stream[:, ws:] - cumsums_stream[:, :-ws]
# Perform FET with hypergeom.cdf (this is vectorised over streams)
if self.alternative == 'greater':
p_val = hypergeom.cdf(sum_ref, self.n+ws, sum_ref + cumsums_last_ws, self.n)
else:
p_val = hypergeom.cdf(cumsums_last_ws, self.n+ws, sum_ref + cumsums_last_ws, ws)
stats[:, (ws - 1):, k] = self._exp_moving_avg(1 - p_val, self.lam)
return stats
@staticmethod
@nb.njit(cache=True)
def _exp_moving_avg(arr: np.ndarray, lam: float) -> np.ndarray:
""" Apply exponential moving average over the final axis."""
output = np.zeros_like(arr)
output[..., 0] = arr[..., 0]
for i in range(1, arr.shape[-1]):
output[..., i] = (1 - lam) * output[..., i - 1] + lam * arr[..., i]
return output
def _update_state(self, x_t: np.ndarray):
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
if self.t == 1:
# Initialise stream
self.xs = x_t
else:
# Update stream
self.xs = np.concatenate([self.xs, x_t])
def _check_drift(self, test_stats: np.ndarray, thresholds: np.ndarray) -> int:
"""
        Private method to compare test stats to thresholds. The max stats over all windows are computed for each
feature. Drift is flagged if `max_stats` for any feature exceeds the thresholds for that feature.
Parameters
----------
test_stats
Array of test statistics with shape (n_windows, n_features)
thresholds
Array of thresholds with shape (t_max, n_features).
Returns
-------
An int equal to 1 if drift, 0 otherwise.
"""
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore', message='All-NaN slice encountered')
max_stats = np.nanmax(test_stats, axis=0)
# If any stats greater than thresholds, flag drift and return
if (max_stats > thresholds).any():
return 1
        # If still no drift, check if any stats equal the threshold. If so, flag drift with probability 1 - permit_prob
equal_inds = np.where(max_stats == thresholds)[0]
for equal_ind in equal_inds:
if np.random.uniform() > self.permit_probs[min(self.t-1, len(self.thresholds)-1), equal_ind]:
return 1
return 0
def score(self, x_t: Union[np.ndarray, Any]) -> np.ndarray:
"""
Compute the test-statistic (FET) between the reference window(s) and test window.
If a given test-window is not yet full then a test-statistic of np.nan is returned for that window.
Parameters
----------
x_t
A single instance.
Returns
-------
Estimated FET test statistics (1-p_val) between reference window and test windows.
"""
values = set(np.unique(x_t))
if not set(values).issubset(['0', '1', True, False]):
raise ValueError("The `x_t` data must consist of only (0,1)'s or (False,True)'s for the "
"FETDriftOnline detector.")
x_t = super()._preprocess_xt(x_t)
self._update_state(x_t)
stats = np.zeros((len(self.window_sizes), self.n_features), dtype=np.float32)
for k, ws in enumerate(self.window_sizes):
if self.t >= ws:
sum_last_ws = np.sum(self.xs[-ws:, :], axis=0)
# Perform FET with hypergeom.cdf (this is vectorised over features)
if self.alternative == 'greater':
p_vals = hypergeom.cdf(self.sum_ref, self.n+ws, self.sum_ref + sum_last_ws, self.n)
else:
p_vals = hypergeom.cdf(sum_last_ws, self.n+ws, self.sum_ref + sum_last_ws, ws)
# Compute test stat and apply smoothing
stats_k = 1 - p_vals
for f in range(self.n_features):
if len(self.test_stats) != 0 and not np.isnan(self.test_stats[-1, k, f]):
stats_k[f] = (1 - self.lam) * self.test_stats[-1, k, f] + self.lam * stats_k[f]
stats[k, :] = stats_k
else:
stats[k, :] = np.nan
return stats
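# A minimal usage sketch (guarded so it does not run on import): a hypothetical Bernoulli
# stream whose success rate increases after deployment relative to the reference data.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_ref = rng.binomial(1, 0.3, size=500)
    cd = FETDriftOnline(x_ref, ert=150, window_sizes=[40], n_bootstraps=5000)
    for x_t in rng.binomial(1, 0.6, size=500):
        pred = cd.predict(x_t)
        if pred['data']['is_drift']:
            print(f"Drift detected at t={cd.t}")
            break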
| 15,054 | 45.180982 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_ks.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, InputLayer
from typing import Callable
from alibi_detect.cd import KSDrift
from alibi_detect.cd.tensorflow.preprocess import HiddenOutput, UAE, preprocess_drift
n, n_hidden, n_classes = 750, 10, 5
def mymodel(shape):
x_in = Input(shape=shape)
x = Dense(n_hidden)(x_in)
x_out = Dense(n_classes, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
n_features = [1, 10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_drift, {'model': UAE})
]
alternative = ['two-sided', 'less', 'greater']
correction = ['bonferroni', 'fdr']
update_x_ref = [{'last': 1000}, {'reservoir_sampling': 1000}]
preprocess_at_init = [True, False]
tests_ksdrift = list(product(n_features, n_enc, preprocess, alternative,
correction, update_x_ref, preprocess_at_init))
n_tests = len(tests_ksdrift)
@pytest.fixture
def ksdrift_params(request):
return tests_ksdrift[request.param]
@pytest.mark.parametrize('ksdrift_params', list(range(n_tests)), indirect=True)
def test_ksdrift(ksdrift_params):
n_features, n_enc, preprocess, alternative, correction, \
update_x_ref, preprocess_at_init = ksdrift_params
np.random.seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
if isinstance(preprocess_fn, Callable):
if 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = mymodel((n_features,))
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
elif preprocess_kwargs['model'].__name__ == 'UAE' \
and n_features > 1 and isinstance(n_enc, int):
tf.random.set_seed(0)
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(n_features,)),
Dense(n_enc)
]
)
preprocess_fn = partial(preprocess_fn, model=UAE(encoder_net=encoder_net))
else:
preprocess_fn = None
else:
preprocess_fn = None
cd = KSDrift(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
alternative=alternative,
)
x = x_ref.copy()
preds_batch = cd.predict(x, drift_type='batch', return_p_val=True)
assert preds_batch['data']['is_drift'] == 0
k = list(update_x_ref.keys())[0]
assert cd.n == x.shape[0] + x_ref.shape[0]
assert cd.x_ref.shape[0] == min(update_x_ref[k], x.shape[0] + x_ref.shape[0])
preds_feature = cd.predict(x, drift_type='feature', return_p_val=True)
assert preds_feature['data']['is_drift'].shape[0] == cd.n_features
preds_by_feature = (preds_feature['data']['p_val'] < cd.p_val).astype(int)
assert (preds_feature['data']['is_drift'] == preds_by_feature).all()
np.random.seed(0)
X_randn = np.random.randn(n * n_features).reshape(n, n_features).astype('float32')
mu, sigma = 5, 5
X_low = sigma * X_randn - mu
X_high = sigma * X_randn + mu
preds_batch = cd.predict(X_high, drift_type='batch')
if alternative != 'less':
assert preds_batch['data']['is_drift'] == 1
preds_batch = cd.predict(X_low, drift_type='batch')
if alternative != 'greater':
assert preds_batch['data']['is_drift'] == 1
assert preds_batch['data']['distance'].min() >= 0.
assert preds_feature['data']['threshold'] == cd.p_val
if correction == 'bonferroni':
assert preds_batch['data']['threshold'] == cd.p_val / cd.n_features
| 4,062 | 37.330189 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_cvm_online.py
|
import numpy as np
import pytest
from alibi_detect.cd import CVMDriftOnline
from alibi_detect.utils._random import fixed_seed
n, n_test = 200, 500
n_bootstraps = 1000
ert = 50
np.random.seed(0)
window_sizes = [[10], [10, 20]]
batch_size = [None, int(n_bootstraps/4)]
n_features = [1, 3]
@pytest.mark.parametrize('window_sizes', window_sizes)
@pytest.mark.parametrize('batch_size', batch_size)
@pytest.mark.parametrize('n_feat', n_features)
def test_cvmdriftonline(window_sizes, batch_size, n_feat, seed):
with fixed_seed(seed):
# Reference data
x_ref = np.random.normal(0, 1, size=(n, n_feat)).squeeze() # squeeze to test vec input in 1D case
# Instantiate detector
cd = CVMDriftOnline(x_ref=x_ref, ert=ert, window_sizes=window_sizes,
n_bootstraps=n_bootstraps, batch_size=batch_size)
# Test predict
x_h0 = np.random.normal(0, 1, size=(n_test, n_feat))
x_h1 = np.random.normal(1, 1, size=(n_test, n_feat))
# Reference data
detection_times_h0 = []
test_stats_h0 = []
for x_t in x_h0: # x_t is np.int64 in 1D, np.ndarray in multi-D
t0 = cd.t
pred_t = cd.predict(x_t, return_test_stat=True)
assert cd.t - t0 == 1 # This checks state updated (self.t at least)
test_stats_h0.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
art = np.array(detection_times_h0).mean() - np.min(window_sizes) + 1
test_stats_h0 = [ts for ts in test_stats_h0 if ts is not None]
assert ert/3 < art < 3*ert
# Drifted data
cd.reset_state()
detection_times_h1 = []
test_stats_h1 = []
for x_t in x_h1:
pred_t = cd.predict(x_t, return_test_stat=True)
test_stats_h1.append(pred_t['data']['test_stat'])
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
add = np.array(detection_times_h1).mean() - np.min(window_sizes)
test_stats_h1 = [ts for ts in test_stats_h1 if ts is not None]
assert add < ert/2
assert np.nanmean(test_stats_h1) > np.nanmean(test_stats_h0)
@pytest.mark.parametrize('n_feat', n_features)
def test_cvm_online_state_online(n_feat, tmp_path, seed):
"""
Test save/load/reset state methods for CVMDriftOnline. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
window_sizes = [10]
with fixed_seed(seed):
x_ref = np.random.normal(0, 1, (n, n_feat)).squeeze()
x = np.random.normal(0.1, 1, (n, n_feat))
dd = CVMDriftOnline(x_ref, window_sizes=window_sizes, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
np.testing.assert_array_equal(new_pred['data']['test_stat'], test_stats_1[5])
| 4,225 | 35.747826 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_lsdd.py
|
import numpy as np
import pytest
from alibi_detect.cd import LSDDDrift
from alibi_detect.cd.pytorch.lsdd import LSDDDriftTorch
from alibi_detect.cd.tensorflow.lsdd import LSDDDriftTF
n, n_features = 100, 5
tests_lsdddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_lsdddrift)
@pytest.fixture
def lsdddrift_params(request):
return tests_lsdddrift[request.param]
@pytest.mark.parametrize('lsdddrift_params', list(range(n_tests)), indirect=True)
def test_lsdddrift(lsdddrift_params):
backend = lsdddrift_params
x_ref = np.random.randn(*(n, n_features))
try:
cd = LSDDDrift(x_ref=x_ref, backend=backend)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, LSDDDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, LSDDDriftTF)
else:
assert cd is None
| 931 | 26.411765 | 81 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/conftest.py
|
import pytest
@pytest.fixture
def seed(pytestconfig):
"""
Returns the random seed set by pytest-randomly.
"""
return pytestconfig.getoption("randomly_seed")
| 175 | 16.6 | 51 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_spot_the_diff.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
import torch
import torch.nn as nn
from alibi_detect.cd import SpotTheDiffDrift
from alibi_detect.cd.pytorch.spot_the_diff import SpotTheDiffDriftTorch
from alibi_detect.cd.tensorflow.spot_the_diff import SpotTheDiffDriftTF
n, n_features = 100, 5
class MyKernelTF(tf.keras.Model): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.config = {'n_features': n_features}
self.dense = Dense(20)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
def get_config(self) -> dict:
return self.config
@classmethod
def from_config(cls, config):
return cls(**config)
class MyKernelTorch(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
tests_stddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_stddrift)
@pytest.fixture
def stddrift_params(request):
return tests_stddrift[request.param]
@pytest.mark.parametrize('stddrift_params', list(range(n_tests)), indirect=True)
def test_stddrift(stddrift_params):
backend = stddrift_params
if backend.lower() == 'pytorch':
kernel = MyKernelTorch(n_features)
elif backend.lower() == 'tensorflow':
kernel = MyKernelTF((n_features,))
else:
kernel = None
x_ref = np.random.randn(*(n, n_features))
try:
cd = SpotTheDiffDrift(x_ref=x_ref, kernel=kernel, backend=backend)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, SpotTheDiffDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, SpotTheDiffDriftTF)
else:
assert cd is None
| 2,129 | 28.178082 | 94 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_tabular.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.cd import TabularDrift
n = 750
n_categories, n_features = 5, 6
n_cat = [0, 2, n_features]
categories_per_feature = [None, int, list]
correction = ['bonferroni', 'fdr']
update_x_ref = [{'last': 1000}, {'reservoir_sampling': 1000}]
preprocess_at_init = [True, False]
new_categories = [True, False]
tests_tabulardrift = list(product(n_cat, categories_per_feature, correction,
update_x_ref, preprocess_at_init, new_categories))
n_tests = len(tests_tabulardrift)
@pytest.fixture
def tabulardrift_params(request):
return tests_tabulardrift[request.param]
@pytest.mark.parametrize('tabulardrift_params', list(range(n_tests)), indirect=True)
def test_tabulardrift(tabulardrift_params):
n_cat, categories_per_feature, correction, \
update_x_ref, preprocess_at_init, new_categories = tabulardrift_params
np.random.seed(0)
# add categorical variables
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if n_cat > 0:
cat_cols = np.random.choice(n_features, size=n_cat, replace=False)
x_ref[:, cat_cols] = np.tile(np.array([np.arange(n_categories)] * n_cat).T, (n // n_categories, 1))
if categories_per_feature is None:
categories_per_feature = {f: None for f in cat_cols}
elif categories_per_feature == int:
categories_per_feature = {f: n_categories for f in cat_cols}
elif categories_per_feature == list:
categories_per_feature = {f: list(np.arange(n_categories)) for f in cat_cols}
else:
categories_per_feature = None
cd = TabularDrift(
x_ref=x_ref,
p_val=.05,
categories_per_feature=categories_per_feature,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
correction=correction,
)
x = x_ref.copy()
if new_categories and n_cat > 0:
x[:, cat_cols] = x[:, cat_cols] + 1
preds_batch = cd.predict(x, drift_type='batch', return_p_val=True)
if new_categories and n_cat > 0:
assert preds_batch['data']['is_drift'] == 1
else:
assert preds_batch['data']['is_drift'] == 0
k = list(update_x_ref.keys())[0]
assert cd.n == x.shape[0] + x_ref.shape[0]
assert cd.x_ref.shape[0] == min(update_x_ref[k], x.shape[0] + x_ref.shape[0])
assert preds_batch['data']['distance'].min() >= 0.
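    # with Bonferroni correction the feature-level threshold equals the overall p_val divided by the number of features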
if correction == 'bonferroni':
assert preds_batch['data']['threshold'] == cd.p_val / cd.n_features
preds_feature = cd.predict(x, drift_type='feature', return_p_val=True)
assert preds_feature['data']['is_drift'].shape[0] == cd.n_features
preds_by_feature = (preds_feature['data']['p_val'] < cd.p_val).astype(int)
assert (preds_feature['data']['is_drift'] == preds_by_feature).all()
assert preds_feature['data']['threshold'] == cd.p_val
| 2,931 | 39.722222 | 107 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_mmd_online.py
|
import numpy as np
import pytest
from alibi_detect.cd import MMDDriftOnline
from alibi_detect.cd.pytorch.mmd_online import MMDDriftOnlineTorch
from alibi_detect.cd.tensorflow.mmd_online import MMDDriftOnlineTF
n, n_features = 100, 5
tests_mmddriftonline = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_mmddriftonline)
@pytest.fixture
def mmddriftonline_params(request):
return tests_mmddriftonline[request.param]
@pytest.mark.parametrize('mmddriftonline_params', list(range(n_tests)), indirect=True)
def test_mmddriftonline(mmddriftonline_params):
backend = mmddriftonline_params
x_ref = np.random.randn(*(n, n_features))
# Instantiate and check detector class
try:
cd = MMDDriftOnline(x_ref=x_ref, ert=25, window_size=5, backend=backend, n_bootstraps=100)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, MMDDriftOnlineTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, MMDDriftOnlineTF)
else:
assert cd is None
return
# Test predict
x_t = np.random.randn(n_features)
t0 = cd.t
cd.predict(x_t)
assert cd.t - t0 == 1 # This checks state updated (self.t at least)
# Test score
t0 = cd.t
cd.score(x_t)
assert cd.t - t0 == 1
| 1,353 | 27.808511 | 98 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_lsdd_online.py
|
import numpy as np
import pytest
from alibi_detect.cd import LSDDDriftOnline
from alibi_detect.cd.pytorch.lsdd_online import LSDDDriftOnlineTorch
from alibi_detect.cd.tensorflow.lsdd_online import LSDDDriftOnlineTF
n, n_features = 100, 5
tests_lsdddriftonline = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_lsdddriftonline)
@pytest.fixture
def lsdddriftonline_params(request):
return tests_lsdddriftonline[request.param]
@pytest.mark.parametrize('lsdddriftonline_params', list(range(n_tests)), indirect=True)
def test_lsdddriftonline(lsdddriftonline_params):
backend = lsdddriftonline_params
x_ref = np.random.randn(*(n, n_features))
try:
cd = LSDDDriftOnline(x_ref=x_ref, ert=25, window_size=5, backend=backend, n_bootstraps=100)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, LSDDDriftOnlineTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, LSDDDriftOnlineTF)
else:
assert cd is None
return None
# Test predict
x_t = np.random.randn(n_features)
t0 = cd.t
cd.predict(x_t)
assert cd.t - t0 == 1 # This checks state updated (self.t at least)
# Test score
t0 = cd.t
cd.score(x_t)
assert cd.t - t0 == 1
| 1,331 | 27.956522 | 99 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_fet_online.py
|
import numpy as np
import pytest
from functools import partial
from alibi_detect.cd import FETDriftOnline
from alibi_detect.utils._random import fixed_seed
n = 250
n_inits, n_reps = 3, 100
n_bootstraps = 1000
ert = 150
window_sizes = [40]
alternatives = ['less', 'greater']
n_features = [1, 3]
@pytest.mark.parametrize('alternative', alternatives)
@pytest.mark.parametrize('n_feat', n_features)
def test_fetdriftonline(alternative, n_feat, seed):
# Reference data
p_h0 = 0.5
with fixed_seed(seed):
# squeeze to test vector input in 1D case
x_ref = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h0, p_h0]).squeeze()
x_h0 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h0, p_h0])
detection_times_h0 = []
detection_times_h1 = []
for _ in range(n_inits):
# Instantiate detector
with fixed_seed(seed+1):
cd = FETDriftOnline(x_ref=x_ref, ert=ert, window_sizes=window_sizes,
n_bootstraps=n_bootstraps, alternative=alternative)
        # Stream data under H0 (no drift) and record any (false) detection times
count = 0
while len(detection_times_h0) < n_reps and count < int(1e6):
count += 1
x_t = int(x_h0()) if n_feat == 1 else x_h0() # x_t is int in 1D case, otherwise ndarray with shape (n_feat)
t0 = cd.t
pred_t = cd.predict(x_t)
assert cd.t - t0 == 1 # This checks state updated (self.t at least)
if pred_t['data']['is_drift']:
detection_times_h0.append(pred_t['data']['time'])
cd.reset_state()
# Drifted data
if alternative == 'less':
p_h1 = 0.1
x_h1 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h1, p_h1])
else:
p_h1 = 0.9
x_h1 = partial(np.random.choice, (0, 1), size=n_feat, p=[1-p_h1, p_h1])
cd.reset_state()
count = 0
while len(detection_times_h1) < n_reps and count < int(1e6):
count += 1
x_t = x_h1().reshape(1, 1) if n_feat == 1 else x_h1() # test shape (1,1) in 1D case here
pred_t = cd.predict(x_t)
if pred_t['data']['is_drift']:
detection_times_h1.append(pred_t['data']['time'])
cd.reset_state()
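    # ART (average run-time to false detection under no drift) should be close to the configured ERT,
    # while ADD (average detection delay under drift) should be much smaller; both are offset by the
    # window size since no detection can occur before the smallest test window is full.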
art = np.array(detection_times_h0).mean() - np.min(window_sizes) + 1
add = np.array(detection_times_h1).mean() - np.min(window_sizes)
assert ert / 3 < art < 3 * ert
assert add + 1 < ert/2
@pytest.mark.parametrize('n_feat', n_features)
def test_fet_online_state_online(n_feat, tmp_path, seed):
"""
Test save/load/reset state methods for FETDriftOnline. State is saved, reset, and loaded, with
prediction results and stateful attributes compared to original.
"""
p_h0 = 0.5
p_h1 = 0.3
with fixed_seed(seed):
# squeeze to test vector input in 1D case
x_ref = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h0, p_h0]).squeeze()
x = np.random.choice((0, 1), (n, n_feat), p=[1 - p_h1, p_h1])
dd = FETDriftOnline(x_ref, window_sizes=window_sizes, ert=20)
# Store state for comparison
state_dict_t0 = {}
for key in dd.online_state_keys:
state_dict_t0[key] = getattr(dd, key)
# Run for 10 time steps
test_stats_1 = []
for t, x_t in enumerate(x):
if t == 5:
dd.save_state(tmp_path)
# Store state for comparison
state_dict_t5 = {}
for key in dd.online_state_keys:
state_dict_t5[key] = getattr(dd, key)
preds = dd.predict(x_t)
test_stats_1.append(preds['data']['test_stat'])
# Reset and check state cleared
dd.reset_state()
for key, orig_val in state_dict_t0.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Repeat, check that same test_stats both times
test_stats_2 = []
for t, x_t in enumerate(x):
preds = dd.predict(x_t)
test_stats_2.append(preds['data']['test_stat'])
np.testing.assert_array_equal(test_stats_1, test_stats_2)
# Load state from t=5 timestep
dd.load_state(tmp_path)
# Compare stateful attributes to original at t=5
for key, orig_val in state_dict_t5.items():
np.testing.assert_array_equal(orig_val, getattr(dd, key)) # use np.testing here as it handles torch.Tensor etc
# Compare predictions to original at t=5
new_pred = dd.predict(x[5])
np.testing.assert_array_equal(new_pred['data']['test_stat'], test_stats_1[5])
| 4,578 | 35.927419 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_chisquare.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.cd import ChiSquareDrift
n_categories, n_features, n_tiles = 5, 6, 20
x_ref = np.tile(np.array([np.arange(n_categories)] * n_features).T, (n_tiles, 1))
np.random.shuffle(x_ref)
categories_per_feature = [
None,
{f: n_categories for f in range(n_features)},
{f: list(np.arange(n_categories)) for f in range(n_features)}
]
preprocess = [None]
correction = ['bonferroni', 'fdr']
update_x_ref = [{'last': 1000}, {'reservoir_sampling': 1000}]
preprocess_at_init = [True, False]
new_categories = [True, False]
tests_chisquaredrift = list(product(categories_per_feature, preprocess, correction,
update_x_ref, preprocess_at_init, new_categories))
n_tests = len(tests_chisquaredrift)
@pytest.fixture
def chisquaredrift_params(request):
return tests_chisquaredrift[request.param]
@pytest.mark.parametrize('chisquaredrift_params', list(range(n_tests)), indirect=True)
def test_chisquaredrift(chisquaredrift_params):
categories_per_feature, preprocess_fn, correction, \
update_x_ref, preprocess_at_init, new_categories = chisquaredrift_params
cd = ChiSquareDrift(
x_ref=x_ref,
p_val=.05,
categories_per_feature=categories_per_feature,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
correction=correction,
)
x = x_ref.copy()
if new_categories:
x = x + 1
preds_batch = cd.predict(x, drift_type='batch', return_p_val=True)
if new_categories:
assert preds_batch['data']['is_drift'] == 1
else:
assert preds_batch['data']['is_drift'] == 0
k = list(update_x_ref.keys())[0]
assert cd.n == x.shape[0] + x_ref.shape[0]
assert cd.x_ref.shape[0] == min(update_x_ref[k], x.shape[0] + x_ref.shape[0])
assert preds_batch['data']['distance'].min() >= 0.
if correction == 'bonferroni':
assert preds_batch['data']['threshold'] == cd.p_val / cd.n_features
preds_feature = cd.predict(x, drift_type='feature', return_p_val=True)
assert preds_feature['data']['is_drift'].shape[0] == cd.n_features
preds_by_feature = (preds_feature['data']['p_val'] < cd.p_val).astype(int)
assert (preds_feature['data']['is_drift'] == preds_by_feature).all()
assert preds_feature['data']['threshold'] == cd.p_val
| 2,417 | 36.78125 | 86 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_mmd.py
|
import numpy as np
import pytest
from alibi_detect.cd import MMDDrift
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
from alibi_detect.cd.tensorflow.mmd import MMDDriftTF
from alibi_detect.utils.frameworks import has_keops
if has_keops:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
n, n_features = 100, 5
tests_mmddrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet']
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmddrift_params(request):
return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmddrift_params', list(range(n_tests)), indirect=True)
def test_mmddrift(mmddrift_params):
backend = mmddrift_params
x_ref = np.random.randn(*(n, n_features)).astype('float32')
try:
cd = MMDDrift(x_ref=x_ref, backend=backend)
except (NotImplementedError, ImportError):
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, MMDDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, MMDDriftTF)
elif backend.lower() == 'keops' and has_keops:
assert isinstance(cd._detector, MMDDriftKeops)
else:
assert cd is None
| 1,185 | 29.410256 | 80 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_learned_kernel.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
import torch
import torch.nn as nn
from alibi_detect.cd import LearnedKernelDrift
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
from alibi_detect.cd.tensorflow.learned_kernel import LearnedKernelDriftTF
from alibi_detect.utils.frameworks import has_keops
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
from pykeops.torch import LazyTensor
n, n_features = 100, 5
class MyKernelTF(tf.keras.Model): # TODO: Support then test models using keras functional API
def __init__(self, n_features: int):
super().__init__()
self.config = {'n_features': n_features}
self.dense = Dense(20)
def call(self, x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return tf.einsum('ji,ki->jk', self.dense(x), self.dense(y))
def get_config(self) -> dict:
return self.config
@classmethod
def from_config(cls, config):
return cls(**config)
class MyKernelTorch(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense = nn.Linear(n_features, 20)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return torch.einsum('ji,ki->jk', self.dense(x), self.dense(y))
if has_keops:
class MyKernelKeops(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: LazyTensor, y: LazyTensor) -> LazyTensor:
return (- ((x - y) ** 2).sum(-1)).exp()
tests_lkdrift = ['tensorflow', 'pytorch', 'keops', 'PyToRcH', 'mxnet']
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
backend = lkdrift_params
if backend.lower() == 'pytorch':
kernel = MyKernelTorch(n_features)
elif backend.lower() == 'tensorflow':
kernel = MyKernelTF(n_features)
elif has_keops and backend.lower() == 'keops':
kernel = MyKernelKeops()
else:
kernel = None
x_ref = np.random.randn(*(n, n_features))
try:
cd = LearnedKernelDrift(x_ref=x_ref, kernel=kernel, backend=backend)
except NotImplementedError:
cd = None
except ImportError:
assert not has_keops
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, LearnedKernelDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, LearnedKernelDriftTF)
elif has_keops and backend.lower() == 'keops':
assert isinstance(cd._detector, LearnedKernelDriftKeops)
else:
assert cd is None
| 2,811 | 29.901099 | 94 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_cvm.py
|
import numpy as np
import pytest
import scipy
from packaging import version
if version.parse(scipy.__version__) >= version.parse('1.7.0'):
from alibi_detect.cd import CVMDrift
n, n_test = 500, 200
np.random.seed(0)
n_features = [2] # TODO - test 1D case once BaseUnivariateDrift updated
tests_cvmdrift = list(n_features)
n_tests = len(tests_cvmdrift)
@pytest.fixture
def cvmdrift_params(request):
return tests_cvmdrift[request.param]
@pytest.mark.skipif(version.parse(scipy.__version__) < version.parse('1.7.0'),
reason="Requires scipy version >= 1.7.0")
@pytest.mark.parametrize('cvmdrift_params', list(range(n_tests)), indirect=True)
def test_cvmdrift(cvmdrift_params):
n_feat = cvmdrift_params
# Reference data
x_ref = np.random.normal(0, 1, size=(n, n_feat)).squeeze() # squeeze to test vec input in 1D case
# Instantiate detector
cd = CVMDrift(x_ref=x_ref, p_val=0.05)
# Test predict on reference data
x_h0 = x_ref.copy()
preds = cd.predict(x_h0, return_p_val=True)
assert preds['data']['is_drift'] == 0 and (preds['data']['p_val'] >= cd.p_val).any()
# Test predict on heavily drifted data
x_h1 = np.random.normal(2, 2, size=(n, n_feat)).squeeze()
preds = cd.predict(x_h1, drift_type='batch')
assert preds['data']['is_drift'] == 1
assert preds['data']['distance'].min() >= 0.
| 1,377 | 31.046512 | 102 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_classifier.py
|
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
import torch
import torch.nn as nn
from alibi_detect.cd import ClassifierDrift
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.cd.tensorflow.classifier import ClassifierDriftTF
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
from sklearn.neural_network import MLPClassifier
from typing import Tuple
n, n_features = 100, 5
def tensorflow_model(input_shape: Tuple[int]):
x_in = Input(shape=input_shape)
x = Dense(20, activation=tf.nn.relu)(x_in)
x_out = Dense(2, activation='softmax')(x)
return tf.keras.models.Model(inputs=x_in, outputs=x_out)
def pytorch_model(input_shape: int):
return torch.nn.Sequential(
nn.Linear(input_shape, 20),
nn.ReLU(),
nn.Linear(20, 2)
)
def sklearn_model():
return MLPClassifier(hidden_layer_sizes=(20, ))
tests_clfdrift = ['tensorflow', 'pytorch', 'PyToRcH', 'sklearn', 'mxnet']
n_tests = len(tests_clfdrift)
@pytest.fixture
def clfdrift_params(request):
return tests_clfdrift[request.param]
@pytest.mark.parametrize('clfdrift_params', list(range(n_tests)), indirect=True)
def test_clfdrift(clfdrift_params):
backend = clfdrift_params
if backend.lower() == 'pytorch':
model = pytorch_model(n_features)
elif backend.lower() == 'tensorflow':
model = tensorflow_model((n_features,))
elif backend.lower() == 'sklearn':
model = sklearn_model()
else:
model = None
x_ref = np.random.randn(*(n, n_features))
try:
cd = ClassifierDrift(x_ref=x_ref, model=model, backend=backend)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, ClassifierDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, ClassifierDriftTF)
elif backend.lower() == 'sklearn':
assert isinstance(cd._detector, ClassifierDriftSklearn)
else:
assert cd is None
| 2,093 | 28.492958 | 80 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_preprocess.py
|
from itertools import product
import numpy as np
import pytest
import torch.nn as nn
import torch
from sklearn.linear_model import LogisticRegression
from alibi_detect.cd.preprocess import classifier_uncertainty, regressor_uncertainty
n, n_features = 100, 10
shape = (n_features,)
X_train = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
y_train_reg = np.random.rand(n).astype('float32')
y_train_clf = np.random.choice(2, n)
X_test = np.random.rand(n * n_features).reshape(n, n_features).astype('float32')
preds_type = ['probs', 'logits']
uncertainty_type = ['entropy', 'margin']
tests_cu = list(product(preds_type, uncertainty_type))
n_tests_cu = len(tests_cu)
@pytest.fixture
def cu_params(request):
return tests_cu[request.param]
@pytest.mark.parametrize('cu_params', list(range(n_tests_cu)), indirect=True)
def test_classifier_uncertainty(cu_params):
preds_type, uncertainty_type = cu_params
clf = LogisticRegression().fit(X_train, y_train_clf)
model_fn = clf.predict_log_proba if preds_type == 'logits' else clf.predict_proba
uncertainties = classifier_uncertainty(
X_test, model_fn, preds_type=preds_type, uncertainty_type=uncertainty_type
)
assert uncertainties.shape == (X_test.shape[0], 1)
tests_ru = ['mc_dropout', 'ensemble']
n_tests_ru = len(tests_ru)
@pytest.fixture
def ru_params(request):
return tests_ru[request.param]
@pytest.mark.parametrize('ru_params', list(range(n_tests_ru)), indirect=True)
def test_regressor_uncertainty(ru_params):
uncertainty_type = ru_params
    if uncertainty_type == 'mc_dropout':
model = nn.Sequential(
nn.Linear(n_features, 10),
nn.Dropout(0.5),
nn.Linear(10, 1)
)
else:
model = nn.Linear(n_features, 42)
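        # a single linear layer with 42 outputs stands in for an ensemble of 42 regressors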
def model_fn(x):
with torch.no_grad():
return np.array(model(torch.as_tensor(x)))
uncertainties = regressor_uncertainty(
X_test, model_fn, uncertainty_type=uncertainty_type
)
assert uncertainties.shape == (X_test.shape[0], 1)
| 2,067 | 29.411765 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_fet.py
|
import numpy as np
import pytest
from alibi_detect.cd import FETDrift
from itertools import product
n, n_test = 500, 200
np.random.seed(0)
alternative = ['less', 'greater', 'two-sided']
n_features = [2] # TODO - test 1D case once BaseUnivariateDrift updated
tests_fetdrift = list(product(alternative, n_features))
n_tests = len(tests_fetdrift)
@pytest.fixture
def fetdrift_params(request):
return tests_fetdrift[request.param]
@pytest.mark.parametrize('fetdrift_params', list(range(n_tests)), indirect=True)
def test_fetdrift(fetdrift_params):
alternative, n_feat = fetdrift_params
# Reference data
p_h0 = 0.5
x_ref = np.random.choice([0, 1], (n, n_feat), p=[1 - p_h0, p_h0]).squeeze() # squeeze to test vec input in 1D case
# Instantiate detector
cd = FETDrift(x_ref=x_ref, p_val=0.05, alternative=alternative)
# Test predict on reference data
x_h0 = x_ref.copy()
preds = cd.predict(x_h0, return_p_val=True)
assert preds['data']['is_drift'] == 0 and (preds['data']['p_val'] >= cd.p_val).any()
# Test predict on heavily drifted data
if alternative == 'less' or alternative == 'two-sided':
p_h1 = 0.2
x_h1 = np.random.choice([0, 1], (n_test, n_feat), p=[1 - p_h1, p_h1]).squeeze()
preds = cd.predict(x_h1)
assert preds['data']['is_drift'] == 1
if alternative == 'greater' or alternative == 'two-sided':
p_h1 = 0.8
x_h1 = np.random.choice([0, 1], (n_test, n_feat), p=[1 - p_h1, p_h1]).squeeze()
preds = cd.predict(x_h1)
assert preds['data']['is_drift'] == 1
# Check odds ratio
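    # FETDrift reports the per-feature odds ratio as its 'distance': (test 1s / test 0s) / (ref 1s / ref 0s)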
ref_1s = np.sum(x_ref, axis=0)
ref_0s = len(x_ref) - ref_1s
test1_1s = np.sum(x_h1, axis=0)
test1_0s = len(x_h1) - test1_1s
odds_ratio = (test1_1s / test1_0s) / (ref_1s / ref_0s)
for f in range(n_feat):
np.testing.assert_allclose(preds['data']['distance'][f],
odds_ratio[f], rtol=1e-05)
| 1,966 | 33.508772 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_contextmmd.py
|
import numpy as np
import pytest
from alibi_detect.cd import ContextMMDDrift
from alibi_detect.cd.pytorch.context_aware import ContextMMDDriftTorch
from alibi_detect.cd.tensorflow.context_aware import ContextMMDDriftTF
n, n_features = 100, 5
tests_context_mmddrift = ['tensorflow', 'pytorch', 'PyToRcH', 'mxnet']
n_tests = len(tests_context_mmddrift)
@pytest.fixture
def context_mmddrift_params(request):
return tests_context_mmddrift[request.param]
@pytest.mark.parametrize('context_mmddrift_params', list(range(n_tests)), indirect=True)
def test_context_mmddrift(context_mmddrift_params):
backend = context_mmddrift_params
c_ref = np.random.randn(*(n, 1))
x_ref = c_ref + np.random.randn(*(n, n_features))
try:
cd = ContextMMDDrift(x_ref=x_ref, c_ref=c_ref, backend=backend)
except NotImplementedError:
cd = None
if backend.lower() == 'pytorch':
assert isinstance(cd._detector, ContextMMDDriftTorch)
elif backend.lower() == 'tensorflow':
assert isinstance(cd._detector, ContextMMDDriftTF)
else:
assert cd is None
| 1,099 | 30.428571 | 88 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_model_uncertainty.py
|
import numpy as np
import pytest
from functools import partial
from itertools import product
import scipy
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Softmax, Dropout
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd import ClassifierUncertaintyDrift, RegressorUncertaintyDrift
n = 500
def tf_model(n_features, n_labels, softmax=False, dropout=False):
x_in = Input(shape=(n_features,))
x = Dense(20, activation=tf.nn.relu)(x_in)
if dropout:
x = Dropout(0.5)(x)
x = Dense(n_labels)(x)
if softmax:
x = Softmax()(x)
return tf.keras.models.Model(inputs=x_in, outputs=x)
class PtModel(nn.Module):
def __init__(self, n_features, n_labels, softmax=False, dropout=False):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, n_labels)
self.dropout = nn.Dropout(0.5) if dropout else lambda x: x
self.softmax = nn.Softmax() if softmax else lambda x: x
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
x = self.dropout(x)
x = self.dense2(x)
x = self.softmax(x)
return x
def dumb_model(x, n_labels, softmax=False):
if isinstance(x, list):
x = np.concatenate(x, axis=0)
x = np.stack([np.mean(x * (i + 1), axis=-1) for i in range(n_labels)], axis=-1)
if softmax:
x = scipy.special.softmax(x, axis=-1)
return x
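# gen_model returns a framework-specific model, or falls back to the plain numpy dumb_model when backend is None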
def gen_model(n_features, n_labels, backend, softmax=False, dropout=False):
if backend == 'tensorflow':
return tf_model(n_features, n_labels, softmax, dropout)
elif backend == 'pytorch':
return PtModel(n_features, n_labels, softmax, dropout)
elif backend is None:
return partial(dumb_model, n_labels=n_labels, softmax=softmax)
def id_fn(x: list, to_pt: bool = False) -> Union[np.ndarray, torch.Tensor]:
x = np.concatenate(x, axis=0)
if to_pt:
return torch.from_numpy(x)
else:
return x # type: ignore[return-value]
p_val = [.05]
backend = ['tensorflow', 'pytorch', None]
n_features = [16]
n_labels = [3]
preds_type = ['probs', 'logits']
uncertainty_type = ['entropy', 'margin']
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
to_list = [True, False]
tests_clfuncdrift = list(product(p_val, backend, n_features, n_labels, preds_type,
uncertainty_type, update_x_ref, to_list))
n_tests = len(tests_clfuncdrift)
@pytest.fixture
def clfuncdrift_params(request):
return tests_clfuncdrift[request.param]
@pytest.mark.parametrize('clfuncdrift_params', list(range(n_tests)), indirect=True)
def test_clfuncdrift(clfuncdrift_params):
p_val, backend, n_features, n_labels, preds_type, uncertainty_type, update_x_ref, to_list = clfuncdrift_params
np.random.seed(0)
tf.random.set_seed(0)
model = gen_model(n_features, n_labels, backend, preds_type == 'probs')
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test0 = x_ref.copy()
x_test1 = np.ones_like(x_ref)
if to_list:
x_ref = [x[None, :] for x in x_ref]
x_test0 = [x[None, :] for x in x_test0]
x_test1 = [x[None, :] for x in x_test1]
cd = ClassifierUncertaintyDrift(
x_ref=x_ref,
model=model,
p_val=p_val,
backend=backend,
update_x_ref=update_x_ref,
preds_type=preds_type,
uncertainty_type=uncertainty_type,
margin_width=0.1,
batch_size=10,
preprocess_batch_fn=partial(id_fn, to_pt=backend == 'pytorch') if to_list else None
)
preds_0 = cd.predict(x_test0)
assert cd._detector.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
p_val = [.05]
backend = ['tensorflow', 'pytorch']
n_features = [16]
uncertainty_type = ['mc_dropout', 'ensemble']
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
to_list = [True, False]
tests_reguncdrift = list(product(p_val, backend, n_features, uncertainty_type, update_x_ref, to_list))
n_tests = len(tests_reguncdrift)
@pytest.fixture
def reguncdrift_params(request):
return tests_reguncdrift[request.param]
@pytest.mark.parametrize('reguncdrift_params', list(range(n_tests)), indirect=True)
def test_reguncdrift(reguncdrift_params):
p_val, backend, n_features, uncertainty_type, update_x_ref, to_list = reguncdrift_params
np.random.seed(0)
tf.random.set_seed(0)
if uncertainty_type == 'mc_dropout':
n_labels = 1
dropout = True
elif uncertainty_type == 'ensemble':
n_labels = 5
dropout = False
model = gen_model(n_features, n_labels, backend, dropout=dropout)
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test0 = x_ref.copy()
x_test1 = np.ones_like(x_ref)
if to_list:
x_ref = [x[None, :] for x in x_ref]
x_test0 = [x[None, :] for x in x_test0]
x_test1 = [x[None, :] for x in x_test1]
cd = RegressorUncertaintyDrift(
x_ref=x_ref,
model=model,
p_val=p_val,
backend=backend,
update_x_ref=update_x_ref,
uncertainty_type=uncertainty_type,
n_evals=5,
batch_size=10,
preprocess_batch_fn=partial(id_fn, to_pt=backend == 'pytorch') if to_list else None
)
preds_0 = cd.predict(x_test0)
assert cd._detector.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd._detector.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
| 6,110 | 31.163158 | 114 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/tests/test_utils.py
|
from itertools import product
import numpy as np
import pytest
from alibi_detect.cd.utils import update_reference
n = [3, 50]
n_features = [1, 10]
update_method = [
None,
'last',
'reservoir_sampling'
]
tests_update = list(product(n, n_features, update_method))
n_tests_update = len(tests_update)
@pytest.fixture
def update_params(request):
return tests_update[request.param]
@pytest.mark.parametrize('update_params', list(range(n_tests_update)), indirect=True)
def test_update_reference(update_params):
n, n_features, update_method = update_params
n_ref = np.random.randint(1, n)
n_test = np.random.randint(1, 2 * n)
X_ref = np.random.rand(n_ref * n_features).reshape(n_ref, n_features)
X = np.random.rand(n_test * n_features).reshape(n_test, n_features)
if update_method in ['last', 'reservoir_sampling']:
update_method = {update_method: n}
X_ref_new = update_reference(X_ref, X, n, update_method)
assert X_ref_new.shape[0] <= n
if isinstance(update_method, dict):
if list(update_method.keys())[0] == 'last':
assert (X_ref_new[-1] == X[-1]).all()
| 1,134 | 29.675676 | 85 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/keops/learned_kernel.py
|
from copy import deepcopy
from functools import partial
from tqdm import tqdm
import numpy as np
from pykeops.torch import LazyTensor
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, List, Optional, Union, Tuple
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.pytorch import get_device, predict_batch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.frameworks import Framework
class LearnedKernelDriftKeops(BaseLearnedKernelDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
kernel: Union[nn.Module, nn.Sequential],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
var_reg: float = 1e-5,
reg_loss_fn: Callable = (lambda kernel: 0),
train_size: Optional[float] = .75,
retrain_from_scratch: bool = True,
optimizer: torch.optim.Optimizer = torch.optim.Adam, # type: ignore
learning_rate: float = 1e-3,
batch_size: int = 32,
batch_size_predict: int = 1000000,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
estimate of the test power. The kernel is trained on a split of the reference and test instances
and then the MMD is evaluated on held out instances and a permutation test is performed.
For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
(https://arxiv.org/abs/2002.09116)
Parameters
----------
x_ref
Data used as reference distribution.
kernel
Trainable PyTorch module that returns a similarity between two instances.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before applying the kernel.
n_permutations
The number of permutations to use in the permutation test once the MMD has been computed.
batch_size_permutations
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
var_reg
Constant added to the estimated variance of the MMD for stability.
reg_loss_fn
The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
The drift is detected on `1 - train_size`.
retrain_from_scratch
Whether the kernel should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
optimizer
Optimizer used during training of the kernel.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the kernel.
batch_size_predict
Batch size used for the trained drift detector predictions.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the kernel.
epochs
            Number of training epochs for the kernel. An epoch corresponds to one pass over the
            smaller of the reference and test training splits.
num_workers
Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
is disabled. Setting `num_workers>0` may be unreliable on Windows.
verbose
Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Relevant for 'pytorch' and 'keops' backends.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations,
train_size=train_size,
retrain_from_scratch=retrain_from_scratch,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.KEOPS.value})
# Set device, define model and training kwargs
self.device = get_device(device)
self.original_kernel = kernel
self.kernel = deepcopy(kernel)
# Check kernel format
self.has_proj = hasattr(self.kernel, 'proj') and isinstance(self.kernel.proj, nn.Module)
self.has_kernel_b = hasattr(self.kernel, 'kernel_b') and isinstance(self.kernel.kernel_b, nn.Module)
# Define kwargs for dataloader and trainer
self.dataset = dataset
self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True,
drop_last=True, num_workers=num_workers)
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
self.j_hat = LearnedKernelDriftKeops.JHat(
self.kernel, var_reg, self.has_proj, self.has_kernel_b).to(self.device)
# Set prediction and permutation batch sizes
self.batch_size_predict = batch_size_predict
self.batch_size_perms = batch_size_permutations
self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations
class JHat(nn.Module):
"""
A module that wraps around the kernel. When passed a batch of reference and batch of test
instances it returns an estimate of a correlate of test power.
Equation 4 of https://arxiv.org/abs/2002.09116
"""
def __init__(self, kernel: nn.Module, var_reg: float, has_proj: bool, has_kernel_b: bool):
super().__init__()
self.kernel = kernel
self.has_proj = has_proj
self.has_kernel_b = has_kernel_b
self.var_reg = var_reg
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
n = len(x)
if self.has_proj and isinstance(self.kernel.proj, nn.Module):
x_proj, y_proj = self.kernel.proj(x), self.kernel.proj(y)
else:
x_proj, y_proj = x, y
x2_proj, x_proj = LazyTensor(x_proj[None, :, :]), LazyTensor(x_proj[:, None, :])
y2_proj, y_proj = LazyTensor(y_proj[None, :, :]), LazyTensor(y_proj[:, None, :])
if self.has_kernel_b:
x2, x = LazyTensor(x[None, :, :]), LazyTensor(x[:, None, :])
y2, y = LazyTensor(y[None, :, :]), LazyTensor(y[:, None, :])
else:
x, x2, y, y2 = None, None, None, None
k_xy = self.kernel(x_proj, y2_proj, x, y2)
k_xx = self.kernel(x_proj, x2_proj, x, x2)
k_yy = self.kernel(y_proj, y2_proj, y, y2)
h_mat = k_xx + k_yy - k_xy - k_xy.t()
h_i = h_mat.sum(1).squeeze(-1)
h = h_i.sum()
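            # J-hat (eq. 4 of Liu et al., 2020): unbiased MMD^2 estimate divided by the square root of its
            # regularised variance estimate; training maximises this correlate of test power.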
mmd2_est = (h - n) / (n * (n - 1))
var_est = 4 * h_i.square().sum() / (n ** 3) - 4 * h.square() / (n ** 4)
reg_var_est = var_est + self.var_reg
return mmd2_est/reg_var_est.sqrt()
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested. The kernel
used within the MMD is first trained to maximise an estimate of the resulting test power.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x_cur = self.preprocess(x)
(x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
dl_ref_tr, dl_cur_tr = self.dataloader(self.dataset(x_ref_tr)), self.dataloader(self.dataset(x_cur_tr))
self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel
self.kernel = self.kernel.to(self.device)
train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device]
LearnedKernelDriftKeops.trainer(*train_args, **self.train_kwargs) # type: ignore
m, n = len(x_ref_te), len(x_cur_te)
if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
x_all = torch.from_numpy(np.concatenate([x_ref_te, x_cur_te], axis=0)).float()
else:
x_all = x_ref_te + x_cur_te # type: ignore[assignment]
perms = [torch.randperm(m + n) for _ in range(self.n_permutations)]
mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
p_val = (mmd2 <= mmd2_permuted).float().mean()
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
def _mmd2(self, x_all: Union[list, torch.Tensor], perms: List[torch.Tensor], m: int, n: int) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""
Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations.
Parameters
----------
x_all
Concatenated reference and test instances.
perms
List with permutation vectors.
m
Number of reference instances.
n
Number of test instances.
Returns
-------
MMD^2 statistic for the original and permuted reference and test sets.
"""
preprocess_batch_fn = self.train_kwargs['preprocess_fn']
if isinstance(preprocess_batch_fn, Callable): # type: ignore[arg-type]
x_all = preprocess_batch_fn(x_all) # type: ignore[operator]
if self.has_proj:
x_all_proj = predict_batch(x_all, self.kernel.proj, device=self.device, batch_size=self.batch_size_predict,
dtype=x_all.dtype if isinstance(x_all, torch.Tensor) else torch.float32)
else:
x_all_proj = x_all
x, x2, y, y2 = None, None, None, None
k_xx, k_yy, k_xy = [], [], []
for batch in range(self.n_batches):
i, j = batch * self.batch_size_perms, (batch + 1) * self.batch_size_perms
# Stack a batch of permuted reference and test tensors and their projections
x_proj = torch.cat([x_all_proj[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
y_proj = torch.cat([x_all_proj[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
if self.has_kernel_b:
x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
if batch == 0:
x_proj = torch.cat([x_all_proj[None, :m, :], x_proj], 0)
y_proj = torch.cat([x_all_proj[None, m:, :], y_proj], 0)
if self.has_kernel_b:
x = torch.cat([x_all[None, :m, :], x], 0) # type: ignore[call-overload]
y = torch.cat([x_all[None, m:, :], y], 0) # type: ignore[call-overload]
x_proj, y_proj = x_proj.to(self.device), y_proj.to(self.device)
if self.has_kernel_b:
x, y = x.to(self.device), y.to(self.device)
# Batch-wise kernel matrix computation over the permutations
with torch.no_grad():
x2_proj, x_proj = LazyTensor(x_proj[:, None, :, :]), LazyTensor(x_proj[:, :, None, :])
y2_proj, y_proj = LazyTensor(y_proj[:, None, :, :]), LazyTensor(y_proj[:, :, None, :])
if self.has_kernel_b:
x2, x = LazyTensor(x[:, None, :, :]), LazyTensor(x[:, :, None, :])
y2, y = LazyTensor(y[:, None, :, :]), LazyTensor(y[:, :, None, :])
k_xy.append(self.kernel(x_proj, y2_proj, x, y2).sum(1).sum(1).squeeze(-1))
k_xx.append(self.kernel(x_proj, x2_proj, x, x2).sum(1).sum(1).squeeze(-1))
k_yy.append(self.kernel(y_proj, y2_proj, y, y2).sum(1).sum(1).squeeze(-1))
c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. / (m * n)
# Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's
stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy)
return stats[0], stats[1:]
@staticmethod
def trainer(
j_hat: JHat,
dataloaders: Tuple[DataLoader, DataLoader],
device: torch.device,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
preprocess_fn: Callable = None,
epochs: int = 20,
reg_loss_fn: Callable = (lambda kernel: 0),
verbose: int = 1,
) -> None:
"""
Train the kernel to maximise an estimate of test power using minibatch gradient descent.
"""
optimizer = optimizer(j_hat.parameters(), lr=learning_rate)
j_hat.train()
loss_ma = 0.
for epoch in range(epochs):
dl_ref, dl_cur = dataloaders
dl = tqdm(enumerate(zip(dl_ref, dl_cur)), total=min(len(dl_ref), len(dl_cur))) if verbose == 1 else \
enumerate(zip(dl_ref, dl_cur))
for step, (x_ref, x_cur) in dl:
if isinstance(preprocess_fn, Callable): # type: ignore
x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
x_ref, x_cur = x_ref.to(device), x_cur.to(device)
optimizer.zero_grad() # type: ignore
estimate = j_hat(x_ref, x_cur)
loss = -estimate + reg_loss_fn(j_hat.kernel) # ascent
loss.backward()
optimizer.step() # type: ignore
if verbose == 1:
loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1)
dl.set_description(f'Epoch {epoch + 1}/{epochs}')
dl.set_postfix(dict(loss=loss_ma))
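# A minimal usage sketch (hypothetical `my_trainable_kernel` and `x_test`, not defined in this module),
# assuming a trainable kernel nn.Module with the call signature described above:
#
#     x_ref = np.random.randn(500, 10).astype(np.float32)
#     cd = LearnedKernelDriftKeops(x_ref, kernel=my_trainable_kernel, epochs=2, n_permutations=100)
#     preds = cd.predict(x_test)  # kernel is (re)trained on a split, then a permutation test is applied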
| 16,939 | 48.244186 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/keops/mmd.py
|
import logging
import numpy as np
from pykeops.torch import LazyTensor
import torch
from typing import Callable, Dict, List, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.keops.kernels import GaussianRBF
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class MMDDriftKeops(BaseMMDDrift):
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
batch_size_permutations: int = 1000000,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
batch_size_permutations
KeOps computes the n_permutations of the MMD^2 statistics in chunks of batch_size_permutations.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
configure_kernel_from_x_ref=configure_kernel_from_x_ref,
n_permutations=n_permutations,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.KEOPS.value})
# set device
self.device = get_device(device)
# initialize kernel
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, # type: ignore[assignment]
np.ndarray) else None
self.kernel = kernel(sigma).to(self.device) if kernel == GaussianRBF else kernel
# set the correct MMD^2 function based on the batch size for the permutations
self.batch_size = batch_size_permutations
self.n_batches = 1 + (n_permutations - 1) // batch_size_permutations
# infer the kernel bandwidth from the reference data
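        # If sigma is given explicitly no inference is needed; with configure_kernel_from_x_ref the bandwidth
        # is inferred once here from x_ref; otherwise inference is deferred to the first test batch.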
if isinstance(sigma, torch.Tensor):
self.infer_sigma = False
elif self.infer_sigma:
x = torch.from_numpy(self.x_ref).to(self.device)
_ = self.kernel(LazyTensor(x[:, None, :]), LazyTensor(x[None, :, :]), infer_sigma=self.infer_sigma)
self.infer_sigma = False
else:
self.infer_sigma = True
def _mmd2(self, x_all: torch.Tensor, perms: List[torch.Tensor], m: int, n: int) \
-> Tuple[torch.Tensor, torch.Tensor]:
"""
Batched (across the permutations) MMD^2 computation for the original test statistic and the permutations.
Parameters
----------
x_all
Concatenated reference and test instances.
perms
List with permutation vectors.
m
Number of reference instances.
n
Number of test instances.
Returns
-------
MMD^2 statistic for the original and permuted reference and test sets.
"""
k_xx, k_yy, k_xy = [], [], []
for batch in range(self.n_batches):
i, j = batch * self.batch_size, (batch + 1) * self.batch_size
# construct stacked tensors with a batch of permutations for the reference set x and test set y
x = torch.cat([x_all[perm[:m]][None, :, :] for perm in perms[i:j]], 0)
y = torch.cat([x_all[perm[m:]][None, :, :] for perm in perms[i:j]], 0)
if batch == 0:
x = torch.cat([x_all[None, :m, :], x], 0)
y = torch.cat([x_all[None, m:, :], y], 0)
x, y = x.to(self.device), y.to(self.device)
# batch-wise kernel matrix computation over the permutations
k_xy.append(self.kernel(
LazyTensor(x[:, :, None, :]), LazyTensor(y[:, None, :, :]), self.infer_sigma).sum(1).sum(1).squeeze(-1))
k_xx.append(self.kernel(
LazyTensor(x[:, :, None, :]), LazyTensor(x[:, None, :, :])).sum(1).sum(1).squeeze(-1))
k_yy.append(self.kernel(
LazyTensor(y[:, :, None, :]), LazyTensor(y[:, None, :, :])).sum(1).sum(1).squeeze(-1))
c_xx, c_yy, c_xy = 1 / (m * (m - 1)), 1 / (n * (n - 1)), 2. / (m * n)
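        # Unbiased estimator: MMD^2 = sum_{i!=j} k(x_i, x_j) / (m(m-1)) + sum_{i!=j} k(y_i, y_j) / (n(n-1))
        #                             - 2 * sum_{i,j} k(x_i, y_j) / (mn)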
# Note that the MMD^2 estimates assume that the diagonal of the kernel matrix consists of 1's
stats = c_xx * (torch.cat(k_xx) - m) + c_yy * (torch.cat(k_yy) - n) - c_xy * torch.cat(k_xy)
return stats[0], stats[1:]
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).float() # type: ignore[assignment]
x = torch.from_numpy(x).float() # type: ignore[assignment]
# compute kernel matrix, MMD^2 and apply permutation test
m, n = x_ref.shape[0], x.shape[0]
perms = [torch.randperm(m + n) for _ in range(self.n_permutations)]
# TODO - Rethink typings (related to https://github.com/SeldonIO/alibi-detect/issues/540)
x_all = torch.cat([x_ref, x], 0) # type: ignore[list-item]
mmd2, mmd2_permuted = self._mmd2(x_all, perms, m, n)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
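        # p-value: fraction of permuted MMD^2 values that are at least as large as the observed MMD^2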
p_val = (mmd2 <= mmd2_permuted).float().mean()
# compute distance threshold
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
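# A minimal usage sketch (hypothetical shapes and settings, assuming pykeops is installed):
#
#     import numpy as np
#     x_ref = np.random.randn(1000, 32).astype(np.float32)   # reference sample
#     cd = MMDDriftKeops(x_ref, p_val=.05, n_permutations=100)
#     preds = cd.predict(np.random.randn(200, 32).astype(np.float32))
#     preds['data']['is_drift']  # 1 if drift is flagged at the requested p_val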
| 8,642 | 45.972826 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/keops/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/keops/tests/test_mmd_keops.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.utils.frameworks import has_keops
from alibi_detect.utils.pytorch import GaussianRBF, mmd2_from_kernel_matrix
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
if has_keops:
from alibi_detect.cd.keops.mmd import MMDDriftKeops
n, n_hidden, n_classes = 500, 10, 5
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
batch_size_permutations = [10, 1000000]
configure_kernel_from_x_ref = [True, False]
tests_mmddrift = list(product(n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref,
batch_size_permutations, configure_kernel_from_x_ref))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
return tests_mmddrift[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
n_features, n_enc, preprocess, n_permutations, preprocess_at_init, update_x_ref, \
batch_size_permutations, configure_kernel_from_x_ref = mmd_params
np.random.seed(0)
torch.manual_seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
return
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
cd = MMDDriftKeops(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
configure_kernel_from_x_ref=configure_kernel_from_x_ref,
n_permutations=n_permutations,
batch_size_permutations=batch_size_permutations
)
x = x_ref.copy()
preds = cd.predict(x, return_p_val=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
if isinstance(update_x_ref, dict):
k = list(update_x_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, return_p_val=True)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
# ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel
if not isinstance(x_ref, list) and update_x_ref is None:
p_val, mmd2, distance_threshold = cd.score(x_h1)
kernel = GaussianRBF(sigma=cd.kernel.sigma)
if isinstance(preprocess_fn, Callable):
x_ref, x_h1 = cd.preprocess(x_h1)
x_ref = torch.from_numpy(x_ref).float()
x_h1 = torch.from_numpy(x_h1).float()
x_all = torch.cat([x_ref, x_h1], 0)
kernel_mat = kernel(x_all, x_all)
mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, x_h1.shape[0])
np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6)
| 4,726 | 38.066116 | 110 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/keops/tests/test_learned_kernel_keops.py
|
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, Optional, Union
from alibi_detect.utils.frameworks import has_keops
from alibi_detect.utils.pytorch import GaussianRBF as GaussianRBFTorch
from alibi_detect.utils.pytorch import mmd2_from_kernel_matrix
if has_keops:
from alibi_detect.cd.keops.learned_kernel import LearnedKernelDriftKeops
from alibi_detect.utils.keops import GaussianRBF
from pykeops.torch import LazyTensor
n = 50 # number of instances used for the reference and test data samples in the tests
if has_keops:
class MyKernel(nn.Module):
def __init__(self, n_features: int, proj: bool):
super().__init__()
sigma = .1
self.kernel = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma]))
self.has_proj = proj
if proj:
self.proj = nn.Linear(n_features, 2)
self.kernel_b = GaussianRBF(trainable=True, sigma=torch.Tensor([sigma]))
def forward(self, x_proj: LazyTensor, y_proj: LazyTensor, x: Optional[LazyTensor] = None,
y: Optional[LazyTensor] = None) -> LazyTensor:
similarity = self.kernel(x_proj, y_proj)
if self.has_proj:
similarity = similarity + self.kernel_b(x, y)
return similarity
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
if isinstance(x, list):
return torch.from_numpy(np.array(x))
else:
return x
p_val = [.05]
n_features = [4]
preprocess_at_init = [True, False]
update_x_ref = [None, {'reservoir_sampling': 1000}]
preprocess_fn = [None, identity_fn]
n_permutations = [10]
batch_size_permutations = [5, 1000000]
train_size = [.5]
retrain_from_scratch = [True]
batch_size_predict = [1000000]
preprocess_batch = [None, identity_fn]
has_proj = [True, False]
tests_lkdrift = list(product(p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn,
n_permutations, batch_size_permutations, train_size, retrain_from_scratch,
batch_size_predict, preprocess_batch, has_proj))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
return tests_lkdrift[request.param]
@pytest.mark.skipif(not has_keops, reason='Skipping since pykeops is not installed.')
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
p_val, n_features, preprocess_at_init, update_x_ref, preprocess_fn, \
n_permutations, batch_size_permutations, train_size, retrain_from_scratch, \
batch_size_predict, preprocess_batch, has_proj = lkdrift_params
np.random.seed(0)
torch.manual_seed(0)
kernel = MyKernel(n_features, has_proj)
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None and preprocess_fn is None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = LearnedKernelDriftKeops(
x_ref=x_ref,
kernel=kernel,
p_val=p_val,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations,
batch_size_permutations=batch_size_permutations,
train_size=train_size,
retrain_from_scratch=retrain_from_scratch,
batch_size_predict=batch_size_predict,
preprocess_batch_fn=preprocess_batch,
batch_size=32,
epochs=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_0['data']['distance'] < preds_1['data']['distance']
# ensure the keops MMD^2 estimate matches the pytorch implementation for the same kernel
if not isinstance(x_ref, list) and update_x_ref is None and not has_proj:
if isinstance(preprocess_fn, Callable):
x_ref, x_test1 = cd.preprocess(x_test1)
n_ref, n_test = x_ref.shape[0], x_test1.shape[0]
x_all = torch.from_numpy(np.concatenate([x_ref, x_test1], axis=0)).float()
perms = [torch.randperm(n_ref + n_test) for _ in range(n_permutations)]
mmd2 = cd._mmd2(x_all, perms, n_ref, n_test)[0]
if isinstance(preprocess_batch, Callable):
x_all = preprocess_batch(x_all)
kernel = GaussianRBFTorch(sigma=cd.kernel.kernel.sigma)
kernel_mat = kernel(x_all, x_all)
mmd2_torch = mmd2_from_kernel_matrix(kernel_mat, n_test)
np.testing.assert_almost_equal(mmd2, mmd2_torch, decimal=6)
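# --- Hedged usage sketch (illustration only, not part of the original test suite) ---
# Minimal example of fitting the learned-kernel detector exercised above, reusing the
# MyKernel module defined in this file. The data and the helper name are invented and
# pykeops is assumed to be installed.
def _example_learned_kernel_keops_usage():
    x_ref_demo = np.random.randn(200, 4).astype(np.float32)
    x_test_demo = (np.random.randn(200, 4) + 1.).astype(np.float32)
    cd_demo = LearnedKernelDriftKeops(
        x_ref=x_ref_demo,
        kernel=MyKernel(n_features=4, proj=True),
        p_val=.05,
        n_permutations=100,
        batch_size=32,
        epochs=2
    )
    return cd_demo.predict(x_test_demo)['data']['is_drift']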
| 4,968 | 36.931298 | 103 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/sklearn/classifier.py
|
import logging
import numpy as np
from functools import partial
from typing import Callable, Dict, Optional, Tuple, Union
from sklearn.base import clone, ClassifierMixin
from sklearn.calibration import CalibratedClassifierCV
from sklearn.exceptions import NotFittedError
from sklearn.ensemble import RandomForestClassifier
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class ClassifierDriftSklearn(BaseClassifierDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: np.ndarray,
model: ClassifierMixin,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
use_calibration: bool = False,
calibration_kwargs: Optional[dict] = None,
use_oob: bool = False,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
Sklearn classification model used for drift detection.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' or 'scores'.
binarize_preds
Whether to test for discrepancy on soft (e.g. probs/scores) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold predictions. This allows all the reference and test data to be leveraged
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
use_calibration
            Whether to use calibration. Calibration can be used on top of any model.
calibration_kwargs
Optional additional kwargs for calibration.
See https://scikit-learn.org/stable/modules/generated/sklearn.calibration.CalibratedClassifierCV.html
for more details.
use_oob
            Whether to use out-of-bag (OOB) predictions. Supported only for `RandomForestClassifier`.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
preds_type=preds_type,
binarize_preds=binarize_preds,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
input_shape=input_shape,
data_type=data_type
)
if preds_type not in ['probs', 'scores']:
raise ValueError("'preds_type' should be 'probs' or 'scores'")
self.meta.update({'backend': Framework.SKLEARN.value})
self.original_model = model
self.use_calibration = use_calibration
self.calibration_kwargs = dict() if calibration_kwargs is None else calibration_kwargs
self.use_oob = use_oob
self.model: ClassifierMixin = self._clone_model()
def _has_predict_proba(self, model) -> bool:
try:
            # taking self.x_ref[0].shape to cover both cases where self.x_ref is an np.ndarray or a list
model.predict_proba(np.zeros((1, ) + self.x_ref[0].shape))
has_predict_proba = True
        except NotFittedError:
            # a NotFittedError still implies that the model exposes predict_proba
            has_predict_proba = True
except AttributeError:
has_predict_proba = False
return has_predict_proba
def _clone_model(self):
model = clone(self.original_model)
# equivalence between `retrain_from_scratch` and `warm_start`
if not self.retrain_from_scratch:
if hasattr(model, 'warm_start'):
model.warm_start = True
logger.warning('`retrain_from_scratch=False` sets automatically the parameter `warm_start=True` '
'for the given classifier. Please consult the documentation to ensure that the '
'`warm_start=True` is applicable in the current context (i.e., for tree-based '
'models such as RandomForest, setting `warm_start=True` is not applicable since the '
'fit function expects the same dataset and an update/increase in the number of '
                               'estimators - previously fitted estimators will be kept frozen while the new ones '
'will be fitted).')
else:
logger.warning('Current classifier does not support `warm_start`. The model will be retrained '
'from scratch every iteration.')
else:
if hasattr(model, 'warm_start'):
model.warm_start = False
logger.warning('`retrain_from_scratch=True` sets automatically the parameter `warm_start=False`.')
# oob checks
if self.use_oob:
if not isinstance(model, RandomForestClassifier):
raise ValueError('OOB supported only for RandomForestClassifier. '
f'Received a model of type {model.__class__.__name__}')
if self.use_calibration:
self.use_calibration = False
logger.warning('Calibration cannot be used when `use_oob=True`. Setting `use_calibration=False`.')
model.oob_score = True
model.bootstrap = True
logger.warning(
                '`use_oob=True` sets automatically the classifier parameters `bootstrap=True` and `oob_score=True`. '
'`train_size` and `n_folds` are ignored when `use_oob=True`.'
)
else:
if isinstance(model, RandomForestClassifier):
model.oob_score = False
logger.warning('`use_oob=False` sets automatically the classifier parameters `oob_score=False`.')
# preds_type checks
if self.preds_type == 'probs':
# calibrate the model if user specified.
if self.use_calibration:
model = CalibratedClassifierCV(base_estimator=model, **self.calibration_kwargs)
logger.warning('Using calibration to obtain the prediction probabilities.')
            # check if it has predict_proba. Cannot be checked via `hasattr` due to the same issue in SVC (see below)
            has_predict_proba = self._has_predict_proba(model)
            # if binarize_preds=True we don't really need the probabilities, since in test_probs they will be rounded
            # to the closest integer (i.e., to 0 or 1) according to the predicted probability. Thus, we can define
            # a hard-label predict_proba based on the predict method
if self.binarize_preds and (not has_predict_proba):
if not hasattr(model, 'predict'):
raise AttributeError('Trying to use a model which does not support `predict`.')
def predict_proba(self, X):
return np.eye(2)[self.predict(X).astype(np.int32)]
# add predict_proba method. Overwriting predict_proba is not possible for SVC due
# to @available_if(_check_proba)
# Check link: https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09b/sklearn/svm/_base.py#L807
setattr(model, 'aux_predict_proba', partial(predict_proba, model))
elif has_predict_proba:
setattr(model, 'aux_predict_proba', model.predict_proba)
            # if at this point the model still has no predict_proba, the test cannot be performed.
if not hasattr(model, 'aux_predict_proba'):
raise AttributeError("Trying to use a model which does not support `predict_proba` with "
"`preds_type='probs'`. Set (`use_calibration=True`, `calibration_kwargs`) or "
"(`binarize_preds=True`).")
else:
if self.use_calibration:
logger.warning("No calibration is performed when `preds_type='scores'`.")
if self.binarize_preds:
raise ValueError("`binarize_preds` must be `False` when `preds_type='scores'`.")
if not hasattr(model, 'decision_function'):
raise AttributeError("Trying to use a model which does not support `decision_function` with "
"`preds_type='scores'`.")
# need to put the scores in the format expected by test function, which requires to duplicate the
# scores along axis=1
def predict_proba(self, X):
scores = self.decision_function(X).reshape(-1, 1)
return np.tile(scores, reps=2)
# add predict_proba method
setattr(model, 'aux_predict_proba', partial(predict_proba, model))
return model
def score(self, x: Union[np.ndarray, list]) \
-> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
"""
Compute the out-of-fold drift metric such as the accuracy from a classifier
trained to distinguish the reference data from the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value, a notion of distance between the trained classifier's out-of-fold performance \
and that which we'd expect under the null assumption of no drift, \
and the out-of-fold classifier model prediction probabilities on the reference and test data \
as well as the associated reference and test instances of the out-of-fold predictions.
"""
if self.use_oob and isinstance(self.model, RandomForestClassifier):
return self._score_rf(x)
return self._score(x)
def _score(self, x: Union[np.ndarray, list]) \
-> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
x_ref, x = self.preprocess(x)
x, y, splits = self.get_splits(x_ref, x, return_splits=True) # type: ignore
# iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
probs_oof_list, idx_oof_list = [], []
for idx_tr, idx_te in splits:
y_tr = y[idx_tr]
if isinstance(x, np.ndarray):
x_tr, x_te = x[idx_tr], x[idx_te]
elif isinstance(x, list):
x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
self.model.fit(x_tr, y_tr)
probs = self.model.aux_predict_proba(x_te)
probs_oof_list.append(probs)
idx_oof_list.append(idx_te)
probs_oof = np.concatenate(probs_oof_list, axis=0)
idx_oof = np.concatenate(idx_oof_list, axis=0)
y_oof = y[idx_oof]
n_cur = y_oof.sum()
n_ref = len(y_oof) - n_cur
p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
idx_sort = np.argsort(idx_oof)
probs_sort = probs_oof[idx_sort]
if isinstance(x, np.ndarray):
x_oof = x[idx_oof]
x_sort = x_oof[idx_sort]
else:
x_oof = [x[_] for _ in idx_oof]
x_sort = [x_oof[_] for _ in idx_sort]
return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]
def _score_rf(self, x: Union[np.ndarray, list]) \
-> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
x_ref, x = self.preprocess(x)
x, y = self.get_splits(x_ref, x, return_splits=False) # type: ignore
self.model.fit(x, y)
        # it is possible that some inputs do not have OOB scores. This probably means
        # that too few trees were used to compute any reliable estimates.
idx_oob = np.where(np.all(~np.isnan(self.model.oob_decision_function_), axis=1))[0]
probs_oob = self.model.oob_decision_function_[idx_oob]
y_oob = y[idx_oob]
if isinstance(x, np.ndarray):
x_oob: Union[list, np.ndarray] = x[idx_oob]
elif isinstance(x, list):
x_oob = [x[_] for _ in idx_oob]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
        # comparison due to ordering in get_splits (i.e., x = [x_ref, x])
n_ref = np.sum(idx_oob < len(x_ref)).item()
n_cur = np.sum(idx_oob >= len(x_ref)).item()
p_val, dist = self.test_probs(y_oob, probs_oob, n_ref, n_cur)
return p_val, dist, probs_oob[:n_ref, 1], probs_oob[n_ref:, 1], x_oob[:n_ref], x_oob[n_ref:]
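# --- Hedged usage sketch (illustration only, not part of the module) ---
# Minimal example of the detector defined above. The data and helper name are invented;
# with `use_oob=True` the out-of-bag predictions of the random forest are used instead
# of a train/test split or cross-validation folds.
def _example_classifier_drift_sklearn():
    x_ref_demo = np.random.randn(500, 4)
    x_test_demo = np.random.randn(500, 4) + 1.
    cd_demo = ClassifierDriftSklearn(
        x_ref=x_ref_demo,
        model=RandomForestClassifier(n_estimators=100),
        p_val=.05,
        use_oob=True
    )
    return cd_demo.predict(x_test_demo)['data']['is_drift']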
| 15,607 | 49.186495 | 117 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/sklearn/__init__.py
| 0 | 0 | 0 |
py
|
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/sklearn/tests/test_classifier_sklearn.py
|
import pytest
import numpy as np
from typing import Union
from alibi_detect.cd.sklearn.classifier import ClassifierDriftSklearn
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# test List[Any] inputs to the detector
def identity_fn(x: Union[np.ndarray, list]) -> np.ndarray:
if isinstance(x, list):
return np.array(x)
else:
return x
@pytest.mark.parametrize('model, use_calibration, calibration_kwargs', [
(LogisticRegression(max_iter=10000), False, None),
(SVC(max_iter=10000, probability=True), False, None),
(LinearSVC(max_iter=10000), True, {'method': 'sigmoid'}),
(LinearSVC(max_iter=10000), True, {'method': 'isotonic'}),
(DecisionTreeClassifier(), False, None),
(RandomForestClassifier(n_estimators=50), False, None),
(GradientBoostingClassifier(n_estimators=50), False, None)
])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [True, False])
@pytest.mark.parametrize('n_folds', [None, 2])
@pytest.mark.parametrize('train_size', [0.5])
@pytest.mark.parametrize('preprocess_batch', [None, identity_fn])
@pytest.mark.parametrize('update_x_ref', [{'last': 1000}, {'reservoir_sampling': 1000}])
def test_clfdrift_calibration(model, preds_type, p_val, n, n_features, binarize_preds, n_folds, train_size,
preprocess_batch, update_x_ref, use_calibration, calibration_kwargs):
"""Testing calibration for various models and configurations."""
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = ClassifierDriftSklearn(
x_ref=x_ref,
model=model,
preds_type=preds_type,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
n_folds=n_folds,
binarize_preds=binarize_preds,
use_calibration=use_calibration,
calibration_kwargs=calibration_kwargs
)
if to_list:
x_test0 = [_ for _ in x_test0]
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == preds_type
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [LinearSVC(max_iter=10000),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(),
GradientBoostingClassifier()])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [500, 1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('binarize_preds', [False])
@pytest.mark.parametrize('n_folds', [2, 5])
@pytest.mark.parametrize('preds_type', ['scores'])
def test_clfdrift_scores(model, p_val, n, n_features, binarize_preds, n_folds, preds_type):
"""Testing classifier drift based on the scores (i.e. `decision_function`) for various models."""
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
cd = ClassifierDriftSklearn(
x_ref=x_ref,
preds_type=preds_type,
model=model,
p_val=p_val,
n_folds=n_folds,
binarize_preds=binarize_preds,
)
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == preds_type
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone1(model, preds_type, use_calibration, binarize_preds):
"""
Checks if an `AttributeError` is raised because the models do NOT support `predict_proba`,
``use_calibration=False`` and we are interested in the probabilities due to ``binarize_preds=False``.
"""
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone2(model, preds_type, use_calibration, binarize_preds):
"""Checks if no error is raised because ``binarize_preds=True`` and we only need access to the `predict` method."""
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False),
LinearSVC(),
LogisticRegression(),
DecisionTreeClassifier(),
RandomForestClassifier(),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [True])
@pytest.mark.parametrize('binarize_preds', [False, True])
def test_clone3(model, preds_type, use_calibration, binarize_preds):
"""
Checks if NO error is raised because of the ``use_calibration=True`` which makes possible ``preds_types='probs'``.
"""
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [False])
def test_clone4(model, preds_type, use_calibration, binarize_preds):
"""Checks if an `AttributeError` is raised because the classifiers do not support `decision_function`."""
with pytest.raises(AttributeError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [DecisionTreeClassifier(),
RandomForestClassifier(),
KNeighborsClassifier(),
GaussianProcessClassifier(),
MLPClassifier(),
GaussianNB()])
@pytest.mark.parametrize('preds_type', ['scores'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('binarize_preds', [True])
def test_clone5(model, preds_type, use_calibration, binarize_preds):
"""
Checks if `ValueError` is raised because of ``binarize_preds=True`` which conflicts with ``preds_types='scores'``.
"""
with pytest.raises(ValueError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
@pytest.mark.parametrize('model', [SVC(probability=False), LinearSVC()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [True])
def test_predict_proba1(model, preds_type, use_calibration, binarize_preds):
"""
Checks if the `aux_predict_proba` is set properly to the model's `predict` when ``preds_type=probs``
and ``binarize_preds=True``.
"""
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.predict(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
@pytest.mark.parametrize('model', [LogisticRegression(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
QuadraticDiscriminantAnalysis()])
@pytest.mark.parametrize('pred_types', ['scores'])
@pytest.mark.parametrize('use_calibration', [False])
@pytest.mark.parametrize('binarize_preds', [False])
def test_predict_proba2(model, pred_types, use_calibration, binarize_preds):
"""
Checks if the `aux_predict_proba` is set properly to the model's `decision_function` when ``preds_type=scores``
and ``binarize_preds=False``.
"""
drift_detector = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=pred_types,
use_calibration=use_calibration,
binarize_preds=binarize_preds)
# define train and test set for internal model
x_tr, y_tr = np.random.randn(100, 5), np.random.randint(0, 2, 100)
x_te = np.random.randn(100, 5)
# extract and fit internal model
internal_model = drift_detector.model
internal_model.fit(x_tr, y_tr)
# check if predict matches the new predict_proba
np.testing.assert_allclose(internal_model.decision_function(x_te),
internal_model.aux_predict_proba(x_te)[:, 1])
@pytest.mark.parametrize('model', [RandomForestClassifier(n_estimators=100)])
@pytest.mark.parametrize('p_val', [0.05])
@pytest.mark.parametrize('n', [500, 1000])
@pytest.mark.parametrize('n_features', [4])
@pytest.mark.parametrize('n_folds', [2, 5])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('binarize_preds, use_calibration, use_oob', [(False, False, False),
(False, False, True),
(False, True, False),
(True, False, False),
(True, False, True),
(True, True, False)])
def test_rf_oob(model, p_val, n, n_features, n_folds, preds_type, binarize_preds, use_calibration, use_oob):
"""
Checks if all valid combination of `binarize_preds`, `use_calibration` and `use_oob` when ``preds_type='probs'``
are working properly.
"""
np.random.seed(0)
x_ref = np.random.randn(n, n_features)
x_test0 = np.random.randn(n, n_features)
x_test1 = np.random.randn(n, n_features) + 1
cd = ClassifierDriftSklearn(
x_ref=x_ref,
preds_type=preds_type,
model=model,
p_val=p_val,
n_folds=n_folds,
binarize_preds=binarize_preds,
use_calibration=use_calibration,
use_oob=use_oob
)
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
assert preds_0['data']['distance'] >= 0
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_1['data']['distance'] >= 0
assert preds_0['data']['distance'] < preds_1['data']['distance']
assert cd.meta['params']['preds_type'] == preds_type
assert cd.meta['params']['binarize_preds '] == binarize_preds
@pytest.mark.parametrize('model', [LogisticRegression(),
GradientBoostingClassifier(),
AdaBoostClassifier()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_oob', [True])
def test_clone_rf1(model, preds_type, use_oob):
"""Check if `ValueError` is raised because ``use_oob=True`` and the model is not a RandomForest."""
with pytest.raises(ValueError):
ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_oob=use_oob)
@pytest.mark.parametrize('model', [RandomForestClassifier()])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_calibration', [False, True])
@pytest.mark.parametrize('use_oob', [True])
def test_clone_rf2(model, preds_type, use_calibration, use_oob):
"""
Checks that ``use_oob=True`` works when ``preds_type='probs'`` for a RandomForest model with or
without calibration.
"""
cd = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_calibration=use_calibration,
use_oob=use_oob)
assert cd.model.oob_score
assert cd.model.bootstrap
assert not cd.use_calibration # should be set to `False` when `use_oob=True`
@pytest.mark.parametrize('model', [RandomForestClassifier(oob_score=True),
RandomForestClassifier(oob_score=False)])
@pytest.mark.parametrize('preds_type', ['probs'])
@pytest.mark.parametrize('use_oob', [False])
def test_clone_rf3(model, preds_type, use_oob):
"""Checks if the `oob_score` is set automatically to ``False`` when ``use_oob=False``."""
cd = ClassifierDriftSklearn(x_ref=np.random.randn(100, 5),
model=model,
preds_type=preds_type,
use_oob=use_oob)
assert not cd.model.oob_score
| 17,137 | 43.630208 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/learned_kernel.py
|
from copy import deepcopy
from functools import partial
from tqdm import tqdm
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.cd.base import BaseLearnedKernelDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix, batch_compute_kernel_matrix
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class LearnedKernelDriftTorch(BaseLearnedKernelDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
kernel: Union[nn.Module, nn.Sequential],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
n_permutations: int = 100,
var_reg: float = 1e-5,
reg_loss_fn: Callable = (lambda kernel: 0),
train_size: Optional[float] = .75,
retrain_from_scratch: bool = True,
optimizer: torch.optim.Optimizer = torch.optim.Adam, # type: ignore
learning_rate: float = 1e-3,
batch_size: int = 32,
batch_size_predict: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
num_workers: int = 0,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector where the kernel is trained to maximise an
estimate of the test power. The kernel is trained on a split of the reference and test instances
and then the MMD is evaluated on held out instances and a permutation test is performed.
For details see Liu et al (2020): Learning Deep Kernels for Non-Parametric Two-Sample Tests
(https://arxiv.org/abs/2002.09116)
Parameters
----------
x_ref
Data used as reference distribution.
kernel
Trainable PyTorch module that returns a similarity between two instances.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before applying the kernel.
n_permutations
The number of permutations to use in the permutation test once the MMD has been computed.
var_reg
Constant added to the estimated variance of the MMD for stability.
reg_loss_fn
The regularisation term reg_loss_fn(kernel) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the kernel.
The drift is detected on `1 - train_size`.
retrain_from_scratch
Whether the kernel should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
optimizer
Optimizer used during training of the kernel.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the kernel.
batch_size_predict
Batch size used for the trained drift detector predictions.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the kernel.
epochs
Number of training epochs for the kernel. Corresponds to the smaller of the reference and test sets.
num_workers
Number of workers for the dataloader. The default (`num_workers=0`) means multi-process data loading
is disabled. Setting `num_workers>0` may be unreliable on Windows.
verbose
Verbosity level during the training of the kernel. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when training the kernel.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations,
train_size=train_size,
retrain_from_scratch=retrain_from_scratch,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.PYTORCH.value})
# set device, define model and training kwargs
self.device = get_device(device)
self.original_kernel = kernel
self.kernel = deepcopy(kernel)
# define kwargs for dataloader and trainer
self.dataset = dataset
self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True,
drop_last=True, num_workers=num_workers)
self.kernel_mat_fn = partial(
batch_compute_kernel_matrix, device=self.device, preprocess_fn=preprocess_batch_fn,
batch_size=batch_size_predict
)
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
self.j_hat = LearnedKernelDriftTorch.JHat(self.kernel, var_reg).to(self.device)
class JHat(nn.Module):
"""
A module that wraps around the kernel. When passed a batch of reference and batch of test
instances it returns an estimate of a correlate of test power.
Equation 4 of https://arxiv.org/abs/2002.09116
"""
def __init__(self, kernel: nn.Module, var_reg: float):
super().__init__()
self.kernel = kernel
self.var_reg = var_reg
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
k_xx, k_yy, k_xy = self.kernel(x, x), self.kernel(y, y), self.kernel(x, y)
            h_mat = k_xx + k_yy - k_xy - k_xy.t()  # matrix of h(z_i, z_j) terms
            n = len(x)
            mmd2_est = (h_mat.sum()-h_mat.trace())/(n*(n-1))  # unbiased MMD^2 estimate (diagonal excluded)
            var_est = 4*h_mat.sum(-1).square().sum()/(n**3) - 4*h_mat.sum().square()/(n**4)  # variance estimate
            reg_var_est = var_est + self.var_reg  # regularised for numerical stability
            return mmd2_est/reg_var_est.sqrt()  # estimate of a correlate of test power (Eq. 4)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested. The kernel
used within the MMD is first trained to maximise an estimate of the resulting test power.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x_cur = self.preprocess(x)
(x_ref_tr, x_cur_tr), (x_ref_te, x_cur_te) = self.get_splits(x_ref, x_cur)
dl_ref_tr, dl_cur_tr = self.dataloader(self.dataset(x_ref_tr)), self.dataloader(self.dataset(x_cur_tr))
self.kernel = deepcopy(self.original_kernel) if self.retrain_from_scratch else self.kernel
self.kernel = self.kernel.to(self.device)
train_args = [self.j_hat, (dl_ref_tr, dl_cur_tr), self.device]
LearnedKernelDriftTorch.trainer(*train_args, **self.train_kwargs)
if isinstance(x_ref_te, np.ndarray) and isinstance(x_cur_te, np.ndarray):
x_all = np.concatenate([x_ref_te, x_cur_te], axis=0)
else:
x_all = x_ref_te + x_cur_te
kernel_mat = self.kernel_mat_fn(x_all, x_all, self.kernel)
kernel_mat = kernel_mat - torch.diag(kernel_mat.diag()) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=False, zero_diag=False)
mmd2_permuted = torch.Tensor(
[mmd2_from_kernel_matrix(kernel_mat, len(x_cur_te), permute=True, zero_diag=False)
for _ in range(self.n_permutations)]
)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
p_val = (mmd2 <= mmd2_permuted).float().mean()
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
@staticmethod
def trainer(
j_hat: JHat,
dataloaders: Tuple[DataLoader, DataLoader],
device: torch.device,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
preprocess_fn: Callable = None,
epochs: int = 20,
reg_loss_fn: Callable = (lambda kernel: 0),
verbose: int = 1,
) -> None:
"""
Train the kernel to maximise an estimate of test power using minibatch gradient descent.
"""
optimizer = optimizer(j_hat.parameters(), lr=learning_rate)
j_hat.train()
loss_ma = 0.
for epoch in range(epochs):
dl_ref, dl_cur = dataloaders
dl = tqdm(enumerate(zip(dl_ref, dl_cur)), total=min(len(dl_ref), len(dl_cur))) if verbose == 1 else \
enumerate(zip(dl_ref, dl_cur))
for step, (x_ref, x_cur) in dl:
if isinstance(preprocess_fn, Callable): # type: ignore
x_ref, x_cur = preprocess_fn(x_ref), preprocess_fn(x_cur)
x_ref, x_cur = x_ref.to(device), x_cur.to(device)
optimizer.zero_grad() # type: ignore
estimate = j_hat(x_ref, x_cur)
loss = -estimate + reg_loss_fn(j_hat.kernel) # ascent
loss.backward()
optimizer.step() # type: ignore
if verbose == 1:
loss_ma = loss_ma + (loss.item() - loss_ma) / (step + 1)
dl.set_description(f'Epoch {epoch + 1}/{epochs}')
dl.set_postfix(dict(loss=loss_ma))
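# --- Hedged usage sketch (illustration only, not part of the module) ---
# A minimal trainable kernel and end-to-end call for the detector above. The kernel and
# helper names below are invented: any nn.Module returning a similarity between two
# batches of instances can be used. GaussianRBF is assumed importable as in mmd.py.
from alibi_detect.utils.pytorch.kernels import GaussianRBF
class _DemoKernel(nn.Module):
    def __init__(self, n_features: int):
        super().__init__()
        self.proj = nn.Linear(n_features, 2)  # learnable projection
        self.kernel = GaussianRBF(trainable=True, sigma=torch.tensor([1.]))
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return self.kernel(self.proj(x), self.proj(y))
def _example_learned_kernel_torch_usage():
    x_ref_demo = np.random.randn(200, 4).astype(np.float32)
    x_test_demo = (np.random.randn(200, 4) + 1.).astype(np.float32)
    cd_demo = LearnedKernelDriftTorch(x_ref=x_ref_demo, kernel=_DemoKernel(4), epochs=2, verbose=0)
    return cd_demo.predict(x_test_demo)['data']['is_drift']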
| 12,289 | 46.451737 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/mmd.py
|
import logging
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseMMDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.distance import mmd2_from_kernel_matrix
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
logger = logging.getLogger(__name__)
class MMDDriftTorch(BaseMMDDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
configure_kernel_from_x_ref: bool = True,
n_permutations: int = 100,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Maximum Mean Discrepancy (MMD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths.
configure_kernel_from_x_ref
Whether to already configure the kernel bandwidth from the reference data.
n_permutations
Number of permutations used in the permutation test.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
configure_kernel_from_x_ref=configure_kernel_from_x_ref,
n_permutations=n_permutations,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.PYTORCH.value})
# set device
self.device = get_device(device)
# initialize kernel
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, # type: ignore[assignment]
np.ndarray) else None
self.kernel = kernel(sigma).to(self.device) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
if self.infer_sigma or isinstance(sigma, torch.Tensor):
x = torch.from_numpy(self.x_ref).to(self.device)
self.k_xx = self.kernel(x, x, infer_sigma=self.infer_sigma)
self.infer_sigma = False
else:
self.k_xx, self.infer_sigma = None, True
def kernel_matrix(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
""" Compute and return full kernel matrix between arrays x and y. """
k_xy = self.kernel(x, y, self.infer_sigma)
k_xx = self.k_xx if self.k_xx is not None and self.update_x_ref is None else self.kernel(x, x)
k_yy = self.kernel(y, y)
kernel_mat = torch.cat([torch.cat([k_xx, k_xy], 1), torch.cat([k_xy.T, k_yy], 1)], 0)
return kernel_mat
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the maximum mean discrepancy
as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the MMD^2 between the reference and test set, \
and the MMD^2 threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).to(self.device) # type: ignore[assignment]
x = torch.from_numpy(x).to(self.device) # type: ignore[assignment]
# compute kernel matrix, MMD^2 and apply permutation test using the kernel matrix
# TODO: (See https://github.com/SeldonIO/alibi-detect/issues/540)
n = x.shape[0]
kernel_mat = self.kernel_matrix(x_ref, x) # type: ignore[arg-type]
kernel_mat = kernel_mat - torch.diag(kernel_mat.diag()) # zero diagonal
mmd2 = mmd2_from_kernel_matrix(kernel_mat, n, permute=False, zero_diag=False)
mmd2_permuted = torch.Tensor(
[mmd2_from_kernel_matrix(kernel_mat, n, permute=True, zero_diag=False) for _ in range(self.n_permutations)]
)
if self.device.type == 'cuda':
mmd2, mmd2_permuted = mmd2.cpu(), mmd2_permuted.cpu()
p_val = (mmd2 <= mmd2_permuted).float().mean()
# compute distance threshold
idx_threshold = int(self.p_val * len(mmd2_permuted))
distance_threshold = torch.sort(mmd2_permuted, descending=True).values[idx_threshold]
return p_val.numpy().item(), mmd2.numpy().item(), distance_threshold.numpy()
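# --- Hedged usage sketch (illustration only, not part of the module) ---
# Minimal end-to-end example of the detector above; the data and helper name are invented.
def _example_mmd_drift_torch_usage():
    x_ref_demo = np.random.randn(200, 4).astype(np.float32)
    x_test_demo = (np.random.randn(200, 4) + 1.).astype(np.float32)
    cd_demo = MMDDriftTorch(x_ref=x_ref_demo, p_val=.05, n_permutations=100)
    preds_demo = cd_demo.predict(x_test_demo)
    return preds_demo['data']['is_drift'], preds_demo['data']['p_val']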
| 6,868 | 46.372414 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/utils.py
|
from torch import nn
from typing import Callable
def activate_train_mode_for_dropout_layers(model: Callable) -> Callable:
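    """
    Set the dropout layers of an otherwise eval-mode model to train mode so that dropout stays
    active at prediction time (e.g. for Monte Carlo dropout). Raises a ValueError if the model
    contains no dropout layers.
    """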
model.eval() # type: ignore
n_dropout_layers = 0
for module in model.modules(): # type: ignore
if isinstance(module, nn.Dropout):
module.train()
n_dropout_layers += 1
if n_dropout_layers == 0:
raise ValueError("No dropout layers identified.")
return model
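# --- Hedged usage sketch (illustration only, not part of the module) ---
# Example of keeping dropout stochastic at prediction time; the toy model and helper
# name are invented for illustration.
def _example_mc_dropout():
    import torch
    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(8, 2))
    model = activate_train_mode_for_dropout_layers(model)
    x = torch.randn(16, 4)
    # repeated forward passes now give different samples, whose spread reflects model uncertainty
    samples = torch.stack([model(x) for _ in range(10)])
    return samples.std(0)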
| 444 | 25.176471 | 72 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/classifier.py
|
from copy import deepcopy
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from scipy.special import softmax
from typing import Callable, Dict, Optional, Union, Tuple
from alibi_detect.cd.base import BaseClassifierDrift
from alibi_detect.models.pytorch.trainer import trainer
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.pytorch.prediction import predict_batch
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class ClassifierDriftTorch(BaseClassifierDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
model: Union[nn.Module, nn.Sequential],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
preds_type: str = 'probs',
binarize_preds: bool = False,
reg_loss_fn: Callable = (lambda model: 0),
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector. The classifier is trained on a fraction of the combined
reference and test data and drift is detected on the remaining data. To use all the data
to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
model
PyTorch classification model used for drift detection.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
preds_type
Whether the model outputs 'probs' or 'logits'
binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
reg_loss_fn
The regularisation term reg_loss_fn(model) is added to the loss function being optimized.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold predictions. This allows all the reference and test data to be leveraged
for drift detection at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when fitting the classifier.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
preds_type=preds_type,
binarize_preds=binarize_preds,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
input_shape=input_shape,
data_type=data_type
)
if preds_type not in ['probs', 'logits']:
raise ValueError("'preds_type' should be 'probs' or 'logits'")
self.meta.update({'backend': Framework.PYTORCH.value})
# set device, define model and training kwargs
self.device = get_device(device)
self.original_model = model
self.model = deepcopy(model)
# define kwargs for dataloader and trainer
self.loss_fn = nn.CrossEntropyLoss() if (self.preds_type == 'logits') else nn.NLLLoss()
self.dataset = dataset
self.dataloader = partial(dataloader, batch_size=batch_size, shuffle=True)
self.predict_fn = partial(predict_batch, device=self.device,
preprocess_fn=preprocess_batch_fn, batch_size=batch_size)
self.train_kwargs = {'optimizer': optimizer, 'epochs': epochs, 'preprocess_fn': preprocess_batch_fn,
'reg_loss_fn': reg_loss_fn, 'learning_rate': learning_rate, 'verbose': verbose}
if isinstance(train_kwargs, dict):
self.train_kwargs.update(train_kwargs)
def score(self, x: Union[np.ndarray, list]) \
-> Tuple[float, float, np.ndarray, np.ndarray, Union[np.ndarray, list], Union[np.ndarray, list]]:
"""
Compute the out-of-fold drift metric such as the accuracy from a classifier
trained to distinguish the reference data from the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value, a notion of distance between the trained classifier's out-of-fold performance \
and that which we'd expect under the null assumption of no drift, \
and the out-of-fold classifier model prediction probabilities on the reference and test data \
as well as the associated reference and test instances of the out-of-fold predictions.
"""
x_ref, x = self.preprocess(x)
x, y, splits = self.get_splits(x_ref, x) # type: ignore
# iterate over folds: train a new model for each fold and make out-of-fold (oof) predictions
preds_oof_list, idx_oof_list = [], []
for idx_tr, idx_te in splits:
y_tr = y[idx_tr]
if isinstance(x, np.ndarray):
x_tr, x_te = x[idx_tr], x[idx_te]
elif isinstance(x, list):
x_tr, x_te = [x[_] for _ in idx_tr], [x[_] for _ in idx_te]
else:
raise TypeError(f'x needs to be of type np.ndarray or list and not {type(x)}.')
ds_tr = self.dataset(x_tr, y_tr)
dl_tr = self.dataloader(ds_tr)
self.model = deepcopy(self.original_model) if self.retrain_from_scratch else self.model
self.model = self.model.to(self.device)
train_args = [self.model, self.loss_fn, dl_tr, self.device]
trainer(*train_args, **self.train_kwargs) # type: ignore
preds = self.predict_fn(x_te, self.model.eval())
preds_oof_list.append(preds)
idx_oof_list.append(idx_te)
preds_oof = np.concatenate(preds_oof_list, axis=0)
probs_oof = softmax(preds_oof, axis=-1) if self.preds_type == 'logits' else preds_oof
idx_oof = np.concatenate(idx_oof_list, axis=0)
y_oof = y[idx_oof]
n_cur = y_oof.sum()
n_ref = len(y_oof) - n_cur
p_val, dist = self.test_probs(y_oof, probs_oof, n_ref, n_cur)
idx_sort = np.argsort(idx_oof)
probs_sort = probs_oof[idx_sort]
if isinstance(x, np.ndarray):
x_oof = x[idx_oof]
x_sort = x_oof[idx_sort]
else:
x_oof = [x[_] for _ in idx_oof]
x_sort = [x_oof[_] for _ in idx_sort]
return p_val, dist, probs_sort[:n_ref, 1], probs_sort[n_ref:, 1], x_sort[:n_ref], x_sort[n_ref:]
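# --- Hedged usage sketch (illustration only, not part of the module) ---
# Minimal example of the detector above with an invented two-layer classifier that
# outputs logits; the data and helper names are invented for illustration.
def _example_classifier_drift_torch_usage():
    clf_demo = nn.Sequential(nn.Linear(4, 32), nn.ReLU(), nn.Linear(32, 2))
    x_ref_demo = np.random.randn(500, 4).astype(np.float32)
    x_test_demo = (np.random.randn(500, 4) + 1.).astype(np.float32)
    cd_demo = ClassifierDriftTorch(
        x_ref=x_ref_demo,
        model=clf_demo,
        p_val=.05,
        preds_type='logits',
        n_folds=3,
        epochs=2
    )
    return cd_demo.predict(x_test_demo)['data']['is_drift']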
| 10,365 | 46.990741 | 115 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/lsdd_online.py
|
from tqdm import tqdm
import numpy as np
import torch
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch import GaussianRBF, permed_lsdds, quantile
from alibi_detect.utils.frameworks import Framework
class LSDDDriftOnlineTorch(BaseMultiDriftOnline):
online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xtc')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Online least squares density difference (LSDD) data drift detector using preconfigured thresholds.
Motivated by Bu et al. (2017): https://ieeexplore.ieee.org/abstract/document/7890493
        However, we have made modifications such that a desired ERT can be accurately targeted.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_bootstraps
The number of bootstrap simulations used to configure the thresholds. The larger this is the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ert.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 2*window_size.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.backend = Framework.PYTORCH.value
self.meta.update({'backend': self.backend})
self.n_kernel_centers = n_kernel_centers
self.lambda_rd_max = lambda_rd_max
# set device
self.device = get_device(device)
self._configure_normalization()
# initialize kernel
if sigma is None:
x_ref = torch.from_numpy(self.x_ref).to(self.device) # type: ignore[assignment]
self.kernel = GaussianRBF()
_ = self.kernel(x_ref, x_ref, infer_sigma=True)
else:
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, # type: ignore[assignment]
np.ndarray) else None
self.kernel = GaussianRBF(sigma)
if self.n_kernel_centers is None:
self.n_kernel_centers = 2 * window_size
self._configure_kernel_centers()
self._configure_thresholds()
self._configure_ref_subset() # self.initialise_state() called inside here
def _configure_normalization(self, eps: float = 1e-12):
"""
Configure the normalization functions used to normalize reference and test data to zero mean and unit variance.
The reference data `x_ref` is also normalized here.
"""
x_ref = torch.from_numpy(self.x_ref).to(self.device)
x_ref_means = x_ref.mean(0)
x_ref_stds = x_ref.std(0)
self._normalize = lambda x: (x - x_ref_means) / (x_ref_stds + eps)
self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps) + x_ref_means).cpu().numpy()
self.x_ref = self._normalize(x_ref).cpu().numpy()
def _configure_kernel_centers(self):
"Set aside reference samples to act as kernel centers"
perm = torch.randperm(self.n)
self.c_inds, self.non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
self.kernel_centers = torch.from_numpy(self.x_ref[self.c_inds]).to(self.device)
if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
perturbation = (torch.randn(self.kernel_centers.shape) * 1e-6).to(self.device)
self.kernel_centers = self.kernel_centers + perturbation
self.x_ref_eff = torch.from_numpy(self.x_ref[self.non_c_inds]).to(self.device) # the effective reference set
self.k_xc = self.kernel(self.x_ref_eff, self.kernel_centers)
def _configure_thresholds(self):
"""
Configure the test statistic thresholds via bootstrapping.
"""
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2 * w_size - 1 # etw = extended test window
nkc_size = self.n - self.n_kernel_centers # nkc = non-kernel-centers
rw_size = nkc_size - etw_size # rw = ref-window
perms = [torch.randperm(nkc_size) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:rw_size] for perm in perms]
y_inds_all = [perm[rw_size:] for perm in perms]
# For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
# Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
# Compute lsdds for first test-window. We infer regularisation constant lambda here.
y_inds_all_0 = [y_inds[:w_size] for y_inds in y_inds_all]
lsdds_0, H_lam_inv = permed_lsdds(
self.k_xc, x_inds_all, y_inds_all_0, H, lam_rd_max=self.lambda_rd_max,
)
# Can compute threshold for first window
thresholds = [quantile(lsdds_0, 1 - self.fpr)]
# And now to iterate through the other W-1 overlapping windows
p_bar = tqdm(range(1, w_size), "Computing thresholds") if self.verbose else range(1, w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:(w + w_size)] for y_inds in y_inds_all]
lsdds_w, _ = permed_lsdds(self.k_xc, x_inds_all, y_inds_all_w, H, H_lam_inv=H_lam_inv)
thresholds.append(quantile(lsdds_w, 1 - self.fpr))
x_inds_all = [x_inds_all[i] for i in range(len(x_inds_all)) if lsdds_w[i] < thresholds[-1]]
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if lsdds_w[i] < thresholds[-1]]
self.thresholds = thresholds
self.H_lam_inv = H_lam_inv
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
attributes defined by `_configure_ref_subset`, hence must be called afterwards.
"""
super()._initialise_state()
self.test_window = self.x_ref_eff[self.init_test_inds]
self.k_xtc = self.kernel(self.test_window, self.kernel_centers)
def _configure_ref_subset(self):
"""
Configure the reference data split. If the randomly selected split causes an initial detection, further splits
are attempted.
"""
etw_size = 2 * self.window_size - 1 # etw = extended test window
nkc_size = self.n - self.n_kernel_centers # nkc = non-kernel-centers
rw_size = nkc_size - etw_size # rw = ref-window
# Make split and ensure it doesn't cause an initial detection
lsdd_init = None
while lsdd_init is None or lsdd_init >= self.get_threshold(0):
# Make split
perm = torch.randperm(nkc_size)
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
# Compute initial lsdd to check for initial detection
self._initialise_state() # to set self.test_window and self.k_xtc
self.c2s = self.k_xc[self.ref_inds].mean(0) # (below Eqn 21)
h_init = self.c2s - self.k_xtc.mean(0) # (Eqn 21)
lsdd_init = h_init[None, :] @ self.H_lam_inv @ h_init[:, None] # (Eqn 11)
def _update_state(self, x_t: torch.Tensor): # type: ignore[override]
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
k_xtc = self.kernel(x_t, self.kernel_centers)
self.test_window = torch.cat([self.test_window[(1 - self.window_size):], x_t], 0)
self.k_xtc = torch.cat([self.k_xtc[(1 - self.window_size):], k_xtc], 0)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (LSDD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
LSDD estimate between reference window and test window.
"""
x_t = super()._preprocess_xt(x_t)
x_t = torch.from_numpy(x_t).to(self.device)
x_t = self._normalize(x_t)
self._update_state(x_t)
h = self.c2s - self.k_xtc.mean(0) # (Eqn 21)
lsdd = h[None, :] @ self.H_lam_inv @ h[:, None] # (Eqn 11)
return float(lsdd.detach().cpu())
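# --- Illustrative usage sketch (not part of the library source) ---
# Configures the online LSDD detector on synthetic reference data and streams single
# test instances through `score`. The data and parameter values are made up purely for
# illustration; in a real application the detector's `predict` method (defined on the
# shared online base class) would typically be used so that drift flags are returned too.
if __name__ == '__main__':
    x_ref_demo = np.random.randn(500, 10).astype(np.float32)
    dd = LSDDDriftOnlineTorch(x_ref_demo, ert=100., window_size=10, verbose=False)
    for x_t in np.random.randn(20, 10).astype(np.float32):
        stat = dd.score(x_t)  # LSDD estimate between reference window and sliding test window
        print(dd.t, stat)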
| 11,629 | 47.057851 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/spot_the_diff.py
|
import logging
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from typing import Callable, Dict, Optional, Union
from alibi_detect.cd.pytorch.classifier import ClassifierDriftTorch
from alibi_detect.utils.pytorch.data import TorchDataset
from alibi_detect.utils.pytorch import GaussianRBF
from alibi_detect.utils.pytorch.prediction import predict_batch
logger = logging.getLogger(__name__)
class SpotTheDiffDriftTorch:
def __init__(
self,
x_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_fn: Optional[Callable] = None,
kernel: Optional[nn.Module] = None,
n_diffs: int = 1,
initial_diffs: Optional[np.ndarray] = None,
l1_reg: float = 0.01,
binarize_preds: bool = False,
train_size: Optional[float] = .75,
n_folds: Optional[int] = None,
retrain_from_scratch: bool = True,
seed: int = 0,
optimizer: Callable = torch.optim.Adam,
learning_rate: float = 1e-3,
batch_size: int = 32,
preprocess_batch_fn: Optional[Callable] = None,
epochs: int = 3,
verbose: int = 0,
train_kwargs: Optional[dict] = None,
device: Optional[str] = None,
dataset: Callable = TorchDataset,
dataloader: Callable = DataLoader,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Classifier-based drift detector with a classifier of form y = a + b_1*k(x,w_1) + ... + b_J*k(x,w_J),
        where k is a kernel and w_1,...,w_J are learnable test locations. If drift has occurred, the test locations
learn to be more/less (given by sign of b_i) similar to test instances than reference instances.
The test locations are regularised to be close to the average reference instance such that the **difference**
is then interpretable as the transformation required for each feature to make the average instance more/less
like a test instance than a reference instance.
The classifier is trained on a fraction of the combined reference and test data and drift is detected on
the remaining data. To use all the data to detect drift, a stratified cross-validation scheme can be chosen.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
kernel
Differentiable Pytorch model used to define similarity between instances, defaults to Gaussian RBF.
n_diffs
The number of test locations to use, each corresponding to an interpretable difference.
initial_diffs
Array used to initialise the diffs that will be learned. Defaults to Gaussian
for each feature with equal variance to that of reference data.
l1_reg
Strength of l1 regularisation to apply to the differences.
binarize_preds
            Whether to test for discrepancy on soft (e.g. probs/logits) model predictions directly
with a K-S test or binarise to 0-1 prediction errors and apply a binomial test.
train_size
Optional fraction (float between 0 and 1) of the dataset used to train the classifier.
The drift is detected on `1 - train_size`. Cannot be used in combination with `n_folds`.
n_folds
Optional number of stratified folds used for training. The model preds are then calculated
            on all the out-of-fold instances. This allows all the reference and test data to be leveraged
            for drift detection, at the expense of longer computation. If both `train_size` and `n_folds`
are specified, `n_folds` is prioritized.
retrain_from_scratch
Whether the classifier should be retrained from scratch for each set of test data or whether
it should instead continue training from where it left off on the previous set.
seed
Optional random seed for fold selection.
optimizer
Optimizer used during training of the classifier.
learning_rate
Learning rate used by optimizer.
batch_size
Batch size used during training of the classifier.
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the model.
epochs
Number of training epochs for the classifier for each (optional) fold.
verbose
Verbosity level during the training of the classifier. 0 is silent, 1 a progress bar.
train_kwargs
Optional additional kwargs when fitting the classifier.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
dataset
Dataset object used during training.
dataloader
Dataloader object used during training.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
if preprocess_fn is not None and preprocess_batch_fn is not None:
raise ValueError("SpotTheDiffDrift detector only supports preprocess_fn or preprocess_batch_fn, not both.")
if n_folds is not None and n_folds > 1:
logger.warning("When using multiple folds the returned diffs will correspond to the final fold only.")
if not x_ref_preprocessed and preprocess_fn is not None:
x_ref_proc = preprocess_fn(x_ref)
elif not x_ref_preprocessed and preprocess_batch_fn is not None:
x_ref_proc = predict_batch(
x_ref, lambda x: x, preprocess_fn=preprocess_batch_fn,
device=torch.device('cpu'), batch_size=batch_size
)
else:
x_ref_proc = x_ref
if kernel is None:
kernel = GaussianRBF(trainable=True)
if initial_diffs is None:
initial_diffs = np.random.normal(size=(n_diffs,) + x_ref_proc.shape[1:]) * x_ref_proc.std(0)
else:
if len(initial_diffs) != n_diffs:
raise ValueError("Should have initial_diffs.shape[0] == n_diffs")
model = SpotTheDiffDriftTorch.InterpretableClf(kernel, x_ref_proc, initial_diffs)
reg_loss_fn = (lambda model: model.diffs.abs().mean() * l1_reg)
self._detector = ClassifierDriftTorch(
x_ref=x_ref,
model=model,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=True,
update_x_ref=None,
preprocess_fn=preprocess_fn,
preds_type='logits',
binarize_preds=binarize_preds,
reg_loss_fn=reg_loss_fn,
train_size=train_size,
n_folds=n_folds,
retrain_from_scratch=retrain_from_scratch,
seed=seed,
optimizer=optimizer,
learning_rate=learning_rate,
batch_size=batch_size,
preprocess_batch_fn=preprocess_batch_fn,
epochs=epochs,
verbose=verbose,
train_kwargs=train_kwargs,
device=device,
dataset=dataset,
dataloader=dataloader,
input_shape=input_shape,
data_type=data_type
)
self.meta = self._detector.meta
self.meta['params']['name'] = 'SpotTheDiffDrift'
self.meta['params']['n_diffs'] = n_diffs
self.meta['params']['l1_reg'] = l1_reg
self.meta['params']['initial_diffs'] = initial_diffs
class InterpretableClf(nn.Module):
def __init__(self, kernel: nn.Module, x_ref: np.ndarray, initial_diffs: np.ndarray):
super().__init__()
self.kernel = kernel
self.mean = nn.Parameter(torch.as_tensor(x_ref.mean(0)), requires_grad=False)
self.diffs = nn.Parameter(torch.as_tensor(initial_diffs, dtype=torch.float32))
self.bias = nn.Parameter(torch.zeros((1,)))
self.coeffs = nn.Parameter(torch.zeros((len(initial_diffs),)))
def forward(self, x: torch.Tensor) -> torch.Tensor:
k_xtl = self.kernel(x, self.mean + self.diffs)
logits = self.bias + k_xtl @ self.coeffs[:, None]
return torch.cat([-logits, logits], 1)
def predict(
self, x: np.ndarray, return_p_val: bool = True, return_distance: bool = True,
return_probs: bool = True, return_model: bool = False
) -> Dict[str, Dict[str, Union[str, int, float, Callable]]]:
"""
Predict whether a batch of data has drifted from the reference data.
Parameters
----------
x
Batch of instances.
return_p_val
Whether to return the p-value of the test.
return_distance
Whether to return a notion of strength of the drift.
K-S test stat if binarize_preds=False, otherwise relative error reduction.
return_probs
Whether to return the instance level classifier probabilities for the reference and test data
(0=reference data, 1=test data).
return_model
Whether to return the updated model trained to discriminate reference and test instances.
Returns
-------
Dictionary containing ``'meta'`` and ``'data'`` dictionaries.
- ``'meta'`` has the detector's metadata.
- ``'data'`` contains the drift prediction, the diffs used to distinguish reference from test instances, \
and optionally the p-value, performance of the classifier relative to its expectation under the \
no-change null, the out-of-fold classifier model prediction probabilities on the reference and test \
          data as well as the associated reference and test instances of the out-of-fold predictions, \
and the trained model.
"""
preds = self._detector.predict(x, return_p_val, return_distance, return_probs, return_model=True)
preds['data']['diffs'] = preds['data']['model'].diffs.detach().cpu().numpy()
preds['data']['diff_coeffs'] = preds['data']['model'].coeffs.detach().cpu().numpy()
if not return_model:
del preds['data']['model']
return preds
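# --- Illustrative usage sketch (not part of the library source) ---
# Fits the interpretable classifier on synthetic reference and (shifted) test data and
# inspects the learned diffs. All data and parameter values below are arbitrary choices
# made only for illustration.
if __name__ == '__main__':
    x_ref_demo = np.random.randn(200, 5).astype(np.float32)
    x_test_demo = (np.random.randn(200, 5) + 1.).astype(np.float32)  # mean-shifted, so drift is likely
    cd = SpotTheDiffDriftTorch(x_ref_demo, n_diffs=1, epochs=2, verbose=0)
    preds = cd.predict(x_test_demo)
    print(preds['data']['is_drift'], preds['data']['diffs'].shape)  # diffs shape: (n_diffs, 5)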
| 11,086 | 46.995671 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/mmd_online.py
|
from tqdm import tqdm
import numpy as np
import torch
from typing import Any, Callable, Optional, Union
from alibi_detect.cd.base_online import BaseMultiDriftOnline
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch import zero_diag, quantile
from alibi_detect.utils.frameworks import Framework
class MMDDriftOnlineTorch(BaseMultiDriftOnline):
online_state_keys: tuple = ('t', 'test_stats', 'drift_preds', 'test_window', 'k_xy')
def __init__(
self,
x_ref: Union[np.ndarray, list],
ert: float,
window_size: int,
preprocess_fn: Optional[Callable] = None,
x_ref_preprocessed: bool = False,
kernel: Callable = GaussianRBF,
sigma: Optional[np.ndarray] = None,
n_bootstraps: int = 1000,
device: Optional[str] = None,
verbose: bool = True,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
        Online maximum mean discrepancy (MMD) data drift detector using preconfigured thresholds.
Parameters
----------
x_ref
Data used as reference distribution.
ert
The expected run-time (ERT) in the absence of drift. For the multivariate detectors, the ERT is defined
as the expected run-time from t=0.
window_size
The size of the sliding test-window used to compute the test-statistic.
Smaller windows focus on responding quickly to severe drift, larger windows focus on
ability to detect slight drift.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
kernel
Kernel used for the MMD computation, defaults to Gaussian RBF kernel.
sigma
Optionally set the GaussianRBF kernel bandwidth. Can also pass multiple bandwidth values as an array.
The kernel evaluation is then averaged over those bandwidths. If `sigma` is not specified, the 'median
heuristic' is adopted whereby `sigma` is set as the median pairwise distance between reference samples.
n_bootstraps
            The number of bootstrap simulations used to configure the thresholds. The larger this is, the
more accurately the desired ERT will be targeted. Should ideally be at least an order of magnitude
larger than the ERT.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
verbose
Whether or not to print progress during configuration.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
ert=ert,
window_size=window_size,
preprocess_fn=preprocess_fn,
x_ref_preprocessed=x_ref_preprocessed,
n_bootstraps=n_bootstraps,
verbose=verbose,
input_shape=input_shape,
data_type=data_type
)
self.backend = Framework.PYTORCH.value
self.meta.update({'backend': self.backend})
# set device
self.device = get_device(device)
# initialize kernel
sigma = torch.from_numpy(sigma).to(self.device) if isinstance(sigma, # type: ignore[assignment]
np.ndarray) else None
self.kernel = kernel(sigma) if kernel == GaussianRBF else kernel
# compute kernel matrix for the reference data
self.x_ref = torch.from_numpy(self.x_ref).to(self.device)
self.k_xx = self.kernel(self.x_ref, self.x_ref, infer_sigma=(sigma is None))
self._configure_thresholds()
self._configure_ref_subset() # self.initialise_state() called inside here
def _initialise_state(self) -> None:
"""
Initialise online state (the stateful attributes updated by `score` and `predict`). This method relies on
attributes defined by `_configure_ref_subset`, hence must be called afterwards.
"""
super()._initialise_state()
self.test_window = self.x_ref[self.init_test_inds]
self.k_xy = self.kernel(self.x_ref[self.ref_inds], self.test_window)
def _configure_ref_subset(self):
"""
Configure the reference data split. If the randomly selected split causes an initial detection, further splits
are attempted.
"""
etw_size = 2 * self.window_size - 1 # etw = extended test window
rw_size = self.n - etw_size # rw = ref-window
# Make split and ensure it doesn't cause an initial detection
mmd_init = None
while mmd_init is None or mmd_init >= self.get_threshold(0):
# Make split
perm = torch.randperm(self.n)
self.ref_inds, self.init_test_inds = perm[:rw_size], perm[-self.window_size:]
# Compute initial mmd to check for initial detection
self._initialise_state() # to set self.test_window and self.k_xy
self.k_xx_sub = self.k_xx[self.ref_inds][:, self.ref_inds]
self.k_xx_sub_sum = zero_diag(self.k_xx_sub).sum() / (rw_size * (rw_size - 1))
k_yy = self.kernel(self.test_window, self.test_window)
mmd_init = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum() / (self.window_size * (self.window_size - 1)) -
2 * self.k_xy.mean()
)
def _configure_thresholds(self):
"""
Configure the test statistic thresholds via bootstrapping.
"""
# Each bootstrap sample splits the reference samples into a sub-reference sample (x)
# and an extended test window (y). The extended test window will be treated as W overlapping
# test windows of size W (so 2W-1 test samples in total)
w_size = self.window_size
etw_size = 2 * w_size - 1 # etw = extended test window
rw_size = self.n - etw_size # rw = sub-ref window
perms = [torch.randperm(self.n) for _ in range(self.n_bootstraps)]
x_inds_all = [perm[:-etw_size] for perm in perms]
y_inds_all = [perm[-etw_size:] for perm in perms]
if self.verbose:
print("Generating permutations of kernel matrix..")
# Need to compute mmd for each bs for each of W overlapping windows
# Most of the computation can be done once however
# We avoid summing the rw_size^2 submatrix for each bootstrap sample by instead computing the full
        # sum once and then subtracting the relevant parts (k_xx_sum = k_full_sum - 2*k_xy_sum - k_yy_sum).
# We also reduce computation of k_xy_sum from O(nW) to O(W) by caching column sums
k_full_sum = zero_diag(self.k_xx).sum()
k_xy_col_sums_all = [
self.k_xx[x_inds][:, y_inds].sum(0) for x_inds, y_inds in
(tqdm(zip(x_inds_all, y_inds_all), total=self.n_bootstraps) if self.verbose else
zip(x_inds_all, y_inds_all))
]
k_xx_sums_all = [(
k_full_sum - zero_diag(self.k_xx[y_inds][:, y_inds]).sum() - 2 * k_xy_col_sums.sum()
) / (rw_size * (rw_size - 1)) for y_inds, k_xy_col_sums in zip(y_inds_all, k_xy_col_sums_all)]
k_xy_col_sums_all = [k_xy_col_sums / (rw_size * w_size) for k_xy_col_sums in k_xy_col_sums_all]
# Now to iterate through the W overlapping windows
thresholds = []
p_bar = tqdm(range(w_size), "Computing thresholds") if self.verbose else range(w_size)
for w in p_bar:
y_inds_all_w = [y_inds[w:w + w_size] for y_inds in y_inds_all] # test windows of size w_size
mmds = [(
k_xx_sum +
zero_diag(self.k_xx[y_inds_w][:, y_inds_w]).sum() / (w_size * (w_size - 1)) -
2 * k_xy_col_sums[w:w + w_size].sum())
for k_xx_sum, y_inds_w, k_xy_col_sums in zip(k_xx_sums_all, y_inds_all_w, k_xy_col_sums_all)
]
mmds = torch.tensor(mmds) # an mmd for each bootstrap sample
# Now we discard all bootstrap samples for which mmd is in top (1/ert)% and record the thresholds
thresholds.append(quantile(mmds, 1 - self.fpr))
y_inds_all = [y_inds_all[i] for i in range(len(y_inds_all)) if mmds[i] < thresholds[-1]]
k_xx_sums_all = [
k_xx_sums_all[i] for i in range(len(k_xx_sums_all)) if mmds[i] < thresholds[-1]
]
k_xy_col_sums_all = [
k_xy_col_sums_all[i] for i in range(len(k_xy_col_sums_all)) if mmds[i] < thresholds[-1]
]
self.thresholds = thresholds
def _update_state(self, x_t: torch.Tensor): # type: ignore[override]
"""
Update online state based on the provided test instance.
Parameters
----------
x_t
The test instance.
"""
self.t += 1
kernel_col = self.kernel(self.x_ref[self.ref_inds], x_t)
self.test_window = torch.cat([self.test_window[(1 - self.window_size):], x_t], 0)
self.k_xy = torch.cat([self.k_xy[:, (1 - self.window_size):], kernel_col], 1)
def score(self, x_t: Union[np.ndarray, Any]) -> float:
"""
Compute the test-statistic (squared MMD) between the reference window and test window.
Parameters
----------
x_t
A single instance to be added to the test-window.
Returns
-------
Squared MMD estimate between reference window and test window.
"""
x_t = super()._preprocess_xt(x_t)
x_t = torch.from_numpy(x_t).to(self.device)
self._update_state(x_t)
k_yy = self.kernel(self.test_window, self.test_window)
mmd = (
self.k_xx_sub_sum +
zero_diag(k_yy).sum() / (self.window_size * (self.window_size - 1)) -
2 * self.k_xy.mean()
)
return float(mmd.detach().cpu())
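# --- Illustrative usage sketch (not part of the library source) ---
# Configures the online MMD detector on synthetic reference data and streams single
# test instances through `score`. The data and parameter values are made up for
# illustration; in practice the detector's `predict` method (defined on the shared
# online base class) would typically be used instead.
if __name__ == '__main__':
    x_ref_demo = np.random.randn(500, 10).astype(np.float32)
    dd = MMDDriftOnlineTorch(x_ref_demo, ert=100., window_size=10, verbose=False)
    for x_t in np.random.randn(20, 10).astype(np.float32):
        stat = dd.score(x_t)  # squared-MMD estimate between reference window and sliding test window
        print(dd.t, stat)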
| 10,720 | 45.816594 | 119 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/context_aware.py
|
import logging
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseContextMMDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
from alibi_detect.cd._domain_clf import _SVCDomainClf
from tqdm import tqdm
logger = logging.getLogger(__name__)
class ContextMMDDriftTorch(BaseContextMMDDrift):
lams: Optional[Tuple[torch.Tensor, torch.Tensor]] = None
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
c_ref: np.ndarray,
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
x_kernel: Callable = GaussianRBF,
c_kernel: Callable = GaussianRBF,
n_permutations: int = 1000,
prop_c_held: float = 0.25,
n_folds: int = 5,
batch_size: Optional[int] = 256,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None,
verbose: bool = False,
) -> None:
"""
A context-aware drift detector based on a conditional analogue of the maximum mean discrepancy (MMD).
        Only detects differences between samples that cannot be attributed to differences between associated
sets of contexts. p-values are computed using a conditional permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
c_ref
Context for the reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_ref
Reference data can optionally be updated to the last N instances seen by the detector.
The parameter should be passed as a dictionary *{'last': N}*.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
x_kernel
Kernel defined on the input data, defaults to Gaussian RBF kernel.
c_kernel
Kernel defined on the context data, defaults to Gaussian RBF kernel.
n_permutations
Number of permutations used in the permutation test.
prop_c_held
Proportion of contexts held out to condition on.
n_folds
Number of cross-validation folds used when tuning the regularisation parameters.
batch_size
If not None, then compute batches of MMDs at a time (rather than all at once).
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'. Only relevant for 'pytorch' backend.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
verbose
Whether or not to print progress during configuration.
"""
super().__init__(
x_ref=x_ref,
c_ref=c_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_ref=update_ref,
preprocess_fn=preprocess_fn,
x_kernel=x_kernel,
c_kernel=c_kernel,
n_permutations=n_permutations,
prop_c_held=prop_c_held,
n_folds=n_folds,
batch_size=batch_size,
input_shape=input_shape,
data_type=data_type,
verbose=verbose,
)
self.meta.update({'backend': Framework.PYTORCH.value})
# set device
self.device = get_device(device)
# initialize kernel
self.x_kernel = x_kernel(init_sigma_fn=_sigma_median_diag) if x_kernel == GaussianRBF else x_kernel
self.c_kernel = c_kernel(init_sigma_fn=_sigma_median_diag) if c_kernel == GaussianRBF else c_kernel
# Initialize classifier (hardcoded for now)
self.clf = _SVCDomainClf(self.c_kernel)
def score(self, # type: ignore[override]
x: Union[np.ndarray, list], c: np.ndarray) -> Tuple[float, float, float, Tuple]:
"""
Compute the MMD based conditional test statistic, and perform a conditional permutation test to obtain a
p-value representing the test statistic's extremity under the null hypothesis.
Parameters
----------
x
Batch of instances.
c
Context associated with batch of instances.
Returns
-------
p-value obtained from the conditional permutation test, the conditional MMD test statistic, the test \
statistic threshold above which drift is flagged, and a tuple containing the coupling matrices \
(W_{ref,ref}, W_{test,test}, W_{ref,test}).
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).to(self.device) # type: ignore[assignment]
c_ref = torch.from_numpy(self.c_ref).to(self.device)
# Hold out a portion of contexts for conditioning on
n, n_held = len(c), int(len(c)*self.prop_c_held)
inds_held = np.random.choice(n, n_held, replace=False)
inds_test = np.setdiff1d(np.arange(n), inds_held)
c_held = torch.as_tensor(c[inds_held]).to(self.device)
c = torch.as_tensor(c[inds_test]).to(self.device) # type: ignore[assignment]
x = torch.as_tensor(x[inds_test]).to(self.device) # type: ignore[assignment]
n_ref, n_test = len(x_ref), len(x)
bools = torch.cat([torch.zeros(n_ref), torch.ones(n_test)]).to(self.device)
# Compute kernel matrices
x_all = torch.cat([x_ref, x], dim=0) # type: ignore[list-item]
c_all = torch.cat([c_ref, c], dim=0) # type: ignore[list-item]
K = self.x_kernel(x_all, x_all)
L = self.c_kernel(c_all, c_all)
L_held = self.c_kernel(c_held, c_all)
# Fit and calibrate the domain classifier
c_all_np, bools_np = c_all.cpu().numpy(), bools.cpu().numpy()
self.clf.fit(c_all_np, bools_np)
self.clf.calibrate(c_all_np, bools_np)
# Obtain n_permutations conditional reassignments
prop_scores = torch.as_tensor(self.clf.predict(c_all_np))
self.redrawn_bools = [torch.bernoulli(prop_scores) for _ in range(self.n_permutations)]
iters = tqdm(self.redrawn_bools, total=self.n_permutations) if self.verbose else self.redrawn_bools
# Compute test stat on original and reassigned data
stat, coupling_xx, coupling_yy, coupling_xy = self._cmmd(K, L, bools, L_held=L_held)
permuted_stats = torch.stack([self._cmmd(K, L, perm_bools, L_held=L_held)[0] for perm_bools in iters])
# Compute p-value
p_val = (stat <= permuted_stats).float().mean()
coupling = (coupling_xx.numpy(), coupling_yy.numpy(), coupling_xy.numpy())
# compute distance threshold
idx_threshold = int(self.p_val * len(permuted_stats))
distance_threshold = torch.sort(permuted_stats, descending=True).values[idx_threshold]
return p_val.numpy().item(), stat.numpy().item(), distance_threshold.numpy(), coupling
def _cmmd(self, K: torch.Tensor, L: torch.Tensor, bools: torch.Tensor, L_held: torch.Tensor = None) \
-> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Private method to compute the MMD-ADiTT test statistic.
"""
# Get ref/test indices
idx_0, idx_1 = torch.where(bools == 0)[0], torch.where(bools == 1)[0]
n_ref, n_test = len(idx_0), len(idx_1)
# Form kernel matrices
L_0, L_1 = L[idx_0][:, idx_0], L[idx_1][:, idx_1]
K_0, K_1 = K[idx_0][:, idx_0], K[idx_1][:, idx_1]
# Initialise regularisation parameters
# Implemented only for first _cmmd call which corresponds to original window assignment
if self.lams is None:
possible_lams = torch.tensor([2**(-i) for i in range(20)]).to(K.device)
lam_0 = self._pick_lam(possible_lams, K_0, L_0, n_folds=self.n_folds)
lam_1 = self._pick_lam(possible_lams, K_1, L_1, n_folds=self.n_folds)
self.lams = (lam_0, lam_1)
# Compute stat
L_0_inv = torch.linalg.inv(L_0 + n_ref*self.lams[0]*torch.eye(int(n_ref)).to(L_0.device))
L_1_inv = torch.linalg.inv(L_1 + n_test*self.lams[1]*torch.eye(int(n_test)).to(L_1.device))
A_0 = L_held[:, idx_0] @ L_0_inv
A_1 = L_held[:, idx_1] @ L_1_inv
# Allow batches of MMDs to be computed at a time (rather than all)
if self.batch_size is not None:
bs = self.batch_size
coupling_xx = torch.stack([torch.einsum('ij,ik->ijk', A_0_i, A_0_i).mean(0)
for A_0_i in A_0.split(bs)]).mean(0)
coupling_yy = torch.stack([torch.einsum('ij,ik->ijk', A_1_i, A_1_i).mean(0)
for A_1_i in A_1.split(bs)]).mean(0)
coupling_xy = torch.stack([
torch.einsum('ij,ik->ijk', A_0_i, A_1_i).mean(0) for A_0_i, A_1_i in zip(A_0.split(bs), A_1.split(bs))
]).mean(0)
else:
coupling_xx = torch.einsum('ij,ik->ijk', A_0, A_0).mean(0)
coupling_yy = torch.einsum('ij,ik->ijk', A_1, A_1).mean(0)
coupling_xy = torch.einsum('ij,ik->ijk', A_0, A_1).mean(0)
sim_xx = (K[idx_0][:, idx_0]*coupling_xx).sum()
sim_yy = (K[idx_1][:, idx_1]*coupling_yy).sum()
sim_xy = (K[idx_0][:, idx_1]*coupling_xy).sum()
stat = sim_xx + sim_yy - 2*sim_xy
return stat.cpu(), coupling_xx.cpu(), coupling_yy.cpu(), coupling_xy.cpu()
def _pick_lam(self, lams: torch.Tensor, K: torch.Tensor, L: torch.Tensor, n_folds: int = 5) -> torch.Tensor:
"""
The conditional mean embedding is estimated as the solution of a regularised regression problem.
        This private method uses cross-validation to select the regularisation parameter that
minimises squared error on the out-of-fold instances. The error is a distance in the RKHS and is
therefore an MMD-like quantity itself.
"""
n = len(L)
fold_size = n // n_folds
K, L = K.type(torch.float64), L.type(torch.float64)
perm = torch.randperm(n)
K, L = K[perm][:, perm], L[perm][:, perm]
losses = torch.zeros_like(lams, dtype=torch.float).to(K.device)
for fold in range(n_folds):
inds_oof = list(np.arange(n)[(fold*fold_size):((fold+1)*fold_size)])
inds_if = list(np.setdiff1d(np.arange(n), inds_oof))
K_if, L_if = K[inds_if][:, inds_if], L[inds_if][:, inds_if]
n_if = len(K_if)
L_inv_lams = torch.stack(
[torch.linalg.inv(L_if + n_if*lam*torch.eye(n_if).to(L.device)) for lam in lams]) # n_lam x n_if x n_if
KW = torch.einsum('ij,ljk->lik', K_if, L_inv_lams)
lW = torch.einsum('ij,ljk->lik', L[inds_oof][:, inds_if], L_inv_lams)
lWKW = torch.einsum('lij,ljk->lik', lW, KW)
lWKWl = torch.einsum('lkj,jk->lk', lWKW, L[inds_if][:, inds_oof]) # n_lam x n_oof
lWk = torch.einsum('lij,ji->li', lW, K[inds_if][:, inds_oof]) # n_lam x n_oof
kxx = torch.ones_like(lWk).to(lWk.device) * torch.max(K)
losses += (lWKWl + kxx - 2*lWk).sum(-1)
return lams[torch.argmin(losses)]
def _sigma_median_diag(x: torch.Tensor, y: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
"""
Private version of the bandwidth estimation function :py:func:`~alibi_detect.utils.pytorch.kernels.sigma_median`,
with the +n (and -1) term excluded to account for the diagonal of the kernel matrix.
Parameters
----------
x
Tensor of instances with dimension [Nx, features].
y
Tensor of instances with dimension [Ny, features].
dist
Tensor with dimensions [Nx, Ny], containing the pairwise distances between `x` and `y`.
Returns
-------
The computed bandwidth, `sigma`.
"""
n_median = np.prod(dist.shape) // 2
sigma = (.5 * dist.flatten().sort().values[int(n_median)].unsqueeze(dim=-1)) ** .5
return sigma
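# --- Illustrative usage sketch (not part of the library source) ---
# Runs the context-aware detector on synthetic data in which the (single) context
# variable drives the distribution of x. All data and parameter values are made up;
# a small n_permutations is chosen only to keep the example fast.
if __name__ == '__main__':
    c_ref_demo = np.random.rand(200, 1).astype(np.float32)
    x_ref_demo = (c_ref_demo + .1 * np.random.randn(200, 1)).astype(np.float32)
    c_demo = np.random.rand(100, 1).astype(np.float32)
    x_demo = (c_demo + .1 * np.random.randn(100, 1)).astype(np.float32)
    cd = ContextMMDDriftTorch(x_ref_demo, c_ref_demo, n_permutations=50)
    p_val_demo, stat_demo, threshold_demo, _ = cd.score(x_demo, c_demo)
    print(p_val_demo, stat_demo, threshold_demo)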
| 13,224 | 46.232143 | 120 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/__init__.py
|
from alibi_detect.utils.missing_optional_dependency import import_optional
UAE, HiddenOutput, preprocess_drift = import_optional(
'alibi_detect.cd.pytorch.preprocess',
names=['UAE', 'HiddenOutput', 'preprocess_drift'])
__all__ = [
"UAE",
"HiddenOutput",
"preprocess_drift"
]
| 297 | 23.833333 | 74 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/lsdd.py
|
import numpy as np
import torch
from typing import Callable, Dict, Optional, Tuple, Union
from alibi_detect.cd.base import BaseLSDDDrift
from alibi_detect.utils.pytorch import get_device
from alibi_detect.utils.pytorch.kernels import GaussianRBF
from alibi_detect.utils.pytorch.distance import permed_lsdds
from alibi_detect.utils.warnings import deprecated_alias
from alibi_detect.utils.frameworks import Framework
class LSDDDriftTorch(BaseLSDDDrift):
@deprecated_alias(preprocess_x_ref='preprocess_at_init')
def __init__(
self,
x_ref: Union[np.ndarray, list],
p_val: float = .05,
x_ref_preprocessed: bool = False,
preprocess_at_init: bool = True,
update_x_ref: Optional[Dict[str, int]] = None,
preprocess_fn: Optional[Callable] = None,
sigma: Optional[np.ndarray] = None,
n_permutations: int = 100,
n_kernel_centers: Optional[int] = None,
lambda_rd_max: float = 0.2,
device: Optional[str] = None,
input_shape: Optional[tuple] = None,
data_type: Optional[str] = None
) -> None:
"""
Least-squares density difference (LSDD) data drift detector using a permutation test.
Parameters
----------
x_ref
Data used as reference distribution.
p_val
p-value used for the significance of the permutation test.
x_ref_preprocessed
Whether the given reference data `x_ref` has been preprocessed yet. If `x_ref_preprocessed=True`, only
the test data `x` will be preprocessed at prediction time. If `x_ref_preprocessed=False`, the reference
data will also be preprocessed.
preprocess_at_init
Whether to preprocess the reference data when the detector is instantiated. Otherwise, the reference
data will be preprocessed at prediction time. Only applies if `x_ref_preprocessed=False`.
update_x_ref
Reference data can optionally be updated to the last n instances seen by the detector
or via reservoir sampling with size n. For the former, the parameter equals {'last': n} while
for reservoir sampling {'reservoir_sampling': n} is passed.
preprocess_fn
Function to preprocess the data before computing the data drift metrics.
sigma
Optionally set the bandwidth of the Gaussian kernel used in estimating the LSDD. Can also pass multiple
bandwidth values as an array. The kernel evaluation is then averaged over those bandwidths. If `sigma`
is not specified, the 'median heuristic' is adopted whereby `sigma` is set as the median pairwise distance
between reference samples.
n_permutations
Number of permutations used in the permutation test.
n_kernel_centers
The number of reference samples to use as centers in the Gaussian kernel model used to estimate LSDD.
Defaults to 1/20th of the reference data.
lambda_rd_max
The maximum relative difference between two estimates of LSDD that the regularization parameter
lambda is allowed to cause. Defaults to 0.2 as in the paper.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either 'cuda', 'gpu' or 'cpu'.
input_shape
Shape of input data.
data_type
Optionally specify the data type (tabular, image or time-series). Added to metadata.
"""
super().__init__(
x_ref=x_ref,
p_val=p_val,
x_ref_preprocessed=x_ref_preprocessed,
preprocess_at_init=preprocess_at_init,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
sigma=sigma,
n_permutations=n_permutations,
n_kernel_centers=n_kernel_centers,
lambda_rd_max=lambda_rd_max,
input_shape=input_shape,
data_type=data_type
)
self.meta.update({'backend': Framework.PYTORCH.value})
# set device
self.device = get_device(device)
# TODO: TBD: the several type:ignore's below are because x_ref is typed as an np.ndarray
# in the method signature, so we can't cast it to torch.Tensor unless we change the signature
# to also accept torch.Tensor. We also can't redefine it's type as that would involve enabling
# --allow-redefinitions in mypy settings (which we might do eventually).
if self.preprocess_at_init or self.preprocess_fn is None or self.x_ref_preprocessed:
x_ref = torch.as_tensor(self.x_ref).to(self.device) # type: ignore[assignment]
self._configure_normalization(x_ref) # type: ignore[arg-type]
x_ref = self._normalize(x_ref)
self._initialize_kernel(x_ref) # type: ignore[arg-type]
self._configure_kernel_centers(x_ref) # type: ignore[arg-type]
self.x_ref = x_ref.cpu().numpy() # type: ignore[union-attr]
# For stability in high dimensions we don't divide H by (pi*sigma^2)^(d/2)
# Results in an alternative test-stat of LSDD*(pi*sigma^2)^(d/2). Same p-vals etc.
self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
def _initialize_kernel(self, x_ref: torch.Tensor):
if self.sigma is None:
self.kernel = GaussianRBF()
_ = self.kernel(x_ref, x_ref, infer_sigma=True)
else:
sigma = torch.from_numpy(self.sigma)
self.kernel = GaussianRBF(sigma)
def _configure_normalization(self, x_ref: torch.Tensor, eps: float = 1e-12):
x_ref_means = x_ref.mean(0)
x_ref_stds = x_ref.std(0)
self._normalize = lambda x: (torch.as_tensor(x) - x_ref_means) / (x_ref_stds + eps)
self._unnormalize = lambda x: (torch.as_tensor(x) * (x_ref_stds + eps)
+ x_ref_means).cpu().numpy()
def _configure_kernel_centers(self, x_ref: torch.Tensor):
"Set aside reference samples to act as kernel centers"
perm = torch.randperm(self.x_ref.shape[0])
c_inds, non_c_inds = perm[:self.n_kernel_centers], perm[self.n_kernel_centers:]
self.kernel_centers = x_ref[c_inds]
if np.unique(self.kernel_centers.cpu().numpy(), axis=0).shape[0] < self.n_kernel_centers:
perturbation = (torch.randn(self.kernel_centers.shape) * 1e-6).to(self.device)
self.kernel_centers = self.kernel_centers + perturbation
x_ref_eff = x_ref[non_c_inds] # the effective reference set
self.k_xc = self.kernel(x_ref_eff, self.kernel_centers)
def score(self, x: Union[np.ndarray, list]) -> Tuple[float, float, float]:
"""
Compute the p-value resulting from a permutation test using the least-squares density
difference as a distance measure between the reference data and the data to be tested.
Parameters
----------
x
Batch of instances.
Returns
-------
p-value obtained from the permutation test, the LSDD between the reference and test set, \
and the LSDD threshold above which drift is flagged.
"""
x_ref, x = self.preprocess(x)
x_ref = torch.from_numpy(x_ref).to(self.device) # type: ignore[assignment]
x = torch.from_numpy(x).to(self.device) # type: ignore[assignment]
if self.preprocess_fn is not None and self.preprocess_at_init is False and not self.x_ref_preprocessed:
self._configure_normalization(x_ref) # type: ignore[arg-type]
x_ref = self._normalize(x_ref)
self._initialize_kernel(x_ref) # type: ignore[arg-type]
self._configure_kernel_centers(x_ref) # type: ignore[arg-type]
self.H = GaussianRBF(np.sqrt(2.) * self.kernel.sigma)(self.kernel_centers, self.kernel_centers)
x = self._normalize(x)
k_yc = self.kernel(x, self.kernel_centers)
k_all_c = torch.cat([self.k_xc, k_yc], 0)
n_x = x_ref.shape[0] - self.n_kernel_centers
n_all = k_all_c.shape[0]
perms = [torch.randperm(n_all) for _ in range(self.n_permutations)]
x_perms = [perm[:n_x] for perm in perms]
y_perms = [perm[n_x:] for perm in perms]
lsdd_permuted, _, lsdd = permed_lsdds( # type: ignore
k_all_c, x_perms, y_perms, self.H, lam_rd_max=self.lambda_rd_max, return_unpermed=True
)
p_val = (lsdd <= lsdd_permuted).float().mean()
idx_threshold = int(self.p_val * len(lsdd_permuted))
distance_threshold = torch.sort(lsdd_permuted, descending=True).values[idx_threshold]
return float(p_val.cpu()), float(lsdd.cpu().numpy()), distance_threshold.cpu().numpy()
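# --- Illustrative usage sketch (not part of the library source) ---
# Runs the offline LSDD detector on synthetic reference and (shifted) test data. The
# data and parameter values are arbitrary; in practice the base class `predict` method
# returns the same quantities wrapped in a metadata dictionary.
if __name__ == '__main__':
    x_ref_demo = np.random.randn(500, 10).astype(np.float32)
    x_demo = (np.random.randn(200, 10) + .5).astype(np.float32)  # mean-shifted, so drift is likely
    cd = LSDDDriftTorch(x_ref_demo, p_val=.05, n_permutations=100)
    p, lsdd, thresh = cd.score(x_demo)
    print(p, lsdd, thresh)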
| 8,982 | 49.466292 | 118 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/preprocess.py
|
from typing import Callable, Dict, Optional, Type, Union
import numpy as np
import torch
import torch.nn as nn
from alibi_detect.utils.pytorch.prediction import (predict_batch,
predict_batch_transformer)
class _Encoder(nn.Module):
def __init__(
self,
input_layer: Optional[nn.Module],
mlp: Optional[nn.Module] = None,
input_dim: Optional[int] = None,
enc_dim: Optional[int] = None,
step_dim: Optional[int] = None,
) -> None:
super().__init__()
self.input_layer = input_layer
if isinstance(mlp, nn.Module):
self.mlp = mlp
elif isinstance(enc_dim, int) and isinstance(step_dim, int):
self.mlp = nn.Sequential(
nn.Flatten(),
nn.Linear(input_dim, enc_dim + 2 * step_dim),
nn.ReLU(),
nn.Linear(enc_dim + 2 * step_dim, enc_dim + step_dim),
nn.ReLU(),
nn.Linear(enc_dim + step_dim, enc_dim)
)
else:
raise ValueError('Need to provide either `enc_dim` and `step_dim` or a '
'nn.Module `mlp`')
def forward(self, x: Union[np.ndarray, torch.Tensor, Dict[str, torch.Tensor]]) -> torch.Tensor:
if self.input_layer is not None:
x = self.input_layer(x)
return self.mlp(x)
class UAE(nn.Module):
def __init__(
self,
encoder_net: Optional[nn.Module] = None,
input_layer: Optional[nn.Module] = None,
shape: Optional[tuple] = None,
enc_dim: Optional[int] = None
) -> None:
super().__init__()
is_enc = isinstance(encoder_net, nn.Module)
is_enc_dim = isinstance(enc_dim, int)
if is_enc:
self.encoder = encoder_net
elif not is_enc and is_enc_dim: # set default encoder
input_dim = np.prod(shape)
step_dim = int((input_dim - enc_dim) / 3)
self.encoder = _Encoder(input_layer, input_dim=input_dim, enc_dim=enc_dim, step_dim=step_dim)
elif not is_enc and not is_enc_dim:
raise ValueError('Need to provide either `enc_dim` or a nn.Module'
' `encoder_net`.')
def forward(self, x: Union[np.ndarray, torch.Tensor, Dict[str, torch.Tensor]]) -> torch.Tensor:
return self.encoder(x)
class HiddenOutput(nn.Module):
def __init__(
self,
model: Union[nn.Module, nn.Sequential],
layer: int = -1,
flatten: bool = False
) -> None:
super().__init__()
layers = list(model.children())[:layer]
if flatten:
layers += [nn.Flatten()]
self.model = nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.model(x)
def preprocess_drift(x: Union[np.ndarray, list], model: Union[nn.Module, nn.Sequential],
device: Optional[torch.device] = None, preprocess_batch_fn: Callable = None,
tokenizer: Optional[Callable] = None, max_len: Optional[int] = None,
batch_size: int = int(1e10), dtype: Union[Type[np.generic], torch.dtype] = np.float32) \
-> Union[np.ndarray, torch.Tensor, tuple]:
"""
Prediction function used for preprocessing step of drift detector.
Parameters
----------
x
Batch of instances.
model
Model used for preprocessing.
device
Device type used. The default None tries to use the GPU and falls back on CPU if needed.
Can be specified by passing either torch.device('cuda') or torch.device('cpu').
preprocess_batch_fn
Optional batch preprocessing function. For example to convert a list of objects to a batch which can be
processed by the PyTorch model.
tokenizer
Optional tokenizer for text drift.
max_len
Optional max token length for text drift.
batch_size
Batch size used during prediction.
dtype
Model output type, e.g. np.float32 or torch.float32.
Returns
-------
Numpy array or torch tensor with predictions.
"""
if tokenizer is None:
return predict_batch(x, model, device=device, batch_size=batch_size,
preprocess_fn=preprocess_batch_fn, dtype=dtype)
else:
return predict_batch_transformer(x, model, tokenizer, max_len, device=device,
batch_size=batch_size, dtype=dtype)
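# --- Illustrative usage sketch (not part of the library source) ---
# Wraps `preprocess_drift` with a randomly initialised UAE so that high-dimensional
# inputs are projected to a low-dimensional encoding before a drift test. The input
# shape and encoding dimension below are arbitrary choices made for illustration.
if __name__ == '__main__':
    from functools import partial
    x_demo = np.random.randn(8, 3, 32, 32).astype(np.float32)
    uae = UAE(shape=(3, 32, 32), enc_dim=32)
    preprocess_fn = partial(preprocess_drift, model=uae, batch_size=4)
    x_enc = preprocess_fn(x_demo)
    print(x_enc.shape)  # expected: (8, 32)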
| 4,599 | 36.398374 | 111 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/tests/test_mmd_pt.py
|
from functools import partial
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Callable, List
from alibi_detect.cd.pytorch.mmd import MMDDriftTorch
from alibi_detect.cd.pytorch.preprocess import HiddenOutput, preprocess_drift
n, n_hidden, n_classes = 500, 10, 5
class MyModel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense1 = nn.Linear(n_features, 20)
self.dense2 = nn.Linear(20, 2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = nn.ReLU()(self.dense1(x))
return self.dense2(x)
# test List[Any] inputs to the detector
def preprocess_list(x: List[np.ndarray]) -> np.ndarray:
return np.concatenate(x, axis=0)
n_features = [10]
n_enc = [None, 3]
preprocess = [
(None, None),
(preprocess_drift, {'model': HiddenOutput, 'layer': -1}),
(preprocess_list, None)
]
update_x_ref = [{'last': 750}, {'reservoir_sampling': 750}, None]
preprocess_at_init = [True, False]
n_permutations = [10]
tests_mmddrift = list(product(n_features, n_enc, preprocess,
n_permutations, update_x_ref, preprocess_at_init))
n_tests = len(tests_mmddrift)
@pytest.fixture
def mmd_params(request):
return tests_mmddrift[request.param]
@pytest.mark.parametrize('mmd_params', list(range(n_tests)), indirect=True)
def test_mmd(mmd_params):
n_features, n_enc, preprocess, n_permutations, update_x_ref, preprocess_at_init = mmd_params
np.random.seed(0)
torch.manual_seed(0)
x_ref = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
preprocess_fn, preprocess_kwargs = preprocess
to_list = False
if hasattr(preprocess_fn, '__name__') and preprocess_fn.__name__ == 'preprocess_list':
if not preprocess_at_init:
return
to_list = True
x_ref = [_[None, :] for _ in x_ref]
elif isinstance(preprocess_fn, Callable) and 'layer' in list(preprocess_kwargs.keys()) \
and preprocess_kwargs['model'].__name__ == 'HiddenOutput':
model = MyModel(n_features)
layer = preprocess_kwargs['layer']
preprocess_fn = partial(preprocess_fn, model=HiddenOutput(model=model, layer=layer))
else:
preprocess_fn = None
cd = MMDDriftTorch(
x_ref=x_ref,
p_val=.05,
preprocess_at_init=preprocess_at_init if isinstance(preprocess_fn, Callable) else False,
update_x_ref=update_x_ref,
preprocess_fn=preprocess_fn,
n_permutations=n_permutations
)
x = x_ref.copy()
preds = cd.predict(x, return_p_val=True)
assert preds['data']['is_drift'] == 0 and preds['data']['p_val'] >= cd.p_val
if isinstance(update_x_ref, dict):
k = list(update_x_ref.keys())[0]
assert cd.n == len(x) + len(x_ref)
assert cd.x_ref.shape[0] == min(update_x_ref[k], len(x) + len(x_ref))
x_h1 = np.random.randn(n * n_features).reshape(n, n_features).astype(np.float32)
if to_list:
x_h1 = [_[None, :] for _ in x_h1]
preds = cd.predict(x_h1, return_p_val=True)
if preds['data']['is_drift'] == 1:
assert preds['data']['p_val'] < preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] > preds['data']['distance_threshold']
else:
assert preds['data']['p_val'] >= preds['data']['threshold'] == cd.p_val
assert preds['data']['distance'] <= preds['data']['distance_threshold']
| 3,494 | 34.30303 | 96 |
py
|
alibi-detect
|
alibi-detect-master/alibi_detect/cd/pytorch/tests/test_learned_kernel_pt.py
|
from itertools import product
import numpy as np
import pytest
import torch
import torch.nn as nn
from typing import Union
from alibi_detect.cd.pytorch.learned_kernel import LearnedKernelDriftTorch
n = 100
class MyKernel(nn.Module):
def __init__(self, n_features: int):
super().__init__()
self.dense = nn.Linear(n_features, 20)
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return torch.einsum('ji,ki->jk', self.dense(x), self.dense(y))
# test List[Any] inputs to the detector
def identity_fn(x: Union[torch.Tensor, list]) -> torch.Tensor:
if isinstance(x, list):
return torch.from_numpy(np.array(x))
else:
return x
p_val = [.05]
n_features = [4]
train_size = [.5]
preprocess_batch = [None, identity_fn]
update_x_ref = [None, {'last': 1000}, {'reservoir_sampling': 1000}]
tests_lkdrift = list(product(p_val, n_features, train_size, preprocess_batch, update_x_ref))
n_tests = len(tests_lkdrift)
@pytest.fixture
def lkdrift_params(request):
return tests_lkdrift[request.param]
@pytest.mark.parametrize('lkdrift_params', list(range(n_tests)), indirect=True)
def test_lkdrift(lkdrift_params):
p_val, n_features, train_size, preprocess_batch, update_x_ref = lkdrift_params
np.random.seed(0)
torch.manual_seed(0)
kernel = MyKernel(n_features)
x_ref = np.random.randn(*(n, n_features)).astype(np.float32)
x_test1 = np.ones_like(x_ref)
to_list = False
if preprocess_batch is not None:
to_list = True
x_ref = [_ for _ in x_ref]
update_x_ref = None
cd = LearnedKernelDriftTorch(
x_ref=x_ref,
kernel=kernel,
p_val=p_val,
update_x_ref=update_x_ref,
train_size=train_size,
preprocess_batch_fn=preprocess_batch,
batch_size=3,
epochs=1
)
x_test0 = x_ref.copy()
preds_0 = cd.predict(x_test0)
assert cd.n == len(x_test0) + len(x_ref)
assert preds_0['data']['is_drift'] == 0
if to_list:
x_test1 = [_ for _ in x_test1]
preds_1 = cd.predict(x_test1)
assert cd.n == len(x_test1) + len(x_test0) + len(x_ref)
assert preds_1['data']['is_drift'] == 1
assert preds_0['data']['distance'] < preds_1['data']['distance']
| 2,264 | 26.621951 | 92 |
py
|